diff --git a/.devenv.hosting.yaml b/.devenv.hosting.yaml new file mode 100644 index 00000000..f3b8c514 --- /dev/null +++ b/.devenv.hosting.yaml @@ -0,0 +1,109 @@ +hosts: +- address: localhost + hostname: localhost + attributes: + sudo_shell: false + plugin_name: docker + healthcheck_plugin_name: basic + attributes: + skip_readiness_check: True + force_transactions: True + services: + - name: frostfs-storage_01 + attributes: + container_name: s01 + config_path: /etc/frostfs/storage/config.yml + wallet_path: ../frostfs-dev-env/services/storage/wallet01.json + local_wallet_config_path: ./TemporaryDir/empty-password.yml + local_wallet_path: ../frostfs-dev-env/services/storage/wallet01.json + wallet_password: "" + volume_name: storage_storage_s01 + endpoint_data0: s01.frostfs.devenv:8080 + control_endpoint: s01.frostfs.devenv:8081 + un_locode: "RU MOW" + - name: frostfs-storage_02 + attributes: + container_name: s02 + config_path: /etc/frostfs/storage/config.yml + wallet_path: ../frostfs-dev-env/services/storage/wallet02.json + local_wallet_config_path: ./TemporaryDir/empty-password.yml + local_wallet_path: ../frostfs-dev-env/services/storage/wallet02.json + wallet_password: "" + volume_name: storage_storage_s02 + endpoint_data0: s02.frostfs.devenv:8080 + control_endpoint: s02.frostfs.devenv:8081 + un_locode: "RU LED" + - name: frostfs-storage_03 + attributes: + container_name: s03 + config_path: /etc/frostfs/storage/config.yml + wallet_path: ../frostfs-dev-env/services/storage/wallet03.json + local_wallet_config_path: ./TemporaryDir/empty-password.yml + local_wallet_path: ../frostfs-dev-env/services/storage/wallet03.json + wallet_password: "" + volume_name: storage_storage_s03 + endpoint_data0: s03.frostfs.devenv:8080 + control_endpoint: s03.frostfs.devenv:8081 + un_locode: "SE STO" + - name: frostfs-storage_04 + attributes: + container_name: s04 + config_path: /etc/frostfs/storage/config.yml + wallet_path: ../frostfs-dev-env/services/storage/wallet04.json + local_wallet_config_path: ./TemporaryDir/empty-password.yml + local_wallet_path: ../frostfs-dev-env/services/storage/wallet04.json + wallet_password: "" + volume_name: storage_storage_s04 + endpoint_data0: s04.frostfs.devenv:8080 + control_endpoint: s04.frostfs.devenv:8081 + un_locode: "FI HEL" + - name: frostfs-s3_01 + attributes: + container_name: s3_gate + config_path: ../frostfs-dev-env/services/s3_gate/.s3.env + wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json + local_wallet_config_path: ./TemporaryDir/password-s3.yml + local_wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json + wallet_password: "s3" + endpoint_data0: https://s3.frostfs.devenv:8080 + - name: frostfs-http_01 + attributes: + container_name: http_gate + config_path: ../frostfs-dev-env/services/http_gate/.http.env + wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json + local_wallet_config_path: ./TemporaryDir/password-other.yml + local_wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json + wallet_password: "one" + endpoint_data0: http://http.frostfs.devenv + - name: frostfs-ir_01 + attributes: + container_name: ir01 + config_path: ../frostfs-dev-env/services/ir/.ir.env + wallet_path: ../frostfs-dev-env/services/ir/az.json + local_wallet_config_path: ./TemporaryDir/password-other.yml + local_wallet_path: ../frostfs-dev-env/services/ir/az.json + wallet_password: "one" + - name: neo-go_01 + attributes: + container_name: morph_chain + config_path: ../frostfs-dev-env/services/morph_chain/protocol.privnet.yml + wallet_path: 
../frostfs-dev-env/services/morph_chain/node-wallet.json + local_wallet_config_path: ./TemporaryDir/password-other.yml + local_wallet_path: ../frostfs-dev-env/services/morph_chain/node-wallet.json + wallet_password: "one" + endpoint_internal0: http://morph-chain.frostfs.devenv:30333 + - name: main-chain_01 + attributes: + container_name: main_chain + config_path: ../frostfs-dev-env/services/chain/protocol.privnet.yml + wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json + local_wallet_config_path: ./TemporaryDir/password-other.yml + local_wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json + wallet_password: "one" + endpoint_internal0: http://main-chain.frostfs.devenv:30333 + - name: coredns_01 + attributes: + container_name: coredns + clis: + - name: frostfs-cli + exec_path: frostfs-cli diff --git a/.forgejo/workflows/dco.yml b/.forgejo/workflows/dco.yml new file mode 100644 index 00000000..9aa0d310 --- /dev/null +++ b/.forgejo/workflows/dco.yml @@ -0,0 +1,21 @@ +name: DCO action +on: [pull_request] + +jobs: + dco: + name: DCO + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Setup Go + uses: actions/setup-go@v3 + with: + go-version: '1.21' + + - name: Run commit format checker + uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3 + with: + from: 'origin/${{ github.event.pull_request.base.ref }}' diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS deleted file mode 100644 index 14220628..00000000 --- a/.github/CODEOWNERS +++ /dev/null @@ -1 +0,0 @@ -* @aprasolova @vdomnich-yadro @dansingjulia @yadro-vavdeev @abereziny diff --git a/.github/workflows/dco.yml b/.github/workflows/dco.yml deleted file mode 100644 index 40ed8fcb..00000000 --- a/.github/workflows/dco.yml +++ /dev/null @@ -1,21 +0,0 @@ -name: DCO check - -on: - pull_request: - branches: - - master - -jobs: - commits_check_job: - runs-on: ubuntu-latest - name: Commits Check - steps: - - name: Get PR Commits - id: 'get-pr-commits' - uses: tim-actions/get-pr-commits@master - with: - token: ${{ secrets.GITHUB_TOKEN }} - - name: DCO Check - uses: tim-actions/dco@master - with: - commits: ${{ steps.get-pr-commits.outputs.commits }} diff --git a/.gitignore b/.gitignore index a7f7de02..4691fe40 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ # ignore IDE files .vscode .idea +venv.* # ignore temp files under any path .DS_Store @@ -10,3 +11,4 @@ /dist /build *.egg-info +wallet_config.yml \ No newline at end of file diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 00000000..519ca425 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,3 @@ +.* @TrueCloudLab/qa-committers +.forgejo/.* @potyarkin +Makefile @potyarkin diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b0f2b580..69417d23 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -3,8 +3,8 @@ First, thank you for contributing! We love and encourage pull requests from everyone. Please follow the guidelines: -- Check the open [issues](https://github.com/TrueCloudLab/frostfs-testlib/issues) and - [pull requests](https://github.com/TrueCloudLab/frostfs-testlib/pulls) for existing +- Check the open [issues](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/issues) and + [pull requests](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/pulls) for existing discussions. - Open an issue first, to discuss a new feature or enhancement. @@ -26,8 +26,8 @@ Start by forking the `frostfs-testlib` repository, make changes in a branch and send a pull request. 
We encourage pull requests to discuss code changes. Here are the steps in details: -### Set up your GitHub Repository -Fork [FrostFS testlib upstream](https://github.com/TrueCloudLab/frostfs-testlib/fork) source +### Set up your Git Repository +Fork [FrostFS testlib upstream](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/forks) source repository to your own personal repository. Copy the URL of your fork and clone it: ```shell @@ -37,7 +37,7 @@ $ git clone ### Set up git remote as ``upstream`` ```shell $ cd frostfs-testlib -$ git remote add upstream https://github.com/TrueCloudLab/frostfs-testlib +$ git remote add upstream https://git.frostfs.info/TrueCloudLab/frostfs-testlib $ git fetch upstream ``` @@ -63,9 +63,9 @@ $ git checkout -b feature/123-something_awesome ``` ### Test your changes -Before submitting any changes to the library, please, make sure that all unit tests are passing. To run the tests, please, use the following command: +Before submitting any changes to the library, please, make sure that linter and all unit tests are passing. To run the tests, please, use the following command: ```shell -$ python -m unittest discover --start-directory tests +$ make validation ``` To enable tests that interact with SSH server, please, setup SSH server and set the following environment variables before running the tests: @@ -99,8 +99,8 @@ $ git push origin feature/123-something_awesome ``` ### Create a Pull Request -Pull requests can be created via GitHub. Refer to [this -document](https://help.github.com/articles/creating-a-pull-request/) for +Pull requests can be created via Git. Refer to [this +document](https://docs.codeberg.org/collaborating/pull-requests-and-git-flow/) for detailed steps on how to create a pull request. After a Pull Request gets peer reviewed and approved, it will be merged. @@ -116,7 +116,7 @@ contributors". To sign your work, just add a line like this at the end of your commit message: ``` -Signed-off-by: Samii Sakisaka +Signed-off-by: Andrey Berezin ``` This can easily be done with the `--signoff` option to `git commit`. diff --git a/Makefile b/Makefile index c7466084..644eab07 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,11 @@ SHELL := /bin/bash PYTHON_VERSION := 3.10 -VENV_DIR := venv.frostfs-testlib +VENV_NAME := frostfs-testlib +VENV_DIR := venv.${VENV_NAME} current_dir := $(shell pwd) +DIRECTORIES := $(sort $(dir $(wildcard ../frostfs-testlib-plugin-*/ ../*-testcases/))) +FROM_VENV := . 
${VENV_DIR}/bin/activate && venv: create requirements paths precommit @echo Ready @@ -13,15 +16,35 @@ precommit: paths: @echo Append paths for project - @echo Virtual environment: ${VENV_DIR} - @sudo rm -rf ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth - @sudo touch ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth - @echo ${current_dir}/src/frostfs_testlib_frostfs_testlib | sudo tee ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth + @echo Virtual environment: ${current_dir}/${VENV_DIR} + @rm -rf ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth + @touch ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth + @echo ${current_dir}/src | tee ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth -create: - @echo Create virtual environment for - virtualenv --python=python${PYTHON_VERSION} --prompt=frostfs-testlib ${VENV_DIR} +create: ${VENV_DIR} + +${VENV_DIR}: + @echo Create virtual environment ${current_dir}/${VENV_DIR} + virtualenv --python=python${PYTHON_VERSION} --prompt=${VENV_NAME} ${VENV_DIR} requirements: @echo Isntalling pip requirements - . ${VENV_DIR}/bin/activate && pip install -Ur requirements.txt \ No newline at end of file + . ${VENV_DIR}/bin/activate && pip install -Ur requirements.txt + + +#### VALIDATION SECTION #### +lint: create requirements + ${FROM_VENV} pylint --disable R,C,W ./src + +unit_test: + @echo Starting unit tests + ${FROM_VENV} python -m pytest tests + +.PHONY: lint_dependent $(DIRECTORIES) +lint_dependent: $(DIRECTORIES) + +$(DIRECTORIES): + @echo checking dependent repo $@ + $(MAKE) validation -C $@ + +validation: lint unit_test lint_dependent \ No newline at end of file diff --git a/README.md b/README.md index c194df94..2f8751f4 100644 --- a/README.md +++ b/README.md @@ -92,4 +92,4 @@ The library provides the following primary components: ## Contributing -Any contributions to the library should conform to the [contribution guideline](https://github.com/TrueCloudLab/frostfs-testlib/blob/master/CONTRIBUTING.md). +Any contributions to the library should conform to the [contribution guideline](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/src/branch/master/CONTRIBUTING.md). 
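
The hosting layout added in `.devenv.hosting.yaml` above is plain YAML: a list of hosts, each with a `plugin_name`, host-level `attributes`, and a `services` list whose per-service `attributes` carry container names, wallet paths and endpoints. A minimal sketch of inspecting such a file with `pyyaml` (pinned in the updated `requirements.txt`); this is illustrative only and is not the testlib's own `Hosting` loader:

```python
# Illustrative sketch: walk the devenv hosting config with plain pyyaml.
# Not the testlib's actual loader; it only shows the file's shape.
import yaml

with open(".devenv.hosting.yaml") as f:
    config = yaml.safe_load(f)

for host in config["hosts"]:
    print(f"host plugin: {host['plugin_name']} at {host['address']}")
    for service in host["services"]:
        attrs = service["attributes"]
        # endpoint_data0 exists for storage/s3/http services, absent for the rest
        endpoint = attrs.get("endpoint_data0", "<no data endpoint>")
        print(f"  {service['name']}: container={attrs['container_name']} endpoint={endpoint}")
```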
diff --git a/pyproject.toml b/pyproject.toml index 5354e41e..d62f04b3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,10 +4,10 @@ build-backend = "setuptools.build_meta" [project] name = "frostfs-testlib" -version = "1.3.1" +version = "2.0.1" description = "Building blocks and utilities to facilitate development of automated tests for FrostFS system" readme = "README.md" -authors = [{ name = "NSPCC", email = "info@nspcc.ru" }] +authors = [{ name = "Yadro", email = "info@yadro.com" }] license = { text = "GNU General Public License v3 (GPLv3)" } classifiers = [ "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", @@ -16,15 +16,19 @@ classifiers = [ ] keywords = ["frostfs", "test"] dependencies = [ - "allure-python-commons>=2.9.45", + "allure-python-commons>=2.13.2", "docker>=4.4.0", - "importlib_metadata>=5.0; python_version < '3.10'", + "pyyaml==6.0.1", "neo-mamba==1.0.0", "paramiko>=2.10.3", "pexpect>=4.8.0", - "requests>=2.28.0", + "requests==2.28.1", "docstring_parser>=0.15", "testrail-api>=1.12.0", + "pytest==7.1.2", + "tenacity==8.0.1", + "boto3==1.35.30", + "boto3-stubs[s3,iam,sts]==1.35.30", ] requires-python = ">=3.10" @@ -32,7 +36,7 @@ requires-python = ">=3.10" dev = ["black", "bumpver", "isort", "pre-commit"] [project.urls] -Homepage = "https://github.com/TrueCloudLab/frostfs-testlib" +Homepage = "https://git.frostfs.info/TrueCloudLab/frostfs-testlib" [project.entry-points."frostfs.testlib.reporter"] allure = "frostfs_testlib.reporter.allure_handler:AllureHandler" @@ -40,17 +44,37 @@ allure = "frostfs_testlib.reporter.allure_handler:AllureHandler" [project.entry-points."frostfs.testlib.hosting"] docker = "frostfs_testlib.hosting.docker_host:DockerHost" +[project.entry-points."frostfs.testlib.healthcheck"] +basic = "frostfs_testlib.healthcheck.basic_healthcheck:BasicHealthcheck" + +[project.entry-points."frostfs.testlib.csc_managers"] +config = "frostfs_testlib.storage.controllers.state_managers.config_state_manager:ConfigStateManager" + +[project.entry-points."frostfs.testlib.services"] +frostfs-storage = "frostfs_testlib.storage.dataclasses.frostfs_services:StorageNode" +frostfs-s3 = "frostfs_testlib.storage.dataclasses.frostfs_services:S3Gate" +frostfs-http = "frostfs_testlib.storage.dataclasses.frostfs_services:HTTPGate" +neo-go = "frostfs_testlib.storage.dataclasses.frostfs_services:MorphChain" +frostfs-ir = "frostfs_testlib.storage.dataclasses.frostfs_services:InnerRing" + +[project.entry-points."frostfs.testlib.credentials_providers"] +authmate = "frostfs_testlib.credentials.authmate_s3_provider:AuthmateS3CredentialsProvider" +wallet_factory = "frostfs_testlib.credentials.wallet_factory_provider:WalletFactoryProvider" + +[project.entry-points."frostfs.testlib.bucket_cid_resolver"] +frostfs = "frostfs_testlib.clients.s3.curl_bucket_resolver:CurlBucketContainerResolver" + [tool.isort] profile = "black" src_paths = ["src", "tests"] -line_length = 100 +line_length = 140 [tool.black] -line-length = 100 +line-length = 140 target-version = ["py310"] [tool.bumpver] -current_version = "1.3.1" +current_version = "2.0.1" version_pattern = "MAJOR.MINOR.PATCH" commit_message = "Bump version {old_version} -> {new_version}" commit = true @@ -60,3 +84,12 @@ push = false [tool.bumpver.file_patterns] "pyproject.toml" = ['current_version = "{version}"', 'version = "{version}"'] "src/frostfs_testlib/__init__.py" = ["{version}"] + +[tool.pytest.ini_options] +filterwarnings = [ + "ignore:Blowfish has been deprecated:cryptography.utils.CryptographyDeprecationWarning", +] 
+testpaths = ["tests"] + +[project.entry-points.pytest11] +testlib = "frostfs_testlib" \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index eee5a85f..56d9b83c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,20 +1,25 @@ -allure-python-commons==2.9.45 +allure-python-commons==2.13.2 docker==4.4.0 -importlib_metadata==5.0.0 neo-mamba==1.0.0 paramiko==2.10.3 pexpect==4.8.0 requests==2.28.1 docstring_parser==0.15 testrail-api==1.12.0 +tenacity==8.0.1 +pytest==7.1.2 +boto3==1.35.30 +boto3-stubs[s3,iam,sts]==1.35.30 +pydantic==2.10.6 # Dev dependencies black==22.8.0 bumpver==2022.1118 isort==5.12.0 pre-commit==2.20.0 +pylint==2.17.4 # Packaging dependencies build==0.8.0 setuptools==65.3.0 -twine==4.0.1 +twine==4.0.1 \ No newline at end of file diff --git a/src/frostfs_testlib/__init__.py b/src/frostfs_testlib/__init__.py index 9c73af26..4724a8bb 100644 --- a/src/frostfs_testlib/__init__.py +++ b/src/frostfs_testlib/__init__.py @@ -1 +1,4 @@ -__version__ = "1.3.1" +__version__ = "2.0.1" + +from .fixtures import configure_testlib, hosting, session_start_time, temp_directory +from .hooks import pytest_add_frostfs_marker, pytest_collection_modifyitems diff --git a/src/frostfs_testlib/analytics/__init__.py b/src/frostfs_testlib/analytics/__init__.py index 6995a08a..b0574180 100644 --- a/src/frostfs_testlib/analytics/__init__.py +++ b/src/frostfs_testlib/analytics/__init__.py @@ -1,5 +1,5 @@ from frostfs_testlib.analytics import test_case from frostfs_testlib.analytics.test_case import TestCasePriority from frostfs_testlib.analytics.test_collector import TestCase, TestCaseCollector -from frostfs_testlib.analytics.test_exporter import TestExporter +from frostfs_testlib.analytics.test_exporter import TСExporter from frostfs_testlib.analytics.testrail_exporter import TestrailExporter diff --git a/src/frostfs_testlib/analytics/test_collector.py b/src/frostfs_testlib/analytics/test_collector.py index 0f5398e5..56ee606b 100644 --- a/src/frostfs_testlib/analytics/test_collector.py +++ b/src/frostfs_testlib/analytics/test_collector.py @@ -6,6 +6,7 @@ from docstring_parser.google import DEFAULT_SECTIONS, Section, SectionType DEFAULT_SECTIONS.append(Section("Steps", "steps", SectionType.MULTIPLE)) + class TestCase: """ Test case object implementation for use in collector and exporters @@ -106,7 +107,9 @@ class TestCaseCollector: # Read test_case suite and section name from test class if possible and get test function from class if test.cls: suite_name = test.cls.__dict__.get("__test_case_suite_name__", suite_name) - suite_section_name = test.cls.__dict__.get("__test_case_suite_section__", suite_section_name) + suite_section_name = test.cls.__dict__.get( + "__test_case_suite_section__", suite_section_name + ) test_function = test.cls.__dict__[test.originalname] else: # If no test class, read test function from module @@ -117,7 +120,9 @@ class TestCaseCollector: test_case_title = test_function.__dict__.get("__test_case_title__", None) test_case_priority = test_function.__dict__.get("__test_case_priority__", None) suite_name = test_function.__dict__.get("__test_case_suite_name__", suite_name) - suite_section_name = test_function.__dict__.get("__test_case_suite_section__", suite_section_name) + suite_section_name = test_function.__dict__.get( + "__test_case_suite_section__", suite_section_name + ) # Parce test_steps if they define in __doc__ doc_string = parse(test_function.__doc__, style=DocstringStyle.GOOGLE) @@ -125,7 +130,9 @@ class TestCaseCollector: if 
doc_string.short_description: test_case_description = doc_string.short_description if doc_string.long_description: - test_case_description = f"{doc_string.short_description}\r\n{doc_string.long_description}" + test_case_description = ( + f"{doc_string.short_description}\r\n{doc_string.long_description}" + ) if doc_string.meta: for meta in doc_string.meta: @@ -140,25 +147,27 @@ class TestCaseCollector: test_case_params = test_case_call_spec.id # Format title with params if test_case_title: - test_case_title = self.__format_string_with_params__(test_case_title,test_case_call_spec.params) + test_case_title = self.__format_string_with_params__( + test_case_title, test_case_call_spec.params + ) # Format steps with params if test_case_steps: for key, value in test_case_steps.items(): - value = self.__format_string_with_params__(value,test_case_call_spec.params) + value = self.__format_string_with_params__(value, test_case_call_spec.params) test_case_steps[key] = value # If there is set basic test case attributes create TestCase and return if test_case_id and test_case_title and suite_name and suite_name: test_case = TestCase( - id=test_case_id, - title=test_case_title, - description=test_case_description, - priority=test_case_priority, - steps=test_case_steps, - params=test_case_params, - suite_name=suite_name, - suite_section_name=suite_section_name, - ) + uuid_id=test_case_id, + title=test_case_title, + description=test_case_description, + priority=test_case_priority, + steps=test_case_steps, + params=test_case_params, + suite_name=suite_name, + suite_section_name=suite_section_name, + ) return test_case # Return None if there is no enough information for return test case return None @@ -187,4 +196,4 @@ class TestCaseCollector: test_case = self.__get_test_case_from_pytest_test__(test) if test_case: test_cases.append(test_case) - return test_cases \ No newline at end of file + return test_cases diff --git a/src/frostfs_testlib/analytics/test_exporter.py b/src/frostfs_testlib/analytics/test_exporter.py index 2af3f060..dd6a7fb0 100644 --- a/src/frostfs_testlib/analytics/test_exporter.py +++ b/src/frostfs_testlib/analytics/test_exporter.py @@ -3,7 +3,8 @@ from abc import ABC, abstractmethod from frostfs_testlib.analytics.test_collector import TestCase -class TestExporter(ABC): +# TODO: REMOVE ME +class TСExporter(ABC): test_cases_cache = [] test_suites_cache = [] @@ -46,9 +47,7 @@ class TestExporter(ABC): """ @abstractmethod - def update_test_case( - self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section - ) -> None: + def update_test_case(self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section) -> None: """ Update test case in TMS """ @@ -60,13 +59,11 @@ class TestExporter(ABC): for test_case in test_cases: test_suite = self.get_or_create_test_suite(test_case.suite_name) - test_section = self.get_or_create_suite_section( - test_suite, test_case.suite_section_name - ) + test_section = self.get_or_create_suite_section(test_suite, test_case.suite_section_name) test_case_in_tms = self.search_test_case_id(test_case.id) steps = [{"content": value, "expected": " "} for key, value in test_case.steps.items()] - if test_case: - self.update_test_case(test_case, test_case_in_tms) + if test_case_in_tms: + self.update_test_case(test_case, test_case_in_tms, test_suite, test_section) else: - self.create_test_case(test_case) + self.create_test_case(test_case, test_suite, test_section) diff --git a/src/frostfs_testlib/analytics/testrail_exporter.py 
b/src/frostfs_testlib/analytics/testrail_exporter.py index 1a7c8504..36c482c4 100644 --- a/src/frostfs_testlib/analytics/testrail_exporter.py +++ b/src/frostfs_testlib/analytics/testrail_exporter.py @@ -1,10 +1,10 @@ from testrail_api import TestRailAPI from frostfs_testlib.analytics.test_collector import TestCase -from frostfs_testlib.analytics.test_exporter import TestExporter +from frostfs_testlib.analytics.test_exporter import TСExporter -class TestrailExporter(TestExporter): +class TestrailExporter(TСExporter): def __init__( self, tr_url: str, @@ -38,6 +38,7 @@ class TestrailExporter(TestExporter): self.tr_id_field = tr_id_field self.tr_description_fields = tr_description_fields self.tr_steps_field = tr_steps_field + self.test_case_id_field_name = "" # TODO: Add me def fill_suite_cache(self) -> None: """ @@ -61,19 +62,13 @@ class TestrailExporter(TestExporter): It's help do not call TMS each time then we search test case """ for test_suite in self.test_suites_cache: - self.test_cases_cache.extend( - self.api.cases.get_cases(self.tr_project_id, suite_id=test_suite["id"]) - ) + self.test_cases_cache.extend(self.api.cases.get_cases(self.tr_project_id, suite_id=test_suite["id"])) def search_test_case_id(self, test_case_id: str) -> object: """ Find test cases in TestRail (cache) by ID """ - test_cases = [ - test_case - for test_case in self.test_cases_cache - if test_case["custom_autotest_name"] == test_case_id - ] + test_cases = [test_case for test_case in self.test_cases_cache if test_case["custom_autotest_name"] == test_case_id] if len(test_cases) > 1: raise RuntimeError(f"Too many results found in test rail for id {test_case_id}") @@ -86,9 +81,7 @@ class TestrailExporter(TestExporter): """ Get suite name with exact name from Testrail or create if not exist """ - test_rail_suites = [ - suite for suite in self.test_suites_cache if suite["name"] == test_suite_name - ] + test_rail_suites = [suite for suite in self.test_suites_cache if suite["name"] == test_suite_name] if not test_rail_suites: test_rail_suite = self.api.suites.add_suite( @@ -101,17 +94,13 @@ class TestrailExporter(TestExporter): elif len(test_rail_suites) == 1: return test_rail_suites.pop() else: - raise RuntimeError( - f"Too many results found in test rail for suite name {test_suite_name}" - ) + raise RuntimeError(f"Too many results found in test rail for suite name {test_suite_name}") def get_or_create_suite_section(self, test_rail_suite, section_name) -> object: """ Get suite section with exact name from Testrail or create new one if not exist """ - test_rail_sections = [ - section for section in test_rail_suite["sections"] if section["name"] == section_name - ] + test_rail_sections = [section for section in test_rail_suite["sections"] if section["name"] == section_name] if not test_rail_sections: test_rail_section = self.api.sections.add_section( @@ -127,9 +116,7 @@ class TestrailExporter(TestExporter): elif len(test_rail_sections) == 1: return test_rail_sections.pop() else: - raise RuntimeError( - f"Too many results found in test rail for section name {section_name}" - ) + raise RuntimeError(f"Too many results found in test rail for section name {section_name}") def prepare_request_body(self, test_case: TestCase, test_suite, test_suite_section) -> dict: """ @@ -163,9 +150,7 @@ class TestrailExporter(TestExporter): self.api.cases.add_case(**request_body) - def update_test_case( - self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section - ) -> None: + def update_test_case(self, test_case: TestCase, 
test_case_in_tms, test_suite, test_suite_section) -> None: """ Update test case in Testrail """ diff --git a/src/frostfs_testlib/cli/__init__.py b/src/frostfs_testlib/cli/__init__.py index 3799be9c..7e3d243c 100644 --- a/src/frostfs_testlib/cli/__init__.py +++ b/src/frostfs_testlib/cli/__init__.py @@ -1,4 +1,5 @@ from frostfs_testlib.cli.frostfs_adm import FrostfsAdm from frostfs_testlib.cli.frostfs_authmate import FrostfsAuthmate from frostfs_testlib.cli.frostfs_cli import FrostfsCli +from frostfs_testlib.cli.generic_cli import GenericCli from frostfs_testlib.cli.neogo import NeoGo, NetworkType diff --git a/src/frostfs_testlib/cli/cli_command.py b/src/frostfs_testlib/cli/cli_command.py index 3600e774..224e9e3f 100644 --- a/src/frostfs_testlib/cli/cli_command.py +++ b/src/frostfs_testlib/cli/cli_command.py @@ -1,10 +1,11 @@ from typing import Optional from frostfs_testlib.shell import CommandOptions, CommandResult, InteractiveInput, Shell +from frostfs_testlib.utils.datetime_utils import parse_time class CliCommand: - + TIMEOUT_INACCURACY = 10 WALLET_SOURCE_ERROR_MSG = "Provide either wallet or wallet_config to specify wallet location" WALLET_PASSWD_ERROR_MSG = "Provide either wallet_password or wallet_config to specify password" @@ -24,9 +25,7 @@ class CliCommand: def __init__(self, shell: Shell, cli_exec_path: str, **base_params): self.shell = shell self.cli_exec_path = cli_exec_path - self.__base_params = " ".join( - [f"--{param} {value}" for param, value in base_params.items() if value] - ) + self.__base_params = " ".join([f"--{param} {value}" for param, value in base_params.items() if value]) def _format_command(self, command: str, **params) -> str: param_str = [] @@ -48,9 +47,7 @@ class CliCommand: val_str = str(value_item).replace("'", "\\'") param_str.append(f"--{param} '{val_str}'") elif isinstance(value, dict): - param_str.append( - f'--{param} \'{",".join(f"{key}={val}" for key, val in value.items())}\'' - ) + param_str.append(f'--{param} \'{",".join(f"{key}={val}" for key, val in value.items())}\'') else: if "'" in str(value): value_str = str(value).replace('"', '\\"') @@ -63,12 +60,22 @@ class CliCommand: return f"{self.cli_exec_path} {self.__base_params} {command or ''} {param_str}" def _execute(self, command: Optional[str], **params) -> CommandResult: - return self.shell.exec(self._format_command(command, **params)) + if timeout := params.get("timeout"): + timeout = parse_time(timeout) + self.TIMEOUT_INACCURACY - def _execute_with_password(self, command: Optional[str], password, **params) -> CommandResult: return self.shell.exec( self._format_command(command, **params), - options=CommandOptions( - interactive_inputs=[InteractiveInput(prompt_pattern="assword", input=password)] + CommandOptions(timeout=timeout), + ) + + def _execute_with_password(self, command: Optional[str], password, **params) -> CommandResult: + if timeout := params.get("timeout"): + timeout = parse_time(timeout) + self.TIMEOUT_INACCURACY + + return self.shell.exec( + self._format_command(command, **params), + CommandOptions( + interactive_inputs=[InteractiveInput(prompt_pattern="assword", input=password)], + timeout=timeout, ), ) diff --git a/src/frostfs_testlib/cli/frostfs_adm/adm.py b/src/frostfs_testlib/cli/frostfs_adm/adm.py index 283069c6..0b56fbd0 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/adm.py +++ b/src/frostfs_testlib/cli/frostfs_adm/adm.py @@ -9,14 +9,17 @@ from frostfs_testlib.shell import Shell class FrostfsAdm: - morph: Optional[FrostfsAdmMorph] = None - subnet: 
Optional[FrostfsAdmMorphSubnet] = None - storage_config: Optional[FrostfsAdmStorageConfig] = None - version: Optional[FrostfsAdmVersion] = None + morph: FrostfsAdmMorph + subnet: FrostfsAdmMorphSubnet + storage_config: FrostfsAdmStorageConfig + version: FrostfsAdmVersion + config: FrostfsAdmConfig def __init__(self, shell: Shell, frostfs_adm_exec_path: str, config_file: Optional[str] = None): self.config = FrostfsAdmConfig(shell, frostfs_adm_exec_path, config=config_file) self.morph = FrostfsAdmMorph(shell, frostfs_adm_exec_path, config=config_file) self.subnet = FrostfsAdmMorphSubnet(shell, frostfs_adm_exec_path, config=config_file) - self.storage_config = FrostfsAdmStorageConfig(shell, frostfs_adm_exec_path, config=config_file) + self.storage_config = FrostfsAdmStorageConfig( + shell, frostfs_adm_exec_path, config=config_file + ) self.version = FrostfsAdmVersion(shell, frostfs_adm_exec_path, config=config_file) diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index aba147b7..bdf4a91a 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -27,11 +27,7 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph deposit-notary", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def dump_balances( @@ -56,11 +52,7 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph dump-balances", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def dump_config(self, rpc_endpoint: str) -> CommandResult: @@ -74,11 +66,23 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph dump-config", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, + ) + + def set_config(self, set_key_value: str, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None) -> CommandResult: + """Add/update global config value in the FrostFS network. + + Args: + set_key_value: key1=val1 [key2=val2 ...] + alphabet_wallets: Path to alphabet wallets dir + rpc_endpoint: N3 RPC node endpoint + + Returns: + Command's result. + """ + return self._execute( + f"morph set-config {set_key_value}", + **{param: param_value for param, param_value in locals().items() if param not in ["self", "set_key_value"]}, ) def dump_containers( @@ -101,14 +105,10 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph dump-containers", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) - def dump_hashes(self, rpc_endpoint: str) -> CommandResult: + def dump_hashes(self, rpc_endpoint: str, domain: Optional[str] = None) -> CommandResult: """Dump deployed contract hashes. 
Args: @@ -119,20 +119,16 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph dump-hashes", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def force_new_epoch( - self, rpc_endpoint: Optional[str] = None, alphabet: Optional[str] = None + self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None, delta: Optional[int] = None ) -> CommandResult: """Create new FrostFS epoch event in the side chain. Args: - alphabet: Path to alphabet wallets dir. + alphabet_wallets: Path to alphabet wallets dir. rpc_endpoint: N3 RPC node endpoint. Returns: @@ -140,11 +136,7 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph force-new-epoch", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def generate_alphabet( @@ -165,11 +157,7 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph generate-alphabet", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def generate_storage_wallet( @@ -192,11 +180,7 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph generate-storage-wallet", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def init( @@ -219,7 +203,7 @@ class FrostfsAdmMorph(CliCommand): container_alias_fee: Container alias fee (default 500). container_fee: Container registration fee (default 1000). contracts: Path to archive with compiled FrostFS contracts - (default fetched from latest github release). + (default fetched from latest git release). epoch_duration: Amount of side chain blocks in one FrostFS epoch (default 240). homomorphic_disabled: Disable object homomorphic hashing. local_dump: Path to the blocks dump file. @@ -232,11 +216,7 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph init", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def refill_gas( @@ -259,11 +239,7 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph refill-gas", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def restore_containers( @@ -286,11 +262,7 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph restore-containers", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def set_policy( @@ -340,7 +312,7 @@ class FrostfsAdmMorph(CliCommand): Args: alphabet_wallets: Path to alphabet wallets dir. contracts: Path to archive with compiled FrostFS contracts - (default fetched from latest github release). + (default fetched from latest git release). rpc_endpoint: N3 RPC node endpoint. 
Returns: @@ -348,9 +320,169 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph update-contracts", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, + ) + + def remove_nodes( + self, node_netmap_keys: list[str], rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None + ) -> CommandResult: + """Move node to the Offline state in the candidates list + and tick an epoch to update the netmap using frostfs-adm + + Args: + node_netmap_keys: list of nodes netmap keys. + alphabet_wallets: Path to alphabet wallets dir. + rpc_endpoint: N3 RPC node endpoint. + + Returns: + Command's result. + """ + if not len(node_netmap_keys): + raise AttributeError("Got empty node_netmap_keys list") + + return self._execute( + f"morph remove-nodes {' '.join(node_netmap_keys)}", + **{param: param_value for param, param_value in locals().items() if param not in ["self", "node_netmap_keys"]}, + ) + + def add_rule( + self, + chain_id: str, + target_name: str, + target_type: str, + rule: Optional[list[str]] = None, + path: Optional[str] = None, + chain_id_hex: Optional[bool] = None, + chain_name: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Drop objects from the node's local storage + + Args: + chain-id: Assign ID to the parsed chain + chain-id-hex: Flag to parse chain ID as hex + path: Path to encoded chain in JSON or binary format + rule: Rule statement + target-name: Resource name in APE resource name format + target-type: Resource type(container/namespace) + timeout: Timeout for an operation (default 15s) + wallet: Path to the wallet or binary key + + Returns: + Command`s result. + """ + return self._execute( + "morph ape add-rule-chain", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def get_rule( + self, + chain_id: str, + target_name: str, + target_type: str, + chain_id_hex: Optional[bool] = None, + chain_name: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Drop objects from the node's local storage + + Args: + chain-id string Chain id + chain-id-hex Flag to parse chain ID as hex + target-name string Resource name in APE resource name format + target-type string Resource type(container/namespace) + timeout duration Timeout for an operation (default 15s) + wallet string Path to the wallet or binary key + + Returns: + Command`s result. + """ + return self._execute( + "morph ape get-rule-chain", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def list_rules( + self, + target_type: str, + target_name: Optional[str] = None, + rpc_endpoint: Optional[str] = None, + chain_name: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Drop objects from the node's local storage + + Args: + target-name: Resource name in APE resource name format + target-type: Resource type(container/namespace) + timeout: Timeout for an operation (default 15s) + wallet: Path to the wallet or binary key + + Returns: + Command`s result. 
+ """ + return self._execute( + "morph ape list-rule-chains", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def remove_rule( + self, + chain_id: str, + target_name: str, + target_type: str, + all: Optional[bool] = None, + chain_name: Optional[str] = None, + chain_id_hex: Optional[bool] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Drop objects from the node's local storage + + Args: + all: Remove all chains + chain-id: Assign ID to the parsed chain + chain-id-hex: Flag to parse chain ID as hex + target-name: Resource name in APE resource name format + target-type: Resource type(container/namespace) + timeout: Timeout for an operation (default 15s) + wallet: Path to the wallet or binary key + + Returns: + Command`s result. + """ + return self._execute( + "morph ape rm-rule-chain", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def get_nns_records( + self, + name: str, + type: Optional[str] = None, + rpc_endpoint: Optional[str] = None, + alphabet_wallets: Optional[str] = None, + ) -> CommandResult: + """Returns domain record of the specified type + + Args: + name: Domain name + type: Domain name service record type(A|CNAME|SOA|TXT) + rpc_endpoint: N3 RPC node endpoint + alphabet_wallets: path to alphabet wallets dir + + Returns: + Command's result + """ + return self._execute( + "morph nns get-records", + **{param: value for param, value in locals().items() if param not in ["self"]}, ) diff --git a/src/frostfs_testlib/cli/frostfs_authmate/authmate.py b/src/frostfs_testlib/cli/frostfs_authmate/authmate.py index ba3a3b02..7912dae2 100644 --- a/src/frostfs_testlib/cli/frostfs_authmate/authmate.py +++ b/src/frostfs_testlib/cli/frostfs_authmate/authmate.py @@ -6,8 +6,8 @@ from frostfs_testlib.shell import Shell class FrostfsAuthmate: - secret: Optional[FrostfsAuthmateSecret] = None - version: Optional[FrostfsAuthmateVersion] = None + secret: FrostfsAuthmateSecret + version: FrostfsAuthmateVersion def __init__(self, shell: Shell, frostfs_authmate_exec_path: str): self.secret = FrostfsAuthmateSecret(shell, frostfs_authmate_exec_path) diff --git a/src/frostfs_testlib/cli/frostfs_authmate/secret.py b/src/frostfs_testlib/cli/frostfs_authmate/secret.py index ba5b5f5b..5f300bc6 100644 --- a/src/frostfs_testlib/cli/frostfs_authmate/secret.py +++ b/src/frostfs_testlib/cli/frostfs_authmate/secret.py @@ -44,7 +44,6 @@ class FrostfsAuthmateSecret(CliCommand): wallet: str, wallet_password: str, peer: str, - bearer_rules: str, gate_public_key: Union[str, list[str]], address: Optional[str] = None, container_id: Optional[str] = None, diff --git a/src/frostfs_testlib/cli/frostfs_cli/acl.py b/src/frostfs_testlib/cli/frostfs_cli/acl.py index bd0f80e8..3e605823 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/acl.py +++ b/src/frostfs_testlib/cli/frostfs_cli/acl.py @@ -22,7 +22,7 @@ class FrostfsCliACL(CliCommand): Well-known system object headers start with '$Object:' prefix. User defined headers start without prefix. Read more about filter keys at: - http://github.com/TrueCloudLab/frostfs-api/blob/master/proto-docs/acl.md#message-eaclrecordfilter + https://git.frostfs.info/TrueCloudLab/frostfs-api/src/branch/master/proto-docs/acl.md#message-eaclrecord-filter Match is '=' for matching and '!=' for non-matching filter. Value is a valid unicode string corresponding to object or request header value. 
diff --git a/src/frostfs_testlib/cli/frostfs_cli/ape_manager.py b/src/frostfs_testlib/cli/frostfs_cli/ape_manager.py new file mode 100644 index 00000000..525a9be6 --- /dev/null +++ b/src/frostfs_testlib/cli/frostfs_cli/ape_manager.py @@ -0,0 +1,70 @@ +from typing import Optional + +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult + + +class FrostfsCliApeManager(CliCommand): + """Operations with APE manager.""" + + def add( + self, + rpc_endpoint: str, + chain_id: Optional[str] = None, + chain_id_hex: Optional[str] = None, + path: Optional[str] = None, + rule: Optional[str] | Optional[list[str]] = None, + target_name: Optional[str] = None, + target_type: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Add rule chain for a target.""" + + return self._execute( + "ape-manager add", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def list( + self, + rpc_endpoint: str, + target_name: Optional[str] = None, + target_type: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Generate APE override by target and APE chains. Util command. + + Generated APE override can be dumped to a file in JSON format that is passed to + "create" command. + """ + + return self._execute( + "ape-manager list", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def remove( + self, + rpc_endpoint: str, + chain_id: Optional[str] = None, + chain_id_hex: Optional[str] = None, + target_name: Optional[str] = None, + target_type: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Generate APE override by target and APE chains. Util command. + + Generated APE override can be dumped to a file in JSON format that is passed to + "create" command. + """ + + return self._execute( + "ape-manager remove", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/bearer.py b/src/frostfs_testlib/cli/frostfs_cli/bearer.py new file mode 100644 index 00000000..e21a6c87 --- /dev/null +++ b/src/frostfs_testlib/cli/frostfs_cli/bearer.py @@ -0,0 +1,54 @@ +from typing import Optional + +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult + + +class FrostfsCliBearer(CliCommand): + def create( + self, + rpc_endpoint: str, + out: str, + issued_at: Optional[str] = None, + expire_at: Optional[str] = None, + not_valid_before: Optional[str] = None, + ape: Optional[str] = None, + eacl: Optional[str] = None, + owner: Optional[str] = None, + json: Optional[bool] = False, + impersonate: Optional[bool] = False, + wallet: Optional[str] = None, + address: Optional[str] = None, + ) -> CommandResult: + """Create bearer token. + + All epoch flags can be specified relative to the current epoch with the +n syntax. + In this case --rpc-endpoint flag should be specified and the epoch in bearer token + is set to current epoch + n. 
+ """ + return self._execute( + "bearer create", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def generate_ape_override( + self, + chain_id: Optional[str] = None, + chain_id_hex: Optional[str] = None, + cid: Optional[str] = None, + output: Optional[str] = None, + path: Optional[str] = None, + rule: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + ) -> CommandResult: + """Generate APE override by target and APE chains. Util command. + + Generated APE override can be dumped to a file in JSON format that is passed to + "create" command. + """ + + return self._execute( + "bearer generate-ape-override", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/cli.py b/src/frostfs_testlib/cli/frostfs_cli/cli.py index 07986c28..7874f185 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/cli.py +++ b/src/frostfs_testlib/cli/frostfs_cli/cli.py @@ -2,28 +2,34 @@ from typing import Optional from frostfs_testlib.cli.frostfs_cli.accounting import FrostfsCliAccounting from frostfs_testlib.cli.frostfs_cli.acl import FrostfsCliACL +from frostfs_testlib.cli.frostfs_cli.ape_manager import FrostfsCliApeManager +from frostfs_testlib.cli.frostfs_cli.bearer import FrostfsCliBearer from frostfs_testlib.cli.frostfs_cli.container import FrostfsCliContainer +from frostfs_testlib.cli.frostfs_cli.control import FrostfsCliControl from frostfs_testlib.cli.frostfs_cli.netmap import FrostfsCliNetmap from frostfs_testlib.cli.frostfs_cli.object import FrostfsCliObject from frostfs_testlib.cli.frostfs_cli.session import FrostfsCliSession from frostfs_testlib.cli.frostfs_cli.shards import FrostfsCliShards from frostfs_testlib.cli.frostfs_cli.storagegroup import FrostfsCliStorageGroup +from frostfs_testlib.cli.frostfs_cli.tree import FrostfsCliTree from frostfs_testlib.cli.frostfs_cli.util import FrostfsCliUtil from frostfs_testlib.cli.frostfs_cli.version import FrostfsCliVersion from frostfs_testlib.shell import Shell class FrostfsCli: - accounting: Optional[FrostfsCliAccounting] = None - acl: Optional[FrostfsCliACL] = None - container: Optional[FrostfsCliContainer] = None - netmap: Optional[FrostfsCliNetmap] = None - object: Optional[FrostfsCliObject] = None - session: Optional[FrostfsCliSession] = None - shards: Optional[FrostfsCliShards] = None - storagegroup: Optional[FrostfsCliStorageGroup] = None - util: Optional[FrostfsCliUtil] = None - version: Optional[FrostfsCliVersion] = None + accounting: FrostfsCliAccounting + acl: FrostfsCliACL + container: FrostfsCliContainer + netmap: FrostfsCliNetmap + object: FrostfsCliObject + session: FrostfsCliSession + shards: FrostfsCliShards + storagegroup: FrostfsCliStorageGroup + util: FrostfsCliUtil + version: FrostfsCliVersion + control: FrostfsCliControl + ape_manager: FrostfsCliApeManager def __init__(self, shell: Shell, frostfs_cli_exec_path: str, config_file: Optional[str] = None): self.accounting = FrostfsCliAccounting(shell, frostfs_cli_exec_path, config=config_file) @@ -36,3 +42,7 @@ class FrostfsCli: self.storagegroup = FrostfsCliStorageGroup(shell, frostfs_cli_exec_path, config=config_file) self.util = FrostfsCliUtil(shell, frostfs_cli_exec_path, config=config_file) self.version = FrostfsCliVersion(shell, frostfs_cli_exec_path, config=config_file) + self.tree = FrostfsCliTree(shell, frostfs_cli_exec_path, config=config_file) + self.control = FrostfsCliControl(shell, frostfs_cli_exec_path, config=config_file) + 
self.bearer = FrostfsCliBearer(shell, frostfs_cli_exec_path, config=config_file) + self.ape_manager = FrostfsCliApeManager(shell, frostfs_cli_exec_path, config=config_file) diff --git a/src/frostfs_testlib/cli/frostfs_cli/container.py b/src/frostfs_testlib/cli/frostfs_cli/container.py index 533ff1a8..8bcbe9e5 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/container.py +++ b/src/frostfs_testlib/cli/frostfs_cli/container.py @@ -8,12 +8,16 @@ class FrostfsCliContainer(CliCommand): def create( self, rpc_endpoint: str, - wallet: str, + wallet: Optional[str] = None, + nns_zone: Optional[str] = None, + nns_name: Optional[str] = None, address: Optional[str] = None, attributes: Optional[dict] = None, basic_acl: Optional[str] = None, await_mode: bool = False, disable_timestamp: bool = False, + force: bool = False, + trace: bool = False, name: Optional[str] = None, nonce: Optional[str] = None, policy: Optional[str] = None, @@ -35,6 +39,8 @@ class FrostfsCliContainer(CliCommand): basic_acl: Hex encoded basic ACL value or keywords like 'public-read-write', 'private', 'eacl-public-read' (default "private"). disable_timestamp: Disable timestamp container attribute. + force: Skip placement validity check. + trace: Generate trace ID and print it. name: Container name attribute. nonce: UUIDv4 nonce value for container. policy: QL-encoded or JSON-encoded placement policy or path to file with it. @@ -45,6 +51,8 @@ class FrostfsCliContainer(CliCommand): wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. timeout: Timeout for the operation (default 15s). + nns_zone: Container nns zone attribute. + nns_name: Container nns name attribute. Returns: Command's result. @@ -57,15 +65,15 @@ class FrostfsCliContainer(CliCommand): def delete( self, rpc_endpoint: str, - wallet: str, cid: str, + wallet: Optional[str] = None, address: Optional[str] = None, await_mode: bool = False, session: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, force: bool = False, - timeout: Optional[str] = None, + trace: bool = False, ) -> CommandResult: """ Delete an existing container. @@ -75,13 +83,13 @@ class FrostfsCliContainer(CliCommand): address: Address of wallet account. await_mode: Block execution until container is removed. cid: Container ID. + trace: Generate trace ID and print it. force: Do not check whether container contains locks and remove immediately. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). session: Path to a JSON-encoded container session token. ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. - timeout: Timeout for the operation (default 15s). Returns: Command's result. @@ -95,12 +103,14 @@ class FrostfsCliContainer(CliCommand): def get( self, rpc_endpoint: str, - wallet: str, cid: str, + wallet: Optional[str] = None, address: Optional[str] = None, + generate_key: Optional[bool] = None, await_mode: bool = False, to: Optional[str] = None, json_mode: bool = False, + trace: bool = False, ttl: Optional[int] = None, xhdr: Optional[dict] = None, timeout: Optional[str] = None, @@ -113,12 +123,14 @@ class FrostfsCliContainer(CliCommand): await_mode: Block execution until container is removed. cid: Container ID. json_mode: Print or dump container in JSON format. + trace: Generate trace ID and print it. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). to: Path to dump encoded container. 
ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. timeout: Timeout for the operation (default 15s). + generate_key: Generate a new private key. Returns: Command's result. @@ -131,9 +143,10 @@ class FrostfsCliContainer(CliCommand): def get_eacl( self, rpc_endpoint: str, - wallet: str, cid: str, + wallet: Optional[str] = None, address: Optional[str] = None, + generate_key: Optional[bool] = None, await_mode: bool = False, to: Optional[str] = None, session: Optional[str] = None, @@ -150,11 +163,14 @@ class FrostfsCliContainer(CliCommand): cid: Container ID. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). to: Path to dump encoded container. + json_mode: Print or dump container in JSON format. + trace: Generate trace ID and print it. session: Path to a JSON-encoded container session token. ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. timeout: Timeout for the operation (default 15s). + generate_key: Generate a new private key. Returns: Command's result. @@ -168,8 +184,10 @@ class FrostfsCliContainer(CliCommand): def list( self, rpc_endpoint: str, - wallet: str, + name: Optional[str] = None, + wallet: Optional[str] = None, address: Optional[str] = None, + generate_key: Optional[bool] = None, owner: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, @@ -181,12 +199,15 @@ class FrostfsCliContainer(CliCommand): Args: address: Address of wallet account. + name: List containers by the attribute name. owner: Owner of containers (omit to use owner from private key). rpc_endpoint: Remote node address (as 'multiaddr' or ':'). ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. + trace: Generate trace ID and print it. timeout: Timeout for the operation (default 15s). + generate_key: Generate a new private key. Returns: Command's result. @@ -199,9 +220,12 @@ class FrostfsCliContainer(CliCommand): def list_objects( self, rpc_endpoint: str, - wallet: str, cid: str, + bearer: Optional[str] = None, + wallet: Optional[str] = None, address: Optional[str] = None, + generate_key: Optional[bool] = None, + trace: bool = False, ttl: Optional[int] = None, xhdr: Optional[dict] = None, timeout: Optional[str] = None, @@ -212,11 +236,14 @@ class FrostfsCliContainer(CliCommand): Args: address: Address of wallet account. cid: Container ID. + bearer: File with signed JSON or binary encoded bearer token. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. + trace: Generate trace ID and print it. timeout: Timeout for the operation (default 15s). + generate_key: Generate a new private key. Returns: Command's result. 
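
With `wallet` now optional on the container commands above, credentials can come from the CLI config file passed to `FrostfsCli` instead of being supplied per call. A sketch of that calling pattern; `LocalShell`, the config file name and the endpoint are assumptions for illustration, not values introduced by this change:

```python
# Illustrative only: container commands without an explicit wallet argument.
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.shell import LocalShell

cli = FrostfsCli(LocalShell(), "frostfs-cli", config_file="wallet_config.yml")

# No wallet parameter: the key is resolved from the CLI config file.
result = cli.container.list(rpc_endpoint="s01.frostfs.devenv:8080")
print(result.stdout)
```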
@@ -226,11 +253,12 @@ class FrostfsCliContainer(CliCommand): **{param: value for param, value in locals().items() if param not in ["self"]}, ) + # TODO Deprecated method with 0.42 def set_eacl( self, rpc_endpoint: str, - wallet: str, cid: str, + wallet: Optional[str] = None, address: Optional[str] = None, await_mode: bool = False, table: Optional[str] = None, @@ -262,3 +290,43 @@ class FrostfsCliContainer(CliCommand): "container set-eacl", **{param: value for param, value in locals().items() if param not in ["self"]}, ) + + def search_node( + self, + rpc_endpoint: str, + cid: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + ttl: Optional[int] = None, + from_file: Optional[str] = None, + trace: bool = False, + short: Optional[bool] = True, + xhdr: Optional[dict] = None, + generate_key: Optional[bool] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """ + Show the nodes participating in the container in the current epoch. + + Args: + rpc_endpoint: string Remote host address (as 'multiaddr' or ':') + wallet: WIF (NEP-2) string or path to the wallet or binary key. + cid: Container ID. + address: Address of wallet account. + ttl: TTL value in request meta header (default 2). + from_file: string File path with encoded container + timeout: duration Timeout for the operation (default 15 s) + short: shorten the output of node information. + trace: Generate trace ID and print it. + xhdr: Dict with request X-Headers. + generate_key: Generate a new private key. + + Returns: + + """ + from_str = f"--from {from_file}" if from_file else "" + + return self._execute( + f"container nodes {from_str}", + **{param: value for param, value in locals().items() if param not in ["self", "from_file", "from_str"]}, + ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/control.py b/src/frostfs_testlib/cli/frostfs_cli/control.py new file mode 100644 index 00000000..957bca94 --- /dev/null +++ b/src/frostfs_testlib/cli/frostfs_cli/control.py @@ -0,0 +1,232 @@ +from typing import Optional + +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult + + +class FrostfsCliControl(CliCommand): + def set_status( + self, + endpoint: str, + status: str, + wallet: Optional[str] = None, + force: Optional[bool] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Set status of the storage node in FrostFS network map + + Args: + wallet: Path to the wallet or binary key + address: Address of wallet account + endpoint: Remote node control address (as 'multiaddr' or ':') + force: Force turning to local maintenance + status: New netmap status keyword ('online', 'offline', 'maintenance') + timeout: Timeout for an operation (default 15s) + + Returns: + Command`s result. + """ + return self._execute( + "control set-status", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def healthcheck( + self, + endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Health check for FrostFS storage nodes + + Args: + wallet: Path to the wallet or binary key + address: Address of wallet account + endpoint: Remote node control address (as 'multiaddr' or ':') + timeout: Timeout for an operation (default 15s) + + Returns: + Command`s result. 
+ """ + return self._execute( + "control healthcheck", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def drop_objects( + self, + endpoint: str, + objects: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Drop objects from the node's local storage + + Args: + wallet: Path to the wallet or binary key + address: Address of wallet account + endpoint: Remote node control address (as 'multiaddr' or ':') + objects: List of object addresses to be removed in string format + timeout: Timeout for an operation (default 15s) + + Returns: + Command`s result. + """ + return self._execute( + "control drop-objects", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def add_rule( + self, + endpoint: str, + chain_id: str, + target_name: str, + target_type: str, + rule: Optional[list[str]] = None, + path: Optional[str] = None, + chain_id_hex: Optional[bool] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Drop objects from the node's local storage + + Args: + address: Address of wallet account + chain-id: Assign ID to the parsed chain + chain-id-hex: Flag to parse chain ID as hex + endpoint: Remote node control address (as 'multiaddr' or ':') + path: Path to encoded chain in JSON or binary format + rule: Rule statement + target-name: Resource name in APE resource name format + target-type: Resource type(container/namespace) + timeout: Timeout for an operation (default 15s) + wallet: Path to the wallet or binary key + + Returns: + Command`s result. + """ + return self._execute( + "control add-rule", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def get_rule( + self, + endpoint: str, + chain_id: str, + target_name: str, + target_type: str, + chain_id_hex: Optional[bool] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Drop objects from the node's local storage + + Args: + address string Address of wallet account + chain-id string Chain id + chain-id-hex Flag to parse chain ID as hex + endpoint string Remote node control address (as 'multiaddr' or ':') + target-name string Resource name in APE resource name format + target-type string Resource type(container/namespace) + timeout duration Timeout for an operation (default 15s) + wallet string Path to the wallet or binary key + + Returns: + Command`s result. + """ + return self._execute( + "control get-rule", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def list_rules( + self, + endpoint: str, + target_name: str, + target_type: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Drop objects from the node's local storage + + Args: + address: Address of wallet account + endpoint: Remote node control address (as 'multiaddr' or ':') + target-name: Resource name in APE resource name format + target-type: Resource type(container/namespace) + timeout: Timeout for an operation (default 15s) + wallet: Path to the wallet or binary key + + Returns: + Command`s result. 
+ """ + return self._execute( + "control list-rules", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def list_targets( + self, + endpoint: str, + chain_name: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Drop objects from the node's local storage + + Args: + address: Address of wallet account + chain-name: Chain name(ingress|s3) + endpoint: Remote node control address (as 'multiaddr' or ':') + timeout: Timeout for an operation (default 15s) + wallet: Path to the wallet or binary key + + Returns: + Command`s result. + """ + return self._execute( + "control list-targets", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def remove_rule( + self, + endpoint: str, + chain_id: str, + target_name: str, + target_type: str, + all: Optional[bool] = None, + chain_id_hex: Optional[bool] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Drop objects from the node's local storage + + Args: + address: Address of wallet account + all: Remove all chains + chain-id: Assign ID to the parsed chain + chain-id-hex: Flag to parse chain ID as hex + endpoint: Remote node control address (as 'multiaddr' or ':') + target-name: Resource name in APE resource name format + target-type: Resource type(container/namespace) + timeout: Timeout for an operation (default 15s) + wallet: Path to the wallet or binary key + + Returns: + Command`s result. + """ + return self._execute( + "control remove-rule", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/netmap.py b/src/frostfs_testlib/cli/frostfs_cli/netmap.py index 89208936..cd197d3b 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/netmap.py +++ b/src/frostfs_testlib/cli/frostfs_cli/netmap.py @@ -8,10 +8,11 @@ class FrostfsCliNetmap(CliCommand): def epoch( self, rpc_endpoint: str, - wallet: str, + wallet: Optional[str] = None, address: Optional[str] = None, generate_key: bool = False, ttl: Optional[int] = None, + trace: Optional[bool] = False, xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> CommandResult: @@ -38,10 +39,11 @@ class FrostfsCliNetmap(CliCommand): def netinfo( self, rpc_endpoint: str, - wallet: str, + wallet: Optional[str] = None, address: Optional[str] = None, generate_key: bool = False, ttl: Optional[int] = None, + trace: Optional[bool] = False, xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> CommandResult: @@ -68,11 +70,12 @@ class FrostfsCliNetmap(CliCommand): def nodeinfo( self, rpc_endpoint: str, - wallet: str, + wallet: Optional[str] = None, address: Optional[str] = None, generate_key: bool = False, json: bool = False, ttl: Optional[int] = None, + trace: Optional[bool] = False, xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> CommandResult: @@ -100,10 +103,11 @@ class FrostfsCliNetmap(CliCommand): def snapshot( self, rpc_endpoint: str, - wallet: str, + wallet: Optional[str] = None, address: Optional[str] = None, generate_key: bool = False, ttl: Optional[int] = None, + trace: Optional[bool] = False, xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> CommandResult: diff --git a/src/frostfs_testlib/cli/frostfs_cli/object.py b/src/frostfs_testlib/cli/frostfs_cli/object.py index 1c1d0acd..e5365440 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/object.py +++ 
b/src/frostfs_testlib/cli/frostfs_cli/object.py @@ -8,11 +8,12 @@ class FrostfsCliObject(CliCommand): def delete( self, rpc_endpoint: str, - wallet: str, cid: str, oid: str, + wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, session: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, @@ -25,6 +26,7 @@ class FrostfsCliObject(CliCommand): address: Address of wallet account. bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. + generate_key: Generate new private key. oid: Object ID. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). session: Filepath to a JSON- or binary-encoded token of the object DELETE session. @@ -44,11 +46,12 @@ class FrostfsCliObject(CliCommand): def get( self, rpc_endpoint: str, - wallet: str, cid: str, oid: str, + wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, file: Optional[str] = None, header: Optional[str] = None, no_progress: bool = False, @@ -66,6 +69,7 @@ class FrostfsCliObject(CliCommand): bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. file: File to write object payload to. Default: stdout. + generate_key: Generate new private key. header: File to write header to. Default: stdout. no_progress: Do not show progress bar. oid: Object ID. @@ -88,11 +92,12 @@ class FrostfsCliObject(CliCommand): def hash( self, rpc_endpoint: str, - wallet: str, cid: str, oid: str, + wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, range: Optional[str] = None, salt: Optional[str] = None, ttl: Optional[int] = None, @@ -108,6 +113,7 @@ class FrostfsCliObject(CliCommand): address: Address of wallet account. bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. + generate_key: Generate new private key. oid: Object ID. range: Range to take hash from in the form offset1:length1,... rpc_endpoint: Remote node address (as 'multiaddr' or ':'). @@ -124,19 +130,18 @@ class FrostfsCliObject(CliCommand): """ return self._execute( "object hash", - **{ - param: value for param, value in locals().items() if param not in ["self", "params"] - }, + **{param: value for param, value in locals().items() if param not in ["self", "params"]}, ) def head( self, rpc_endpoint: str, - wallet: str, cid: str, oid: str, + wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, file: Optional[str] = None, json_mode: bool = False, main_only: bool = False, @@ -155,6 +160,7 @@ class FrostfsCliObject(CliCommand): bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. file: File to write object payload to. Default: stdout. + generate_key: Generate new private key. json_mode: Marshal output in JSON. main_only: Return only main fields. oid: Object ID. @@ -178,13 +184,14 @@ class FrostfsCliObject(CliCommand): def lock( self, rpc_endpoint: str, - wallet: str, cid: str, oid: str, + wallet: Optional[str] = None, lifetime: Optional[int] = None, expire_at: Optional[int] = None, address: Optional[str] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, session: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, @@ -197,6 +204,7 @@ class FrostfsCliObject(CliCommand): address: Address of wallet account. 
bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. + generate_key: Generate new private key. oid: Object ID. lifetime: Lock lifetime. expire_at: Lock expiration epoch. @@ -218,12 +226,14 @@ class FrostfsCliObject(CliCommand): def put( self, rpc_endpoint: str, - wallet: str, cid: str, file: str, + wallet: Optional[str] = None, address: Optional[str] = None, attributes: Optional[dict] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + copies_number: Optional[int] = None, disable_filename: bool = False, disable_timestamp: bool = False, expire_at: Optional[int] = None, @@ -241,11 +251,13 @@ class FrostfsCliObject(CliCommand): address: Address of wallet account. attributes: User attributes in form of Key1=Value1,Key2=Value2. bearer: File with signed JSON or binary encoded bearer token. + copies_number: Number of copies of the object to store within the RPC call. cid: Container ID. disable_filename: Do not set well-known filename attribute. disable_timestamp: Do not set well-known timestamp attribute. expire_at: Last epoch in the life of the object. file: File with object payload. + generate_key: Generate new private key. no_progress: Do not show progress bar. notify: Object notification in the form of *epoch*:*topic*; '-' topic means using default. @@ -264,15 +276,64 @@ class FrostfsCliObject(CliCommand): **{param: value for param, value in locals().items() if param not in ["self"]}, ) + def patch( + self, + rpc_endpoint: str, + cid: str, + oid: str, + range: list[str] = None, + payload: list[str] = None, + new_attrs: Optional[str] = None, + replace_attrs: bool = False, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + session: Optional[str] = None, + timeout: Optional[str] = None, + trace: bool = False, + ttl: Optional[int] = None, + wallet: Optional[str] = None, + xhdr: Optional[dict] = None, + ) -> CommandResult: + """ + PATCH an object. + + Args: + rpc_endpoint: Remote node address (as 'multiaddr' or ':') + cid: Container ID + oid: Object ID + range: An array of ranges in which to replace data in the format [offset1:length1, offset2:length2] + payload: An array of file paths to be applied in each range + new_attrs: Attributes to be changed in the format Key1=Value1,Key2=Value2 + replace_attrs: Replace all attributes completely with new ones specified in new_attrs + address: Address of wallet account + bearer: File with signed JSON or binary encoded bearer token + generate_key: Generate new private key + session: Filepath to a JSON- or binary-encoded token of the object RANGE session + timeout: Timeout for the operation + trace: Generate trace ID and print it + ttl: TTL value in request meta header (default 2) + wallet: WIF (NEP-2) string or path to the wallet or binary key + xhdr: Dict with request X-Headers + + Returns: + Command's result. + """ + return self._execute( + "object patch", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + def range( self, rpc_endpoint: str, - wallet: str, cid: str, oid: str, range: str, + wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, file: Optional[str] = None, json_mode: bool = False, raw: bool = False, @@ -289,6 +350,7 @@ class FrostfsCliObject(CliCommand): bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. file: File to write object payload to. Default: stdout. 
+ generate_key: Generate new private key. json_mode: Marshal output in JSON. oid: Object ID. range: Range to take data from in the form offset:length. @@ -311,10 +373,11 @@ class FrostfsCliObject(CliCommand): def search( self, rpc_endpoint: str, - wallet: str, cid: str, + wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, filters: Optional[list] = None, oid: Optional[str] = None, phy: bool = False, @@ -332,6 +395,7 @@ class FrostfsCliObject(CliCommand): bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. filters: Repeated filter expressions or files with protobuf JSON. + generate_key: Generate new private key. oid: Object ID. phy: Search physically stored objects. root: Search for user objects. @@ -349,3 +413,46 @@ class FrostfsCliObject(CliCommand): "object search", **{param: value for param, value in locals().items() if param not in ["self"]}, ) + + def nodes( + self, + rpc_endpoint: str, + cid: str, + oid: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + trace: bool = False, + root: bool = False, + verify_presence_all: bool = False, + json: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """ + Search object nodes. + + Args: + address: Address of wallet account. + bearer: File with signed JSON or binary encoded bearer token. + cid: Container ID. + generate_key: Generate new private key. + oid: Object ID. + trace: Generate trace ID and print it. + root: Search for user objects. + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + verify_presence_all: Verify the actual presence of the object on all netmap nodes. + ttl: TTL value in request meta header (default 2). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + xhdr: Dict with request X-Headers. + timeout: Timeout for the operation (default 15s). + + Returns: + Command's result. + """ + return self._execute( + "object nodes", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/session.py b/src/frostfs_testlib/cli/frostfs_cli/session.py index e21cc235..857b13e0 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/session.py +++ b/src/frostfs_testlib/cli/frostfs_cli/session.py @@ -9,7 +9,6 @@ class FrostfsCliSession(CliCommand): self, rpc_endpoint: str, wallet: str, - wallet_password: str, out: str, lifetime: Optional[int] = None, address: Optional[str] = None, @@ -30,12 +29,7 @@ class FrostfsCliSession(CliCommand): Returns: Command's result. 
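A rough sketch of what removing wallet_password from session.create means at the call site. The constructor shown and the file paths are placeholders; the point is only that the password is no longer supplied per call, so it is expected to come from the wallet config used by the CLI.

from frostfs_testlib.cli.frostfs_cli.session import FrostfsCliSession
from frostfs_testlib.shell.local_shell import LocalShell

# Constructor arguments and paths are placeholders for illustration.
session_cli = FrostfsCliSession(LocalShell(), "frostfs-cli")

result = session_cli.create(
    rpc_endpoint="localhost:8080",
    wallet="/path/to/wallet.json",
    out="/tmp/session.token",
    lifetime=10,
)
print(result.stdout)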
""" - return self._execute_with_password( + return self._execute( "session create", - wallet_password, - **{ - param: value - for param, value in locals().items() - if param not in ["self", "wallet_password"] - }, + **{param: value for param, value in locals().items() if param not in ["self"]}, ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/shards.py b/src/frostfs_testlib/cli/frostfs_cli/shards.py index 6b47ac29..68a2f544 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/shards.py +++ b/src/frostfs_testlib/cli/frostfs_cli/shards.py @@ -39,10 +39,10 @@ class FrostfsCliShards(CliCommand): def set_mode( self, endpoint: str, - wallet: str, - wallet_password: str, mode: str, - id: Optional[list[str]], + id: Optional[list[str]] = None, + wallet: Optional[str] = None, + wallet_password: Optional[str] = None, address: Optional[str] = None, all: bool = False, clear_errors: bool = False, @@ -65,14 +65,15 @@ class FrostfsCliShards(CliCommand): Returns: Command's result. """ + if not wallet_password: + return self._execute( + "control shards set-mode", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) return self._execute_with_password( "control shards set-mode", wallet_password, - **{ - param: value - for param, value in locals().items() - if param not in ["self", "wallet_password"] - }, + **{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]}, ) def dump( @@ -105,18 +106,14 @@ class FrostfsCliShards(CliCommand): return self._execute_with_password( "control shards dump", wallet_password, - **{ - param: value - for param, value in locals().items() - if param not in ["self", "wallet_password"] - }, + **{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]}, ) def list( self, endpoint: str, - wallet: str, - wallet_password: str, + wallet: Optional[str] = None, + wallet_password: Optional[str] = None, address: Optional[str] = None, json_mode: bool = False, timeout: Optional[str] = None, @@ -135,12 +132,130 @@ class FrostfsCliShards(CliCommand): Returns: Command's result. """ + if not wallet_password: + return self._execute( + "control shards list", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) return self._execute_with_password( "control shards list", wallet_password, - **{ - param: value - for param, value in locals().items() - if param not in ["self", "wallet_password"] - }, + **{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]}, + ) + + def evacuation_start( + self, + endpoint: str, + id: Optional[str] = None, + scope: Optional[str] = None, + all: bool = False, + no_errors: bool = True, + await_mode: bool = False, + address: Optional[str] = None, + timeout: Optional[str] = None, + no_progress: bool = False, + ) -> CommandResult: + """ + Objects evacuation from shard to other shards. + + Args: + address: Address of wallet account + all: Process all shards + await: Block execution until evacuation is completed + endpoint: Remote node control address (as 'multiaddr' or ':') + id: List of shard IDs in base58 encoding + no_errors: Skip invalid/unreadable objects (default true) + no_progress: Print progress if await provided + scope: Evacuation scope; possible values: trees, objects, all (default "all") + timeout: Timeout for an operation (default 15s) + + Returns: + Command's result. 
+ """ + return self._execute( + "control shards evacuation start", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def evacuation_reset( + self, + endpoint: str, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """ + Reset evacuate objects from shard to other shards status. + + Args: + address: Address of wallet account + endpoint: Remote node control address (as 'multiaddr' or ':') + timeout: Timeout for an operation (default 15s) + Returns: + Command's result. + """ + return self._execute( + "control shards evacuation reset", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def evacuation_stop( + self, + endpoint: str, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """ + Stop running evacuate process from shard to other shards. + + Args: + address: Address of wallet account + endpoint: Remote node control address (as 'multiaddr' or ':') + timeout: Timeout for an operation (default 15s) + + Returns: + Command's result. + """ + return self._execute( + "control shards evacuation stop", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def evacuation_status( + self, + endpoint: str, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """ + Get evacuate objects from shard to other shards status. + + Args: + address: Address of wallet account + endpoint: Remote node control address (as 'multiaddr' or ':') + timeout: Timeout for an operation (default 15s) + + Returns: + Command's result. + """ + return self._execute( + "control shards evacuation status", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def detach(self, endpoint: str, address: Optional[str] = None, id: Optional[str] = None, timeout: Optional[str] = None): + """ + Detach and close the shards + + Args: + address: Address of wallet account + endpoint: Remote node control address (as 'multiaddr' or ':') + id: List of shard IDs in base58 encoding + timeout: Timeout for an operation (default 15s) + + Returns: + Command's result. + """ + return self._execute( + "control shards detach", + **{param: value for param, value in locals().items() if param not in ["self"]}, ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/tree.py b/src/frostfs_testlib/cli/frostfs_cli/tree.py new file mode 100644 index 00000000..c75b5260 --- /dev/null +++ b/src/frostfs_testlib/cli/frostfs_cli/tree.py @@ -0,0 +1,53 @@ +from typing import Optional + +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult + + +class FrostfsCliTree(CliCommand): + def healthcheck( + self, + wallet: Optional[str] = None, + rpc_endpoint: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Get internal balance of FrostFS account + + Args: + address: Address of wallet account. + owner: Owner of balance account (omit to use owner from private key). + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + + Returns: + Command's result. 
+ + """ + return self._execute( + "tree healthcheck", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def list( + self, + cid: str, + rpc_endpoint: Optional[str] = None, + wallet: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Get Tree List + + Args: + cid: Container ID. + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + timeout: duration Timeout for the operation (default 15 s) + + Returns: + Command's result. + + """ + return self._execute( + "tree list", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/util.py b/src/frostfs_testlib/cli/frostfs_cli/util.py index 99acd0ae..37347a5f 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/util.py +++ b/src/frostfs_testlib/cli/frostfs_cli/util.py @@ -6,12 +6,12 @@ from frostfs_testlib.shell import CommandResult class FrostfsCliUtil(CliCommand): def sign_bearer_token( - self, - wallet: str, - from_file: str, - to_file: str, - address: Optional[str] = None, - json: Optional[bool] = False, + self, + from_file: str, + to_file: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + json: Optional[bool] = False, ) -> CommandResult: """ Sign bearer token to use it in requests. @@ -33,9 +33,9 @@ class FrostfsCliUtil(CliCommand): def sign_session_token( self, - wallet: str, from_file: str, to_file: str, + wallet: Optional[str] = None, address: Optional[str] = None, ) -> CommandResult: """ @@ -54,3 +54,11 @@ class FrostfsCliUtil(CliCommand): "util sign session-token", **{param: value for param, value in locals().items() if param not in ["self"]}, ) + + def convert_eacl(self, from_file: str, to_file: str, json: Optional[bool] = False, ape: Optional[bool] = False): + """Convert representation of extended ACL table.""" + + return self._execute( + "util convert eacl", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) diff --git a/src/frostfs_testlib/cli/generic_cli.py b/src/frostfs_testlib/cli/generic_cli.py new file mode 100644 index 00000000..2a801599 --- /dev/null +++ b/src/frostfs_testlib/cli/generic_cli.py @@ -0,0 +1,30 @@ +from typing import Optional + +from frostfs_testlib.hosting.interfaces import Host +from frostfs_testlib.shell.interfaces import CommandOptions, Shell + + +class GenericCli(object): + def __init__(self, cli_name: str, host: Host) -> None: + self.host = host + self.cli_name = cli_name + + def __call__( + self, + args: Optional[str] = "", + pipes: Optional[str] = "", + shell: Optional[Shell] = None, + options: Optional[CommandOptions] = None, + ): + if not shell: + shell = self.host.get_shell() + + cli_config = self.host.get_cli_config(self.cli_name, True) + extra_args = "" + exec_path = self.cli_name + if cli_config: + extra_args = " ".join(cli_config.extra_args) + exec_path = cli_config.exec_path + + cmd = f"{exec_path} {args} {extra_args} {pipes}" + return shell.exec(cmd, options) diff --git a/src/frostfs_testlib/cli/neogo/go.py b/src/frostfs_testlib/cli/neogo/go.py index a0634a4f..9e7286c6 100644 --- a/src/frostfs_testlib/cli/neogo/go.py +++ b/src/frostfs_testlib/cli/neogo/go.py @@ -12,14 +12,14 @@ from frostfs_testlib.shell import Shell class NeoGo: - candidate: Optional[NeoGoCandidate] = None - contract: Optional[NeoGoContract] = None - db: Optional[NeoGoDb] = None - nep17: Optional[NeoGoNep17] = None - node: Optional[NeoGoNode] = None - query: 
Optional[NeoGoQuery] = None
-    version: Optional[NeoGoVersion] = None
-    wallet: Optional[NeoGoWallet] = None
+    candidate: NeoGoCandidate
+    contract: NeoGoContract
+    db: NeoGoDb
+    nep17: NeoGoNep17
+    node: NeoGoNode
+    query: NeoGoQuery
+    version: NeoGoVersion
+    wallet: NeoGoWallet
 
     def __init__(
         self,
diff --git a/src/frostfs_testlib/cli/neogo/query.py b/src/frostfs_testlib/cli/neogo/query.py
index 66277904..1422dafe 100644
--- a/src/frostfs_testlib/cli/neogo/query.py
+++ b/src/frostfs_testlib/cli/neogo/query.py
@@ -76,7 +76,7 @@ class NeoGoQuery(CliCommand):
             **{
                 param: param_value
                 for param, param_value in locals().items()
-                if param not in ["self", "hash"]
+                if param not in ["self", "tx_hash"]
             },
         )
 
diff --git a/src/frostfs_testlib/cli/netmap_parser.py b/src/frostfs_testlib/cli/netmap_parser.py
new file mode 100644
index 00000000..4b4a5015
--- /dev/null
+++ b/src/frostfs_testlib/cli/netmap_parser.py
@@ -0,0 +1,102 @@
+import re
+
+from frostfs_testlib.storage.cluster import ClusterNode
+from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeInfo, NodeNetInfo, NodeNetmapInfo, NodeStatus
+
+
+class NetmapParser:
+    @staticmethod
+    def netinfo(output: str) -> NodeNetInfo:
+        regexes = {
+            "epoch": r"Epoch: (?P<epoch>\d+)",
+            "network_magic": r"Network magic: (?P<network_magic>.*$)",
+            "time_per_block": r"Time per block: (?P<time_per_block>\d+\w+)",
+            "container_fee": r"Container fee: (?P<container_fee>\d+)",
+            "epoch_duration": r"Epoch duration: (?P<epoch_duration>\d+)",
+            "inner_ring_candidate_fee": r"Inner Ring candidate fee: (?P<inner_ring_candidate_fee>\d+)",
+            "maximum_object_size": r"Maximum object size: (?P<maximum_object_size>\d+)",
+            "maximum_count_of_data_shards": r"Maximum count of data shards: (?P<maximum_count_of_data_shards>\d+)",
+            "maximum_count_of_parity_shards": r"Maximum count of parity shards: (?P<maximum_count_of_parity_shards>\d+)",
+            "withdrawal_fee": r"Withdrawal fee: (?P<withdrawal_fee>\d+)",
+            "homomorphic_hashing_disabled": r"Homomorphic hashing disabled: (?P<homomorphic_hashing_disabled>true|false)",
+            "maintenance_mode_allowed": r"Maintenance mode allowed: (?P<maintenance_mode_allowed>true|false)",
+        }
+        parse_result = {}
+
+        for key, regex in regexes.items():
+            search_result = re.search(regex, output, flags=re.MULTILINE)
+            if search_result is None:
+                parse_result[key] = None
+                continue
+            parse_result[key] = search_result[key].strip()
+
+        node_netinfo = NodeNetInfo(**parse_result)
+
+        return node_netinfo
+
+    @staticmethod
+    def snapshot_all_nodes(output: str) -> list[NodeNetmapInfo]:
+        """Parse each node from the snapshot output and return it as a dataclass."""
+        netmap_nodes = output.split("Node ")[1:]
+        dataclasses_netmap = []
+        result_netmap = {}
+
+        regexes = {
+            "node_id": r"\d+: (?P<node_id>\w+)",
+            "node_data_ips": r"(?P<node_data_ips>/ip4/.+?)$",
+            "node_status": r"(?P<node_status>ONLINE|MAINTENANCE|OFFLINE)",
+            "cluster_name": r"ClusterName: (?P<cluster_name>\w+)",
+            "continent": r"Continent: (?P<continent>\w+)",
+            "country": r"Country: (?P<country>\w+)",
+            "country_code": r"CountryCode: (?P<country_code>\w+)",
+            "external_address": r"ExternalAddr: (?P<external_address>/ip[4].+?)$",
+            "location": r"Location: (?P<location>\w+.*)",
+            "node": r"Node: (?P<node>\d+\.\d+\.\d+\.\d+)",
+            "price": r"Price: (?P<price>\d+)",
+            "sub_div": r"SubDiv: (?P<sub_div>.*)",
+            "sub_div_code": r"SubDivCode: (?P<sub_div_code>\w+)",
+            "un_locode": r"UN-LOCODE: (?P<un_locode>\w+.*)",
+            "role": r"role: (?P<role>\w+)",
+        }
+
+        for node in netmap_nodes:
+            for key, regex in regexes.items():
+                search_result = re.search(regex, node, flags=re.MULTILINE)
+                if search_result is None:
+                    result_netmap[key] = None
+                    continue
+                if key == "node_data_ips":
+                    result_netmap[key] = search_result[key].strip().split(" ")
+                    continue
+                if key == "external_address":
+                    result_netmap[key] = search_result[key].strip().split(",")
+                    continue
+                if key == "node_status":
+                    result_netmap[key] = 
NodeStatus(search_result[key].strip().lower()) + continue + result_netmap[key] = search_result[key].strip() + + dataclasses_netmap.append(NodeNetmapInfo(**result_netmap)) + + return dataclasses_netmap + + @staticmethod + def snapshot_one_node(output: str, rpc_endpoint: str) -> NodeNetmapInfo | None: + snapshot_nodes = NetmapParser.snapshot_all_nodes(output=output) + for snapshot in snapshot_nodes: + for endpoint in snapshot.external_address: + if rpc_endpoint.split(":")[0] in endpoint: + return snapshot + + @staticmethod + def node_info(output: dict) -> NodeInfo: + data_dict = {"attributes": {}} + + for key, value in output.items(): + if key != "attributes": + data_dict[key] = value + + for attribute in output["attributes"]: + data_dict["attributes"][attribute["key"]] = attribute["value"] + + return NodeInfo(**data_dict) diff --git a/src/frostfs_testlib/clients/__init__.py b/src/frostfs_testlib/clients/__init__.py new file mode 100644 index 00000000..e46766b6 --- /dev/null +++ b/src/frostfs_testlib/clients/__init__.py @@ -0,0 +1,5 @@ +from frostfs_testlib.clients.http.http_client import HttpClient +from frostfs_testlib.clients.s3.aws_cli_client import AwsCliClient +from frostfs_testlib.clients.s3.boto3_client import Boto3ClientWrapper +from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper +from frostfs_testlib.clients.s3.s3_http_client import S3HttpClient diff --git a/src/frostfs_testlib/clients/http/__init__.py b/src/frostfs_testlib/clients/http/__init__.py new file mode 100644 index 00000000..ab6e2b07 --- /dev/null +++ b/src/frostfs_testlib/clients/http/__init__.py @@ -0,0 +1 @@ +from frostfs_testlib.clients.http.http_client import HttpClient diff --git a/src/frostfs_testlib/clients/http/http_client.py b/src/frostfs_testlib/clients/http/http_client.py new file mode 100644 index 00000000..16d77075 --- /dev/null +++ b/src/frostfs_testlib/clients/http/http_client.py @@ -0,0 +1,152 @@ +import io +import json +import logging +import logging.config +from typing import Mapping, Sequence + +import httpx + +from frostfs_testlib import reporter + +timeout = httpx.Timeout(60, read=150) +LOGGING_CONFIG = { + "disable_existing_loggers": False, + "version": 1, + "handlers": {"default": {"class": "logging.StreamHandler", "formatter": "http", "stream": "ext://sys.stderr"}}, + "formatters": { + "http": { + "format": "%(asctime)s [%(levelname)s] %(name)s - %(message)s", + "datefmt": "%Y-%m-%d %H:%M:%S", + } + }, + "loggers": { + "httpx": { + "handlers": ["default"], + "level": "ERROR", + }, + "httpcore": { + "handlers": ["default"], + "level": "ERROR", + }, + }, +} + +logging.config.dictConfig(LOGGING_CONFIG) +logger = logging.getLogger("NeoLogger") + + +class HttpClient: + @reporter.step("Send {method} request to {url}") + def send(self, method: str, url: str, expected_status_code: int = None, **kwargs: dict) -> httpx.Response: + transport = httpx.HTTPTransport(verify=False, retries=5) + client = httpx.Client(timeout=timeout, transport=transport) + response = client.request(method, url, **kwargs) + + self._attach_response(response, **kwargs) + # logger.info(f"Response: {response.status_code} => {response.text}") + + if expected_status_code: + assert ( + response.status_code == expected_status_code + ), f"Got {response.status_code} response code while {expected_status_code} expected" + + return response + + @classmethod + def _parse_body(cls, readable: httpx.Request | httpx.Response) -> str | None: + try: + content = readable.read() + except Exception as e: + logger.warning(f"Unable to read 
file: {str(e)}") + return None + + if not content: + return None + + request_body = None + + try: + request_body = json.loads(content) + except (json.JSONDecodeError, UnicodeDecodeError) as e: + logger.warning(f"Unable to convert body to json: {str(e)}") + + if request_body is not None: + return json.dumps(request_body, default=str, indent=4) + + try: + request_body = content.decode() + except UnicodeDecodeError as e: + logger.warning(f"Unable to decode binary data to text using UTF-8 encoding: {str(e)}") + + request_body = content if request_body is None else request_body + request_body = "" if len(request_body) > 1000 else request_body + + return request_body + + @classmethod + def _parse_files(cls, files: Mapping | Sequence | None) -> dict: + filepaths = {} + + if not files: + return filepaths + + if isinstance(files, Sequence): + items = files + elif isinstance(files, Mapping): + items = files.items() + else: + raise TypeError(f"'files' must be either Sequence or Mapping, got: {type(files).__name__}") + + for name, file in items: + if isinstance(file, io.IOBase): + filepaths[name] = file.name + elif isinstance(file, Sequence): + filepaths[name] = file[1].name + + return filepaths + + @classmethod + def _attach_response(cls, response: httpx.Response, **kwargs): + request = response.request + request_headers = json.dumps(dict(request.headers), default=str, indent=4) + request_body = cls._parse_body(request) + + files = kwargs.get("files") + request_files = cls._parse_files(files) + + response_headers = json.dumps(dict(response.headers), default=str, indent=4) + response_body = cls._parse_body(response) + + report = ( + f"Method: {request.method}\n\n" + + f"URL: {request.url}\n\n" + + f"Request Headers: {request_headers}\n\n" + + (f"Request Body: {request_body}\n\n" if request_body else "") + + (f"Request Files: {request_files}\n\n" if request_files else "") + + f"Response Status Code: {response.status_code}\n\n" + + f"Response Headers: {response_headers}\n\n" + + (f"Response Body: {response_body}\n\n" if response_body else "") + ) + curl_request = cls._create_curl_request(request.url, request.method, request.headers, request_body, request_files) + + reporter.attach(report, "Requests Info") + reporter.attach(curl_request, "CURL") + cls._write_log(curl_request, response_body, response.status_code) + + @classmethod + def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str, files: dict) -> str: + excluded_headers = {"Accept-Encoding", "Connection", "User-Agent", "Content-Length"} + headers = " ".join(f"-H '{header.title()}: {value}'" for header, value in headers.items() if header.title() not in excluded_headers) + + data = f" -d '{data}'" if data else "" + for name, path in files.items(): + data += f' -F "{name}=@{path}"' + + # Option -k means no verify SSL + return f"curl {url} -X {method} {headers}{data} -k" + + @classmethod + def _write_log(cls, curl: str, res_body: str, res_code: int) -> None: + if res_body: + curl += f"\nResponse: {res_code}\n{res_body}" + logger.info(f"{curl}") diff --git a/src/frostfs_testlib/clients/s3/__init__.py b/src/frostfs_testlib/clients/s3/__init__.py new file mode 100644 index 00000000..5481f488 --- /dev/null +++ b/src/frostfs_testlib/clients/s3/__init__.py @@ -0,0 +1,3 @@ +from frostfs_testlib.clients.s3.aws_cli_client import AwsCliClient +from frostfs_testlib.clients.s3.boto3_client import Boto3ClientWrapper +from frostfs_testlib.clients.s3.interfaces import ACL, BucketContainerResolver, S3ClientWrapper, VersioningStatus diff 
--git a/src/frostfs_testlib/clients/s3/aws_cli_client.py b/src/frostfs_testlib/clients/s3/aws_cli_client.py new file mode 100644 index 00000000..c1dd6b66 --- /dev/null +++ b/src/frostfs_testlib/clients/s3/aws_cli_client.py @@ -0,0 +1,1548 @@ +import json +import logging +import os +from datetime import datetime +from time import sleep +from typing import Literal, Optional, Union + +from frostfs_testlib import reporter +from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict +from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME +from frostfs_testlib.shell import CommandOptions +from frostfs_testlib.shell.local_shell import LocalShell +from frostfs_testlib.utils import string_utils + +# TODO: Refactor this code to use shell instead of _cmd_run +from frostfs_testlib.utils.cli_utils import _configure_aws_cli +from frostfs_testlib.utils.file_utils import TestFile + +logger = logging.getLogger("NeoLogger") +command_options = CommandOptions(timeout=480) + + +class AwsCliClient(S3ClientWrapper): + __repr_name__: str = "AWS CLI" + + # Flags that we use for all S3 commands: disable SSL verification (as we use self-signed + # certificate in devenv) and disable automatic pagination in CLI output + common_flags = "--no-verify-ssl --no-paginate" + s3gate_endpoint: str + + @reporter.step("Configure S3 client (aws cli)") + def __init__( + self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1" + ) -> None: + self.s3gate_endpoint = s3gate_endpoint + self.iam_endpoint = None + + self.access_key_id: str = access_key_id + self.secret_access_key: str = secret_access_key + self.profile = profile + self.region = region + + self.local_shell = LocalShell() + try: + _configure_aws_cli(f"aws configure --profile {profile}", access_key_id, secret_access_key, region) + self.local_shell.exec(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS} --profile {profile}") + self.local_shell.exec( + f"aws configure set retry_mode {RETRY_MODE} --profile {profile}", + ) + except Exception as err: + raise RuntimeError("Error while configuring AwsCliClient") from err + + @reporter.step("Set S3 endpoint to {s3gate_endpoint}") + def set_endpoint(self, s3gate_endpoint: str): + self.s3gate_endpoint = s3gate_endpoint + + @reporter.step("Set IAM endpoint to {iam_endpoint}") + def set_iam_endpoint(self, iam_endpoint: str): + self.iam_endpoint = iam_endpoint + + @reporter.step("Create bucket S3") + def create_bucket( + self, + bucket: Optional[str] = None, + object_lock_enabled_for_bucket: Optional[bool] = None, + acl: Optional[str] = None, + grant_write: Optional[str] = None, + grant_read: Optional[str] = None, + grant_full_control: Optional[str] = None, + location_constraint: Optional[str] = None, + ) -> str: + if bucket is None: + bucket = string_utils.unique_name("bucket-") + + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + if object_lock_enabled_for_bucket is None: + object_lock = "" + elif object_lock_enabled_for_bucket: + object_lock = " --object-lock-enabled-for-bucket" + else: + object_lock = " --no-object-lock-enabled-for-bucket" + cmd = ( + f"aws {self.common_flags} s3api create-bucket --bucket {bucket} " + f"{object_lock} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + if acl: + cmd += f" --acl {acl}" + if grant_full_control: + cmd += f" --grant-full-control {grant_full_control}" + if grant_write: + cmd += f" 
--grant-write {grant_write}" + if grant_read: + cmd += f" --grant-read {grant_read}" + if location_constraint: + cmd += f" --create-bucket-configuration LocationConstraint={location_constraint}" + self.local_shell.exec(cmd) + + return bucket + + @reporter.step("List buckets S3") + def list_buckets(self) -> list[str]: + cmd = f"aws {self.common_flags} s3api list-buckets --endpoint {self.s3gate_endpoint} --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + buckets_json = self._to_json(output) + return [bucket["Name"] for bucket in buckets_json["Buckets"]] + + @reporter.step("Delete bucket S3") + def delete_bucket(self, bucket: str) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + self.local_shell.exec(cmd, command_options) + + @reporter.step("Head bucket S3") + def head_bucket(self, bucket: str) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = f"aws {self.common_flags} s3api head-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + self.local_shell.exec(cmd) + + @reporter.step("Put bucket versioning status") + def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api put-bucket-versioning --bucket {bucket} " + f"--versioning-configuration Status={status.value} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + self.local_shell.exec(cmd) + + @reporter.step("Get bucket versioning status") + def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api get-bucket-versioning --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response.get("Status") + + @reporter.step("Put bucket tagging") + def put_bucket_tagging(self, bucket: str, tags: list) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + tags_json = {"TagSet": [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]} + cmd = ( + f"aws {self.common_flags} s3api put-bucket-tagging --bucket {bucket} " + f"--tagging '{json.dumps(tags_json)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + self.local_shell.exec(cmd) + + @reporter.step("Get bucket tagging") + def get_bucket_tagging(self, bucket: str) -> list: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response.get("TagSet") + + @reporter.step("Get bucket acl") + def get_bucket_acl(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + return self._to_json(output) + + @reporter.step("Get bucket location") + def get_bucket_location(self, bucket: str) -> dict: 
+ if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api get-bucket-location --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response.get("LocationConstraint") + + @reporter.step("List objects S3") + def list_objects( + self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None + ) -> Union[dict, list[str]]: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} " + if page_size: + cmd = cmd.replace("--no-paginate", "") + cmd += f" --page-size {page_size} " + if prefix: + cmd += f" --prefix {prefix}" + if self.profile: + cmd += f" --profile {self.profile} " + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + obj_list = [obj["Key"] for obj in response.get("Contents", [])] + logger.info(f"Found s3 objects: {obj_list}") + + return response if full_output else obj_list + + @reporter.step("List objects S3 v2") + def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + obj_list = [obj["Key"] for obj in response.get("Contents", [])] + logger.info(f"Found s3 objects: {obj_list}") + + return response if full_output else obj_list + + @reporter.step("List objects versions S3") + def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response if full_output else response.get("Versions", []) + + @reporter.step("List objects delete markers S3") + def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response if full_output else response.get("DeleteMarkers", []) + + @reporter.step("Copy object S3") + def copy_object( + self, + source_bucket: str, + source_key: str, + bucket: Optional[str] = None, + key: Optional[str] = None, + acl: Optional[str] = None, + metadata_directive: Optional[Literal["COPY", "REPLACE"]] = None, + metadata: Optional[dict] = None, + tagging_directive: Optional[Literal["COPY", "REPLACE"]] = None, + tagging: Optional[str] = None, + ) -> str: + if bucket is None: + bucket = source_bucket + + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + if key is None: + key = string_utils.unique_name("copy-object-") + + copy_source = f"{source_bucket}/{source_key}" + + cmd = ( + f"aws {self.common_flags} s3api copy-object --copy-source {copy_source} " + f"--bucket {bucket} --key 
{key} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + if acl: + cmd += f" --acl {acl}" + if metadata_directive: + cmd += f" --metadata-directive {metadata_directive}" + if metadata: + cmd += " --metadata " + for meta_key, value in metadata.items(): + cmd += f" {meta_key}={value}" + if tagging_directive: + cmd += f" --tagging-directive {tagging_directive}" + if tagging: + cmd += f" --tagging {tagging}" + self.local_shell.exec(cmd, command_options) + return key + + @reporter.step("Put object S3") + def put_object( + self, + bucket: str, + filepath: str, + key: Optional[str] = None, + metadata: Optional[dict] = None, + tagging: Optional[str] = None, + acl: Optional[str] = None, + object_lock_mode: Optional[str] = None, + object_lock_retain_until_date: Optional[datetime] = None, + object_lock_legal_hold_status: Optional[str] = None, + grant_full_control: Optional[str] = None, + grant_read: Optional[str] = None, + ) -> str: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + if key is None: + key = os.path.basename(filepath) + + cmd = ( + f"aws {self.common_flags} s3api put-object --bucket {bucket} --key {key} " + f"--body {filepath} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + if metadata: + cmd += " --metadata" + for key, value in metadata.items(): + cmd += f" {key}={value}" + if tagging: + cmd += f" --tagging '{tagging}'" + if acl: + cmd += f" --acl {acl}" + if object_lock_mode: + cmd += f" --object-lock-mode {object_lock_mode}" + if object_lock_retain_until_date: + cmd += f' --object-lock-retain-until-date "{object_lock_retain_until_date}"' + if object_lock_legal_hold_status: + cmd += f" --object-lock-legal-hold-status {object_lock_legal_hold_status}" + if grant_full_control: + cmd += f" --grant-full-control '{grant_full_control}'" + if grant_read: + cmd += f" --grant-read {grant_read}" + output = self.local_shell.exec(cmd, command_options).stdout + response = self._to_json(output) + return response.get("VersionId") + + @reporter.step("Head object S3") + def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + version = f" --version-id {version_id}" if version_id else "" + cmd = ( + f"aws {self.common_flags} s3api head-object --bucket {bucket} --key {key} " + f"{version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response + + @reporter.step("Get object S3") + def get_object( + self, + bucket: str, + key: str, + version_id: Optional[str] = None, + object_range: Optional[tuple[int, int]] = None, + full_output: bool = False, + ) -> dict | TestFile: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, string_utils.unique_name("dl-object-"))) + version = f" --version-id {version_id}" if version_id else "" + cmd = ( + f"aws {self.common_flags} s3api get-object --bucket {bucket} --key {key} " + f"{version} {test_file} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + if object_range: + cmd += f" --range bytes={object_range[0]}-{object_range[1]}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response if full_output else test_file + + @reporter.step("Get object ACL") + def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: + if bucket.startswith("-") or 
" " in bucket: + bucket = f'"{bucket}"' + + version = f" --version-id {version_id}" if version_id else "" + cmd = ( + f"aws {self.common_flags} s3api get-object-acl --bucket {bucket} --key {key} " + f"{version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response.get("Grants") + + @reporter.step("Put object ACL") + def put_object_acl( + self, + bucket: str, + key: str, + acl: Optional[str] = None, + grant_write: Optional[str] = None, + grant_read: Optional[str] = None, + ) -> list: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api put-object-acl --bucket {bucket} --key {key} " + f" --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + if acl: + cmd += f" --acl {acl}" + if grant_write: + cmd += f" --grant-write {grant_write}" + if grant_read: + cmd += f" --grant-read {grant_read}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response.get("Grants") + + @reporter.step("Put bucket ACL") + def put_bucket_acl( + self, + bucket: str, + acl: Optional[str] = None, + grant_write: Optional[str] = None, + grant_read: Optional[str] = None, + ) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api put-bucket-acl --bucket {bucket} " + f" --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + if acl: + cmd += f" --acl {acl}" + if grant_write: + cmd += f" --grant-write {grant_write}" + if grant_read: + cmd += f" --grant-read {grant_read}" + self.local_shell.exec(cmd) + + @reporter.step("Delete objects S3") + def delete_objects(self, bucket: str, keys: list[str]) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + file_path = os.path.join(os.getcwd(), ASSETS_DIR, "delete.json") + delete_structure = json.dumps(_make_objs_dict(keys)) + with open(file_path, "w") as out_file: + out_file.write(delete_structure) + logger.info(f"Input file for delete-objects: {delete_structure}") + + cmd = ( + f"aws {self.common_flags} s3api delete-objects --bucket {bucket} " + f"--delete file://{file_path} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd, command_options).stdout + response = self._to_json(output) + return response + + @reporter.step("Delete object S3") + def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + version = f" --version-id {version_id}" if version_id else "" + cmd = ( + f"aws {self.common_flags} s3api delete-object --bucket {bucket} " + f"--key {key} {version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd, command_options).stdout + return self._to_json(output) + + @reporter.step("Delete object versions S3") + def delete_object_versions(self, bucket: str, object_versions: list) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + # Build deletion list in S3 format + delete_list = { + "Objects": [ + { + "Key": object_version["Key"], + "VersionId": object_version["VersionId"], + } + for object_version in object_versions + ] + } + + file_path = os.path.join(os.getcwd(), ASSETS_DIR, "delete.json") + delete_structure = json.dumps(delete_list) + with open(file_path, "w") as out_file: + out_file.write(delete_structure) + 
logger.info(f"Input file for delete-objects: {delete_structure}") + + cmd = ( + f"aws {self.common_flags} s3api delete-objects --bucket {bucket} " + f"--delete file://{file_path} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd, command_options).stdout + return self._to_json(output) + + @reporter.step("Delete object versions S3 without delete markers") + def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + # Delete objects without creating delete markers + for object_version in object_versions: + self.delete_object(bucket=bucket, key=object_version["Key"], version_id=object_version["VersionId"]) + + @reporter.step("Get object attributes") + def get_object_attributes( + self, + bucket: str, + key: str, + attributes: list[str], + version_id: str = "", + max_parts: int = 0, + part_number: int = 0, + full_output: bool = True, + ) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + attrs = ",".join(attributes) + version = f" --version-id {version_id}" if version_id else "" + parts = f"--max-parts {max_parts}" if max_parts else "" + part_number_str = f"--part-number-marker {part_number}" if part_number else "" + cmd = ( + f"aws {self.common_flags} s3api get-object-attributes --bucket {bucket} " + f"--key {key} {version} {parts} {part_number_str} --object-attributes {attrs} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + for attr in attributes: + assert attr in response, f"Expected attribute {attr} in {response}" + + if full_output: + return response + else: + return response.get(attributes[0]) + + @reporter.step("Get bucket policy") + def get_bucket_policy(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response.get("Policy") + + @reporter.step("Delete bucket policy") + def delete_bucket_policy(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api delete-bucket-policy --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response + + @reporter.step("Put bucket policy") + def put_bucket_policy(self, bucket: str, policy: dict) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + # Leaving it as is was in test repo. 
Double dumps to escape resulting string + # Example: + # policy = {"a": 1} + # json.dumps(policy) => {"a": 1} + # json.dumps(json.dumps(policy)) => "{\"a\": 1}" + # TODO: update this + dumped_policy = json.dumps(json.dumps(policy)) + cmd = ( + f"aws {self.common_flags} s3api put-bucket-policy --bucket {bucket} " + f"--policy {dumped_policy} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + self.local_shell.exec(cmd) + + @reporter.step("Get bucket cors") + def get_bucket_cors(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response.get("CORSRules") + + @reporter.step("Put bucket cors") + def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api put-bucket-cors --bucket {bucket} " + f"--cors-configuration '{json.dumps(cors_configuration)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + self.local_shell.exec(cmd) + + @reporter.step("Delete bucket cors") + def delete_bucket_cors(self, bucket: str) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + self.local_shell.exec(cmd) + + @reporter.step("Delete bucket tagging") + def delete_bucket_tagging(self, bucket: str) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api delete-bucket-tagging --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + self.local_shell.exec(cmd) + + @reporter.step("Put object retention") + def put_object_retention( + self, + bucket: str, + key: str, + retention: dict, + version_id: Optional[str] = None, + bypass_governance_retention: Optional[bool] = None, + ) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + version = f" --version-id {version_id}" if version_id else "" + cmd = ( + f"aws {self.common_flags} s3api put-object-retention --bucket {bucket} --key {key} " + f"{version} --retention '{json.dumps(retention, indent=4, sort_keys=True, default=str)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + if bypass_governance_retention is not None: + cmd += " --bypass-governance-retention" + self.local_shell.exec(cmd) + + @reporter.step("Put object legal hold") + def put_object_legal_hold( + self, + bucket: str, + key: str, + legal_hold_status: Literal["ON", "OFF"], + version_id: Optional[str] = None, + ) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + version = f" --version-id {version_id}" if version_id else "" + legal_hold = json.dumps({"Status": legal_hold_status}) + cmd = ( + f"aws {self.common_flags} s3api put-object-legal-hold --bucket {bucket} --key {key} " + f"{version} --legal-hold '{legal_hold}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + self.local_shell.exec(cmd) + + @reporter.step("Put object tagging") + def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = "") -> None: + if bucket.startswith("-") or " " in bucket: + bucket = 
f'"{bucket}"' + + tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] + tagging = {"TagSet": tags} + version = f" --version-id {version_id}" if version_id else "" + cmd = ( + f"aws {self.common_flags} s3api put-object-tagging --bucket {bucket} --key {key} " + f"{version} --tagging '{json.dumps(tagging)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + self.local_shell.exec(cmd) + + @reporter.step("Get object tagging") + def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + version = f" --version-id {version_id}" if version_id else "" + cmd = ( + f"aws {self.common_flags} s3api get-object-tagging --bucket {bucket} --key {key} " + f"{version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response.get("TagSet") + + @reporter.step("Delete object tagging") + def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + version = f" --version-id {version_id}" if version_id else "" + cmd = ( + f"aws {self.common_flags} s3api delete-object-tagging --bucket {bucket} " + f"--key {key} {version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + self.local_shell.exec(cmd) + + @reporter.step("Sync directory S3") + def sync( + self, + bucket: str, + dir_path: str, + acl: Optional[str] = None, + metadata: Optional[dict] = None, + ) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) + if metadata: + cmd += " --metadata" + for key, value in metadata.items(): + cmd += f" {key}={value}" + if acl: + cmd += f" --acl {acl}" + output = self.local_shell.exec(cmd, command_options).stdout + return self._to_json(output) + + @reporter.step("CP directory S3") + def cp( + self, + bucket: str, + dir_path: str, + acl: Optional[str] = None, + metadata: Optional[dict] = None, + ) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3 cp {dir_path} s3://{bucket} " + f"--endpoint-url {self.s3gate_endpoint} --recursive --profile {self.profile}" + ) + if metadata: + cmd += " --metadata" + for key, value in metadata.items(): + cmd += f" {key}={value}" + if acl: + cmd += f" --acl {acl}" + output = self.local_shell.exec(cmd, command_options).stdout + return self._to_json(output) + + @reporter.step("Create multipart upload S3") + def create_multipart_upload(self, bucket: str, key: str) -> str: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api create-multipart-upload --bucket {bucket} " + f"--key {key} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("UploadId"), f"Expected UploadId in response:\n{response}" + + return response["UploadId"] + + @reporter.step("List multipart uploads S3") + def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api list-multipart-uploads --bucket {bucket} " + 
f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response.get("Uploads") + + @reporter.step("Abort multipart upload S3") + def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api abort-multipart-upload --bucket {bucket} " + f"--key {key} --upload-id {upload_id} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) + self.local_shell.exec(cmd) + + @reporter.step("Upload part S3") + def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api upload-part --bucket {bucket} --key {key} " + f"--upload-id {upload_id} --part-number {part_num} --body {filepath} " + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd, command_options).stdout + response = self._to_json(output) + assert response.get("ETag"), f"Expected ETag in response:\n{response}" + return response["ETag"] + + @reporter.step("Upload copy part S3") + def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api upload-part-copy --bucket {bucket} --key {key} " + f"--upload-id {upload_id} --part-number {part_num} --copy-source {copy_source} " + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd, command_options).stdout + response = self._to_json(output) + assert response.get("CopyPartResult", []).get("ETag"), f"Expected ETag in response:\n{response}" + + return response["CopyPartResult"]["ETag"] + + @reporter.step("List parts S3") + def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api list-parts --bucket {bucket} --key {key} " + f"--upload-id {upload_id} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("Parts"), f"Expected Parts in response:\n{response}" + + return response["Parts"] + + @reporter.step("Complete multipart upload S3") + def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + file_path = os.path.join(os.getcwd(), ASSETS_DIR, "parts.json") + parts_dict = {"Parts": [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts]} + + with open(file_path, "w") as out_file: + out_file.write(json.dumps(parts_dict)) + + logger.info(f"Input file for complete-multipart-upload: {json.dumps(parts_dict)}") + + cmd = ( + f"aws {self.common_flags} s3api complete-multipart-upload --bucket {bucket} " + f"--key {key} --upload-id {upload_id} --multipart-upload file://{file_path} " + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Put object lock configuration") + def put_object_lock_configuration(self, bucket: str, configuration: dict) -> 
dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api put-object-lock-configuration --bucket {bucket} " + f"--object-lock-configuration '{json.dumps(configuration)}' --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + return self._to_json(output) + + @reporter.step("Get object lock configuration") + def get_object_lock_configuration(self, bucket: str): + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api get-object-lock-configuration --bucket {bucket} " + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response.get("ObjectLockConfiguration") + + @reporter.step("Put bucket lifecycle configuration") + def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api put-bucket-lifecycle-configuration --bucket {bucket} " + f"--endpoint-url {self.s3gate_endpoint} --lifecycle-configuration file://{dumped_configuration} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response + + @reporter.step("Get bucket lifecycle configuration") + def get_bucket_lifecycle_configuration(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api get-bucket-lifecycle-configuration --bucket {bucket} " + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response + + @reporter.step("Delete bucket lifecycle configuration") + def delete_bucket_lifecycle(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + + cmd = ( + f"aws {self.common_flags} s3api delete-bucket-lifecycle --bucket {bucket} " + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response + + @staticmethod + def _to_json(output: str) -> dict: + json_output = {} + if "{" not in output and "}" not in output: + logger.warning(f"Could not parse json from output {output}") + return json_output + + json_output = json.loads(output[output.index("{") :]) + + return json_output + + @reporter.step("Create presign url for the object") + def create_presign_url(self, method: str, bucket: str, key: str, expires_in: Optional[int] = 3600) -> str: + # AWS CLI does not support method definition and works only in 'get_object' state by default + cmd = f"aws {self.common_flags} s3 presign s3://{bucket}/{key} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + if expires_in: + cmd += f" --expires-in {expires_in}" + response = self.local_shell.exec(cmd).stdout + return response.strip() + + # IAM METHODS # + # Some methods don't have checks because AWS is silent in some cases (delete, attach, etc.)
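+ # A minimal sketch of the command pattern shared by the IAM helpers below, assuming
+ # an `aws` binary on PATH; the user name "alice" is an illustration-only value:
+ #
+ #   cmd = f"aws {self.common_flags} iam create-user --user-name alice --endpoint {self.iam_endpoint}"
+ #   if self.profile:
+ #       cmd += f" --profile {self.profile}"
+ #   response = self._to_json(self.local_shell.exec(cmd).stdout)
+ #
+ # Each helper builds such a command, appends --profile only when one is configured,
+ # executes it through the local shell and parses the JSON stdout with _to_json.
+ # Calls that change policy attachments additionally sleep S3_SYNC_WAIT_TIME * 14
+ # to give the change time to propagate.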
+ + @reporter.step("Adds the specified user to the specified group") + def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict: + cmd = f"aws {self.common_flags} iam add-user-to-group --user-name {user_name} --group-name {group_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Attaches the specified managed policy to the specified IAM group") + def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict: + cmd = f"aws {self.common_flags} iam attach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + sleep(S3_SYNC_WAIT_TIME * 14) + + return response + + @reporter.step("Attaches the specified managed policy to the specified user") + def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict: + cmd = f"aws {self.common_flags} iam attach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + sleep(S3_SYNC_WAIT_TIME * 14) + + return response + + @reporter.step("Creates a new AWS secret access key and access key ID for the specified user") + def iam_create_access_key(self, user_name: Optional[str] = None) -> dict: + cmd = f"aws {self.common_flags} iam create-access-key --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + if user_name: + cmd += f" --user-name {user_name}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + access_key_id = response["AccessKey"].get("AccessKeyId") + secret_access_key = response["AccessKey"].get("SecretAccessKey") + assert access_key_id, f"Expected AccessKeyId in response:\n{response}" + assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}" + + return access_key_id, secret_access_key + + @reporter.step("Creates a new group") + def iam_create_group(self, group_name: str) -> dict: + cmd = f"aws {self.common_flags} iam create-group --group-name {group_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("Group"), f"Expected Group in response:\n{response}" + assert response["Group"].get("GroupName") == group_name, f"GroupName should be equal to {group_name}" + + return response + + @reporter.step("Creates a new managed policy for your AWS account") + def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict: + cmd = ( + f"aws {self.common_flags} iam create-policy --endpoint {self.iam_endpoint}" + f" --policy-name {policy_name} --policy-document '{json.dumps(policy_document)}'" + ) + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("Policy"), f"Expected Policy in response:\n{response}" + assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}" + + return response + + @reporter.step("Creates a new IAM user for your AWS account") + def iam_create_user(self, user_name: str) -> dict: + cmd = f"aws 
{self.common_flags} iam create-user --user-name {user_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("User"), f"Expected User in response:\n{response}" + assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" + + return response + + @reporter.step("Deletes the access key pair associated with the specified IAM user") + def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict: + cmd = f"aws {self.common_flags} iam delete-access-key --access-key-id {access_key_id} --user-name {user_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Deletes the specified IAM group") + def iam_delete_group(self, group_name: str) -> dict: + cmd = f"aws {self.common_flags} iam delete-group --group-name {group_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group") + def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict: + cmd = f"aws {self.common_flags} iam delete-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Deletes the specified managed policy") + def iam_delete_policy(self, policy_arn: str) -> dict: + cmd = f"aws {self.common_flags} iam delete-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Deletes the specified IAM user") + def iam_delete_user(self, user_name: str) -> dict: + cmd = f"aws {self.common_flags} iam delete-user --user-name {user_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user") + def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict: + cmd = f"aws {self.common_flags} iam delete-user-policy --user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Removes the specified managed policy from the specified IAM group") + def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict: + cmd = f"aws {self.common_flags} iam detach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + sleep(S3_SYNC_WAIT_TIME * 14) + + return response + + @reporter.step("Removes the specified managed policy from the specified 
user") + def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict: + cmd = f"aws {self.common_flags} iam detach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + sleep(S3_SYNC_WAIT_TIME * 14) + + return response + + @reporter.step("Returns a list of IAM users that are in the specified IAM group") + def iam_get_group(self, group_name: str) -> dict: + cmd = f"aws {self.common_flags} iam get-group --group-name {group_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert "Users" in response.keys(), f"Expected Users in response:\n{response}" + assert response.get("Group").get("GroupName") == group_name, f"GroupName should be equal to {group_name}" + + return response + + @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group") + def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict: + cmd = f"aws {self.common_flags} iam get-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Retrieves information about the specified managed policy") + def iam_get_policy(self, policy_arn: str) -> dict: + cmd = f"aws {self.common_flags} iam get-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("Policy"), f"Expected Policy in response:\n{response}" + assert response["Policy"].get("Arn") == policy_arn, f"PolicyArn should be equal to {policy_arn}" + + return response + + @reporter.step("Retrieves information about the specified version of the specified managed policy") + def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict: + cmd = f"aws {self.common_flags} iam get-policy-version --policy-arn {policy_arn} --version-id {version_id} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("PolicyVersion"), f"Expected PolicyVersion in response:\n{response}" + assert response["PolicyVersion"].get("VersionId") == version_id, f"VersionId should be equal to {version_id}" + + return response + + @reporter.step("Retrieves information about the specified IAM user") + def iam_get_user(self, user_name: str) -> dict: + cmd = f"aws {self.common_flags} iam get-user --user-name {user_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("User"), f"Expected User in response:\n{response}" + assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" + + return response + + @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user") + def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: + cmd = f"aws {self.common_flags} iam get-user-policy 
--user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("UserName"), f"Expected User in response:\n{response}" + + return response + + @reporter.step("Returns information about the access key IDs associated with the specified IAM user") + def iam_list_access_keys(self, user_name: str) -> dict: + cmd = f"aws {self.common_flags} iam list-access-keys --user-name {user_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Lists all managed policies that are attached to the specified IAM group") + def iam_list_attached_group_policies(self, group_name: str) -> dict: + cmd = f"aws {self.common_flags} iam list-attached-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" + + return response + + @reporter.step("Lists all managed policies that are attached to the specified IAM user") + def iam_list_attached_user_policies(self, user_name: str) -> dict: + cmd = f"aws {self.common_flags} iam list-attached-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" + + return response + + @reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to") + def iam_list_entities_for_policy(self, policy_arn: str) -> dict: + cmd = f"aws {self.common_flags} iam list-entities-for-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("PolicyGroups"), f"Expected PolicyGroups in response:\n{response}" + assert response.get("PolicyUsers"), f"Expected PolicyUsers in response:\n{response}" + + return response + + @reporter.step("Lists the names of the inline policies that are embedded in the specified IAM group") + def iam_list_group_policies(self, group_name: str) -> dict: + cmd = f"aws {self.common_flags} iam list-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" + + return response + + @reporter.step("Lists the IAM groups") + def iam_list_groups(self) -> dict: + cmd = f"aws {self.common_flags} iam list-groups --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" + + return response + + @reporter.step("Lists the IAM groups that the specified IAM user belongs to") + def iam_list_groups_for_user(self, user_name: str) 
-> dict: + cmd = f"aws {self.common_flags} iam list-groups-for-user --user-name {user_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" + + return response + + @reporter.step("Lists all the managed policies that are available in your AWS account") + def iam_list_policies(self) -> dict: + cmd = f"aws {self.common_flags} iam list-policies --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert "Policies" in response.keys(), f"Expected Policies in response:\n{response}" + + return response + + @reporter.step("Lists information about the versions of the specified managed policy") + def iam_list_policy_versions(self, policy_arn: str) -> dict: + cmd = f"aws {self.common_flags} iam list-policy-versions --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("Versions"), f"Expected Versions in response:\n{response}" + + return response + + @reporter.step("Lists the names of the inline policies embedded in the specified IAM user") + def iam_list_user_policies(self, user_name: str) -> dict: + cmd = f"aws {self.common_flags} iam list-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" + + return response + + @reporter.step("Lists the IAM users") + def iam_list_users(self) -> dict: + cmd = f"aws {self.common_flags} iam list-users --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert "Users" in response.keys(), f"Expected Users in response:\n{response}" + + return response + + @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group") + def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: + cmd = ( + f"aws {self.common_flags} iam put-group-policy --endpoint {self.iam_endpoint}" + f" --group-name {group_name} --policy-name {policy_name} --policy-document '{json.dumps(policy_document)}'" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + sleep(S3_SYNC_WAIT_TIME * 14) + + return response + + @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user") + def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict: + cmd = ( + f"aws {self.common_flags} iam put-user-policy --endpoint {self.iam_endpoint}" + f" --user-name {user_name} --policy-name {policy_name} --policy-document '{json.dumps(policy_document)}'" + ) + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + sleep(S3_SYNC_WAIT_TIME * 14) + + return response + + @reporter.step("Removes the specified user from the specified group") + def 
iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam remove-user-from-group --endpoint {self.iam_endpoint}" + f" --group-name {group_name} --user-name {user_name}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Updates the name and/or the path of the specified IAM group") + def iam_update_group(self, group_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: + cmd = f"aws {self.common_flags} iam update-group --group-name {group_name} --endpoint {self.iam_endpoint}" + if new_name: + cmd += f" --new-group-name {new_name}" + if new_path: + cmd += f" --new-path {new_path}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Updates the name and/or the path of the specified IAM user") + def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: + cmd = f"aws {self.common_flags} iam update-user --user-name {user_name} --endpoint {self.iam_endpoint}" + if new_name: + cmd += f" --new-user-name {new_name}" + if new_path: + cmd += f" --new-path {new_path}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Adds one or more tags to an IAM user") + def iam_tag_user(self, user_name: str, tags: list) -> dict: + tags_json = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] + cmd = ( + f"aws {self.common_flags} iam tag-user --user-name {user_name} --tags '{json.dumps(tags_json)}' --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("List tags of IAM user") + def iam_list_user_tags(self, user_name: str) -> dict: + cmd = f"aws {self.common_flags} iam list-user-tags --user-name {user_name} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Removes the specified tags from the user") + def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: + tag_keys_joined = " ".join(tag_keys) + cmd = f"aws {self.common_flags} iam untag-user --user-name {user_name} --tag-keys {tag_keys_joined} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + # MFA METHODS + @reporter.step("Creates a new virtual MFA device") + def iam_create_virtual_mfa_device(self, virtual_mfa_device_name: str, outfile: str, bootstrap_method: str) -> tuple: + cmd = f"aws {self.common_flags} iam create-virtual-mfa-device --virtual-mfa-device-name {virtual_mfa_device_name}\ + --outfile {outfile} --bootstrap-method {bootstrap_method} --endpoint {self.iam_endpoint}" + + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + serial_number = response.get("VirtualMFADevice", {}).get("SerialNumber") + assert serial_number, f"Expected SerialNumber in 
response:\n{response}" + + return serial_number, False + + @reporter.step("Deactivates the specified MFA device and removes it from association with the user name") + def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict: + cmd = f"aws {self.common_flags} iam deactivate-mfa-device --user-name {user_name} --serial-number {serial_number} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Deletes a virtual MFA device") + def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict: + cmd = f"aws {self.common_flags} iam delete-virtual-mfa-device --serial-number {serial_number} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Enables the specified MFA device and associates it with the specified IAM user") + def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict: + cmd = f"aws {self.common_flags} iam enable-mfa-device --user-name {user_name} --serial-number {serial_number} --authentication-code1 {authentication_code1}\ + --authentication-code2 {authentication_code2} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Lists the MFA devices for an IAM user") + def iam_list_virtual_mfa_devices(self) -> dict: + cmd = f"aws {self.common_flags} iam list-virtual-mfa-devices --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + assert response.get("VirtualMFADevices"), f"Expected VirtualMFADevices in response:\n{response}" + + return response + + @reporter.step("Get session token for user") + def sts_get_session_token( + self, duration_seconds: Optional[str] = None, serial_number: Optional[str] = None, token_code: Optional[str] = None + ) -> tuple: + cmd = f"aws {self.common_flags} sts get-session-token --endpoint {self.iam_endpoint}" + if duration_seconds: + cmd += f" --duration-seconds {duration_seconds}" + if serial_number: + cmd += f" --serial-number {serial_number}" + if token_code: + cmd += f" --token-code {token_code}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + access_key = response.get("Credentials", {}).get("AccessKeyId") + secret_access_key = response.get("Credentials", {}).get("SecretAccessKey") + session_token = response.get("Credentials", {}).get("SessionToken") + assert access_key, f"Expected AccessKeyId in response:\n{response}" + assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}" + assert session_token, f"Expected SessionToken in response:\n{response}" + + return access_key, secret_access_key, session_token diff --git a/src/frostfs_testlib/clients/s3/boto3_client.py b/src/frostfs_testlib/clients/s3/boto3_client.py new file mode 100644 index 00000000..ac4d55b8 --- /dev/null +++ b/src/frostfs_testlib/clients/s3/boto3_client.py @@ -0,0 +1,1362 @@ +import json +import logging +import os +from collections.abc import Callable +from datetime import datetime +from time import 
sleep +from typing import Literal, Optional, Union + +import boto3 +import urllib3 +from botocore.config import Config +from botocore.exceptions import ClientError +from mypy_boto3_iam import IAMClient +from mypy_boto3_s3 import S3Client +from mypy_boto3_sts import STSClient + +from frostfs_testlib import reporter +from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict +from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME +from frostfs_testlib.utils import string_utils + +# TODO: Refactor this code to use shell instead of _cmd_run +from frostfs_testlib.utils.cli_utils import log_command_execution +from frostfs_testlib.utils.file_utils import TestFile + +logger = logging.getLogger("NeoLogger") + +# Disable warnings on self-signed certificate which the +# boto library produces on requests to S3-gate in dev-env +urllib3.disable_warnings() + + +class Boto3ClientWrapper(S3ClientWrapper): + __repr_name__: str = "Boto3 client" + + @reporter.step("Configure S3 client (boto3)") + def __init__( + self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1" + ) -> None: + self.s3gate_endpoint: str = "" + self.boto3_client: S3Client = None + + self.iam_endpoint: str = "" + self.boto3_iam_client: IAMClient = None + self.boto3_sts_client: STSClient = None + + self.access_key_id = access_key_id + self.secret_access_key = secret_access_key + self.profile = profile + self.region = region + + self.session = boto3.Session() + self.config = Config( + signature_version="s3v4", + retries={ + "max_attempts": MAX_REQUEST_ATTEMPTS, + "mode": RETRY_MODE, + }, + ) + + self.set_endpoint(s3gate_endpoint) + + @reporter.step("Set endpoint S3 to {s3gate_endpoint}") + def set_endpoint(self, s3gate_endpoint: str): + if self.s3gate_endpoint == s3gate_endpoint: + return + + self.s3gate_endpoint = s3gate_endpoint + + self.boto3_client: S3Client = self.session.client( + service_name="s3", + aws_access_key_id=self.access_key_id, + aws_secret_access_key=self.secret_access_key, + region_name=self.region, + config=self.config, + endpoint_url=s3gate_endpoint, + verify=False, + ) + + @reporter.step("Set endpoint IAM to {iam_endpoint}") + def set_iam_endpoint(self, iam_endpoint: str): + if self.iam_endpoint == iam_endpoint: + return + + self.iam_endpoint = iam_endpoint + + self.boto3_iam_client = self.session.client( + service_name="iam", + aws_access_key_id=self.access_key_id, + aws_secret_access_key=self.secret_access_key, + region_name=self.region, + endpoint_url=self.iam_endpoint, + verify=False, + ) + # since the STS does not have an endpoint, IAM is used + self.boto3_sts_client = self.session.client( + service_name="sts", + aws_access_key_id=self.access_key_id, + aws_secret_access_key=self.secret_access_key, + endpoint_url=iam_endpoint, + region_name=self.region, + verify=False, + ) + + def _to_s3_param(self, param: str) -> str: + replacement_map = { + "Acl": "ACL", + "Cors": "CORS", + "_": "", + } + result = param.title() + for find, replace in replacement_map.items(): + result = result.replace(find, replace) + return result + + def _convert_to_s3_params(self, scope: dict, exclude: Optional[list[str]] = None) -> dict: + exclude = ["self", "cls"] if not exclude else exclude + ["self", "cls"] + return {self._to_s3_param(param): value for param, value in scope.items() if param not in exclude and value is not None} + + def _exec_request(self, method: Callable, params: 
Optional[dict] = None, **kwargs): + if not params: + params = {} + + try: + result = method(**params) + except ClientError as err: + log_command_execution(method.__name__, err.response, params, **kwargs) + raise + + log_command_execution(method.__name__, result, params, **kwargs) + return result + + # BUCKET METHODS # + @reporter.step("Create bucket S3") + def create_bucket( + self, + bucket: Optional[str] = None, + object_lock_enabled_for_bucket: Optional[bool] = None, + acl: Optional[str] = None, + grant_write: Optional[str] = None, + grant_read: Optional[str] = None, + grant_full_control: Optional[str] = None, + location_constraint: Optional[str] = None, + ) -> str: + if bucket is None: + bucket = string_utils.unique_name("bucket-") + + params = {"Bucket": bucket} + if object_lock_enabled_for_bucket is not None: + params.update({"ObjectLockEnabledForBucket": object_lock_enabled_for_bucket}) + + if acl is not None: + params.update({"ACL": acl}) + elif grant_write or grant_read or grant_full_control: + if grant_write: + params.update({"GrantWrite": grant_write}) + elif grant_read: + params.update({"GrantRead": grant_read}) + elif grant_full_control: + params.update({"GrantFullControl": grant_full_control}) + + if location_constraint: + params.update({"CreateBucketConfiguration": {"LocationConstraint": location_constraint}}) + + self._exec_request(self.boto3_client.create_bucket, params, endpoint=self.s3gate_endpoint, profile=self.profile) + return bucket + + @reporter.step("List buckets S3") + def list_buckets(self) -> list[str]: + response = self._exec_request( + self.boto3_client.list_buckets, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return [bucket["Name"] for bucket in response["Buckets"]] + + @reporter.step("Delete bucket S3") + def delete_bucket(self, bucket: str) -> None: + self._exec_request( + self.boto3_client.delete_bucket, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Head bucket S3") + def head_bucket(self, bucket: str) -> None: + self._exec_request( + self.boto3_client.head_bucket, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Put bucket versioning status") + def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: + params = {"Bucket": bucket, "VersioningConfiguration": {"Status": status.value}} + self._exec_request( + self.boto3_client.put_bucket_versioning, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Get bucket versioning status") + def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: + response = self._exec_request( + self.boto3_client.get_bucket_versioning, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return response.get("Status") + + @reporter.step("Put bucket tagging") + def put_bucket_tagging(self, bucket: str, tags: list) -> None: + tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] + tagging = {"TagSet": tags} + params = self._convert_to_s3_params(locals(), exclude=["tags"]) + self._exec_request( + self.boto3_client.put_bucket_tagging, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Get bucket tagging") + def get_bucket_tagging(self, bucket: str) -> list: + response = self._exec_request( + self.boto3_client.get_bucket_tagging, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + 
profile=self.profile, + ) + return response.get("TagSet") + + @reporter.step("Get bucket acl") + def get_bucket_acl(self, bucket: str) -> dict: + return self._exec_request( + self.boto3_client.get_bucket_acl, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Delete bucket tagging") + def delete_bucket_tagging(self, bucket: str) -> None: + self._exec_request( + self.boto3_client.delete_bucket_tagging, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Put bucket ACL") + def put_bucket_acl( + self, + bucket: str, + acl: Optional[str] = None, + grant_write: Optional[str] = None, + grant_read: Optional[str] = None, + ) -> None: + params = self._convert_to_s3_params(locals()) + self._exec_request( + self.boto3_client.put_bucket_acl, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Put object lock configuration") + def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: + params = {"Bucket": bucket, "ObjectLockConfiguration": configuration} + return self._exec_request( + self.boto3_client.put_object_lock_configuration, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Get object lock configuration") + def get_object_lock_configuration(self, bucket: str) -> dict: + response = self._exec_request( + self.boto3_client.get_object_lock_configuration, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return response.get("ObjectLockConfiguration") + + @reporter.step("Get bucket policy") + def get_bucket_policy(self, bucket: str) -> str: + response = self._exec_request( + self.boto3_client.get_bucket_policy, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return response.get("Policy") + + @reporter.step("Delete bucket policy") + def delete_bucket_policy(self, bucket: str) -> str: + return self._exec_request( + self.boto3_client.delete_bucket_policy, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Put bucket policy") + def put_bucket_policy(self, bucket: str, policy: dict) -> None: + params = {"Bucket": bucket, "Policy": json.dumps(policy)} + return self._exec_request( + self.boto3_client.put_bucket_policy, + params, + # Overriding option for AWS CLI + policy=policy, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Get bucket cors") + def get_bucket_cors(self, bucket: str) -> dict: + response = self._exec_request( + self.boto3_client.get_bucket_cors, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return response.get("CORSRules") + + @reporter.step("Get bucket location") + def get_bucket_location(self, bucket: str) -> str: + response = self._exec_request( + self.boto3_client.get_bucket_location, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return response.get("LocationConstraint") + + @reporter.step("Put bucket cors") + def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_client.put_bucket_cors, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Delete bucket cors") + def delete_bucket_cors(self, bucket: str) -> None: + self._exec_request( + 
self.boto3_client.delete_bucket_cors, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Put bucket lifecycle configuration") + def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict: + params = self._convert_to_s3_params(locals(), exclude=["dumped_configuration"]) + return self._exec_request( + self.boto3_client.put_bucket_lifecycle_configuration, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Get bucket lifecycle configuration") + def get_bucket_lifecycle_configuration(self, bucket: str) -> dict: + response = self._exec_request( + self.boto3_client.get_bucket_lifecycle_configuration, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return {"Rules": response.get("Rules")} + + @reporter.step("Delete bucket lifecycle configuration") + def delete_bucket_lifecycle(self, bucket: str) -> dict: + return self._exec_request( + self.boto3_client.delete_bucket_lifecycle, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + # END OF BUCKET METHODS # + # OBJECT METHODS # + + @reporter.step("List objects S3 v2") + def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + response = self._exec_request( + self.boto3_client.list_objects_v2, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + obj_list = [obj["Key"] for obj in response.get("Contents", [])] + logger.info(f"Found s3 objects: {obj_list}") + return response if full_output else obj_list + + @reporter.step("List objects S3") + def list_objects( + self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None + ) -> Union[dict, list[str]]: + params = {"Bucket": bucket} + if page_size: + params["MaxKeys"] = page_size + if prefix: + params["Prefix"] = prefix + response = self._exec_request( + self.boto3_client.list_objects, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + obj_list = [obj["Key"] for obj in response.get("Contents", [])] + logger.info(f"Found s3 objects: {obj_list}") + return response if full_output else obj_list + + @reporter.step("List objects versions S3") + def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: + response = self._exec_request( + self.boto3_client.list_object_versions, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return response if full_output else response.get("Versions", []) + + @reporter.step("List objects delete markers S3") + def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: + response = self._exec_request( + self.boto3_client.list_object_versions, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return response if full_output else response.get("DeleteMarkers", []) + + @reporter.step("Put object S3") + def put_object( + self, + bucket: str, + filepath: str, + key: Optional[str] = None, + metadata: Optional[dict] = None, + tagging: Optional[str] = None, + acl: Optional[str] = None, + object_lock_mode: Optional[str] = None, + object_lock_retain_until_date: Optional[datetime] = None, + object_lock_legal_hold_status: Optional[str] = None, + grant_full_control: Optional[str] = None, + grant_read: Optional[str] = None, + ) -> str: + if key is None: + key = 
os.path.basename(filepath) + + with open(filepath, "rb") as put_file: + body = put_file.read() + + params = self._convert_to_s3_params(locals(), exclude=["filepath", "put_file"]) + response = self._exec_request( + self.boto3_client.put_object, + params, + body=filepath, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return response.get("VersionId") + + @reporter.step("Head object S3") + def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_client.head_object, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Delete object S3") + def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_client.delete_object, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Delete objects S3") + def delete_objects(self, bucket: str, keys: list[str]) -> dict: + params = {"Bucket": bucket, "Delete": _make_objs_dict(keys)} + response = self._exec_request( + self.boto3_client.delete_objects, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + assert ( + "Errors" not in response + ), f'The following objects have not been deleted: {[err_info["Key"] for err_info in response["Errors"]]}.\nError Message: {response["Errors"]["Message"]}' + + return response + + @reporter.step("Delete object versions S3") + def delete_object_versions(self, bucket: str, object_versions: list) -> dict: + # Build deletion list in S3 format + delete_list = { + "Objects": [ + { + "Key": object_version["Key"], + "VersionId": object_version["VersionId"], + } + for object_version in object_versions + ] + } + params = {"Bucket": bucket, "Delete": delete_list} + return self._exec_request( + self.boto3_client.delete_objects, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Delete object versions S3 without delete markers") + def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: + # Delete objects without creating delete markers + for object_version in object_versions: + params = {"Bucket": bucket, "Key": object_version["Key"], "VersionId": object_version["VersionId"]} + self._exec_request( + self.boto3_client.delete_object, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Put object ACL") + def put_object_acl( + self, + bucket: str, + key: str, + acl: Optional[str] = None, + grant_write: Optional[str] = None, + grant_read: Optional[str] = None, + ) -> list: + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_client.put_object_acl, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return response.get("Grants") + + @reporter.step("Get object ACL") + def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_client.get_object_acl, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return response.get("Grants") + + @reporter.step("Copy object S3") + def copy_object( + self, + source_bucket: str, + source_key: str, + bucket: Optional[str] = None, + key: Optional[str] = None, + acl: Optional[str] = None, + metadata_directive: Optional[Literal["COPY", "REPLACE"]] = 
None, + metadata: Optional[dict] = None, + tagging_directive: Optional[Literal["COPY", "REPLACE"]] = None, + tagging: Optional[str] = None, + ) -> str: + if bucket is None: + bucket = source_bucket + + if key is None: + key = string_utils.unique_name("copy-object-") + + copy_source = f"{source_bucket}/{source_key}" + params = self._convert_to_s3_params(locals(), exclude=["source_bucket", "source_key"]) + + self._exec_request( + self.boto3_client.copy_object, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return key + + @reporter.step("Get object S3") + def get_object( + self, + bucket: str, + key: str, + version_id: Optional[str] = None, + object_range: Optional[tuple[int, int]] = None, + full_output: bool = False, + ) -> dict | TestFile: + range_str = None + if object_range: + range_str = f"bytes={object_range[0]}-{object_range[1]}" + + params = locals() + params.update({"Range": f"bytes={object_range[0]}-{object_range[1]}"} if object_range else {}) + params = self._convert_to_s3_params(params, exclude=["object_range", "full_output", "range_str"]) + response = self._exec_request( + self.boto3_client.get_object, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + if full_output: + return response + + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, string_utils.unique_name("dl-object-"))) + with open(test_file, "wb") as file: + chunk = response["Body"].read(1024) + while chunk: + file.write(chunk) + chunk = response["Body"].read(1024) + return test_file + + @reporter.step("Create multipart upload S3") + def create_multipart_upload(self, bucket: str, key: str) -> str: + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_client.create_multipart_upload, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + assert response.get("UploadId"), f"Expected UploadId in response:\n{response}" + return response["UploadId"] + + @reporter.step("List multipart uploads S3") + def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: + response = self._exec_request( + self.boto3_client.list_multipart_uploads, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return response.get("Uploads") + + @reporter.step("Abort multipart upload S3") + def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: + params = self._convert_to_s3_params(locals()) + self._exec_request( + self.boto3_client.abort_multipart_upload, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Upload part S3") + def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: + with open(filepath, "rb") as put_file: + body = put_file.read() + + params = self._convert_to_s3_params(locals(), exclude=["put_file", "part_num", "filepath"]) + params["PartNumber"] = part_num + + response = self._exec_request( + self.boto3_client.upload_part, + params, + body=filepath, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + assert response.get("ETag"), f"Expected ETag in response:\n{response}" + return response["ETag"] + + @reporter.step("Upload copy part S3") + def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: + params = self._convert_to_s3_params(locals(), exclude=["put_file", "part_num", "filepath"]) + params["PartNumber"] = part_num + response = self._exec_request( + self.boto3_client.upload_part_copy, + params, + 
endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + assert response.get("CopyPartResult", []).get("ETag"), f"Expected ETag in response:\n{response}" + return response["CopyPartResult"]["ETag"] + + @reporter.step("List parts S3") + def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_client.list_parts, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + assert response.get("Parts"), f"Expected Parts in response:\n{response}" + return response["Parts"] + + @reporter.step("Complete multipart upload S3") + def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict: + parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts] + params = self._convert_to_s3_params(locals(), exclude=["parts"]) + params["MultipartUpload"] = {"Parts": parts} + return self._exec_request( + self.boto3_client.complete_multipart_upload, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Put object retention") + def put_object_retention( + self, + bucket: str, + key: str, + retention: dict, + version_id: Optional[str] = None, + bypass_governance_retention: Optional[bool] = None, + ) -> None: + params = self._convert_to_s3_params(locals()) + self._exec_request( + self.boto3_client.put_object_retention, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Put object legal hold") + def put_object_legal_hold( + self, + bucket: str, + key: str, + legal_hold_status: Literal["ON", "OFF"], + version_id: Optional[str] = None, + ) -> None: + legal_hold = {"Status": legal_hold_status} + params = self._convert_to_s3_params(locals(), exclude=["legal_hold_status"]) + self._exec_request( + self.boto3_client.put_object_legal_hold, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Put object tagging") + def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = "") -> None: + tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] + tagging = {"TagSet": tags} + params = self._convert_to_s3_params(locals(), exclude=["tags"]) + self._exec_request( + self.boto3_client.put_object_tagging, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Get object tagging") + def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_client.get_object_tagging, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return response.get("TagSet") + + @reporter.step("Delete object tagging") + def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None: + params = self._convert_to_s3_params(locals()) + self._exec_request( + self.boto3_client.delete_object_tagging, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + + @reporter.step("Get object attributes") + def get_object_attributes( + self, + bucket: str, + key: str, + attributes: list[str], + version_id: Optional[str] = None, + max_parts: Optional[int] = None, + part_number: Optional[int] = None, + full_output: bool = True, + ) -> dict: + logger.warning("Method get_object_attributes is not supported by boto3 client") + return {} + + @reporter.step("Sync directory S3") + def sync( + self, + bucket: 
str, + dir_path: str, + acl: Optional[str] = None, + metadata: Optional[dict] = None, + ) -> dict: + raise NotImplementedError("Sync is not supported for boto3 client") + + @reporter.step("CP directory S3") + def cp( + self, + bucket: str, + dir_path: str, + acl: Optional[str] = None, + metadata: Optional[dict] = None, + ) -> dict: + raise NotImplementedError("Cp is not supported for boto3 client") + + @reporter.step("Create presign url for the object") + def create_presign_url(self, method: str, bucket: str, key: str, expires_in: Optional[int] = 3600) -> str: + response = self._exec_request( + method=self.boto3_client.generate_presigned_url, + params={"ClientMethod": method, "Params": {"Bucket": bucket, "Key": key}, "ExpiresIn": expires_in}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return response + + # END OBJECT METHODS # + + # IAM METHODS # + # Some methods don't have checks because boto3 is silent in some cases (delete, attach, etc.) + + @reporter.step("Adds the specified user to the specified group") + def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict: + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.add_user_to_group, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + @reporter.step("Attaches the specified managed policy to the specified IAM group") + def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict: + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_iam_client.attach_group_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + sleep(S3_SYNC_WAIT_TIME * 14) + return response + + @reporter.step("Attaches the specified managed policy to the specified user") + def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict: + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_iam_client.attach_user_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + sleep(S3_SYNC_WAIT_TIME * 14) + return response + + @reporter.step("Creates a new AWS secret access key and access key ID for the specified user") + def iam_create_access_key(self, user_name: str) -> dict: + response = self._exec_request( + self.boto3_iam_client.create_access_key, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + access_key_id = response["AccessKey"].get("AccessKeyId") + secret_access_key = response["AccessKey"].get("SecretAccessKey") + assert access_key_id, f"Expected AccessKeyId in response:\n{response}" + assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}" + + return access_key_id, secret_access_key + + @reporter.step("Creates a new group") + def iam_create_group(self, group_name: str) -> dict: + response = self._exec_request( + self.boto3_iam_client.create_group, + params={"GroupName": group_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + assert response.get("Group"), f"Expected Group in response:\n{response}" + assert response["Group"].get("GroupName") == group_name, f"GroupName should be equal to {group_name}" + + return response + + @reporter.step("Creates a new managed policy for your AWS account") + def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict: + params = self._convert_to_s3_params(locals()) + params["PolicyDocument"] = json.dumps(policy_document) + response = self._exec_request( + 
self.boto3_iam_client.create_policy, + params, + # Overriding option for AWS CLI + policy_document=policy_document, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + assert response.get("Policy"), f"Expected Policy in response:\n{response}" + assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}" + + return response + + @reporter.step("Creates a new IAM user for your AWS account") + def iam_create_user(self, user_name: str) -> dict: + response = self._exec_request( + self.boto3_iam_client.create_user, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + assert response.get("User"), f"Expected User in response:\n{response}" + assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" + + return response + + @reporter.step("Deletes the access key pair associated with the specified IAM user") + def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict: + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.delete_access_key, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + @reporter.step("Deletes the specified IAM group") + def iam_delete_group(self, group_name: str) -> dict: + return self._exec_request( + self.boto3_iam_client.delete_group, + params={"GroupName": group_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group") + def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict: + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.delete_group_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + @reporter.step("Deletes the specified managed policy") + def iam_delete_policy(self, policy_arn: str) -> dict: + return self._exec_request( + self.boto3_iam_client.delete_policy, + params={"PolicyArn": policy_arn}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + @reporter.step("Deletes the specified IAM user") + def iam_delete_user(self, user_name: str) -> dict: + return self._exec_request( + self.boto3_iam_client.delete_user, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user") + def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict: + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.delete_user_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + @reporter.step("Removes the specified managed policy from the specified IAM group") + def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict: + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_iam_client.detach_group_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + sleep(S3_SYNC_WAIT_TIME * 14) + return response + + @reporter.step("Removes the specified managed policy from the specified user") + def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict: + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_iam_client.detach_user_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + sleep(S3_SYNC_WAIT_TIME 
* 14) + return response + + @reporter.step("Returns a list of IAM users that are in the specified IAM group") + def iam_get_group(self, group_name: str) -> dict: + response = self._exec_request( + self.boto3_iam_client.get_group, + params={"GroupName": group_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + assert response.get("Group").get("GroupName") == group_name, f"GroupName should be equal to {group_name}" + return response + + @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group") + def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict: + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.get_group_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + @reporter.step("Retrieves information about the specified managed policy") + def iam_get_policy(self, policy_arn: str) -> dict: + response = self._exec_request( + self.boto3_iam_client.get_policy, + params={"PolicyArn": policy_arn}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + assert response.get("Policy"), f"Expected Policy in response:\n{response}" + assert response["Policy"].get("Arn") == policy_arn, f"PolicyArn should be equal to {policy_arn}" + + return response + + @reporter.step("Retrieves information about the specified version of the specified managed policy") + def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict: + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_iam_client.get_policy_version, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + assert response.get("PolicyVersion"), f"Expected PolicyVersion in response:\n{response}" + assert response["PolicyVersion"].get("VersionId") == version_id, f"VersionId should be equal to {version_id}" + + return response + + @reporter.step("Retrieves information about the specified IAM user") + def iam_get_user(self, user_name: str) -> dict: + response = self._exec_request( + self.boto3_iam_client.get_user, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + assert response.get("User"), f"Expected User in response:\n{response}" + assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" + + return response + + @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user") + def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_iam_client.get_user_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + assert response.get("UserName"), f"Expected UserName in response:\n{response}" + return response + + @reporter.step("Returns information about the access key IDs associated with the specified IAM user") + def iam_list_access_keys(self, user_name: str) -> dict: + return self._exec_request( + self.boto3_iam_client.list_access_keys, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + @reporter.step("Lists all managed policies that are attached to the specified IAM group") + def iam_list_attached_group_policies(self, group_name: str) -> dict: + response = self._exec_request( + self.boto3_iam_client.list_attached_group_policies, + params={"GroupName": group_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + assert 
"AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" + return response + + @reporter.step("Lists all managed policies that are attached to the specified IAM user") + def iam_list_attached_user_policies(self, user_name: str) -> dict: + response = self._exec_request( + self.boto3_iam_client.list_attached_user_policies, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" + return response + + @reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to") + def iam_list_entities_for_policy(self, policy_arn: str) -> dict: + response = self._exec_request( + self.boto3_iam_client.list_entities_for_policy, + params={"PolicyArn": policy_arn}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + assert response.get("PolicyGroups"), f"Expected PolicyGroups in response:\n{response}" + assert response.get("PolicyUsers"), f"Expected PolicyUsers in response:\n{response}" + + return response + + @reporter.step("Lists the names of the inline policies that are embedded in the specified IAM group") + def iam_list_group_policies(self, group_name: str) -> dict: + response = self._exec_request( + self.boto3_iam_client.list_group_policies, + params={"GroupName": group_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" + return response + + @reporter.step("Lists the IAM groups") + def iam_list_groups(self) -> dict: + response = self._exec_request( + self.boto3_iam_client.list_groups, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" + return response + + @reporter.step("Lists the IAM groups that the specified IAM user belongs to") + def iam_list_groups_for_user(self, user_name: str) -> dict: + response = self._exec_request( + self.boto3_iam_client.list_groups_for_user, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" + return response + + @reporter.step("Lists all the managed policies that are available in your AWS account") + def iam_list_policies(self) -> dict: + response = self._exec_request( + self.boto3_iam_client.list_policies, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + assert response.get("Policies"), f"Expected Policies in response:\n{response}" + return response + + @reporter.step("Lists information about the versions of the specified managed policy") + def iam_list_policy_versions(self, policy_arn: str) -> dict: + response = self._exec_request( + self.boto3_iam_client.list_policy_versions, + params={"PolicyArn": policy_arn}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + assert response.get("Versions"), f"Expected Versions in response:\n{response}" + return response + + @reporter.step("Lists the names of the inline policies embedded in the specified IAM user") + def iam_list_user_policies(self, user_name: str) -> dict: + response = self._exec_request( + self.boto3_iam_client.list_user_policies, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" + return response + + @reporter.step("Lists the IAM 
users") + def iam_list_users(self) -> dict: + response = self._exec_request( + self.boto3_iam_client.list_users, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + assert response.get("Users"), f"Expected Users in response:\n{response}" + return response + + @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group") + def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: + params = self._convert_to_s3_params(locals()) + params["PolicyDocument"] = json.dumps(policy_document) + response = self._exec_request( + self.boto3_iam_client.put_group_policy, + params, + # Overriding option for AWS CLI + policy_document=policy_document, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + sleep(S3_SYNC_WAIT_TIME * 14) + return response + + @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user") + def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict: + params = self._convert_to_s3_params(locals()) + params["PolicyDocument"] = json.dumps(policy_document) + response = self._exec_request( + self.boto3_iam_client.put_user_policy, + params, + # Overriding option for AWS CLI + policy_document=policy_document, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + sleep(S3_SYNC_WAIT_TIME * 14) + return response + + @reporter.step("Removes the specified user from the specified group") + def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.remove_user_from_group, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + @reporter.step("Updates the name and/or the path of the specified IAM group") + def iam_update_group(self, group_name: str, new_name: str, new_path: Optional[str] = None) -> dict: + params = {"GroupName": group_name, "NewGroupName": new_name, "NewPath": "/"} + return self._exec_request( + self.boto3_iam_client.update_group, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + @reporter.step("Updates the name and/or the path of the specified IAM user") + def iam_update_user(self, user_name: str, new_name: str, new_path: Optional[str] = None) -> dict: + params = {"UserName": user_name, "NewUserName": new_name, "NewPath": "/"} + return self._exec_request( + self.boto3_iam_client.update_user, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + @reporter.step("Adds one or more tags to an IAM user") + def iam_tag_user(self, user_name: str, tags: list) -> dict: + params = self._convert_to_s3_params(locals()) + params["Tags"] = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] + return self._exec_request( + self.boto3_iam_client.tag_user, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + @reporter.step("List tags of IAM user") + def iam_list_user_tags(self, user_name: str) -> dict: + return self._exec_request( + self.boto3_iam_client.list_user_tags, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + @reporter.step("Removes the specified tags from the user") + def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.untag_user, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) + + # MFA methods + @reporter.step("Creates a 
new virtual MFA device") + def iam_create_virtual_mfa_device( + self, virtual_mfa_device_name: str, outfile: Optional[str] = None, bootstrap_method: Optional[str] = None + ) -> tuple: + response = self.boto3_iam_client.create_virtual_mfa_device(VirtualMFADeviceName=virtual_mfa_device_name) + + serial_number = response.get("VirtualMFADevice", {}).get("SerialNumber") + base32StringSeed = response.get("VirtualMFADevice", {}).get("Base32StringSeed") + assert serial_number, f"Expected SerialNumber in response:\n{response}" + assert base32StringSeed, f"Expected Base32StringSeed in response:\n{response}" + + return serial_number, base32StringSeed + + @reporter.step("Deactivates the specified MFA device and removes it from association with the user name") + def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict: + response = self.boto3_iam_client.deactivate_mfa_device(UserName=user_name, SerialNumber=serial_number) + + return response + + @reporter.step("Deletes a virtual MFA device") + def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict: + response = self.boto3_iam_client.delete_virtual_mfa_device(SerialNumber=serial_number) + + return response + + @reporter.step("Enables the specified MFA device and associates it with the specified IAM user") + def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict: + response = self.boto3_iam_client.enable_mfa_device( + UserName=user_name, + SerialNumber=serial_number, + AuthenticationCode1=authentication_code1, + AuthenticationCode2=authentication_code2, + ) + + return response + + @reporter.step("Lists the MFA devices for an IAM user") + def iam_list_virtual_mfa_devices(self) -> dict: + response = self.boto3_iam_client.list_virtual_mfa_devices() + assert response.get("VirtualMFADevices"), f"Expected VirtualMFADevices in response:\n{response}" + + return response + + @reporter.step("Get session token for user") + def sts_get_session_token( + self, duration_seconds: Optional[str] = "", serial_number: Optional[str] = "", token_code: Optional[str] = "" + ) -> tuple: + response = self.boto3_sts_client.get_session_token( + DurationSeconds=duration_seconds, + SerialNumber=serial_number, + TokenCode=token_code, + ) + + access_key = response.get("Credentials", {}).get("AccessKeyId") + secret_access_key = response.get("Credentials", {}).get("SecretAccessKey") + session_token = response.get("Credentials", {}).get("SessionToken") + assert access_key, f"Expected AccessKeyId in response:\n{response}" + assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}" + assert session_token, f"Expected SessionToken in response:\n{response}" + + return access_key, secret_access_key, session_token diff --git a/src/frostfs_testlib/clients/s3/curl_bucket_resolver.py b/src/frostfs_testlib/clients/s3/curl_bucket_resolver.py new file mode 100644 index 00000000..4d845cf0 --- /dev/null +++ b/src/frostfs_testlib/clients/s3/curl_bucket_resolver.py @@ -0,0 +1,16 @@ +import re + +from frostfs_testlib.cli.generic_cli import GenericCli +from frostfs_testlib.clients.s3 import BucketContainerResolver +from frostfs_testlib.storage.cluster import ClusterNode + + +class CurlBucketContainerResolver(BucketContainerResolver): + def resolve(self, node: ClusterNode, bucket_name: str, **kwargs: dict) -> str: + curl = GenericCli("curl", node.host) + output = curl(f"-I http://127.0.0.1:8084/{bucket_name}") + pattern = r"X-Container-Id: (\S+)" + cid = re.findall(pattern, 
output.stdout) + if cid: + return cid[0] + return None diff --git a/src/frostfs_testlib/clients/s3/interfaces.py b/src/frostfs_testlib/clients/s3/interfaces.py new file mode 100644 index 00000000..0d03a287 --- /dev/null +++ b/src/frostfs_testlib/clients/s3/interfaces.py @@ -0,0 +1,623 @@ +from abc import ABC, abstractmethod +from datetime import datetime +from typing import Literal, Optional, Union + +from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.testing.readable import HumanReadableABC, HumanReadableEnum +from frostfs_testlib.utils.file_utils import TestFile + + +def _make_objs_dict(key_names): + objs_list = [] + for key in key_names: + obj_dict = {"Key": key} + objs_list.append(obj_dict) + objs_dict = {"Objects": objs_list} + return objs_dict + + +class VersioningStatus(HumanReadableEnum): + UNDEFINED = None + ENABLED = "Enabled" + SUSPENDED = "Suspended" + + +class ACL: + PRIVATE = "private" + PUBLIC_READ = "public-read" + PUBLIC_READ_WRITE = "public-read-write" + AUTHENTICATED_READ = "authenticated-read" + AWS_EXEC_READ = "aws-exec-read" + BUCKET_OWNER_READ = "bucket-owner-read" + BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control" + LOG_DELIVERY_WRITE = "log-delivery-write" + + +class BucketContainerResolver(ABC): + @abstractmethod + def resolve(self, node: ClusterNode, bucket_name: str, **kwargs: dict) -> str: + """ + Resolve Container ID from bucket name + + Args: + node: node from where we want to resolve + bucket_name: name of the bucket + **kwargs: any other required params + + Returns: Container ID + """ + raise NotImplementedError("Call from abstract class") + + +class S3ClientWrapper(HumanReadableABC): + access_key_id: str + secret_access_key: str + profile: str + region: str + + s3gate_endpoint: str + iam_endpoint: str + + @abstractmethod + def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str, region: str) -> None: + pass + + @abstractmethod + def set_endpoint(self, s3gate_endpoint: str): + """Set endpoint""" + + @abstractmethod + def set_iam_endpoint(self, iam_endpoint: str): + """Set iam endpoint""" + + @abstractmethod + def create_bucket( + self, + bucket: Optional[str] = None, + object_lock_enabled_for_bucket: Optional[bool] = None, + acl: Optional[str] = None, + grant_write: Optional[str] = None, + grant_read: Optional[str] = None, + grant_full_control: Optional[str] = None, + location_constraint: Optional[str] = None, + ) -> str: + """Create a bucket.""" + + # BUCKET METHODS # + + @abstractmethod + def list_buckets(self) -> list[str]: + """List buckets.""" + + @abstractmethod + def delete_bucket(self, bucket: str) -> None: + """Delete bucket""" + + @abstractmethod + def head_bucket(self, bucket: str) -> None: + """This action is useful to determine if a bucket exists and you have permission to access it. + The action returns a 200 OK if the bucket exists and you have permission to access it. + + If the bucket does not exist or you do not have permission to access it, the HEAD request + returns a generic 400 Bad Request, 403 Forbidden or 404 Not Found code. + A message body is not included, so you cannot determine the exception beyond these error codes. + """ + + @abstractmethod + def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: + """Sets the versioning state of an existing bucket. + + You can set the versioning state with one of the following values: + + Enabled—Enables versioning for the objects in the bucket. 
All objects added to the bucket receive a unique version ID. + + Suspended—Disables versioning for the objects in the bucket. All objects added to the bucket receive the version ID null. + + If the versioning state has never been set on a bucket, it has no versioning state + """ + + @abstractmethod + def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: + """Returns the versioning state of a bucket. + + To retrieve the versioning state of a bucket, you must be the bucket owner. + """ + + @abstractmethod + def put_bucket_tagging(self, bucket: str, tags: list) -> None: + """Sets the tags for a bucket.""" + + @abstractmethod + def get_bucket_tagging(self, bucket: str) -> list: + """Returns the tag set associated with the Outposts bucket.""" + + @abstractmethod + def delete_bucket_tagging(self, bucket: str) -> None: + """Deletes the tags from the bucket.""" + + @abstractmethod + def get_bucket_acl(self, bucket: str) -> dict: + """This implementation of the GET action uses the acl subresource to return the access control list (ACL) of a bucket.""" + + @abstractmethod + def put_bucket_acl( + self, + bucket: str, + acl: Optional[str] = None, + grant_write: Optional[str] = None, + grant_read: Optional[str] = None, + ) -> list: + """Sets the permissions on an existing bucket using access control lists (ACL).""" + + @abstractmethod + def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: + """Places an Object Lock configuration on the specified bucket. + The rule specified in the Object Lock configuration will be applied by + default to every new object placed in the specified bucket.""" + + @abstractmethod + def get_object_lock_configuration(self, bucket: str) -> dict: + """Gets the Object Lock configuration for a bucket. + The rule specified in the Object Lock configuration will be applied by + default to every new object placed in the specified bucket.""" + + @abstractmethod + def get_bucket_policy(self, bucket: str) -> str: + """Returns the policy of a specified bucket.""" + + @abstractmethod + def delete_bucket_policy(self, bucket: str) -> str: + """Deletes the policy of a specified bucket.""" + + @abstractmethod + def put_bucket_policy(self, bucket: str, policy: dict) -> None: + """Applies S3 bucket policy to an S3 bucket.""" + + @abstractmethod + def get_bucket_cors(self, bucket: str) -> dict: + """Returns the Cross-Origin Resource Sharing (CORS) configuration information set for the bucket.""" + + @abstractmethod + def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: + """Sets the cors configuration for your bucket. If the configuration exists, S3 replaces it.""" + + @abstractmethod + def delete_bucket_cors(self, bucket: str) -> None: + """Deletes the cors configuration information set for the bucket.""" + + @abstractmethod + def get_bucket_location(self, bucket: str) -> str: + """Returns the LocationConstraint the bucket resides in. You can set the it + using the LocationConstraint request parameter in a CreateBucket request.""" + + # END OF BUCKET METHODS # + + # OBJECT METHODS # + + @abstractmethod + def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + """Returns some or all (up to 1,000) of the objects in a bucket with each request. + You can use the request parameters as selection criteria to return a subset of the objects in a bucket. + A 200 OK response can contain valid or invalid XML. 
Make sure to design your application + to parse the contents of the response and handle it appropriately. + """ + + @abstractmethod + def list_objects( + self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None + ) -> Union[dict, list[str]]: + """Returns some or all (up to 1,000) of the objects in a bucket with each request. + You can use the request parameters as selection criteria to return a subset of the objects in a bucket. + A 200 OK response can contain valid or invalid XML. Make sure to design your application + to parse the contents of the response and handle it appropriately. + """ + + @abstractmethod + def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: + """Returns metadata about all versions of the objects in a bucket.""" + + @abstractmethod + def list_delete_markers(self, bucket: str, full_output: bool = False) -> dict: + """Returns metadata about all delete markers of the objects in a bucket.""" + + @abstractmethod + def put_object( + self, + bucket: str, + filepath: str, + key: Optional[str] = None, + metadata: Optional[dict] = None, + tagging: Optional[str] = None, + acl: Optional[str] = None, + object_lock_mode: Optional[str] = None, + object_lock_retain_until_date: Optional[datetime] = None, + object_lock_legal_hold_status: Optional[str] = None, + grant_full_control: Optional[str] = None, + grant_read: Optional[str] = None, + ) -> str: + """Adds an object to a bucket.""" + + @abstractmethod + def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: + """The HEAD action retrieves metadata from an object without returning the object itself. + This action is useful if you're only interested in an object's metadata.""" + + @abstractmethod + def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: + """Removes the null version (if there is one) of an object and inserts a delete marker, + which becomes the latest version of the object. If there isn't a null version, + S3 does not remove any objects but will still respond that the command was successful.""" + + @abstractmethod + def delete_objects(self, bucket: str, keys: list[str]) -> dict: + """This action enables you to delete multiple objects from a bucket + using a single HTTP request. If you know the object keys that + you want to delete, then this action provides a suitable alternative + to sending individual delete requests, reducing per-request overhead. 
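A minimal usage sketch, assuming an S3ClientWrapper implementation is available as s3_client (for example the Boto3ClientWrapper above) together with hypothetical bucket and filepaths test fixtures:

    keys = [f"obj-{i}" for i, _ in enumerate(filepaths)]
    for key, filepath in zip(keys, filepaths):
        s3_client.put_object(bucket, filepath, key=key)

    # A single DeleteObjects request removes the whole batch instead of N DeleteObject calls
    s3_client.delete_objects(bucket, keys)
    assert not s3_client.list_objects(bucket), "Bucket is expected to be empty after bulk delete"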
+ + The request contains a list of up to 1000 keys that you want to delete.""" + + @abstractmethod + def delete_object_versions(self, bucket: str, object_versions: list) -> dict: + """Delete object versions""" + + @abstractmethod + def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: + """Delete object versions without delete markers""" + + @abstractmethod + def put_object_acl( + self, + bucket: str, + key: str, + acl: Optional[str] = None, + grant_write: Optional[str] = None, + grant_read: Optional[str] = None, + ) -> list: + """Uses the acl subresource to set the access control + list (ACL) permissions for a new or existing object in an S3 bucket.""" + + @abstractmethod + def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: + """Returns the access control list (ACL) of an object.""" + + @abstractmethod + def copy_object( + self, + source_bucket: str, + source_key: str, + bucket: Optional[str] = None, + key: Optional[str] = None, + acl: Optional[str] = None, + metadata_directive: Optional[Literal["COPY", "REPLACE"]] = None, + metadata: Optional[dict] = None, + tagging_directive: Optional[Literal["COPY", "REPLACE"]] = None, + tagging: Optional[str] = None, + ) -> str: + """Creates a copy of an object""" + + @abstractmethod + def get_object( + self, + bucket: str, + key: str, + version_id: Optional[str] = None, + object_range: Optional[tuple[int, int]] = None, + full_output: bool = False, + ) -> dict | TestFile: + """Retrieves objects from S3.""" + + @abstractmethod + def create_multipart_upload(self, bucket: str, key: str) -> str: + """This action initiates a multipart upload and returns an upload ID. + This upload ID is used to associate all of the parts in the specific multipart upload. + You specify this upload ID in each of your subsequent upload part requests (see UploadPart). + You also include this upload ID in the final request to either complete or abort the multipart upload request.""" + + @abstractmethod + def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: + """This action lists in-progress multipart uploads. + An in-progress multipart upload is a multipart upload that has been initiated + using the Initiate Multipart Upload request, but has not yet been completed or aborted. + + This action returns at most 1,000 multipart uploads in the response.""" + + @abstractmethod + def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: + """This action aborts a multipart upload. After a multipart upload is aborted, + no additional parts can be uploaded using that upload ID. + The storage consumed by any previously uploaded parts will be freed. + However, if any part uploads are currently in progress, those part + uploads might or might not succeed. 
As a result, it might be necessary to + abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.""" + + @abstractmethod + def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: + """Uploads a part in a multipart upload.""" + + @abstractmethod + def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: + """Uploads a part by copying data from an existing object as data source.""" + + @abstractmethod + def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: + """Lists the parts that have been uploaded for a specific multipart upload.""" + + @abstractmethod + def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict: + """Completes a multipart upload by assembling previously uploaded parts.""" + + @abstractmethod + def put_object_retention( + self, + bucket: str, + key: str, + retention: dict, + version_id: Optional[str] = None, + bypass_governance_retention: Optional[bool] = None, + ) -> None: + """Places an Object Retention configuration on an object.""" + + @abstractmethod + def put_object_legal_hold( + self, + bucket: str, + key: str, + legal_hold_status: Literal["ON", "OFF"], + version_id: Optional[str] = None, + ) -> None: + """Applies a legal hold configuration to the specified object.""" + + @abstractmethod + def put_object_tagging(self, bucket: str, key: str, tags: list) -> None: + """Sets the tag-set for an object.""" + + @abstractmethod + def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: + """Returns the tag-set of an object.""" + + @abstractmethod + def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None: + """Removes the entire tag set from the specified object.""" + + @abstractmethod + def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict: + """Adds or updates bucket lifecycle configuration""" + + @abstractmethod + def get_bucket_lifecycle_configuration(self, bucket: str) -> dict: + """Gets bucket lifecycle configuration""" + + @abstractmethod + def delete_bucket_lifecycle(self, bucket: str) -> dict: + """Deletes bucket lifecycle""" + + @abstractmethod + def get_object_attributes( + self, + bucket: str, + key: str, + attributes: list[str], + version_id: str = "", + max_parts: int = 0, + part_number: int = 0, + full_output: bool = True, + ) -> dict: + """Retrieves all the metadata from an object without returning the object itself.""" + + @abstractmethod + def sync( + self, + bucket: str, + dir_path: str, + acl: Optional[str] = None, + metadata: Optional[dict] = None, + ) -> dict: + """sync directory TODO: Add proper description""" + + @abstractmethod + def cp( + self, + bucket: str, + dir_path: str, + acl: Optional[str] = None, + metadata: Optional[dict] = None, + ) -> dict: + """cp directory TODO: Add proper description""" + + @abstractmethod + def create_presign_url(self, method: str, bucket: str, key: str, expires_in: Optional[int] = 3600) -> str: + """Creates presign URL""" + + # END OF OBJECT METHODS # + + # IAM METHODS # + + @abstractmethod + def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict: + """Adds the specified user to the specified group""" + + @abstractmethod + def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict: + """Attaches the specified managed policy to the 
specified IAM group""" + + @abstractmethod + def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict: + """Attaches the specified managed policy to the specified user""" + + @abstractmethod + def iam_create_access_key(self, user_name: str) -> dict: + """Creates a new AWS secret access key and access key ID for the specified user""" + + @abstractmethod + def iam_create_group(self, group_name: str) -> dict: + """Creates a new group""" + + @abstractmethod + def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict: + """Creates a new managed policy for your AWS account""" + + @abstractmethod + def iam_create_user(self, user_name: str) -> dict: + """Creates a new IAM user for your AWS account""" + + @abstractmethod + def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict: + """Deletes the access key pair associated with the specified IAM user""" + + @abstractmethod + def iam_delete_group(self, group_name: str) -> dict: + """Deletes the specified IAM group""" + + @abstractmethod + def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict: + """Deletes the specified inline policy that is embedded in the specified IAM group""" + + @abstractmethod + def iam_delete_policy(self, policy_arn: str) -> dict: + """Deletes the specified managed policy""" + + @abstractmethod + def iam_delete_user(self, user_name: str) -> dict: + """Deletes the specified IAM user""" + + @abstractmethod + def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict: + """Deletes the specified inline policy that is embedded in the specified IAM user""" + + @abstractmethod + def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict: + """Removes the specified managed policy from the specified IAM group""" + + @abstractmethod + def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict: + """Removes the specified managed policy from the specified user""" + + @abstractmethod + def iam_get_group(self, group_name: str) -> dict: + """Returns a list of IAM users that are in the specified IAM group""" + + @abstractmethod + def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict: + """Retrieves the specified inline policy document that is embedded in the specified IAM group""" + + @abstractmethod + def iam_get_policy(self, policy_arn: str) -> dict: + """Retrieves information about the specified managed policy""" + + @abstractmethod + def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict: + """Retrieves information about the specified version of the specified managed policy""" + + @abstractmethod + def iam_get_user(self, user_name: str) -> dict: + """Retrieves information about the specified IAM user""" + + @abstractmethod + def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: + """Retrieves the specified inline policy document that is embedded in the specified IAM user""" + + @abstractmethod + def iam_list_access_keys(self, user_name: str) -> dict: + """Returns information about the access key IDs associated with the specified IAM user""" + + @abstractmethod + def iam_list_attached_group_policies(self, group_name: str) -> dict: + """Lists all managed policies that are attached to the specified IAM group""" + + @abstractmethod + def iam_list_attached_user_policies(self, user_name: str) -> dict: + """Lists all managed policies that are attached to the specified IAM user""" + + @abstractmethod + def iam_list_entities_for_policy(self, policy_arn: str) -> 
dict: + """Lists all IAM users, groups, and roles that the specified managed policy is attached to""" + + @abstractmethod + def iam_list_group_policies(self, group_name: str) -> dict: + """Lists the names of the inline policies that are embedded in the specified IAM group""" + + @abstractmethod + def iam_list_groups(self) -> dict: + """Lists the IAM groups""" + + @abstractmethod + def iam_list_groups_for_user(self, user_name: str) -> dict: + """Lists the IAM groups that the specified IAM user belongs to""" + + @abstractmethod + def iam_list_policies(self) -> dict: + """Lists all the managed policies that are available in your AWS account""" + + @abstractmethod + def iam_list_policy_versions(self, policy_arn: str) -> dict: + """Lists information about the versions of the specified managed policy""" + + @abstractmethod + def iam_list_user_policies(self, user_name: str) -> dict: + """Lists the names of the inline policies embedded in the specified IAM user""" + + @abstractmethod + def iam_list_users(self) -> dict: + """Lists the IAM users""" + + @abstractmethod + def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: + """Adds or updates an inline policy document that is embedded in the specified IAM group""" + + @abstractmethod + def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict: + """Adds or updates an inline policy document that is embedded in the specified IAM user""" + + @abstractmethod + def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: + """Removes the specified user from the specified group""" + + @abstractmethod + def iam_update_group(self, group_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: + """Updates the name and/or the path of the specified IAM group""" + + @abstractmethod + def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: + """Updates the name and/or the path of the specified IAM user""" + + @abstractmethod + def iam_tag_user(self, user_name: str, tags: list) -> dict: + """Adds one or more tags to an IAM user""" + + @abstractmethod + def iam_list_user_tags(self, user_name: str) -> dict: + """List tags of IAM user""" + + @abstractmethod + def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: + """Removes the specified tags from the user""" + + # MFA methods + @abstractmethod + def iam_create_virtual_mfa_device( + self, virtual_mfa_device_name: str, outfile: Optional[str] = None, bootstrap_method: Optional[str] = None + ) -> tuple: + """Creates a new virtual MFA device""" + + @abstractmethod + def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict: + """Deactivates the specified MFA device and removes it from association with the user name""" + + @abstractmethod + def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict: + """Deletes a virtual MFA device""" + + @abstractmethod + def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict: + """Enables the specified MFA device and associates it with the specified IAM user""" + + @abstractmethod + def iam_list_virtual_mfa_devices(self) -> dict: + """Lists the MFA devices for an IAM user""" + + @abstractmethod + def sts_get_session_token( + self, duration_seconds: Optional[str] = None, serial_number: Optional[str] = None, token_code: Optional[str] = None + ) -> tuple: + """Get session token for 
user""" diff --git a/src/frostfs_testlib/clients/s3/s3_http_client.py b/src/frostfs_testlib/clients/s3/s3_http_client.py new file mode 100644 index 00000000..f6f423d2 --- /dev/null +++ b/src/frostfs_testlib/clients/s3/s3_http_client.py @@ -0,0 +1,149 @@ +import hashlib +import logging +import xml.etree.ElementTree as ET + +import httpx +from botocore.auth import SigV4Auth +from botocore.awsrequest import AWSRequest +from botocore.credentials import Credentials + +from frostfs_testlib import reporter +from frostfs_testlib.clients import HttpClient +from frostfs_testlib.utils.file_utils import TestFile + +logger = logging.getLogger("NeoLogger") + +DEFAULT_TIMEOUT = 60.0 + + +class S3HttpClient: + def __init__( + self, s3gate_endpoint: str, access_key_id: str, secret_access_key: str, profile: str = "default", region: str = "us-east-1" + ) -> None: + self.http_client = HttpClient() + self.credentials = Credentials(access_key_id, secret_access_key) + self.profile = profile + self.region = region + + self.iam_endpoint: str = None + self.s3gate_endpoint: str = None + self.service: str = None + self.signature: SigV4Auth = None + + self.set_endpoint(s3gate_endpoint) + + def _to_s3_header(self, header: str) -> dict: + replacement_map = { + "Acl": "ACL", + "_": "-", + } + + result = header + if not header.startswith("x_amz"): + result = header.title() + + for find, replace in replacement_map.items(): + result = result.replace(find, replace) + + return result + + def _convert_to_s3_headers(self, scope: dict, exclude: list[str] = None): + exclude = ["self", "cls"] if not exclude else exclude + ["self", "cls"] + return {self._to_s3_header(header): value for header, value in scope.items() if header not in exclude and value is not None} + + def _create_aws_request( + self, method: str, url: str, headers: dict, content: str | bytes | TestFile = None, params: dict = None + ) -> AWSRequest: + data = b"" + + if content is not None: + if isinstance(content, TestFile): + with open(content, "rb") as io_content: + data = io_content.read() + elif isinstance(content, str): + data = bytes(content, encoding="utf-8") + elif isinstance(content, bytes): + data = content + else: + raise TypeError(f"Content expected as a string, bytes or TestFile object, got: {content}") + + headers["X-Amz-Content-SHA256"] = hashlib.sha256(data).hexdigest() + aws_request = AWSRequest(method, url, headers, data, params) + self.signature.add_auth(aws_request) + + return aws_request + + def _exec_request( + self, + method: str, + url: str, + headers: dict, + content: str | bytes | TestFile = None, + params: dict = None, + timeout: float = DEFAULT_TIMEOUT, + ) -> dict: + aws_request = self._create_aws_request(method, url, headers, content, params) + response = self.http_client.send( + aws_request.method, + aws_request.url, + headers=dict(aws_request.headers), + data=aws_request.data, + params=aws_request.params, + timeout=timeout, + ) + + try: + response.raise_for_status() + except httpx.HTTPStatusError: + raise httpx.HTTPStatusError(response.text, request=response.request, response=response) + + root = ET.fromstring(response.read()) + data = { + "LastModified": root.find(".//LastModified").text, + "ETag": root.find(".//ETag").text, + } + + if response.headers.get("x-amz-version-id"): + data["VersionId"] = response.headers.get("x-amz-version-id") + + return data + + @reporter.step("Set endpoint S3 to {s3gate_endpoint}") + def set_endpoint(self, s3gate_endpoint: str): + if self.s3gate_endpoint == s3gate_endpoint: + return + + 
self.s3gate_endpoint = s3gate_endpoint + self.service = "s3" + self.signature = SigV4Auth(self.credentials, self.service, self.region) + + @reporter.step("Set endpoint IAM to {iam_endpoint}") + def set_iam_endpoint(self, iam_endpoint: str): + if self.iam_endpoint == iam_endpoint: + return + + self.iam_endpoint = iam_endpoint + self.service = "iam" + self.signature = SigV4Auth(self.credentials, self.service, self.region) + + @reporter.step("Patch object S3") + def patch_object( + self, + bucket: str, + key: str, + content: str | bytes | TestFile, + content_range: str, + version_id: str = None, + if_match: str = None, + if_unmodified_since: str = None, + x_amz_expected_bucket_owner: str = None, + timeout: float = DEFAULT_TIMEOUT, + ) -> dict: + if content_range and not content_range.startswith("bytes"): + content_range = f"bytes {content_range}/*" + + url = f"{self.s3gate_endpoint}/{bucket}/{key}" + headers = self._convert_to_s3_headers(locals(), exclude=["bucket", "key", "content", "version_id", "timeout"]) + params = {"VersionId": version_id} if version_id is not None else None + + return self._exec_request("PATCH", url, headers, content, params, timeout=timeout) diff --git a/src/frostfs_testlib/credentials/authmate_s3_provider.py b/src/frostfs_testlib/credentials/authmate_s3_provider.py new file mode 100644 index 00000000..ed6454bc --- /dev/null +++ b/src/frostfs_testlib/credentials/authmate_s3_provider.py @@ -0,0 +1,47 @@ +import re +from typing import Optional + +from frostfs_testlib import reporter +from frostfs_testlib.cli import FrostfsAuthmate +from frostfs_testlib.credentials.interfaces import S3Credentials, S3CredentialsProvider, User +from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC +from frostfs_testlib.shell import LocalShell +from frostfs_testlib.steps.cli.container import list_containers +from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate +from frostfs_testlib.utils import string_utils + + +class AuthmateS3CredentialsProvider(S3CredentialsProvider): + @reporter.step("Init S3 Credentials using Authmate CLI") + def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None) -> S3Credentials: + cluster_nodes: list[ClusterNode] = self.cluster.cluster_nodes + shell = LocalShell() + wallet = user.wallet + endpoint = cluster_node.storage_node.get_rpc_endpoint() + + gate_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes] + # unique short bucket name + bucket = string_utils.unique_name("bucket-") + + frostfs_authmate: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC) + issue_secret_output = frostfs_authmate.secret.issue( + wallet=wallet.path, + peer=endpoint, + gate_public_key=gate_public_keys, + wallet_password=wallet.password, + container_policy=location_constraints, + container_friendly_name=bucket, + ).stdout + + aws_access_key_id = str(re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group("aws_access_key_id")) + aws_secret_access_key = str( + re.search(r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output).group("aws_secret_access_key") + ) + cid = str(re.search(r"container_id.*:\s.(?P<container_id>\w*)", issue_secret_output).group("container_id")) + + containers_list = list_containers(wallet, shell, endpoint) + assert cid in containers_list, f"Expected cid {cid} in {containers_list}" + + user.s3_credentials = S3Credentials(aws_access_key_id, aws_secret_access_key) + return user.s3_credentials diff
--git a/src/frostfs_testlib/credentials/interfaces.py b/src/frostfs_testlib/credentials/interfaces.py new file mode 100644 index 00000000..b2ae6f18 --- /dev/null +++ b/src/frostfs_testlib/credentials/interfaces.py @@ -0,0 +1,51 @@ +from abc import ABC, abstractmethod +from dataclasses import dataclass, field +from typing import Any, Optional + +from frostfs_testlib.plugins import load_plugin +from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo + + +@dataclass +class S3Credentials: + access_key: str + secret_key: str + + +@dataclass +class User: + name: str + attributes: dict[str, Any] = field(default_factory=dict) + wallet: WalletInfo | None = None + s3_credentials: S3Credentials | None = None + + +class S3CredentialsProvider(ABC): + def __init__(self, cluster: Cluster) -> None: + self.cluster = cluster + + @abstractmethod + def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None, **kwargs) -> S3Credentials: + raise NotImplementedError("Directly called abstract class?") + + +class GrpcCredentialsProvider(ABC): + def __init__(self, cluster: Cluster) -> None: + self.cluster = cluster + + @abstractmethod + def provide(self, user: User, cluster_node: ClusterNode, **kwargs) -> WalletInfo: + raise NotImplementedError("Directly called abstract class?") + + +class CredentialsProvider(object): + S3: S3CredentialsProvider + GRPC: GrpcCredentialsProvider + + def __init__(self, cluster: Cluster) -> None: + config = cluster.cluster_nodes[0].host.config + s3_cls = load_plugin("frostfs.testlib.credentials_providers", config.s3_creds_plugin_name) + self.S3 = s3_cls(cluster) + grpc_cls = load_plugin("frostfs.testlib.credentials_providers", config.grpc_creds_plugin_name) + self.GRPC = grpc_cls(cluster) diff --git a/src/frostfs_testlib/credentials/wallet_factory_provider.py b/src/frostfs_testlib/credentials/wallet_factory_provider.py new file mode 100644 index 00000000..d00020f7 --- /dev/null +++ b/src/frostfs_testlib/credentials/wallet_factory_provider.py @@ -0,0 +1,14 @@ +from frostfs_testlib import reporter +from frostfs_testlib.credentials.interfaces import GrpcCredentialsProvider, User +from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_PASS +from frostfs_testlib.shell.local_shell import LocalShell +from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.storage.dataclasses.wallet import WalletFactory, WalletInfo + + +class WalletFactoryProvider(GrpcCredentialsProvider): + @reporter.step("Init gRPC Credentials using wallet generation") + def provide(self, user: User, cluster_node: ClusterNode) -> WalletInfo: + wallet_factory = WalletFactory(ASSETS_DIR, LocalShell()) + user.wallet = wallet_factory.create_wallet(file_name=user.name, password=DEFAULT_WALLET_PASS) + return user.wallet diff --git a/src/frostfs_testlib/defaults.py b/src/frostfs_testlib/defaults.py index 687fbd6a..22097be7 100644 --- a/src/frostfs_testlib/defaults.py +++ b/src/frostfs_testlib/defaults.py @@ -1,5 +1,5 @@ class Options: - DEFAULT_SHELL_TIMEOUT = 90 + DEFAULT_SHELL_TIMEOUT = 120 @staticmethod def get_default_shell_timeout(): diff --git a/src/frostfs_testlib/fixtures.py b/src/frostfs_testlib/fixtures.py new file mode 100644 index 00000000..7d767d23 --- /dev/null +++ b/src/frostfs_testlib/fixtures.py @@ -0,0 +1,52 @@ +import logging +import os +from datetime import datetime +from importlib.metadata import entry_points + +import pytest +import yaml + +from 
frostfs_testlib import reporter +from frostfs_testlib.hosting.hosting import Hosting +from frostfs_testlib.resources.common import ASSETS_DIR, HOSTING_CONFIG_FILE +from frostfs_testlib.storage import get_service_registry + + +@pytest.fixture(scope="session", autouse=True) +def session_start_time(): + start_time = datetime.utcnow() + return start_time + + +@pytest.fixture(scope="session") +def configure_testlib(): + reporter.get_reporter().register_handler(reporter.AllureHandler()) + reporter.get_reporter().register_handler(reporter.StepsLogger()) + logging.getLogger("paramiko").setLevel(logging.INFO) + + # Register Services for cluster + registry = get_service_registry() + services = entry_points(group="frostfs.testlib.services") + for svc in services: + registry.register_service(svc.name, svc.load()) + + +@pytest.fixture(scope="session") +def temp_directory(configure_testlib): + with reporter.step("Prepare tmp directory"): + full_path = ASSETS_DIR + if not os.path.exists(full_path): + os.mkdir(full_path) + + return full_path + + +@pytest.fixture(scope="session") +def hosting(configure_testlib) -> Hosting: + with open(HOSTING_CONFIG_FILE, "r") as file: + hosting_config = yaml.full_load(file) + + hosting_instance = Hosting() + hosting_instance.configure(hosting_config) + + return hosting_instance diff --git a/src/frostfs_testlib/healthcheck/basic_healthcheck.py b/src/frostfs_testlib/healthcheck/basic_healthcheck.py new file mode 100644 index 00000000..fc7ba59a --- /dev/null +++ b/src/frostfs_testlib/healthcheck/basic_healthcheck.py @@ -0,0 +1,109 @@ +from typing import Callable + +from frostfs_testlib import reporter +from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli +from frostfs_testlib.healthcheck.interfaces import Healthcheck +from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC +from frostfs_testlib.shell import CommandOptions +from frostfs_testlib.steps.node_management import storage_node_healthcheck +from frostfs_testlib.storage.cluster import ClusterNode, ServiceClass +from frostfs_testlib.testing.test_control import wait_for_success +from frostfs_testlib.utils.failover_utils import check_services_status + + +class BasicHealthcheck(Healthcheck): + def _perform(self, cluster_node: ClusterNode, checks: dict[Callable, dict]): + issues: list[str] = [] + for check, kwargs in checks.items(): + issue = check(cluster_node, **kwargs) + if issue: + issues.append(issue) + + assert not issues, "Issues found:\n" + "\n".join(issues) + + @wait_for_success(900, 30, title="Wait for full healthcheck for {cluster_node}") + def full_healthcheck(self, cluster_node: ClusterNode): + checks = { + self.storage_healthcheck: {}, + self._tree_healthcheck: {}, + } + + self._perform(cluster_node, checks) + + @wait_for_success(900, 30, title="Wait for startup healthcheck on {cluster_node}") + def startup_healthcheck(self, cluster_node: ClusterNode): + checks = { + self.storage_healthcheck: {}, + self._tree_healthcheck: {}, + } + + self._perform(cluster_node, checks) + + @wait_for_success(900, 30, title="Wait for storage healthcheck on {cluster_node}") + def storage_healthcheck(self, cluster_node: ClusterNode) -> str | None: + checks = { + self._storage_healthcheck: {}, + } + + self._perform(cluster_node, checks) + + @wait_for_success(900, 30, title="Wait for tree healthcheck on {cluster_node}") + def tree_healthcheck(self, cluster_node: ClusterNode) -> str | None: + checks = { + self._tree_healthcheck: {}, + } + + self._perform(cluster_node, checks) + + @wait_for_success(120, 5, title="Wait for 
service healthcheck on {cluster_node}") + def services_healthcheck(self, cluster_node: ClusterNode): + svcs_to_check = cluster_node.services + checks = { + check_services_status: { + "service_list": svcs_to_check, + "expected_status": "active", + }, + self._check_services: {"services": svcs_to_check}, + } + + self._perform(cluster_node, checks) + + def _check_services(self, cluster_node: ClusterNode, services: list[ServiceClass]): + for svc in services: + result = svc.service_healthcheck() + if result == False: + return f"Service {svc.get_service_systemctl_name()} healthcheck failed on node {cluster_node}." + + @reporter.step("Storage healthcheck on {cluster_node}") + def _storage_healthcheck(self, cluster_node: ClusterNode) -> str | None: + result = storage_node_healthcheck(cluster_node.storage_node) + self._gather_socket_info(cluster_node) + if result.health_status != "READY" or result.network_status != "ONLINE": + return f"Node {cluster_node} is not healthy. Health={result.health_status}. Network={result.network_status}" + + @reporter.step("Tree healthcheck on {cluster_node}") + def _tree_healthcheck(self, cluster_node: ClusterNode) -> str | None: + host = cluster_node.host + service_config = host.get_service_config(cluster_node.storage_node.name) + wallet_path = service_config.attributes["wallet_path"] + wallet_password = service_config.attributes["wallet_password"] + + shell = host.get_shell() + wallet_config_path = f"/tmp/{cluster_node.storage_node.name}-config.yaml" + wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' + shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") + + remote_cli = FrostfsCli( + shell, + host.get_cli_config(FROSTFS_CLI_EXEC).exec_path, + config_file=wallet_config_path, + ) + result = remote_cli.tree.healthcheck(rpc_endpoint="127.0.0.1:8080") + if result.return_code != 0: + return ( + f"Error during tree healthcheck (rc={result.return_code}): {result.stdout}. 
\n Stderr: {result.stderr}" + ) + + @reporter.step("Gather socket info for {cluster_node}") + def _gather_socket_info(self, cluster_node: ClusterNode): + cluster_node.host.get_shell().exec("ss -tuln | grep 8080", CommandOptions(check=False)) diff --git a/src/frostfs_testlib/healthcheck/interfaces.py b/src/frostfs_testlib/healthcheck/interfaces.py new file mode 100644 index 00000000..cf178520 --- /dev/null +++ b/src/frostfs_testlib/healthcheck/interfaces.py @@ -0,0 +1,25 @@ +from abc import ABC, abstractmethod + +from frostfs_testlib.storage.cluster import ClusterNode + + +class Healthcheck(ABC): + @abstractmethod + def full_healthcheck(self, cluster_node: ClusterNode): + """Perform full healthcheck on the target cluster node""" + + @abstractmethod + def startup_healthcheck(self, cluster_node: ClusterNode): + """Perform healthcheck required on startup of target cluster node""" + + @abstractmethod + def storage_healthcheck(self, cluster_node: ClusterNode): + """Perform storage service healthcheck on target cluster node""" + + @abstractmethod + def services_healthcheck(self, cluster_node: ClusterNode): + """Perform service status check on target cluster node""" + + @abstractmethod + def tree_healthcheck(self, cluster_node: ClusterNode): + """Perform tree healthcheck on target cluster node""" diff --git a/src/frostfs_testlib/hooks.py b/src/frostfs_testlib/hooks.py new file mode 100644 index 00000000..d7e4cc84 --- /dev/null +++ b/src/frostfs_testlib/hooks.py @@ -0,0 +1,31 @@ +import pytest + + +@pytest.hookimpl(specname="pytest_collection_modifyitems") +def pytest_add_frostfs_marker(items: list[pytest.Item]): + # All tests which reside in frostfs nodeid are granted with frostfs marker, excluding + # nodeid = full path of the test + # 1. plugins + # 2. testlib itself + for item in items: + location = item.location[0] + if "frostfs" in location and "plugin" not in location and "testlib" not in location: + item.add_marker("frostfs") + + +# pytest hook. 
Do not rename +@pytest.hookimpl(trylast=True) +def pytest_collection_modifyitems(items: list[pytest.Item]): + # The order of running tests corresponds to the suites + items.sort(key=lambda item: item.location[0]) + + # Change order of tests based on @pytest.mark.order() marker + def order(item: pytest.Item) -> int: + order_marker = item.get_closest_marker("order") + if order_marker and (len(order_marker.args) != 1 or not isinstance(order_marker.args[0], int)): + raise RuntimeError("Incorrect usage of pytest.mark.order") + + order_value = order_marker.args[0] if order_marker else 0 + return order_value + + items.sort(key=lambda item: order(item)) diff --git a/src/frostfs_testlib/hosting/config.py b/src/frostfs_testlib/hosting/config.py index dd8b4b9b..6cdee39a 100644 --- a/src/frostfs_testlib/hosting/config.py +++ b/src/frostfs_testlib/hosting/config.py @@ -10,9 +10,7 @@ class ParsedAttributes: def parse(cls, attributes: dict[str, Any]): # Pick attributes supported by the class field_names = set(field.name for field in fields(cls)) - supported_attributes = { - key: value for key, value in attributes.items() if key in field_names - } + supported_attributes = {key: value for key, value in attributes.items() if key in field_names} return cls(**supported_attributes) @@ -29,6 +27,7 @@ class CLIConfig: name: str exec_path: str attributes: dict[str, str] = field(default_factory=dict) + extra_args: list[str] = field(default_factory=list) @dataclass @@ -52,6 +51,7 @@ class HostConfig: Attributes: plugin_name: Name of plugin that should be used to manage the host. + healthcheck_plugin_name: Name of the plugin for healthcheck operations. address: Address of the machine (IP or DNS name). services: List of services hosted on the machine. clis: List of CLI tools available on the machine.
@@ -60,10 +60,17 @@ class HostConfig: """ plugin_name: str + hostname: str + healthcheck_plugin_name: str address: str + s3_creds_plugin_name: str = field(default="authmate") + grpc_creds_plugin_name: str = field(default="wallet_factory") + product: str = field(default="frostfs") services: list[ServiceConfig] = field(default_factory=list) clis: list[CLIConfig] = field(default_factory=list) attributes: dict[str, str] = field(default_factory=dict) + interfaces: dict[str, str] = field(default_factory=dict) + environment: dict[str, str] = field(default_factory=dict) def __post_init__(self) -> None: self.services = [ServiceConfig(**service) for service in self.services or []] diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index 1f7b5456..d458b0a7 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -11,7 +11,7 @@ import docker from requests import HTTPError from frostfs_testlib.hosting.config import ParsedAttributes -from frostfs_testlib.hosting.interfaces import Host +from frostfs_testlib.hosting.interfaces import DiskInfo, Host, HostStatus from frostfs_testlib.shell import LocalShell, Shell, SSHShell from frostfs_testlib.shell.command_inspectors import SudoInspector @@ -61,10 +61,10 @@ class ServiceAttributes(ParsedAttributes): class DockerHost(Host): """Manages services hosted in Docker containers running on a local or remote machine.""" - def get_shell(self) -> Shell: + def get_shell(self, sudo: bool = False) -> Shell: host_attributes = HostAttributes.parse(self._config.attributes) command_inspectors = [] - if host_attributes.sudo_shell: + if sudo: command_inspectors.append(SudoInspector()) if not host_attributes.ssh_login: @@ -87,6 +87,15 @@ class DockerHost(Host): for service_config in self._config.services: self.start_service(service_config.name) + def get_host_status(self) -> HostStatus: + # We emulate host status by checking all services. + for service_config in self._config.services: + state = self._get_container_state(service_config.name) + if state != "running": + return HostStatus.OFFLINE + + return HostStatus.ONLINE + def stop_host(self) -> None: # We emulate stopping machine by stopping all services # As an alternative we can probably try to stop docker service... 
@@ -117,6 +126,20 @@ class DockerHost(Host): timeout=service_attributes.stop_timeout, ) + def mask_service(self, service_name: str) -> None: + # Not required for Docker + return + + def unmask_service(self, service_name: str) -> None: + # Not required for Docker + return + + def wait_success_suspend_process(self, service_name: str): + raise NotImplementedError("Not supported for docker") + + def wait_success_resume_process(self, service_name: str): + raise NotImplementedError("Not supported for docker") + def restart_service(self, service_name: str) -> None: service_attributes = self._get_service_attributes(service_name) @@ -129,19 +152,66 @@ class DockerHost(Host): timeout=service_attributes.start_timeout, ) - def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None: + def wait_for_service_to_be_in_state(self, systemd_service_name: str, expected_state: str, timeout: int) -> None: + raise NotImplementedError("Not implemented for docker") + + def get_data_directory(self, service_name: str) -> str: service_attributes = self._get_service_attributes(service_name) client = self._get_docker_client() volume_info = client.inspect_volume(service_attributes.volume_name) volume_path = volume_info["Mountpoint"] + return volume_path + + def send_signal_to_service(self, service_name: str, signal: str) -> None: + raise NotImplementedError("Not implemented for docker") + + def delete_metabase(self, service_name: str) -> None: + raise NotImplementedError("Not implemented for docker") + + def delete_write_cache(self, service_name: str) -> None: + raise NotImplementedError("Not implemented for docker") + + def delete_fstree(self, service_name: str) -> None: + raise NotImplementedError("Not implemented for docker") + + def delete_blobovnicza(self, service_name: str) -> None: + raise NotImplementedError("Not implemented for docker") + + def delete_pilorama(self, service_name: str) -> None: + raise NotImplementedError("Not implemented for docker") + + def delete_file(self, file_path: str) -> None: + raise NotImplementedError("Not implemented for docker") + + def is_file_exist(self, file_path: str) -> None: + raise NotImplementedError("Not implemented for docker") + + def wipefs_storage_node_data(self, service_name: str) -> None: + raise NotImplementedError("Not implemented for docker") + + def finish_wipefs(self, service_name: str) -> None: + raise NotImplementedError("Not implemented for docker") + + def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None: + volume_path = self.get_data_directory(service_name) + shell = self.get_shell() meta_clean_cmd = f"rm -rf {volume_path}/meta*/*" data_clean_cmd = f"; rm -rf {volume_path}/data*/*" if not cache_only else "" cmd = f"{meta_clean_cmd}{data_clean_cmd}" shell.exec(cmd) + def attach_disk(self, device: str, disk_info: DiskInfo) -> None: + raise NotImplementedError("Not supported for docker") + + def detach_disk(self, device: str) -> DiskInfo: + raise NotImplementedError("Not supported for docker") + + def is_disk_attached(self, device: str, disk_info: DiskInfo) -> bool: + raise NotImplementedError("Not supported for docker") + def dump_logs( self, directory_path: str, @@ -172,11 +242,42 @@ class DockerHost(Host): with open(file_path, "wb") as file: file.write(logs) + def get_filtered_logs( + self, + filter_regex: str, + since: Optional[datetime] = None, + until: Optional[datetime] = None, + unit: Optional[str] = None, + exclude_filter: Optional[str] = None, + priority: Optional[str] = None, + 
word_count: bool = None, + ) -> str: + client = self._get_docker_client() + filtered_logs = "" + for service_config in self._config.services: + container_name = self._get_service_attributes(service_config.name).container_name + try: + filtered_logs = client.logs(container_name, since=since, until=until) + except HTTPError as exc: + logger.info(f"Got exception while dumping logs of '{container_name}': {exc}") + continue + + if exclude_filter: + filtered_logs = filtered_logs.replace(exclude_filter, "") + matches = re.findall(filter_regex, filtered_logs, re.IGNORECASE + re.MULTILINE) + found = list(matches) + + if found: + filtered_logs += f"{container_name}:\n{os.linesep.join(found)}" + + return filtered_logs + def is_message_in_logs( self, message_regex: str, since: Optional[datetime] = None, until: Optional[datetime] = None, + unit: Optional[str] = None, ) -> bool: client = self._get_docker_client() for service_config in self._config.services: @@ -219,20 +320,23 @@ class DockerHost(Host): return container return None - def _wait_for_container_to_be_in_state( - self, container_name: str, expected_state: str, timeout: int - ) -> None: + def _wait_for_container_to_be_in_state(self, container_name: str, expected_state: str, timeout: int) -> None: iterations = 10 iteration_wait_time = timeout / iterations # To speed things up, we break timeout in smaller iterations and check container state # several times. This way waiting stops as soon as container reaches the expected state for _ in range(iterations): - container = self._get_container_by_name(container_name) - logger.debug(f"Current container state\n:{json.dumps(container, indent=2)}") + state = self._get_container_state(container_name) - if container and container["State"] == expected_state: + if state == expected_state: return time.sleep(iteration_wait_time) raise RuntimeError(f"Container {container_name} is not in {expected_state} state.") + + def _get_container_state(self, container_name: str) -> str: + container = self._get_container_by_name(container_name) + logger.debug(f"Current container state\n:{json.dumps(container, indent=2)}") + + return container.get("State", None) diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index 73f49541..a41161c5 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -4,6 +4,14 @@ from typing import Optional from frostfs_testlib.hosting.config import CLIConfig, HostConfig, ServiceConfig from frostfs_testlib.shell.interfaces import Shell +from frostfs_testlib.testing.readable import HumanReadableEnum +from frostfs_testlib.testing.test_control import retry + + +class HostStatus(HumanReadableEnum): + ONLINE = "Online" + OFFLINE = "Offline" + UNKNOWN = "Unknown" class DiskInfo(dict): @@ -18,11 +26,12 @@ class Host(ABC): def __init__(self, config: HostConfig) -> None: self._config = config - self._service_config_by_name = { - service_config.name: service_config for service_config in config.services - } + self._service_config_by_name = {service_config.name: service_config for service_config in config.services} self._cli_config_by_name = {cli_config.name: cli_config for cli_config in config.clis} + def __repr__(self) -> str: + return self.config.address + @property def config(self) -> HostConfig: """Returns config of the host. 
@@ -48,7 +57,7 @@ class Host(ABC): raise ValueError(f"Unknown service name: '{service_name}'") return service_config - def get_cli_config(self, cli_name: str) -> CLIConfig: + def get_cli_config(self, cli_name: str, allow_empty: bool = False) -> CLIConfig: """Returns config of CLI tool with specified name. The CLI must be located on this host. @@ -60,14 +69,17 @@ Args: Config of the CLI tool. """ cli_config = self._cli_config_by_name.get(cli_name) - if cli_config is None: + if cli_config is None and not allow_empty: raise ValueError(f"Unknown CLI name: '{cli_name}'") return cli_config @abstractmethod - def get_shell(self) -> Shell: + def get_shell(self, sudo: bool = True) -> Shell: """Returns shell to this host. + Args: + sudo: if True, run all commands in shell with elevated rights + Returns: Shell that executes commands on this host. """ @@ -76,6 +88,10 @@ def start_host(self) -> None: """Starts the host machine.""" + @abstractmethod + def get_host_status(self) -> HostStatus: + """Check host status.""" + @abstractmethod def stop_host(self, mode: str) -> None: """Stops the host machine. @@ -104,6 +120,37 @@ service_name: Name of the service to stop. """ + @abstractmethod + def send_signal_to_service(self, service_name: str, signal: str) -> None: + """Send signal to service with specified name using kill -<signal> + + The service must be hosted on this host. + + Args: + service_name: Name of the service to stop. + signal: signal name. See kill -l for all names + """ + + @abstractmethod + def mask_service(self, service_name: str) -> None: + """Prevent the service from being started by any activity by masking it. + + The service must be hosted on this host. + + Args: + service_name: Name of the service to mask. + """ + + @abstractmethod + def unmask_service(self, service_name: str) -> None: + """Allow the service to be started by any activity by unmasking it. + + The service must be hosted on this host. + + Args: + service_name: Name of the service to unmask. + """ + @abstractmethod def restart_service(self, service_name: str) -> None: """Restarts the service with specified name and waits until it starts. @@ -112,6 +159,30 @@ service_name: Name of the service to restart. """ + @abstractmethod + def get_data_directory(self, service_name: str) -> str: + """ + Get path to data directory on node for further usage + (example: list databases pilorama.db) + + Args: + service_name: Name of storage node service. + """ + + @abstractmethod + def wait_success_suspend_process(self, process_name: str) -> None: + """Search for a service ID by its name and stop the process + Args: + process_name: Name + """ + + @abstractmethod + def wait_success_resume_process(self, process_name: str) -> None: + """Search for a service by its ID and start the process + Args: + process_name: Name + """ + @abstractmethod def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None: """Erases all data of the storage node with specified name. @@ -121,6 +192,81 @@ service_name: Name of storage node service. cache_only: To delete cache only. """ + @abstractmethod + def wipefs_storage_node_data(self, service_name: str) -> None: + """Erases all data of the storage node with specified name. + + Args: + service_name: Name of storage node service. + """ + + def finish_wipefs(self, service_name: str) -> None: + """Erases all data of the storage node with specified name. + + Args: + service_name: Name of storage node service.
+ """ + + @abstractmethod + def delete_fstree(self, service_name: str) -> None: + """ + Deletes all fstrees in the node. + + Args: + service_name: Name of storage node service. + + """ + + @abstractmethod + def delete_metabase(self, service_name: str) -> None: + """ + Deletes all metabase*.db in the node. + + Args: + service_name: Name of storage node service. + + """ + + @abstractmethod + def delete_write_cache(self, service_name: str) -> None: + """ + Deletes all write_cache in the node. + + Args: + service_name: Name of storage node service. + + """ + + @abstractmethod + def delete_blobovnicza(self, service_name: str) -> None: + """ + Deletes all blobovniczas in the node. + + Args: + service_name: Name of storage node service. + + """ + + @abstractmethod + def delete_file(self, file_path: str) -> None: + """ + Deletes file with provided file path + + Args: + file_path: full path to the file to delete + + """ + + @abstractmethod + def is_file_exist(self, file_path: str) -> bool: + """ + Checks if file exists + + Args: + file_path: full path to the file to check + + """ + @abstractmethod def detach_disk(self, device: str) -> DiskInfo: """Detaches disk device to simulate disk offline/failover scenario. @@ -172,12 +318,40 @@ class Host(ABC): filter_regex: regex to filter output """ + @abstractmethod + def get_filtered_logs( + self, + filter_regex: str, + since: Optional[datetime] = None, + until: Optional[datetime] = None, + unit: Optional[str] = None, + exclude_filter: Optional[str] = None, + priority: Optional[str] = None, + word_count: bool = None, + ) -> str: + """Get logs from host filtered by regex. + + Args: + filter_regex: regex filter for logs. + since: If set, limits the time from which logs should be collected. Must be in UTC. + until: If set, limits the time until which logs should be collected. Must be in UTC. + unit: required unit. + priority: logs level, 0 - emergency, 7 - debug. All messages with that code and higher. + For example, if we specify the -p 2 option, journalctl will show all messages with levels 2, 1 and 0. + word_count: output type, expected values: lines, bytes, json + + Returns: + Found entries as str if any found. + Empty string otherwise. + """ + @abstractmethod def is_message_in_logs( self, message_regex: str, since: Optional[datetime] = None, until: Optional[datetime] = None, + unit: Optional[str] = None, ) -> bool: """Checks logs on host for specified message regex. @@ -190,3 +364,35 @@ class Host(ABC): True if message found in logs in the given time frame. False otherwise. """ + + @abstractmethod + def wait_for_service_to_be_in_state(self, systemd_service_name: str, expected_state: str, timeout: int) -> None: + """ + Waits for service to be in specified state. + + Args: + systemd_service_name: Service whose state to wait for.
+ expected_state: State to wait for + timeout: Seconds to wait + + """ + + def down_interface(self, interface: str) -> None: + shell = self.get_shell() + shell.exec(f"ip link set {interface} down") + + def up_interface(self, interface: str) -> None: + shell = self.get_shell() + shell.exec(f"ip link set {interface} up") + + def check_state(self, interface: str) -> str: + shell = self.get_shell() + return shell.exec(f"ip link show {interface} | sed -z 's/.*state \(.*\) mode .*/\\1/'").stdout.strip() + + @retry(max_attempts=5, sleep_interval=5, expected_result="UP") + def check_state_up(self, interface: str) -> str: + return self.check_state(interface=interface) + + @retry(max_attempts=5, sleep_interval=5, expected_result="DOWN") + def check_state_down(self, interface: str) -> str: + return self.check_state(interface=interface) diff --git a/src/frostfs_testlib/load/__init__.py b/src/frostfs_testlib/load/__init__.py new file mode 100644 index 00000000..8477ee41 --- /dev/null +++ b/src/frostfs_testlib/load/__init__.py @@ -0,0 +1,15 @@ +from frostfs_testlib.load.interfaces.loader import Loader +from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner +from frostfs_testlib.load.load_config import ( + EndpointSelectionStrategy, + K6ProcessAllocationStrategy, + LoadParams, + LoadScenario, + LoadType, + NodesSelectionStrategy, + Preset, + ReadFrom, +) +from frostfs_testlib.load.load_report import LoadReport +from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader +from frostfs_testlib.load.runners import DefaultRunner, LocalRunner, S3LocalRunner diff --git a/src/frostfs_testlib/load/interfaces/loader.py b/src/frostfs_testlib/load/interfaces/loader.py new file mode 100644 index 00000000..2c818d9c --- /dev/null +++ b/src/frostfs_testlib/load/interfaces/loader.py @@ -0,0 +1,14 @@ +from abc import ABC, abstractmethod + +from frostfs_testlib.shell.interfaces import Shell + + +class Loader(ABC): + @abstractmethod + def get_shell(self) -> Shell: + """Get shell for the loader""" + + @property + @abstractmethod + def ip(self): + """Get address of the loader""" diff --git a/src/frostfs_testlib/load/interfaces/scenario_runner.py b/src/frostfs_testlib/load/interfaces/scenario_runner.py new file mode 100644 index 00000000..c0062a98 --- /dev/null +++ b/src/frostfs_testlib/load/interfaces/scenario_runner.py @@ -0,0 +1,55 @@ +from abc import ABC, abstractmethod + +from frostfs_testlib.load.interfaces.loader import Loader +from frostfs_testlib.load.k6 import K6 +from frostfs_testlib.load.load_config import LoadParams +from frostfs_testlib.storage.cluster import ClusterNode + + +class ScenarioRunner(ABC): + @abstractmethod + def prepare( + self, + load_params: LoadParams, + cluster_nodes: list[ClusterNode], + nodes_under_load: list[ClusterNode], + k6_dir: str, + ): + """Preparation steps before running the load""" + + @abstractmethod + def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): + """Init K6 instances""" + + @abstractmethod + def get_k6_instances(self) -> list[K6]: + """Get K6 instances""" + + @abstractmethod + def start(self): + """Start K6 instances""" + + @abstractmethod + def stop(self): + """Stop K6 instances""" + + @abstractmethod + def preset(self): + """Run preset for load""" + + @property + @abstractmethod + def is_running(self) -> bool: + """Returns True if load is running at the moment""" + + @abstractmethod + def wait_until_finish(self, soft_timeout: int = 0): + """Wait until load is finished""" + + @abstractmethod + def 
get_results(self) -> dict: + """Get results from K6 run""" + + @abstractmethod + def get_loaders(self) -> list[Loader]: + """Return loaders""" diff --git a/src/frostfs_testlib/load/interfaces/summarized.py b/src/frostfs_testlib/load/interfaces/summarized.py new file mode 100644 index 00000000..4be33ef0 --- /dev/null +++ b/src/frostfs_testlib/load/interfaces/summarized.py @@ -0,0 +1,96 @@ +from dataclasses import dataclass, field + +from frostfs_testlib.load.load_config import LoadParams, LoadScenario +from frostfs_testlib.load.load_metrics import get_metrics_object + + +@dataclass +class SummarizedErorrs: + total: int = field(default_factory=int) + percent: float = field(default_factory=float) + threshold: float = field(default_factory=float) + by_node: dict[str, int] = field(default_factory=dict) + + def calc_stats(self, operations): + self.total += sum(self.by_node.values()) + + if not operations: + return + + self.percent = self.total / operations * 100 + + +@dataclass +class SummarizedLatencies: + avg: float = field(default_factory=float) + min: float = field(default_factory=float) + max: float = field(default_factory=float) + by_node: dict[str, dict[str, int]] = field(default_factory=dict) + + def calc_stats(self): + if not self.by_node: + return + + avgs = [lt["avg"] for lt in self.by_node.values()] + self.avg = sum(avgs) / len(avgs) + + minimal = [lt["min"] for lt in self.by_node.values()] + self.min = min(minimal) + + maximum = [lt["max"] for lt in self.by_node.values()] + self.max = max(maximum) + + +@dataclass +class SummarizedStats: + threads: int = field(default_factory=int) + requested_rate: int = field(default_factory=int) + operations: int = field(default_factory=int) + rate: float = field(default_factory=float) + throughput: float = field(default_factory=float) + latencies: SummarizedLatencies = field(default_factory=SummarizedLatencies) + errors: SummarizedErorrs = field(default_factory=SummarizedErorrs) + total_bytes: int = field(default_factory=int) + passed: bool = True + + def calc_stats(self): + self.errors.calc_stats(self.operations) + self.latencies.calc_stats() + self.passed = self.errors.percent <= self.errors.threshold + + @staticmethod + def collect(load_params: LoadParams, load_summaries: dict) -> dict[str, "SummarizedStats"]: + if load_params.scenario in [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR]: + delete_vus = max(load_params.preallocated_deleters or 0, load_params.max_deleters or 0) + write_vus = max(load_params.preallocated_writers or 0, load_params.max_writers or 0) + read_vus = max(load_params.preallocated_readers or 0, load_params.max_readers or 0) + else: + write_vus = load_params.writers + read_vus = load_params.readers + delete_vus = load_params.deleters + + summarized = { + "Write": SummarizedStats(threads=write_vus, requested_rate=load_params.write_rate), + "Read": SummarizedStats(threads=read_vus, requested_rate=load_params.read_rate), + "Delete": SummarizedStats(threads=delete_vus, requested_rate=load_params.delete_rate), + } + + for node_key, load_summary in load_summaries.items(): + metrics = get_metrics_object(load_params.scenario, load_summary) + for operation in metrics.operations: + target = summarized[operation._NAME] + if not operation.total_iterations: + continue + target.operations += operation.total_iterations + target.rate += operation.rate + target.latencies.by_node[node_key] = operation.latency + target.throughput += operation.throughput + target.errors.threshold = load_params.error_threshold + target.total_bytes += 
operation.total_bytes + if operation.failed_iterations: + target.errors.by_node[node_key] = operation.failed_iterations + + for operation in summarized.values(): + operation.calc_stats() + + return summarized diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py new file mode 100644 index 00000000..3e62a16a --- /dev/null +++ b/src/frostfs_testlib/load/k6.py @@ -0,0 +1,268 @@ +import json +import logging +import math +import os +from dataclasses import dataclass +from datetime import datetime +from threading import Event +from time import sleep +from typing import Any +from urllib.parse import urlparse + +from frostfs_testlib import reporter +from frostfs_testlib.credentials.interfaces import User +from frostfs_testlib.load.interfaces.loader import Loader +from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario, LoadType +from frostfs_testlib.processes.remote_process import RemoteProcess +from frostfs_testlib.resources.common import STORAGE_USER_NAME +from frostfs_testlib.resources.load_params import K6_STOP_SIGNAL_TIMEOUT, K6_TEARDOWN_PERIOD +from frostfs_testlib.shell import Shell +from frostfs_testlib.testing.test_control import wait_for_success + +EXIT_RESULT_CODE = 0 + +logger = logging.getLogger("NeoLogger") + + +@dataclass +class LoadResults: + data_sent: float = 0.0 + data_received: float = 0.0 + read_ops: float = 0.0 + write_ops: float = 0.0 + total_ops: float = 0.0 + + +class K6: + _k6_process: RemoteProcess + + def __init__( + self, + load_params: LoadParams, + endpoints: list[str], + k6_dir: str, + shell: Shell, + loader: Loader, + user: User, + ): + if load_params.scenario is None: + raise RuntimeError("Scenario should not be none") + + self.load_params = load_params + self.endpoints = endpoints + self.loader = loader + self.shell = shell + self.user = user + self.preset_output: str = "" + self.summary_json: str = os.path.join( + self.load_params.working_dir, + f"{self.load_params.load_id}_{self.load_params.scenario.value}_summary.json", + ) + + self._k6_dir: str = k6_dir + + command = ( + f"{self._generate_env_variables()}{self._k6_dir}/k6 run {self._generate_k6_variables()} " + f"{self._k6_dir}/scenarios/{self.load_params.scenario.value}.js" + ) + remote_user = STORAGE_USER_NAME if self.load_params.scenario == LoadScenario.LOCAL else None + process_id = self.load_params.load_id if self.load_params.scenario != LoadScenario.VERIFY else f"{self.load_params.load_id}_verify" + self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir, remote_user, process_id) + + def _get_fill_percents(self): + fill_percents = self.shell.exec("df -H --output=source,pcent,target | grep frostfs | grep data").stdout.split("\n") + return [line.split() for line in fill_percents][:-1] + + def check_fill_percent(self): + fill_percents = self._get_fill_percents() + percent_mean = 0 + for line in fill_percents: + percent_mean += float(line[1].split("%")[0]) + percent_mean = percent_mean / len(fill_percents) + logger.info(f"{self.loader.ip} mean fill percent is {percent_mean}") + return percent_mean >= self.load_params.fill_percent + + @property + def process_dir(self) -> str: + return self._k6_process.process_dir + + def preset(self) -> str: + with reporter.step(f"Run preset on loader {self.loader.ip} for endpoints {self.endpoints}"): + preset_grpc = f"{self._k6_dir}/scenarios/preset/preset_grpc.py" + preset_s3 = f"{self._k6_dir}/scenarios/preset/preset_s3.py" + preset_map = { + LoadType.gRPC: preset_grpc, + 
LoadType.S3: preset_s3, + LoadType.HTTP: preset_grpc, + } + + base_args = { + preset_grpc: [ + preset_grpc, + f"--endpoint {','.join(self.endpoints)}", + f"--wallet {self.user.wallet.path} ", + f"--config {self.user.wallet.config_path} ", + ], + preset_s3: [ + preset_s3, + f"--endpoint {','.join(self.endpoints)}", + ], + } + + preset_scenario = preset_map[self.load_params.load_type] + command_args = base_args[preset_scenario].copy() + + command_args += self.load_params.get_preset_arguments() + + command = " ".join(command_args) + result = self.shell.exec(command) + + assert result.return_code == EXIT_RESULT_CODE, f"Return code of preset is not zero: {result.stdout}" + + self.preset_output = result.stdout.strip("\n") + return self.preset_output + + @reporter.step("Generate K6 variables") + def _generate_k6_variables(self) -> str: + env_vars = self.load_params.get_k6_vars() + + env_vars[f"{self.load_params.load_type.value.upper()}_ENDPOINTS"] = ",".join(self.endpoints) + env_vars["SUMMARY_JSON"] = self.summary_json + + reporter.attach("\n".join(f"{param}: {value}" for param, value in env_vars.items()), "K6 ENV variables") + return " ".join([f"-e {param}='{value}'" for param, value in env_vars.items() if value is not None]) + + @reporter.step("Generate env variables") + def _generate_env_variables(self) -> str: + env_vars = self.load_params.get_env_vars() + if not env_vars: + return "" + reporter.attach("\n".join(f"{param}: {value}" for param, value in env_vars.items()), "ENV variables") + return " ".join([f"{param}='{value}'" for param, value in env_vars.items() if value is not None]) + " " + + def get_start_time(self) -> datetime: + return datetime.fromtimestamp(self._k6_process.start_time()) + + def get_end_time(self) -> datetime: + return datetime.fromtimestamp(self._k6_process.end_time()) + + def start(self) -> None: + with reporter.step(f"Start load from loader {self.loader.ip} on endpoints {self.endpoints}"): + self._k6_process.start() + + def wait_until_finished(self, event: Event, soft_timeout: int = 0) -> None: + with reporter.step(f"Wait until load is finished from loader {self.loader.ip} on endpoints {self.endpoints}"): + if self.load_params.scenario == LoadScenario.VERIFY: + timeout = self.load_params.verify_time or 0 + else: + timeout = self.load_params.load_time or 0 + + start_time = int(self.get_start_time().timestamp()) + + current_time = int(datetime.utcnow().timestamp()) + working_time = current_time - start_time + remaining_time = timeout - working_time + + setup_teardown_time = ( + int(K6_TEARDOWN_PERIOD) + self.load_params.get_init_time() + int(self.load_params.setup_timeout.replace("s", "").strip()) + ) + remaining_time_including_setup_and_teardown = remaining_time + setup_teardown_time + timeout = remaining_time_including_setup_and_teardown + + if soft_timeout: + timeout = min(timeout, soft_timeout) + + original_timeout = timeout + + timeouts = { + "K6 start time": start_time, + "Current time": current_time, + "K6 working time": working_time, + "Remaining time for load": remaining_time, + "Setup and teardown": setup_teardown_time, + "Remaining time including setup/teardown": remaining_time_including_setup_and_teardown, + "Soft timeout": soft_timeout, + "Selected timeout": original_timeout, + } + + reporter.attach("\n".join([f"{k}: {v}" for k, v in timeouts.items()]), "timeouts.txt") + + min_wait_interval = 10 + wait_interval = min_wait_interval + if self._k6_process is None: + assert "No k6 instances were executed" + + while timeout > 0: + if not 
self.load_params.fill_percent is None: + with reporter.step(f"Check the percentage of filling of all data disks on the node"): + if self.check_fill_percent(): + logger.info(f"Stopping load on because disks is filled more then {self.load_params.fill_percent}%") + event.set() + self.stop() + return + + if event.is_set(): + self.stop() + return + + if not self._k6_process.running(): + return + + remaining_time_hours = f"{timeout//3600}h" if timeout // 3600 != 0 else "" + remaining_time_minutes = f"{timeout//60%60}m" if timeout // 60 % 60 != 0 else "" + logger.info( + f"K6 is running. Remaining time {remaining_time_hours}{remaining_time_minutes}{timeout%60}s. Next check after {wait_interval} seconds..." + ) + sleep(wait_interval) + timeout -= min(timeout, wait_interval) + wait_interval = max( + min(timeout, int(math.log2(timeout + 1)) * 15) - min_wait_interval, + min_wait_interval, + ) + + if not self._k6_process.running(): + return + + self.stop() + if not soft_timeout: + raise TimeoutError(f"Expected K6 to finish after {original_timeout} sec.") + + def get_results(self) -> Any: + with reporter.step(f"Get load results from loader {self.loader.ip} on endpoints {self.endpoints}"): + self.__log_output() + + if not self.summary_json: + return None + + summary_text = self.shell.exec(f"cat {self.summary_json}").stdout + summary_json = json.loads(summary_text) + endpoint = urlparse(self.endpoints[0]).netloc or self.endpoints[0] + allure_filenames = { + K6ProcessAllocationStrategy.PER_LOAD_NODE: f"{self.loader.ip}_{self.load_params.scenario.value}_summary.json", + K6ProcessAllocationStrategy.PER_ENDPOINT: f"{self.loader.ip}_{self.load_params.scenario.value}_{endpoint}_summary.json", + } + allure_filename = allure_filenames[self.load_params.k6_process_allocation_strategy] + + reporter.attach(summary_text, allure_filename) + return summary_json + + def stop(self) -> None: + with reporter.step(f"Stop load from loader {self.loader.ip} on endpoints {self.endpoints}"): + if self.is_running(): + self._k6_process.stop() + + self._wait_until_process_end() + + def is_running(self) -> bool: + if self._k6_process: + return self._k6_process.running() + return False + + @reporter.step("Wait until K6 process end") + @wait_for_success(K6_STOP_SIGNAL_TIMEOUT, 15, False, False, "Can not stop K6 process within timeout") + def _wait_until_process_end(self): + return self._k6_process.running() + + def __log_output(self) -> None: + reporter.attach(self._k6_process.stdout(full=True), "K6 stdout") + reporter.attach(f"{self._k6_process.process_dir}/stderr", "K6 stderr path") diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py new file mode 100644 index 00000000..38302032 --- /dev/null +++ b/src/frostfs_testlib/load/load_config.py @@ -0,0 +1,491 @@ +import math +import os +from dataclasses import dataclass, field, fields, is_dataclass +from enum import Enum +from types import MappingProxyType +from typing import Any, Callable, Optional, get_args + +from frostfs_testlib.utils.converting_utils import calc_unit + + +def convert_time_to_seconds(time: int | str | None) -> int: + if time is None: + return None + if str(time).isdigit(): + seconds = int(time) + else: + days, hours, minutes = 0, 0, 0 + if "d" in time: + days, time = time.split("d") + if "h" in time: + hours, time = time.split("h") + if "min" in time: + minutes = time.replace("min", "") + seconds = int(days) * 86400 + int(hours) * 3600 + int(minutes) * 60 + return seconds + + +def force_list(input: str | list[str]): + if input 
is None: + return None + + if isinstance(input, list): + return list(map(str.strip, input)) + + return [input.strip()] + + +class LoadType(Enum): + gRPC = "grpc" + S3 = "s3" + HTTP = "http" + + +class LoadScenario(Enum): + gRPC = "grpc" + gRPC_CAR = "grpc_car" + S3 = "s3" + S3_CAR = "s3_car" + S3_MULTIPART = "s3_multipart" + S3_LOCAL = "s3local" + HTTP = "http" + VERIFY = "verify" + LOCAL = "local" + + +class ReadFrom(Enum): + REGISTRY = "registry" + PRESET = "preset" + MANUAL = "manual" + + +all_load_scenarios = [ + LoadScenario.gRPC, + LoadScenario.S3, + LoadScenario.HTTP, + LoadScenario.S3_CAR, + LoadScenario.gRPC_CAR, + LoadScenario.LOCAL, + LoadScenario.S3_MULTIPART, + LoadScenario.S3_LOCAL, +] +all_scenarios = all_load_scenarios.copy() + [LoadScenario.VERIFY] + +constant_vus_scenarios = [ + LoadScenario.gRPC, + LoadScenario.S3, + LoadScenario.HTTP, + LoadScenario.LOCAL, + LoadScenario.S3_MULTIPART, + LoadScenario.S3_LOCAL, +] +constant_arrival_rate_scenarios = [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR] + +grpc_preset_scenarios = [ + LoadScenario.gRPC, + LoadScenario.HTTP, + LoadScenario.gRPC_CAR, + LoadScenario.LOCAL, +] +s3_preset_scenarios = [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.S3_MULTIPART, LoadScenario.S3_LOCAL] + + +@dataclass +class MetaField: + name: str + metadata: MappingProxyType + value: Any + + +def metadata_field( + applicable_scenarios: list[LoadScenario], + preset_param: Optional[str] = None, + scenario_variable: Optional[str] = None, + string_repr: Optional[bool] = True, + distributed: Optional[bool] = False, + formatter: Optional[Callable] = None, + env_variable: Optional[str] = None, +): + return field( + default=None, + metadata={ + "applicable_scenarios": applicable_scenarios, + "preset_argument": preset_param, + "scenario_variable": scenario_variable, + "string_repr": string_repr, + "distributed": distributed, + "formatter": formatter, + "env_variable": env_variable, + }, + ) + + +class NodesSelectionStrategy(Enum): + # Select ONE random node from cluster nodes. + RANDOM_SINGLE = "RANDOM_SINGLE" + # Select All nodes. + ALL = "ALL" + # Select All nodes except node under test (useful for failover). This is DEFAULT one + ALL_EXCEPT_UNDER_TEST = "ALL_EXCEPT_UNDER_TEST" + # Select ONE random node except under test (useful for failover). + RANDOM_SINGLE_EXCEPT_UNDER_TEST = "RANDOM_SINGLE_EXCEPT_UNDER_TEST" + # Select node under test + NODE_UNDER_TEST = "NODE_UNDER_TEST" + + +class EndpointSelectionStrategy(Enum): + """Enum which defines which endpoint to select from each storage node""" + + # Select All endpoints. + ALL = "ALL" + # Select first endpoint from node + FIRST = "FIRST" + + +class K6ProcessAllocationStrategy(Enum): + """Enum which defines how K6 processes should be allocated""" + + # Each load node will get one k6 process with all endpoints (Default) + PER_LOAD_NODE = "PER_LOAD_NODE" + # Each endpoint will get it's own k6 process regardless of number of load nodes. 
+ # If there is not enough load nodes, some nodes may have multiple k6 processes + PER_ENDPOINT = "PER_ENDPOINT" + + +class MetaConfig: + def _get_field_formatter(self, field_name: str) -> Callable | None: + data_fields = fields(self) + formatters = [ + field.metadata["formatter"] + for field in data_fields + if field.name == field_name and "formatter" in field.metadata and field.metadata["formatter"] != None + ] + if formatters: + return formatters[0] + + return None + + def __setattr__(self, field_name, value): + formatter = self._get_field_formatter(field_name) + if formatter: + value = formatter(value) + + super().__setattr__(field_name, value) + + +@dataclass +class Preset(MetaConfig): + # ------ COMMON ------ + # Amount of objects which should be created + objects_count: Optional[int] = metadata_field(all_load_scenarios, "preload_obj", None, False) + # Preset json. Filled automatically. + pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON", False) + # Workers count for preset + workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None, False) + # TODO: Deprecated. Acl for container/buckets + acl: Optional[str] = metadata_field(all_load_scenarios, "acl", None, False) + # APE rule for containers instead of deprecated ACL + rule: Optional[list[str]] = metadata_field(grpc_preset_scenarios, "rule", None, False, formatter=force_list) + + # ------ GRPC ------ + # Amount of containers which should be created + containers_count: Optional[int] = metadata_field(grpc_preset_scenarios, "containers", None, False) + # Container placement policy for containers for gRPC + container_placement_policy: Optional[list[str]] = metadata_field(grpc_preset_scenarios, "policy", None, False, formatter=force_list) + # Number of retries for creation of container + container_creation_retry: Optional[int] = metadata_field(grpc_preset_scenarios, "retry", None, False) + + # ------ S3 ------ + # Amount of buckets which should be created + buckets_count: Optional[int] = metadata_field(s3_preset_scenarios, "buckets", None, False) + # S3 region (AKA placement policy for S3 buckets) + s3_location: Optional[list[str]] = metadata_field(s3_preset_scenarios, "location", None, False, formatter=force_list) + + # Delay between containers creation and object upload for preset + object_upload_delay: Optional[int] = metadata_field(all_load_scenarios, "sleep", None, False) + + # Flag to control preset erorrs + ignore_errors: Optional[bool] = metadata_field(all_load_scenarios, "ignore-errors", None, False) + + # Flag to ensure created containers store data on local endpoints + local: Optional[bool] = metadata_field(grpc_preset_scenarios, "local", None, False) + + +@dataclass +class PrometheusParams(MetaConfig): + # Prometheus server URL + server_url: Optional[str] = metadata_field(all_load_scenarios, env_variable="K6_PROMETHEUS_RW_SERVER_URL", string_repr=False) + # Prometheus trend stats + trend_stats: Optional[str] = metadata_field(all_load_scenarios, env_variable="K6_PROMETHEUS_RW_TREND_STATS", string_repr=False) + # Additional tags + metrics_tags: Optional[str] = metadata_field(all_load_scenarios, None, "METRIC_TAGS", False) + + +@dataclass +class LoadParams(MetaConfig): + # ------- CONTROL PARAMS ------- + # Load type can be gRPC, HTTP, S3. + load_type: LoadType + # Load scenario from k6 scenarios + scenario: Optional[LoadScenario] = None + # Strategy to select nodes under load. See NodesSelectionStrategy class for more details. 
+ # default is ALL_EXCEPT_UNDER_TEST + nodes_selection_strategy: Optional[NodesSelectionStrategy] = None + # Strategy which defines which endpoint to select from each storage node + endpoint_selection_strategy: Optional[EndpointSelectionStrategy] = None + # Strategy which defines how K6 processes should be allocated + k6_process_allocation_strategy: Optional[K6ProcessAllocationStrategy] = None + # Set to true in order to verify uploaded objects after K6 load finish. Default is True. + verify: Optional[bool] = None + # Just id for load so distinct it between runs. Filled automatically. + load_id: Optional[str] = None + # Acceptable number of load errors in % + # 100 means 100% errors allowed + # 1.5 means 1.5% errors allowed + # 0 means no errors allowed + error_threshold: Optional[float] = None + # Working directory + working_dir: Optional[str] = None + # Preset for the k6 run + preset: Optional[Preset] = None + # K6 download url + k6_url: Optional[str] = None + # Requests module url + requests_module_url: Optional[str] = None + # aws cli download url + awscli_url: Optional[str] = None + # No ssl verification flag + no_verify_ssl: Optional[bool] = metadata_field( + [ + LoadScenario.S3, + LoadScenario.S3_CAR, + LoadScenario.S3_MULTIPART, + LoadScenario.S3_LOCAL, + LoadScenario.VERIFY, + LoadScenario.HTTP, + ], + "no-verify-ssl", + "NO_VERIFY_SSL", + False, + ) + # Percentage of filling of all data disks on all nodes + fill_percent: Optional[float] = None + # if specified, max payload size in GB of the storage engine. If the storage engine is already full, no new objects will be saved. + max_total_size_gb: Optional[float] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "MAX_TOTAL_SIZE_GB") + # if set, the payload is generated on the fly and is not read into memory fully. + streaming: Optional[int] = metadata_field(all_load_scenarios, None, "STREAMING", False) + # Output format + output: Optional[str] = metadata_field(all_load_scenarios, None, "K6_OUT", False) + # Prometheus params + prometheus: Optional[PrometheusParams] = None + + # ------- COMMON SCENARIO PARAMS ------- + # Load time is the maximum duration for k6 to give load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value. + load_time: Optional[int] = metadata_field(all_load_scenarios, None, "DURATION", False, formatter=convert_time_to_seconds) + # Object size in KB for load and preset. + object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE", False) + # For read operations, controls from which set get objects to read + read_from: Optional[ReadFrom] = None + # For read operations done from REGISTRY, controls delay which object should live before it will be used for read operation + read_age: Optional[int] = metadata_field(all_load_scenarios, None, "READ_AGE", False) + # Output registry K6 file. Filled automatically. + registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE", False) + # In case if we want to use custom registry file left from another load run + custom_registry: Optional[str] = None + # In case if we want to use custom registry file left from another load run + force_fresh_registry: Optional[bool] = None + # Specifies the minimum duration of every single execution (i.e. iteration). + # Any iterations that are shorter than this value will cause that VU to + # sleep for the remainder of the time until the specified minimum duration is reached. 
+ min_iteration_duration: Optional[str] = metadata_field(all_load_scenarios, None, "K6_MIN_ITERATION_DURATION", False) + # Prepare/cut objects locally on client before sending + prepare_locally: Optional[bool] = metadata_field([LoadScenario.gRPC, LoadScenario.gRPC_CAR], None, "PREPARE_LOCALLY", False) + # Specifies K6 setupTimeout time. Currently hardcoded in xk6 as 5 seconds for all scenarios + # https://k6.io/docs/using-k6/k6-options/reference/#setup-timeout + setup_timeout: Optional[str] = metadata_field(all_scenarios, None, "K6_SETUP_TIMEOUT", False) + + # Delay for read operations in case if we read from registry + read_age: Optional[int] = metadata_field(all_load_scenarios, None, "READ_AGE", None, False) + + # Initialization time for each VU for k6 load + vu_init_time: Optional[float] = None + + # ------- CONSTANT VUS SCENARIO PARAMS ------- + # Amount of Writers VU. + writers: Optional[int] = metadata_field(constant_vus_scenarios, None, "WRITERS", True, True) + # Amount of Readers VU. + readers: Optional[int] = metadata_field(constant_vus_scenarios, None, "READERS", True, True) + # Amount of Deleters VU. + deleters: Optional[int] = metadata_field(constant_vus_scenarios, None, "DELETERS", True, True) + + # ------- CONSTANT ARRIVAL RATE SCENARIO PARAMS ------- + # Number of iterations to start during each timeUnit period for write. + write_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "WRITE_RATE", True, True) + + # Number of iterations to start during each timeUnit period for read. + read_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "READ_RATE", True, True) + + # Number of iterations to start during each timeUnit period for delete. + delete_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "DELETE_RATE", True, True) + + # Amount of preAllocatedVUs for write operations. + preallocated_writers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_WRITERS", True, True) + # Amount of maxVUs for write operations. + max_writers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_WRITERS", False, True) + + # Amount of preAllocatedVUs for read operations. + preallocated_readers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_READERS", True, True) + # Amount of maxVUs for read operations. + max_readers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_READERS", False, True) + + # Amount of preAllocatedVUs for read operations. + preallocated_deleters: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_DELETERS", True, True) + # Amount of maxVUs for delete operations. + max_deleters: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_DELETERS", False, True) + + # Multipart + # Number of parts to upload in parallel + writers_multipart: Optional[int] = metadata_field([LoadScenario.S3_MULTIPART], None, "WRITERS_MULTIPART", False, True) + # part size must be greater than (5 MB) + write_object_part_size: Optional[int] = metadata_field([LoadScenario.S3_MULTIPART], None, "WRITE_OBJ_PART_SIZE", False) + + # Period of time to apply the rate value. + time_unit: Optional[str] = metadata_field(constant_arrival_rate_scenarios, None, "TIME_UNIT", False) + + # ------- VERIFY SCENARIO PARAMS ------- + # Maximum verification time for k6 to verify objects. Default is BACKGROUND_LOAD_MAX_VERIFY_TIME (3600). 
+ verify_time: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "TIME_LIMIT", False) + # Amount of Verification VU. + verify_clients: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "CLIENTS", True, False) + + # ------- LOCAL SCENARIO PARAMS ------- + # Config file location (filled automatically) + config_file: Optional[str] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "CONFIG_FILE", False) + # Config directory location (filled automatically) + config_dir: Optional[str] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "CONFIG_DIR", False) + + def set_id(self, load_id): + self.load_id = load_id + + if self.read_from == ReadFrom.REGISTRY: + self.registry_file = os.path.join(self.working_dir, f"{load_id}_registry.bolt") + + # For now it's okay to have it this way + if self.custom_registry is not None: + self.registry_file = self.custom_registry + + if self.read_from == ReadFrom.PRESET: + self.registry_file = None + + if self.preset: + self.preset.pregen_json = os.path.join(self.working_dir, f"{load_id}_prepare.json") + + def get_k6_vars(self): + env_vars = { + meta_field.metadata["scenario_variable"]: meta_field.value + for meta_field in self._get_meta_fields(self) + if self.scenario in meta_field.metadata["applicable_scenarios"] + and meta_field.metadata["scenario_variable"] + and meta_field.value is not None + } + + return env_vars + + def get_env_vars(self): + env_vars = { + meta_field.metadata["env_variable"]: meta_field.value + for meta_field in self._get_meta_fields(self) + if self.scenario in meta_field.metadata["applicable_scenarios"] + and meta_field.metadata["env_variable"] + and meta_field.value is not None + } + + return env_vars + + def __post_init__(self): + default_scenario_map = { + LoadType.gRPC: LoadScenario.gRPC, + LoadType.HTTP: LoadScenario.HTTP, + LoadType.S3: LoadScenario.S3, + } + + if self.scenario is None: + self.scenario = default_scenario_map[self.load_type] + + def get_preset_arguments(self): + command_args = [ + self._get_preset_argument(meta_field) + for meta_field in self._get_meta_fields(self) + if self.scenario in meta_field.metadata["applicable_scenarios"] + and meta_field.metadata["preset_argument"] + and meta_field.value is not None + and self._get_preset_argument(meta_field) + ] + + return command_args + + def get_init_time(self) -> int: + return math.ceil(self._get_total_vus() * self.vu_init_time) + + def _get_total_vus(self) -> int: + vu_fields = ["writers", "preallocated_writers", "readers", "preallocated_readers"] + data_fields = [getattr(self, field.name) or 0 for field in fields(self) if field.name in vu_fields] + return sum(data_fields) + + def _get_applicable_fields(self): + applicable_fields = [ + meta_field + for meta_field in self._get_meta_fields(self) + if self.scenario in meta_field.metadata["applicable_scenarios"] and meta_field.value + ] + + return applicable_fields + + @staticmethod + def _get_preset_argument(meta_field: MetaField) -> str: + if isinstance(meta_field.value, bool): + # For preset calls, bool values are passed with just -- if the value is True + return f"--{meta_field.metadata['preset_argument']}" if meta_field.value else "" + + if isinstance(meta_field.value, list): + return ( + " ".join(f"--{meta_field.metadata['preset_argument']} '{value}'" for value in meta_field.value) if meta_field.value else "" + ) + + return f"--{meta_field.metadata['preset_argument']} '{meta_field.value}'" + + @staticmethod + def _get_meta_fields(instance) -> list[MetaField]: + 
data_fields = fields(instance) + + fields_with_data = [ + MetaField(field.name, field.metadata, getattr(instance, field.name)) + for field in data_fields + if field.metadata and getattr(instance, field.name) is not None + ] + + for field in data_fields: + actual_field_type = get_args(field.type)[0] if len(get_args(field.type)) else get_args(field.type) + if is_dataclass(actual_field_type) and getattr(instance, field.name): + fields_with_data += LoadParams._get_meta_fields(getattr(instance, field.name)) + + return fields_with_data or [] + + def __str__(self) -> str: + load_type_str = self.scenario.value if self.scenario else self.load_type.value + # TODO: migrate load_params defaults to testlib + if self.object_size is not None: + size, unit = calc_unit(self.object_size, 1) + static_params = [f"{load_type_str} {size:.4g} {unit}"] + else: + static_params = [f"{load_type_str}"] + + dynamic_params = [ + f"{meta_field.name}={meta_field.value}" for meta_field in self._get_applicable_fields() if meta_field.metadata["string_repr"] + ] + params = ", ".join(static_params + dynamic_params) + + return params + + def __repr__(self) -> str: + return self.__str__() diff --git a/src/frostfs_testlib/load/load_metrics.py b/src/frostfs_testlib/load/load_metrics.py new file mode 100644 index 00000000..035ce8b4 --- /dev/null +++ b/src/frostfs_testlib/load/load_metrics.py @@ -0,0 +1,243 @@ +from abc import ABC +from typing import Any, Optional + +from frostfs_testlib.load.load_config import LoadScenario + + +class OperationMetric(ABC): + _NAME = "" + _SUCCESS = "" + _ERRORS = "" + _THROUGHPUT = "" + _LATENCY = "" + + def __init__(self, summary) -> None: + self.summary = summary + self.metrics = summary["metrics"] + + @property + def total_iterations(self) -> int: + return self._get_metric(self._SUCCESS) + self._get_metric(self._ERRORS) + + @property + def success_iterations(self) -> int: + return self._get_metric(self._SUCCESS) + + @property + def latency(self) -> dict: + return self._get_metric(self._LATENCY) + + @property + def rate(self) -> float: + return self._get_metric_rate(self._SUCCESS) + + @property + def failed_iterations(self) -> int: + return self._get_metric(self._ERRORS) + + @property + def throughput(self) -> float: + return self._get_metric_rate(self._THROUGHPUT) + + @property + def total_bytes(self) -> float: + return self._get_metric(self._THROUGHPUT) + + def _get_metric(self, metric: str) -> int: + metrics_method_map = { + "counter": self._get_counter_metric, + "gauge": self._get_gauge_metric, + "trend": self._get_trend_metrics, + } + + if metric not in self.metrics: + return 0 + + metric = self.metrics[metric] + metric_type = metric["type"] + if metric_type not in metrics_method_map: + raise Exception(f"Unsupported metric type: {metric_type}, supported: {metrics_method_map.keys()}") + + return metrics_method_map[metric_type](metric) + + def _get_metric_rate(self, metric: str) -> int: + metrics_method_map = {"counter": self._get_counter_metric_rate} + + if metric not in self.metrics: + return 0 + + metric = self.metrics[metric] + metric_type = metric["type"] + if metric_type not in metrics_method_map: + raise Exception(f"Unsupported rate metric type: {metric_type}, supported: {metrics_method_map.keys()}") + + return metrics_method_map[metric_type](metric) + + def _get_counter_metric_rate(self, metric: str) -> int: + return metric["values"]["rate"] + + def _get_counter_metric(self, metric: str) -> int: + return metric["values"]["count"] + + def _get_gauge_metric(self, metric: str) -> int: + 
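+ # Gauges in the k6 summary JSON keep the last observed value under values.value.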
return metric["values"]["value"] + + def _get_trend_metrics(self, metric: str) -> int: + return metric["values"] + + +class WriteOperationMetric(OperationMetric): + _NAME = "Write" + _SUCCESS = "" + _ERRORS = "" + _THROUGHPUT = "data_sent" + _LATENCY = "" + + +class ReadOperationMetric(OperationMetric): + _NAME = "Read" + _SUCCESS = "" + _ERRORS = "" + _THROUGHPUT = "data_received" + _LATENCY = "" + + +class DeleteOperationMetric(OperationMetric): + _NAME = "Delete" + _SUCCESS = "" + _ERRORS = "" + _THROUGHPUT = "" + _LATENCY = "" + + +class GrpcWriteOperationMetric(WriteOperationMetric): + _SUCCESS = "frostfs_obj_put_success" + _ERRORS = "frostfs_obj_put_fails" + _LATENCY = "frostfs_obj_put_duration" + + +class GrpcReadOperationMetric(ReadOperationMetric): + _SUCCESS = "frostfs_obj_get_success" + _ERRORS = "frostfs_obj_get_fails" + _LATENCY = "frostfs_obj_get_duration" + + +class GrpcDeleteOperationMetric(DeleteOperationMetric): + _SUCCESS = "frostfs_obj_delete_success" + _ERRORS = "frostfs_obj_delete_fails" + _LATENCY = "frostfs_obj_delete_duration" + + +class S3WriteOperationMetric(WriteOperationMetric): + _SUCCESS = "aws_obj_put_success" + _ERRORS = "aws_obj_put_fails" + _LATENCY = "aws_obj_put_duration" + + +class S3ReadOperationMetric(ReadOperationMetric): + _SUCCESS = "aws_obj_get_success" + _ERRORS = "aws_obj_get_fails" + _LATENCY = "aws_obj_get_duration" + + +class S3DeleteOperationMetric(DeleteOperationMetric): + _SUCCESS = "aws_obj_delete_success" + _ERRORS = "aws_obj_delete_fails" + _LATENCY = "aws_obj_delete_duration" + + +class S3LocalWriteOperationMetric(WriteOperationMetric): + _SUCCESS = "s3local_obj_put_success" + _ERRORS = "s3local_obj_put_fails" + _LATENCY = "s3local_obj_put_duration" + + +class S3LocalReadOperationMetric(ReadOperationMetric): + _SUCCESS = "s3local_obj_get_success" + _ERRORS = "s3local_obj_get_fails" + _LATENCY = "s3local_obj_get_duration" + + +class LocalWriteOperationMetric(WriteOperationMetric): + _SUCCESS = "local_obj_put_success" + _ERRORS = "local_obj_put_fails" + _LATENCY = "local_obj_put_duration" + + +class LocalReadOperationMetric(ReadOperationMetric): + _SUCCESS = "local_obj_get_success" + _ERRORS = "local_obj_get_fails" + + +class LocalDeleteOperationMetric(DeleteOperationMetric): + _SUCCESS = "local_obj_delete_success" + _ERRORS = "local_obj_delete_fails" + + +class VerifyReadOperationMetric(ReadOperationMetric): + _SUCCESS = "verified_obj" + _ERRORS = "invalid_obj" + + +class MetricsBase(ABC): + def __init__(self) -> None: + self.write: Optional[WriteOperationMetric] = None + self.read: Optional[ReadOperationMetric] = None + self.delete: Optional[DeleteOperationMetric] = None + + @property + def operations(self) -> list[OperationMetric]: + return [metric for metric in [self.write, self.read, self.delete] if metric is not None] + + +class GrpcMetrics(MetricsBase): + def __init__(self, summary) -> None: + super().__init__() + self.write = GrpcWriteOperationMetric(summary) + self.read = GrpcReadOperationMetric(summary) + self.delete = GrpcDeleteOperationMetric(summary) + + +class S3Metrics(MetricsBase): + def __init__(self, summary) -> None: + super().__init__() + self.write = S3WriteOperationMetric(summary) + self.read = S3ReadOperationMetric(summary) + self.delete = S3DeleteOperationMetric(summary) + + +class S3LocalMetrics(MetricsBase): + def __init__(self, summary) -> None: + super().__init__() + self.write = S3LocalWriteOperationMetric(summary) + self.read = S3LocalReadOperationMetric(summary) + + +class LocalMetrics(MetricsBase): + def 
__init__(self, summary) -> None: + super().__init__() + self.write = LocalWriteOperationMetric(summary) + self.read = LocalReadOperationMetric(summary) + self.delete = LocalDeleteOperationMetric(summary) + + +class VerifyMetrics(MetricsBase): + def __init__(self, summary) -> None: + super().__init__() + self.read = VerifyReadOperationMetric(summary) + + +def get_metrics_object(load_type: LoadScenario, summary: dict[str, Any]) -> MetricsBase: + class_map = { + LoadScenario.gRPC: GrpcMetrics, + LoadScenario.gRPC_CAR: GrpcMetrics, + LoadScenario.HTTP: GrpcMetrics, + LoadScenario.S3: S3Metrics, + LoadScenario.S3_CAR: S3Metrics, + LoadScenario.S3_MULTIPART: S3Metrics, + LoadScenario.S3_LOCAL: S3LocalMetrics, + LoadScenario.VERIFY: VerifyMetrics, + LoadScenario.LOCAL: LocalMetrics, + } + + return class_map[load_type](summary) diff --git a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py new file mode 100644 index 00000000..2dfac26e --- /dev/null +++ b/src/frostfs_testlib/load/load_report.py @@ -0,0 +1,178 @@ +from datetime import datetime +from typing import Optional + +import yaml + +from frostfs_testlib.load.interfaces.summarized import SummarizedStats +from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario +from frostfs_testlib.utils.converting_utils import calc_unit + + +class LoadReport: + def __init__(self, load_test) -> None: + self.load_test = load_test + # List of load summaries dict + self.load_summaries_list: Optional[list[dict]] = [] + self.load_params: Optional[LoadParams] = None + self.start_time: Optional[datetime] = None + self.end_time: Optional[datetime] = None + + def set_start_time(self, time: datetime = None): + if time is None: + time = datetime.utcnow() + self.start_time = time + + def set_end_time(self, time: datetime = None): + if time is None: + time = datetime.utcnow() + self.end_time = time + + def add_summaries(self, load_summaries: dict): + self.load_summaries_list.append(load_summaries) + + def set_load_params(self, load_params: LoadParams): + self.load_params = load_params + + def get_report_html(self): + report_sections = [ + [self.load_params, self._get_load_id_section_html], + [self.load_test, self._get_load_params_section_html], + [self.load_summaries_list, self._get_totals_section_html], + [self.end_time, self._get_test_time_html], + ] + + html = "" + for section in report_sections: + if section[0] is not None: + html += section[1]() + + return html + + def _get_load_params_section_html(self) -> str: + params: str = yaml.safe_dump([self.load_test], sort_keys=False, indent=2, explicit_start=True) + params = params.replace("\n", "
").replace(" ", " ") + section_html = f"""

<h3>Scenario params</h3>
+
+<pre>{params}</pre>
+<hr>"""
+
+ return section_html
+
+ def _get_load_id_section_html(self) -> str:
+ section_html = f"""<h3>Load ID: {self.load_params.load_id}</h3>
+<hr>"""
+
+ return section_html
+
+ def _get_test_time_html(self) -> str:
+ if not self.start_time or not self.end_time:
+ return ""
+
+ html = f"""<h3>Scenario duration</h3>
+{self.start_time} - {self.end_time}<br>
+<hr>
+ """
+
+ return html
+
+ def _seconds_to_formatted_duration(self, seconds: int) -> str:
+ """Converts N number of seconds to formatted output ignoring zeroes.
+ Examples:
+ 186399 -> "2d3h46m39s"
+ 86399 -> "23h59m59s"
+ 3605 -> "1h5s"
+ 123 -> "2m3s"
+ """
+ units = {"d": 86400, "h": 3600, "m": 60, "s": 1}
+ parts = []
+ remaining = seconds
+ for divisor in units.values():
+ part = remaining // divisor
+ remaining -= divisor * part
+ parts.append(part)
+
+ return "".join([f"{val}{unit}" for unit, val in zip(units, parts) if val > 0])
+
+ def _row(self, caption: str, value: str) -> str:
+ return f"<tr><th>{caption}</th><td>{value}</td></tr>"
+
+ def _get_model_string(self):
+ if self.load_params.min_iteration_duration is not None:
+ return f"min_iteration_duration={self.load_params.min_iteration_duration}"
+
+ model_map = {
+ LoadScenario.gRPC: "closed model",
+ LoadScenario.S3: "closed model",
+ LoadScenario.S3_MULTIPART: "closed model",
+ LoadScenario.HTTP: "closed model",
+ LoadScenario.gRPC_CAR: "open model",
+ LoadScenario.S3_CAR: "open model",
+ LoadScenario.LOCAL: "local fill",
+ LoadScenario.S3_LOCAL: "local fill",
+ }
+
+ return model_map[self.load_params.scenario]
+
+ def _get_operations_sub_section_html(self, operation_type: str, stats: SummarizedStats):
+ throughput_html = ""
+ if stats.throughput > 0:
+ throughput, unit = calc_unit(stats.throughput)
+ throughput_html = self._row("Throughput", f"{throughput:.2f} {unit}/sec")
+
+ bytes_html = ""
+ if stats.total_bytes > 0:
+ total_bytes, total_bytes_unit = calc_unit(stats.total_bytes)
+ bytes_html = self._row("Total transferred", f"{total_bytes:.2f} {total_bytes_unit}")
+
+ per_node_errors_html = ""
+ for node_key, errors in stats.errors.by_node.items():
+ if self.load_params.k6_process_allocation_strategy == K6ProcessAllocationStrategy.PER_ENDPOINT:
+ per_node_errors_html += self._row(f"At {node_key}", errors)
+
+ latency_html = ""
+ for node_key, latencies in stats.latencies.by_node.items():
+ latency_values = "N/A"
+ if latencies:
+ latency_values = ""
+ for param_name, param_val in latencies.items():
+ latency_values += f"{param_name}={param_val:.2f}ms "
+
+ latency_html += self._row(f"{operation_type} latency {node_key.split(':')[0]}", latency_values)
+
+ object_size, object_size_unit = calc_unit(self.load_params.object_size, 1)
+ duration = self._seconds_to_formatted_duration(self.load_params.load_time)
+ model = self._get_model_string()
+ requested_rate_str = f"{stats.requested_rate}op/sec" if stats.requested_rate else ""
+ # write 8KB 15h49m 50op/sec 50th open model/closed model/min_iteration duration=1s - 1.636MB/s 199.57451/s
+ short_summary = f"{operation_type} {object_size}{object_size_unit} {duration} {requested_rate_str} {stats.threads}th {model} - {throughput:.2f}{unit}/s {stats.rate:.2f}/s"
+
+ html = f"""
+ <table border="1" cellpadding="5px"><tbody>
+ <tr><th colspan="2" bgcolor="gainsboro">{short_summary}</th></tr>
+ <tr><th colspan="2" bgcolor="gainsboro">Metrics</th></tr>
+ {self._row("Total operations", stats.operations)}
+ {self._row("OP/sec", f"{stats.rate:.2f}")}
+ {bytes_html}
+ {throughput_html}
+ {latency_html}
+ <tr><th colspan="2" bgcolor="gainsboro">Errors</th></tr>
+ {per_node_errors_html}
+ {self._row("Total", f"{stats.errors.total} ({stats.errors.percent:.2f}%)")}
+ {self._row("Threshold", f"{stats.errors.threshold:.2f}%")}
+ </tbody></table>
+ <br><hr>
+ """
+
+ return html
+
+ def _get_totals_section_html(self):
+ html = ""
+ for i in range(len(self.load_summaries_list)):
+ html += f"<h3>Load Results for load #{i+1}</h3>
" + + summarized = SummarizedStats.collect(self.load_params, self.load_summaries_list[i]) + for operation_type, stats in summarized.items(): + if stats.operations: + html += self._get_operations_sub_section_html(operation_type, stats) + + return html diff --git a/src/frostfs_testlib/load/load_verifiers.py b/src/frostfs_testlib/load/load_verifiers.py new file mode 100644 index 00000000..97b0ffaf --- /dev/null +++ b/src/frostfs_testlib/load/load_verifiers.py @@ -0,0 +1,68 @@ +from frostfs_testlib import reporter +from frostfs_testlib.load.interfaces.summarized import SummarizedStats +from frostfs_testlib.load.load_config import LoadParams, LoadScenario +from frostfs_testlib.load.load_metrics import get_metrics_object + + +class LoadVerifier: + def __init__(self, load_params: LoadParams) -> None: + self.load_params = load_params + + def collect_load_issues(self, load_summaries: dict[str, dict]) -> list[str]: + summarized = SummarizedStats.collect(self.load_params, load_summaries) + issues = [] + + for operation_type, stats in summarized.items(): + if stats.threads and not stats.operations: + issues.append(f"No any {operation_type.lower()} operation was performed") + + if stats.errors.percent > stats.errors.threshold: + rate_str = self._get_rate_str(stats.errors.percent) + issues.append(f"{operation_type} errors exceeded threshold: {rate_str} > {stats.errors.threshold}%") + + return issues + + def collect_verify_issues(self, load_summaries, verification_summaries) -> list[str]: + verify_issues: list[str] = [] + for k6_process_label in load_summaries: + with reporter.step(f"Check verify scenario results for {k6_process_label}"): + verify_issues.extend( + self._collect_verify_issues_on_process( + k6_process_label, + load_summaries[k6_process_label], + verification_summaries[k6_process_label], + ) + ) + return verify_issues + + def _get_rate_str(self, rate: float, minimal: float = 0.01) -> str: + return f"{rate:.2f}%" if rate >= minimal else f"~{minimal}%" + + def _collect_verify_issues_on_process(self, label, load_summary, verification_summary) -> list[str]: + issues = [] + + load_metrics = get_metrics_object(self.load_params.scenario, load_summary) + + writers = self.load_params.writers or self.load_params.preallocated_writers or 0 + deleters = self.load_params.deleters or self.load_params.preallocated_deleters or 0 + + delete_success = 0 + + if deleters > 0: + delete_success = load_metrics.delete.success_iterations + + if verification_summary: + verify_metrics = get_metrics_object(LoadScenario.VERIFY, verification_summary) + verified_objects = verify_metrics.read.success_iterations + invalid_objects = verify_metrics.read.failed_iterations + total_left_objects = load_metrics.write.success_iterations - delete_success + + if invalid_objects > 0: + issues.append(f"There were {invalid_objects} verification fails (hash mismatch).") + # Due to interruptions we may see total verified objects to be less than written on writers count + if abs(total_left_objects - verified_objects) > writers: + issues.append( + f"Verified objects mismatch for {label}. Total: {total_left_objects}, Verified: {verified_objects}. Writers: {writers}." 
+ ) + + return issues diff --git a/src/frostfs_testlib/load/loaders.py b/src/frostfs_testlib/load/loaders.py new file mode 100644 index 00000000..1e0e97f4 --- /dev/null +++ b/src/frostfs_testlib/load/loaders.py @@ -0,0 +1,60 @@ +from frostfs_testlib.load.interfaces.loader import Loader +from frostfs_testlib.resources.load_params import ( + LOAD_NODE_SSH_PASSWORD, + LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE, + LOAD_NODE_SSH_PRIVATE_KEY_PATH, + LOAD_NODE_SSH_USER, +) +from frostfs_testlib.shell.interfaces import Shell, SshCredentials +from frostfs_testlib.shell.ssh_shell import SSHShell +from frostfs_testlib.storage.cluster import ClusterNode + + +class RemoteLoader(Loader): + def __init__(self, ssh_credentials: SshCredentials, ip: str) -> None: + self.ssh_credentials = ssh_credentials + self._ip = ip + + @property + def ip(self): + return self._ip + + def get_shell(self) -> Shell: + ssh_client = SSHShell( + host=self.ip, + login=self.ssh_credentials.ssh_login, + password=self.ssh_credentials.ssh_password, + private_key_path=self.ssh_credentials.ssh_key_path, + private_key_passphrase=self.ssh_credentials.ssh_key_passphrase, + ) + + return ssh_client + + @classmethod + def from_ip_list(cls, ip_list: list[str]) -> list[Loader]: + loaders: list[Loader] = [] + ssh_credentials = SshCredentials( + LOAD_NODE_SSH_USER, + LOAD_NODE_SSH_PASSWORD, + LOAD_NODE_SSH_PRIVATE_KEY_PATH, + LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE, + ) + + for ip in ip_list: + loaders.append(RemoteLoader(ssh_credentials, ip)) + + return loaders + + +class NodeLoader(Loader): + """When ClusterNode is the loader for itself (for Local scenario only).""" + + def __init__(self, cluster_node: ClusterNode) -> None: + self.cluster_node = cluster_node + + def get_shell(self) -> Shell: + return self.cluster_node.host.get_shell() + + @property + def ip(self): + return self.cluster_node.host_ip diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py new file mode 100644 index 00000000..1ceac091 --- /dev/null +++ b/src/frostfs_testlib/load/runners.py @@ -0,0 +1,466 @@ +import copy +import itertools +import math +import time +from dataclasses import fields +from threading import Event +from typing import Optional +from urllib.parse import urlparse + +from frostfs_testlib import reporter +from frostfs_testlib.credentials.interfaces import S3Credentials, User +from frostfs_testlib.load.interfaces.loader import Loader +from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner +from frostfs_testlib.load.k6 import K6 +from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadType +from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader +from frostfs_testlib.resources import optionals +from frostfs_testlib.resources.common import STORAGE_USER_NAME +from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_VUS_COUNT_DIVISOR, LOAD_NODE_SSH_USER, LOAD_NODES +from frostfs_testlib.shell.command_inspectors import SuInspector +from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput +from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController +from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode +from frostfs_testlib.testing import parallel, run_optionally +from frostfs_testlib.testing.test_control import retry +from frostfs_testlib.utils import datetime_utils +from frostfs_testlib.utils.file_keeper import FileKeeper + + 
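+# Runner flavours below: DefaultRunner drives k6 from dedicated load nodes over SSH (RemoteLoader),
+# while LocalRunner and S3LocalRunner run k6 directly on the storage nodes under test (NodeLoader).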
+class RunnerBase(ScenarioRunner): + k6_instances: list[K6] + loaders: list[Loader] + + @reporter.step("Run preset on loaders") + def preset(self): + parallel([k6.preset for k6 in self.k6_instances]) + + @reporter.step("Wait until load finish") + def wait_until_finish(self, soft_timeout: int = 0): + event = Event() + parallel([k6.wait_until_finished for k6 in self.k6_instances], event=event, soft_timeout=soft_timeout) + + @property + def is_running(self): + futures = parallel([k6.is_running for k6 in self.k6_instances]) + + return any([future.result() for future in futures]) + + def get_k6_instances(self): + return self.k6_instances + + def get_loaders(self) -> list[Loader]: + return self.loaders + + +class DefaultRunner(RunnerBase): + user: User + + def __init__( + self, + user: User, + load_ip_list: Optional[list[str]] = None, + ) -> None: + if load_ip_list is None: + load_ip_list = LOAD_NODES + self.loaders = RemoteLoader.from_ip_list(load_ip_list) + self.user = user + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + @reporter.step("Preparation steps") + def prepare( + self, + load_params: LoadParams, + cluster_nodes: list[ClusterNode], + nodes_under_load: list[ClusterNode], + k6_dir: str, + ): + if load_params.force_fresh_registry and load_params.custom_registry: + with reporter.step("Forcing fresh registry files"): + parallel(self._force_fresh_registry, self.loaders, load_params) + + if load_params.load_type != LoadType.S3: + return + + with reporter.step("Init s3 client on loaders"): + s3_credentials = self.user.s3_credentials + parallel(self._aws_configure_on_loader, self.loaders, s3_credentials) + + def _force_fresh_registry(self, loader: Loader, load_params: LoadParams): + with reporter.step(f"Forcing fresh registry on {loader.ip}"): + shell = loader.get_shell() + shell.exec(f"rm -f {load_params.registry_file}") + + def _aws_configure_on_loader( + self, + loader: Loader, + s3_credentials: S3Credentials, + ): + with reporter.step(f"Aws configure on {loader.ip}"): + configure_input = [ + InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=s3_credentials.access_key), + InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=s3_credentials.secret_key), + InteractiveInput(prompt_pattern=r".*", input=""), + InteractiveInput(prompt_pattern=r".*", input=""), + ] + loader.get_shell().exec("aws configure", CommandOptions(interactive_inputs=configure_input)) + + @reporter.step("Init k6 instances") + def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): + self.k6_instances = [] + cycled_loaders = itertools.cycle(self.loaders) + + k6_distribution_count = { + K6ProcessAllocationStrategy.PER_LOAD_NODE: len(self.loaders), + K6ProcessAllocationStrategy.PER_ENDPOINT: len(endpoints), + } + endpoints_generators = { + K6ProcessAllocationStrategy.PER_LOAD_NODE: itertools.cycle([endpoints]), + K6ProcessAllocationStrategy.PER_ENDPOINT: itertools.cycle([[endpoint] for endpoint in endpoints]), + } + k6_processes_count = k6_distribution_count[load_params.k6_process_allocation_strategy] + endpoints_gen = endpoints_generators[load_params.k6_process_allocation_strategy] + + distributed_load_params_list = self._get_distributed_load_params_list(load_params, k6_processes_count) + + futures = parallel( + self._init_k6_instance, + distributed_load_params_list, + loader=cycled_loaders, + endpoints=endpoints_gen, + k6_dir=k6_dir, + ) + self.k6_instances = [future.result() for future in futures] + + def _init_k6_instance(self, 
load_params_for_loader: LoadParams, loader: Loader, endpoints: list[str], k6_dir: str): + shell = loader.get_shell() + with reporter.step(f"Init K6 instance on {loader.ip} for endpoints {endpoints}"): + with reporter.step(f"Make working directory"): + shell.exec(f"sudo mkdir -p {load_params_for_loader.working_dir}") + shell.exec(f"sudo chown {LOAD_NODE_SSH_USER} {load_params_for_loader.working_dir}") + + return K6( + load_params_for_loader, + endpoints, + k6_dir, + shell, + loader, + self.user, + ) + + def _get_distributed_load_params_list(self, original_load_params: LoadParams, workers_count: int) -> list[LoadParams]: + divisor = int(BACKGROUND_LOAD_VUS_COUNT_DIVISOR) + distributed_load_params: list[LoadParams] = [] + + for i in range(workers_count): + load_params = copy.deepcopy(original_load_params) + # Append #i here in case if multiple k6 processes goes into same load node + load_params.set_id(f"{load_params.load_id}_{i}") + distributed_load_params.append(load_params) + + load_fields = fields(original_load_params) + + for field in load_fields: + if ( + field.metadata + and original_load_params.scenario in field.metadata["applicable_scenarios"] + and field.metadata["distributed"] + and getattr(original_load_params, field.name) is not None + ): + original_value = getattr(original_load_params, field.name) + distribution = self._get_distribution(math.ceil(original_value / divisor), workers_count) + for i in range(workers_count): + setattr(distributed_load_params[i], field.name, distribution[i]) + + return distributed_load_params + + def _get_distribution(self, clients_count: int, workers_count: int) -> list[int]: + """ + This function will distribute evenly as possible X clients to Y workers. + For example if we have 150 readers (clients) and we want to spread it over 4 load nodes (workers) + this will return [38, 38, 37, 37]. + + Args: + clients_count: amount of things needs to be distributed. + workers_count: amount of workers. + + Returns: + list of distribution. 
+ """ + if workers_count < 1: + raise Exception("Workers cannot be less then 1") + + # Amount of guaranteed payload on one worker + clients_per_worker = clients_count // workers_count + # Remainder of clients left to be distributed + remainder = clients_count - clients_per_worker * workers_count + + distribution = [clients_per_worker + 1 if i < remainder else clients_per_worker for i in range(workers_count)] + return distribution + + def start(self): + load_params = self.k6_instances[0].load_params + + parallel([k6.start for k6 in self.k6_instances]) + + wait_after_start_time = datetime_utils.parse_time(load_params.setup_timeout) + 5 + with reporter.step(f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on"): + time.sleep(wait_after_start_time) + + def stop(self): + for k6_instance in self.k6_instances: + k6_instance.stop() + + def get_results(self) -> dict: + results = {} + for k6_instance in self.k6_instances: + if k6_instance.load_params.k6_process_allocation_strategy is None: + raise RuntimeError("k6_process_allocation_strategy should not be none") + + result = k6_instance.get_results() + endpoint = urlparse(k6_instance.endpoints[0]).netloc or k6_instance.endpoints[0] + keys_map = { + K6ProcessAllocationStrategy.PER_LOAD_NODE: k6_instance.loader.ip, + K6ProcessAllocationStrategy.PER_ENDPOINT: endpoint, + } + key = keys_map[k6_instance.load_params.k6_process_allocation_strategy] + results[key] = result + + return results + + +class LocalRunner(RunnerBase): + cluster_state_controller: ClusterStateController + file_keeper: FileKeeper + user: User + + def __init__( + self, + cluster_state_controller: ClusterStateController, + file_keeper: FileKeeper, + nodes_under_load: list[ClusterNode], + user: User, + ) -> None: + self.cluster_state_controller = cluster_state_controller + self.file_keeper = file_keeper + self.loaders = [NodeLoader(node) for node in nodes_under_load] + self.nodes_under_load = nodes_under_load + self.user = user + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + @reporter.step("Preparation steps") + def prepare( + self, + load_params: LoadParams, + cluster_nodes: list[ClusterNode], + nodes_under_load: list[ClusterNode], + k6_dir: str, + ): + parallel(self.prepare_node, nodes_under_load, k6_dir, load_params) + + @retry(3, 5, expected_result=True) + def allow_user_to_login_in_system(self, cluster_node: ClusterNode): + shell = cluster_node.host.get_shell() + + result = None + try: + shell.exec(f"sudo chsh -s /bin/bash {STORAGE_USER_NAME}") + self.lock_passwd_on_node(cluster_node) + options = CommandOptions(check=False, extra_inspectors=[SuInspector(STORAGE_USER_NAME)]) + result = shell.exec("whoami", options) + finally: + if not result or result.return_code: + self.restore_passwd_on_node(cluster_node) + return False + + return True + + @reporter.step("Prepare node {cluster_node}") + def prepare_node(self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams): + shell = cluster_node.host.get_shell() + + with reporter.step("Allow storage user to login into system"): + self.allow_user_to_login_in_system(cluster_node) + + with reporter.step("Update limits.conf"): + limits_path = "/etc/security/limits.conf" + self.file_keeper.add(cluster_node.storage_node, limits_path) + content = f"{STORAGE_USER_NAME} hard nofile 65536\n{STORAGE_USER_NAME} soft nofile 65536\n" + shell.exec(f"echo '{content}' | sudo tee {limits_path}") + + with reporter.step("Download K6"): + shell.exec(f"sudo rm -rf {k6_dir};sudo mkdir {k6_dir}") + 
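+ # load_params.k6_url is assumed to point at a tarball with the k6/xk6 build; it is fetched and unpacked into k6_dir below.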
shell.exec(f"sudo curl -so {k6_dir}/k6.tar.gz {load_params.k6_url}") + shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz --strip-components 2 -C {k6_dir}") + shell.exec(f"sudo chmod -R 777 {k6_dir}") + + with reporter.step("chmod 777 wallet related files on loader"): + shell.exec(f"sudo chmod -R 777 {self.user.wallet.config_path}") + shell.exec(f"sudo chmod -R 777 {self.user.wallet.path}") + + @reporter.step("Init k6 instances") + def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): + self.k6_instances = [] + futures = parallel( + self._init_k6_instance, + self.loaders, + load_params, + k6_dir, + ) + self.k6_instances = [future.result() for future in futures] + + def _init_k6_instance(self, loader: Loader, load_params: LoadParams, k6_dir: str): + shell = loader.get_shell() + with reporter.step(f"Init K6 instance on {loader.ip}"): + with reporter.step(f"Make working directory"): + shell.exec(f"sudo mkdir -p {load_params.working_dir}") + # If we chmod /home/ folder we can no longer ssh to the node + # !! IMPORTANT !! + if ( + load_params.working_dir + and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}" + and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}/" + ): + shell.exec(f"sudo chmod -R 777 {load_params.working_dir}") + + return K6( + load_params, + ["localhost:8080"], + k6_dir, + shell, + loader, + self.user, + ) + + def start(self): + load_params = self.k6_instances[0].load_params + + self.cluster_state_controller.stop_services_of_type(S3Gate) + self.cluster_state_controller.stop_services_of_type(StorageNode) + + parallel([k6.start for k6 in self.k6_instances]) + + wait_after_start_time = datetime_utils.parse_time(load_params.setup_timeout) + 5 + with reporter.step(f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on"): + time.sleep(wait_after_start_time) + + @reporter.step("Restore passwd on {cluster_node}") + def restore_passwd_on_node(self, cluster_node: ClusterNode): + shell = cluster_node.host.get_shell() + shell.exec("sudo chattr -i /etc/passwd") + + @reporter.step("Lock passwd on {cluster_node}") + def lock_passwd_on_node(self, cluster_node: ClusterNode): + shell = cluster_node.host.get_shell() + shell.exec("sudo chattr +i /etc/passwd") + + def stop(self): + for k6_instance in self.k6_instances: + k6_instance.stop() + + self.cluster_state_controller.start_all_stopped_services() + + def get_results(self) -> dict: + results = {} + for k6_instance in self.k6_instances: + result = k6_instance.get_results() + results[k6_instance.loader.ip] = result + + parallel(self.restore_passwd_on_node, self.nodes_under_load) + + return results + + +class S3LocalRunner(LocalRunner): + endpoints: list[str] + k6_dir: str + + @reporter.step("Run preset on loaders") + def preset(self): + LocalRunner.preset(self) + with reporter.step(f"Resolve containers in preset"): + parallel(self._resolve_containers_in_preset, self.k6_instances) + + @reporter.step("Resolve containers in preset") + def _resolve_containers_in_preset(self, k6_instance: K6): + k6_instance.shell.exec( + f"sudo {self.k6_dir}/scenarios/preset/resolve_containers_in_preset.py --endpoint {k6_instance.endpoints[0]} --preset_file {k6_instance.load_params.preset.pregen_json}" + ) + + @reporter.step("Init k6 instances") + def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): + self.k6_instances = [] + futures = parallel( + self._init_k6_instance_, + self.loaders, + load_params, + endpoints, + k6_dir, + ) + 
self.k6_instances = [future.result() for future in futures] + + def _init_k6_instance_(self, loader: Loader, load_params: LoadParams, endpoints: list[str], k6_dir: str): + shell = loader.get_shell() + with reporter.step(f"Init K6 instance on {loader.ip} for endpoints {endpoints}"): + with reporter.step(f"Make working directory"): + shell.exec(f"sudo mkdir -p {load_params.working_dir}") + # If we chmod /home/ folder we can no longer ssh to the node + # !! IMPORTANT !! + if ( + load_params.working_dir + and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}" + and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}/" + ): + shell.exec(f"sudo chmod -R 777 {load_params.working_dir}") + + return K6( + load_params, + self.endpoints, + k6_dir, + shell, + loader, + self.user, + ) + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + @reporter.step("Preparation steps") + def prepare( + self, + load_params: LoadParams, + cluster_nodes: list[ClusterNode], + nodes_under_load: list[ClusterNode], + k6_dir: str, + ): + self.k6_dir = k6_dir + parallel(self.prepare_node, nodes_under_load, k6_dir, load_params, cluster_nodes) + + @reporter.step("Prepare node {cluster_node}") + def prepare_node(self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams, cluster_nodes: list[ClusterNode]): + LocalRunner.prepare_node(self, cluster_node, k6_dir, load_params) + self.endpoints = cluster_node.s3_gate.get_all_endpoints() + shell = cluster_node.host.get_shell() + + with reporter.step("Uninstall previous installation of aws cli"): + shell.exec(f"sudo rm -rf /usr/local/aws-cli") + shell.exec(f"sudo rm -rf /usr/local/bin/aws") + shell.exec(f"sudo rm -rf /usr/local/bin/aws_completer") + + with reporter.step("Install aws cli"): + shell.exec(f"sudo curl {load_params.awscli_url} -o {k6_dir}/awscliv2.zip") + shell.exec(f"sudo unzip -q {k6_dir}/awscliv2.zip -d {k6_dir}") + shell.exec(f"sudo {k6_dir}/aws/install") + + with reporter.step("Install requests python module"): + shell.exec(f"sudo apt-get -y install python3-pip") + shell.exec(f"sudo curl -so {k6_dir}/requests.tar.gz {load_params.requests_module_url}") + shell.exec(f"sudo python3 -m pip install -I {k6_dir}/requests.tar.gz") + + with reporter.step(f"Init s3 client on {cluster_node.host_ip}"): + configure_input = [ + InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=self.user.s3_credentials.access_key), + InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=self.user.s3_credentials.secret_key), + InteractiveInput(prompt_pattern=r".*", input=""), + InteractiveInput(prompt_pattern=r".*", input=""), + ] + shell.exec("aws configure", CommandOptions(interactive_inputs=configure_input)) diff --git a/src/frostfs_testlib/plugins/__init__.py b/src/frostfs_testlib/plugins/__init__.py index fcd7acc6..26b24411 100644 --- a/src/frostfs_testlib/plugins/__init__.py +++ b/src/frostfs_testlib/plugins/__init__.py @@ -1,12 +1,6 @@ -import sys +from importlib.metadata import entry_points from typing import Any -if sys.version_info < (3, 10): - # On Python prior 3.10 we need to use backport of entry points - from importlib_metadata import entry_points -else: - from importlib.metadata import entry_points - def load_plugin(plugin_group: str, name: str) -> Any: """Loads plugin using entry point specification. @@ -23,3 +17,16 @@ def load_plugin(plugin_group: str, name: str) -> Any: return None plugin = plugins[name] return plugin.load() + + +def load_all(group: str) -> Any: + """Loads all plugins using entry point specification. 
+ + Args: + group: Name of plugin group. + + Returns: + Classes from specified group. + """ + plugins = entry_points(group=group) + return [plugin.load() for plugin in plugins] diff --git a/src/frostfs_testlib/processes/remote_process.py b/src/frostfs_testlib/processes/remote_process.py new file mode 100644 index 00000000..071675a1 --- /dev/null +++ b/src/frostfs_testlib/processes/remote_process.py @@ -0,0 +1,280 @@ +from __future__ import annotations + +import os +import uuid +from typing import Optional + +from tenacity import retry +from tenacity.stop import stop_after_attempt +from tenacity.wait import wait_fixed + +from frostfs_testlib import reporter +from frostfs_testlib.shell import Shell +from frostfs_testlib.shell.command_inspectors import SuInspector +from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions + + +class RemoteProcess: + def __init__( + self, cmd: str, process_dir: str, shell: Shell, cmd_inspector: Optional[CommandInspector], proc_id: str + ): + self.process_dir = process_dir + self.cmd = cmd + self.stdout_last_line_number = 0 + self.stderr_last_line_number = 0 + self.pid: Optional[str] = None + self.proc_rc: Optional[int] = None + self.proc_start_time: Optional[int] = None + self.proc_end_time: Optional[int] = None + self.saved_stdout: Optional[str] = None + self.saved_stderr: Optional[str] = None + self.shell = shell + self.proc_id: str = proc_id + self.cmd_inspectors: list[CommandInspector] = [cmd_inspector] if cmd_inspector else [] + + @classmethod + @reporter.step("Create remote process") + def create( + cls, + command: str, + shell: Shell, + working_dir: str = "/tmp", + user: Optional[str] = None, + proc_id: Optional[str] = None, + ) -> RemoteProcess: + """ + Create a process on a remote host. + + Created dir for process with following files: + command.sh: script to execute + pid: contains process id + rc: contains script return code + stderr: contains script errors + stdout: contains script output + user: user on behalf whom command will be executed + proc_id: process string identificator + + Args: + shell: Shell instance + command: command to be run on a remote host + working_dir: working directory for the process + + Returns: + RemoteProcess instance for further examination + """ + if proc_id is None: + proc_id = f"{uuid.uuid4()}" + + cmd_inspector = SuInspector(user) if user else None + remote_process = cls( + cmd=command, + process_dir=os.path.join(working_dir, f"proc_{proc_id}"), + shell=shell, + cmd_inspector=cmd_inspector, + proc_id=proc_id, + ) + + return remote_process + + @reporter.step("Start remote process") + def start(self): + """ + Starts a process on a remote host. + """ + + self._create_process_dir() + self._generate_command_script() + self._start_process() + self.pid = self._get_pid() + + @reporter.step("Get process stdout") + def stdout(self, full: bool = False) -> str: + """ + Method to get process stdout, either fresh info or full. + + Args: + full: returns full stdout that we have to this moment + + Returns: + Fresh stdout. By means of stdout_last_line_number only new stdout lines are returned. 
+ If process is finished (proc_rc is not None) saved stdout is returned + """ + if self.saved_stdout is not None: + cur_stdout = self.saved_stdout + else: + terminal = self.shell.exec( + f"cat {self.process_dir}/stdout", + options=CommandOptions(no_log=True, extra_inspectors=self.cmd_inspectors), + ) + if self.proc_rc is not None: + self.saved_stdout = terminal.stdout + cur_stdout = terminal.stdout + + if full: + return cur_stdout + whole_stdout = cur_stdout.split("\n") + if len(whole_stdout) > self.stdout_last_line_number: + resulted_stdout = "\n".join(whole_stdout[self.stdout_last_line_number :]) + self.stdout_last_line_number = len(whole_stdout) + return resulted_stdout + return "" + + @reporter.step("Get process stderr") + def stderr(self, full: bool = False) -> str: + """ + Method to get process stderr, either fresh info or full. + + Args: + full: returns full stderr that we have to this moment + + Returns: + Fresh stderr. By means of stderr_last_line_number only new stderr lines are returned. + If process is finished (proc_rc is not None) saved stderr is returned + """ + if self.saved_stderr is not None: + cur_stderr = self.saved_stderr + else: + terminal = self.shell.exec( + f"cat {self.process_dir}/stderr", + options=CommandOptions(no_log=True, extra_inspectors=self.cmd_inspectors), + ) + if self.proc_rc is not None: + self.saved_stderr = terminal.stdout + cur_stderr = terminal.stdout + if full: + return cur_stderr + whole_stderr = cur_stderr.split("\n") + if len(whole_stderr) > self.stderr_last_line_number: + resulted_stderr = "\n".join(whole_stderr[self.stderr_last_line_number :]) + self.stderr_last_line_number = len(whole_stderr) + return resulted_stderr + return "" + + @reporter.step("Get process rc") + def rc(self) -> Optional[int]: + if self.proc_rc is not None: + return self.proc_rc + + result = self._cat_proc_file("rc") + if not result: + return None + + self.proc_rc = int(result) + return self.proc_rc + + @reporter.step("Get process start time") + def start_time(self) -> Optional[int]: + if self.proc_start_time is not None: + return self.proc_start_time + + result = self._cat_proc_file("start_time") + if not result: + return None + + self.proc_start_time = int(result) + return self.proc_start_time + + @reporter.step("Get process end time") + def end_time(self) -> Optional[int]: + if self.proc_end_time is not None: + return self.proc_end_time + + result = self._cat_proc_file("end_time") + if not result: + return None + + self.proc_end_time = int(result) + return self.proc_end_time + + def _cat_proc_file(self, file: str) -> Optional[str]: + terminal = self.shell.exec( + f"cat {self.process_dir}/{file}", + CommandOptions(check=False, extra_inspectors=self.cmd_inspectors, no_log=True), + ) + if "No such file or directory" in terminal.stderr: + return None + elif terminal.return_code != 0: + raise AssertionError(f"cat process {file} was not successful: {terminal.stderr}") + + return terminal.stdout + + @reporter.step("Check if process is running") + def running(self) -> bool: + return self.rc() is None + + @reporter.step("Send signal to process") + def send_signal(self, signal: int) -> None: + kill_res = self.shell.exec( + f"kill -{signal} {self.pid}", + CommandOptions(check=False, extra_inspectors=self.cmd_inspectors), + ) + if "No such process" in kill_res.stderr: + return + if kill_res.return_code: + raise AssertionError(f"Signal {signal} not sent. 
Return code of kill: {kill_res.return_code}") + + @reporter.step("Stop process") + def stop(self) -> None: + self.send_signal(15) + + @reporter.step("Kill process") + def kill(self) -> None: + self.send_signal(9) + + @reporter.step("Clear process directory") + def clear(self) -> None: + if self.process_dir == "/": + raise AssertionError(f"Invalid path to delete: {self.process_dir}") + self.shell.exec(f"rm -rf {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)) + + @reporter.step("Start remote process") + def _start_process(self) -> None: + self.shell.exec( + f"nohup {self.process_dir}/command.sh {self.process_dir}/stdout " + f"2>{self.process_dir}/stderr &", + CommandOptions(extra_inspectors=self.cmd_inspectors), + ) + + @reporter.step("Create process directory") + def _create_process_dir(self) -> None: + self.shell.exec(f"mkdir -p {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)) + self.shell.exec(f"chmod 777 {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)) + terminal = self.shell.exec(f"realpath {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)) + self.process_dir = terminal.stdout.strip() + + @reporter.step("Get pid") + @retry(wait=wait_fixed(10), stop=stop_after_attempt(5), reraise=True) + def _get_pid(self) -> str: + terminal = self.shell.exec(f"cat {self.process_dir}/pid", CommandOptions(extra_inspectors=self.cmd_inspectors)) + assert terminal.stdout, f"invalid pid: {terminal.stdout}" + return terminal.stdout.strip() + + @reporter.step("Generate command script") + def _generate_command_script(self) -> None: + command = self.cmd.replace('"', '\\"').replace("\\", "\\\\") + script = ( + f"#!/bin/bash\n" + f"cd {self.process_dir}\n" + f"date +%s > {self.process_dir}/start_time\n" + f"{command} &\n" + f"pid=\$!\n" + f"cd {self.process_dir}\n" + f"echo \$pid > {self.process_dir}/pid\n" + f"wait \$pid\n" + f"echo $? > {self.process_dir}/rc\n" + f"date +%s > {self.process_dir}/end_time\n" + ) + + self.shell.exec( + f'echo "{script}" > {self.process_dir}/command.sh', + CommandOptions(extra_inspectors=self.cmd_inspectors), + ) + self.shell.exec( + f"cat {self.process_dir}/command.sh", + CommandOptions(extra_inspectors=self.cmd_inspectors), + ) + self.shell.exec( + f"chmod +x {self.process_dir}/command.sh", + CommandOptions(extra_inspectors=self.cmd_inspectors), + ) diff --git a/src/frostfs_testlib/reporter/__init__.py b/src/frostfs_testlib/reporter/__init__.py index 10e4146a..848c175d 100644 --- a/src/frostfs_testlib/reporter/__init__.py +++ b/src/frostfs_testlib/reporter/__init__.py @@ -1,6 +1,9 @@ +from typing import Any + from frostfs_testlib.reporter.allure_handler import AllureHandler from frostfs_testlib.reporter.interfaces import ReporterHandler from frostfs_testlib.reporter.reporter import Reporter +from frostfs_testlib.reporter.steps_logger import StepsLogger __reporter = Reporter() @@ -15,3 +18,11 @@ def get_reporter() -> Reporter: Singleton reporter instance. 
""" return __reporter + + +def step(title: str): + return __reporter.step(title) + + +def attach(content: Any, file_name: str): + return __reporter.attach(content, file_name) diff --git a/src/frostfs_testlib/reporter/allure_handler.py b/src/frostfs_testlib/reporter/allure_handler.py index 92a295a8..ef63638f 100644 --- a/src/frostfs_testlib/reporter/allure_handler.py +++ b/src/frostfs_testlib/reporter/allure_handler.py @@ -1,7 +1,7 @@ import os -from contextlib import AbstractContextManager +from contextlib import AbstractContextManager, ContextDecorator from textwrap import shorten -from typing import Any +from typing import Any, Callable import allure from allure import attachment_type @@ -12,15 +12,23 @@ from frostfs_testlib.reporter.interfaces import ReporterHandler class AllureHandler(ReporterHandler): """Handler that stores test artifacts in Allure report.""" - def step(self, name: str) -> AbstractContextManager: - name = shorten(name, width=70, placeholder="...") + def step(self, name: str) -> AbstractContextManager | ContextDecorator: + name = shorten(name, width=140, placeholder="...") + return allure.step(name) + + def step_decorator(self, name: str) -> Callable: return allure.step(name) def attach(self, body: Any, file_name: str) -> None: attachment_name, extension = os.path.splitext(file_name) + if extension.startswith("."): + extension = extension[1:] attachment_type = self._resolve_attachment_type(extension) - allure.attach(body, attachment_name, attachment_type, extension) + if os.path.exists(body): + allure.attach.file(body, file_name, attachment_type, extension) + else: + allure.attach(body, attachment_name, attachment_type, extension) def _resolve_attachment_type(self, extension: str) -> attachment_type: """Try to find matching Allure attachment type by extension. diff --git a/src/frostfs_testlib/reporter/interfaces.py b/src/frostfs_testlib/reporter/interfaces.py index f2f6ce42..4e24febf 100644 --- a/src/frostfs_testlib/reporter/interfaces.py +++ b/src/frostfs_testlib/reporter/interfaces.py @@ -1,13 +1,13 @@ from abc import ABC, abstractmethod -from contextlib import AbstractContextManager -from typing import Any +from contextlib import AbstractContextManager, ContextDecorator +from typing import Any, Callable class ReporterHandler(ABC): """Interface of handler that stores test artifacts in some reporting tool.""" @abstractmethod - def step(self, name: str) -> AbstractContextManager: + def step(self, name: str) -> AbstractContextManager | ContextDecorator: """Register a new step in test execution. Args: @@ -17,6 +17,17 @@ class ReporterHandler(ABC): Step context. """ + @abstractmethod + def step_decorator(self, name: str) -> Callable: + """A step decorator from reporter. + + Args: + name: Name of the step. + + Returns: + decorator for the step + """ + @abstractmethod def attach(self, content: Any, file_name: str) -> None: """Attach specified content with given file name to the test report. 
diff --git a/src/frostfs_testlib/reporter/reporter.py b/src/frostfs_testlib/reporter/reporter.py index ea8330bd..2d1a43ec 100644 --- a/src/frostfs_testlib/reporter/reporter.py +++ b/src/frostfs_testlib/reporter/reporter.py @@ -1,9 +1,11 @@ from contextlib import AbstractContextManager, contextmanager +from functools import wraps from types import TracebackType -from typing import Any, Optional +from typing import Any, Callable, Optional from frostfs_testlib.plugins import load_plugin from frostfs_testlib.reporter.interfaces import ReporterHandler +from frostfs_testlib.utils.func_utils import format_by_args @contextmanager @@ -45,6 +47,33 @@ class Reporter: handler_class = load_plugin("frostfs.testlib.reporter", handler_config["plugin_name"]) self.register_handler(handler_class()) + def step_deco(self, name: str) -> Callable: + """Register a new step in test execution in a decorator fashion. + + To note: the actual decoration with handlers is happening during target function call time. + + Args: + name: Name of the step. + + Returns: + decorated function + """ + + def deco(func): + @wraps(func) + def wrapper(*a, **kw): + resulting_func = func + for handler in self.handlers: + parsed_name = format_by_args(func, name, *a, **kw) + decorator = handler.step_decorator(parsed_name) + resulting_func = decorator(resulting_func) + + return resulting_func(*a, **kw) + + return wrapper + + return deco + def step(self, name: str) -> AbstractContextManager: """Register a new step in test execution. @@ -54,11 +83,11 @@ class Reporter: Returns: Step context. """ - if not self.handlers: - return _empty_step() - step_contexts = [handler.step(name) for handler in self.handlers] - return AggregateContextManager(step_contexts) + if not step_contexts: + step_contexts = [_empty_step()] + decorated_wrapper = self.step_deco(name) + return AggregateContextManager(step_contexts, decorated_wrapper) def attach(self, content: Any, file_name: str) -> None: """Attach specified content with given file name to the test report. 
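A minimal usage sketch (illustrative only, not part of the patch; create_container is a hypothetical function): with the step_deco wiring in the hunk above and the AggregateContextManager.__call__ hook in the hunk below, reporter.step can be used both as a decorator and as a context manager, and the step title may interpolate call arguments, as the @reporter.step("Prepare node {cluster_node}") calls in runners.py do.

from frostfs_testlib import reporter


@reporter.step("Create container for {user}")
def create_container(user: str) -> str:
    # Each registered handler (AllureHandler, StepsLogger, ...) records this nested step
    with reporter.step("Sign request"):
        ...
    return "container-id"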
@@ -77,9 +106,10 @@ class AggregateContextManager(AbstractContextManager): contexts: list[AbstractContextManager] - def __init__(self, contexts: list[AbstractContextManager]) -> None: + def __init__(self, contexts: list[AbstractContextManager], decorated_wrapper: Callable) -> None: super().__init__() self.contexts = contexts + self.wrapper = decorated_wrapper def __enter__(self): for context in self.contexts: @@ -100,3 +130,6 @@ class AggregateContextManager(AbstractContextManager): # If all context agreed to suppress exception, then suppress it; # otherwise return None to reraise return True if all(suppress_decisions) else None + + def __call__(self, *args: Any, **kwds: Any) -> Any: + return self.wrapper(*args, **kwds) diff --git a/src/frostfs_testlib/reporter/steps_logger.py b/src/frostfs_testlib/reporter/steps_logger.py new file mode 100644 index 00000000..4cdfb3de --- /dev/null +++ b/src/frostfs_testlib/reporter/steps_logger.py @@ -0,0 +1,56 @@ +import logging +import threading +from contextlib import AbstractContextManager, ContextDecorator +from functools import wraps +from types import TracebackType +from typing import Any, Callable + +from frostfs_testlib.reporter.interfaces import ReporterHandler + + +class StepsLogger(ReporterHandler): + """Handler that prints steps to log.""" + + def step(self, name: str) -> AbstractContextManager | ContextDecorator: + return StepLoggerContext(name) + + def step_decorator(self, name: str) -> Callable: + return StepLoggerContext(name) + + def attach(self, body: Any, file_name: str) -> None: + pass + + +class StepLoggerContext(AbstractContextManager): + INDENT = {} + + def __init__(self, title: str): + self.title = title + self.logger = logging.getLogger("NeoLogger") + self.thread = threading.get_ident() + if self.thread not in StepLoggerContext.INDENT: + StepLoggerContext.INDENT[self.thread] = 1 + + def __enter__(self) -> Any: + indent = ">" * StepLoggerContext.INDENT[self.thread] + self.logger.info(f"[{self.thread}] {indent} {self.title}") + StepLoggerContext.INDENT[self.thread] += 1 + + def __exit__( + self, + __exc_type: type[BaseException] | None, + __exc_value: BaseException | None, + __traceback: TracebackType | None, + ) -> bool | None: + + StepLoggerContext.INDENT[self.thread] -= 1 + indent = "<" * StepLoggerContext.INDENT[self.thread] + self.logger.info(f"[{self.thread}] {indent} {self.title}") + + def __call__(self, func): + @wraps(func) + def impl(*a, **kw): + with self: + return func(*a, **kw) + + return impl diff --git a/src/frostfs_testlib/resources/cli.py b/src/frostfs_testlib/resources/cli.py new file mode 100644 index 00000000..06a98327 --- /dev/null +++ b/src/frostfs_testlib/resources/cli.py @@ -0,0 +1,12 @@ +# Paths to CLI executables on machine that runs tests +import os + +NEOGO_EXECUTABLE = os.getenv("FROSTFS_EXECUTABLE", "neo-go") +FROSTFS_CLI_EXEC = os.getenv("FROSTFS_CLI_EXEC", "frostfs-cli") +FROSTFS_AUTHMATE_EXEC = os.getenv("FROSTFS_AUTHMATE_EXEC", "frostfs-s3-authmate") +FROSTFS_ADM_EXEC = os.getenv("FROSTFS_ADM_EXEC", "frostfs-adm") + +# Config for frostfs-adm utility. 
Optional if tests are running against devenv +FROSTFS_ADM_CONFIG_PATH = os.getenv("FROSTFS_ADM_CONFIG_PATH") + +CLI_DEFAULT_TIMEOUT = os.getenv("CLI_DEFAULT_TIMEOUT", "100s") diff --git a/src/frostfs_testlib/resources/common.py b/src/frostfs_testlib/resources/common.py index 7744c0c1..53bcfaa4 100644 --- a/src/frostfs_testlib/resources/common.py +++ b/src/frostfs_testlib/resources/common.py @@ -1,37 +1,56 @@ -# ACLs with final flag -PUBLIC_ACL_F = "1FBFBFFF" -PRIVATE_ACL_F = "1C8C8CCC" -READONLY_ACL_F = "1FBF8CFF" +import os -# ACLs without final flag set -PUBLIC_ACL = "0FBFBFFF" -INACCESSIBLE_ACL = "40000000" -STICKY_BIT_PUB_ACL = "3FFFFFFF" +import yaml -EACL_PUBLIC_READ_WRITE = "eacl-public-read-write" +CONTAINER_WAIT_INTERVAL = "1m" -# Regex patterns of status codes of Container service -CONTAINER_NOT_FOUND = "code = 3072.*message = container not found" +SIMPLE_OBJECT_SIZE = os.getenv("SIMPLE_OBJECT_SIZE", "1000") +COMPLEX_OBJECT_CHUNKS_COUNT = os.getenv("COMPLEX_OBJECT_CHUNKS_COUNT", "3") +COMPLEX_OBJECT_TAIL_SIZE = os.getenv("COMPLEX_OBJECT_TAIL_SIZE", "1000") + +SERVICE_MAX_STARTUP_TIME = os.getenv("SERVICE_MAX_STARTUP_TIME", "5m") + +STORAGE_USER_NAME = "frostfs-storage" + +MORPH_TIMEOUT = os.getenv("MORPH_BLOCK_TIME", "8s") +MORPH_BLOCK_TIME = os.getenv("MORPH_BLOCK_TIME", "8s") +FROSTFS_CONTRACT_CACHE_TIMEOUT = os.getenv("FROSTFS_CONTRACT_CACHE_TIMEOUT", "30s") + +# Time interval that allows a GC pass on storage node (this includes GC sleep interval +# of 1min plus 15 seconds for GC pass itself) +STORAGE_GC_TIME = os.getenv("STORAGE_GC_TIME", "75s") + +GAS_HASH = os.getenv("GAS_HASH", "0xd2a4cff31913016155e38e474a2c06d08be276cf") +FROSTFS_CONTRACT = os.getenv("FROSTFS_IR_CONTRACTS_FROSTFS") + +ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir") + +# Password of wallet owned by user on behalf of whom we are running tests +# Default wallet password is empty +DEFAULT_WALLET_PASS = os.getenv("WALLET_PASS", "") + +# Artificial delay that we add after object deletion and container creation +# Delay is added because sometimes immediately after deletion object still appears +# to be existing (probably because tombstone object takes some time to replicate) +# TODO: remove this wait +S3_SYNC_WAIT_TIME = 5 + +# Generate wallet config +# TODO: we should move all info about wallet configs to fixtures +DEFAULT_WALLET_CONFIG = os.path.join(os.getcwd(), "wallet_config.yml") +with open(DEFAULT_WALLET_CONFIG, "w") as file: + yaml.dump({"password": DEFAULT_WALLET_PASS}, file) + +# Number of attempts that S3 clients will attempt per each request (1 means single attempt +# without any retries) +MAX_REQUEST_ATTEMPTS = 5 +RETRY_MODE = "standard" +CREDENTIALS_CREATE_TIMEOUT = "1m" -# Regex patterns of status codes of Object service -MALFORMED_REQUEST = "code = 1024.*message = malformed request" -OBJECT_ACCESS_DENIED = "code = 2048.*message = access to object operation denied" -OBJECT_NOT_FOUND = "code = 2049.*message = object not found" -OBJECT_ALREADY_REMOVED = "code = 2052.*message = object already removed" -SESSION_NOT_FOUND = "code = 4096.*message = session token not found" -OUT_OF_RANGE = "code = 2053.*message = out of range" -EXPIRED_SESSION_TOKEN = "code = 4097.*message = expired session token" -# TODO: Due to https://github.com/nspcc-dev/neofs-node/issues/2092 we have to check only codes until fixed -# OBJECT_IS_LOCKED = "code = 2050.*message = object is locked" -# LOCK_NON_REGULAR_OBJECT = "code = 2051.*message = ..." 
will be available once 2092 is fixed -OBJECT_IS_LOCKED = "code = 2050" -LOCK_NON_REGULAR_OBJECT = "code = 2051" +HOSTING_CONFIG_FILE = os.getenv( + "HOSTING_CONFIG_FILE", os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "..", ".devenv.hosting.yaml")) +) -LIFETIME_REQUIRED = "either expiration epoch of a lifetime is required" -LOCK_OBJECT_REMOVAL = "lock object removal" -LOCK_OBJECT_EXPIRATION = "lock object expiration: {expiration_epoch}; current: {current_epoch}" -INVALID_RANGE_ZERO_LENGTH = "invalid '{range}' range: zero length" -INVALID_RANGE_OVERFLOW = "invalid '{range}' range: uint64 overflow" -INVALID_OFFSET_SPECIFIER = "invalid '{range}' range offset specifier" -INVALID_LENGTH_SPECIFIER = "invalid '{range}' range length specifier" +MORE_LOG = os.getenv("MORE_LOG", "1") +EXPIRATION_EPOCH_ATTRIBUTE = "__SYSTEM__EXPIRATION_EPOCH" diff --git a/src/frostfs_testlib/resources/error_patterns.py b/src/frostfs_testlib/resources/error_patterns.py new file mode 100644 index 00000000..15e29771 --- /dev/null +++ b/src/frostfs_testlib/resources/error_patterns.py @@ -0,0 +1,38 @@ +# Regex patterns of status codes of Container service +CONTAINER_NOT_FOUND = "code = 3072.*message = container not found" +SUBJECT_NOT_FOUND = "code = 1024.*message =.*chain/client.*subject not found.*" + +# Regex patterns of status codes of Object service +MALFORMED_REQUEST = "code = 1024.*message = malformed request" +OBJECT_ACCESS_DENIED = "code = 2048.*message = access to object operation denied" +OBJECT_NOT_FOUND = "code = 2049.*message = object not found" +OBJECT_ALREADY_REMOVED = "code = 2052.*message = object already removed" +SESSION_NOT_FOUND = "code = 4096.*message = session token not found" +OUT_OF_RANGE = "code = 2053.*message = out of range" +EXPIRED_SESSION_TOKEN = "code = 4097.*message = expired session token" +ADD_CHAIN_ERROR = "code = 5120 message = apemanager access denied" +# TODO: Change to codes with message +# OBJECT_IS_LOCKED = "code = 2050.*message = object is locked" +# LOCK_NON_REGULAR_OBJECT = "code = 2051.*message = ..." will be available once 2092 is fixed +OBJECT_IS_LOCKED = "code = 2050" +LOCK_NON_REGULAR_OBJECT = "code = 2051" + +LIFETIME_REQUIRED = "either expiration epoch of a lifetime is required" +LOCK_OBJECT_REMOVAL = "lock object removal" +LOCK_OBJECT_EXPIRATION = "lock object expiration: {expiration_epoch}; current: {current_epoch}" +INVALID_RANGE_ZERO_LENGTH = "invalid '{range}' range: zero length" +INVALID_RANGE_OVERFLOW = "invalid '{range}' range: uint64 overflow" +INVALID_OFFSET_SPECIFIER = "invalid '{range}' range offset specifier" +INVALID_LENGTH_SPECIFIER = "invalid '{range}' range length specifier" + +S3_BUCKET_DOES_NOT_ALLOW_ACL = "The bucket does not allow ACLs" +S3_MALFORMED_XML_REQUEST = "The XML you provided was not well-formed or did not validate against our published schema." + +RULE_ACCESS_DENIED_CONTAINER = "access to container operation {operation} is denied by access policy engine: Access denied" +# Errors from node missing reasons if request was forwarded. Commenting for now +# RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied" +RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request" +NO_RULE_FOUND_CONTAINER = "access to container operation {operation} is denied by access policy engine: NoRuleFound" +# Errors from node missing reasons if request was forwarded. 
Commenting for now +# NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request: method {operation}: NoRuleFound" +NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request" diff --git a/src/frostfs_testlib/resources/load_params.py b/src/frostfs_testlib/resources/load_params.py new file mode 100644 index 00000000..ad3ed1c7 --- /dev/null +++ b/src/frostfs_testlib/resources/load_params.py @@ -0,0 +1,35 @@ +import os + +# Background load node parameters +LOAD_NODES = os.getenv("LOAD_NODES", "").split() +# Must hardcode for now +LOAD_NODE_SSH_USER = os.getenv("LOAD_NODE_SSH_USER", "service") +LOAD_NODE_SSH_PASSWORD = os.getenv("LOAD_NODE_SSH_PASSWORD") +LOAD_NODE_SSH_PRIVATE_KEY_PATH = os.getenv("LOAD_NODE_SSH_PRIVATE_KEY_PATH") +LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE = os.getenv("LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE") +BACKGROUND_WRITERS_COUNT = os.getenv("BACKGROUND_WRITERS_COUNT", 0) +BACKGROUND_READERS_COUNT = os.getenv("BACKGROUND_READERS_COUNT", 0) +BACKGROUND_DELETERS_COUNT = os.getenv("BACKGROUND_DELETERS_COUNT", 0) +BACKGROUND_VERIFIERS_COUNT = os.getenv("BACKGROUND_VERIFIERS_COUNT", 0) +BACKGROUND_LOAD_DEFAULT_TIME = os.getenv("BACKGROUND_LOAD_DEFAULT_TIME", 1800) +BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE = os.getenv("BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE", 32) +BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME = float(os.getenv("BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME", 0.8)) +BACKGROUND_LOAD_SETUP_TIMEOUT = os.getenv("BACKGROUND_LOAD_SETUP_TIMEOUT", "5s") + +# This will decrease load params for some weak environments +BACKGROUND_LOAD_VUS_COUNT_DIVISOR = os.getenv("BACKGROUND_LOAD_VUS_COUNT_DIVISOR", 1) + +# Wait for 1 hour for xk6 verify scenario by default (in practice means like "unlimited" time) +BACKGROUND_LOAD_MAX_VERIFY_TIME = os.getenv("BACKGROUND_LOAD_VERIFY_MAX_TIME", 3600) +BACKGROUND_LOAD_CONTAINER_PLACEMENT_POLICY = os.getenv( + "BACKGROUND_LOAD_CONTAINER_PLACEMENT_POLICY", "REP 2 IN X CBF 2 SELECT 2 FROM * AS X" +) +BACKGROUND_LOAD_S3_LOCATION = os.getenv("BACKGROUND_LOAD_S3_LOCATION", "node-off") +PRESET_CONTAINERS_COUNT = os.getenv("CONTAINERS_COUNT", "40") +PRESET_CONTAINER_CREATION_RETRY_COUNT = os.getenv("CONTAINER_CREATION_RETRY_COUNT", "20") +# TODO: At lease one object is required due to bug in xk6 (buckets with no objects produce millions exceptions in read) +PRESET_OBJECTS_COUNT = os.getenv("OBJ_COUNT", "1") +K6_DIRECTORY = os.getenv("K6_DIRECTORY", "/etc/k6") +K6_TEARDOWN_PERIOD = os.getenv("K6_TEARDOWN_PERIOD", "30") +K6_STOP_SIGNAL_TIMEOUT = int(os.getenv("K6_STOP_SIGNAL_TIMEOUT", 300)) +LOAD_CONFIG_YAML_PATH = os.getenv("LOAD_CONFIG_YAML_PATH", "load_config_yaml_file.yml") diff --git a/src/frostfs_testlib/resources/optionals.py b/src/frostfs_testlib/resources/optionals.py new file mode 100644 index 00000000..6caf158f --- /dev/null +++ b/src/frostfs_testlib/resources/optionals.py @@ -0,0 +1,25 @@ +import os + + +def str_to_bool(input: str) -> bool: + return input in ["true", "True", "1"] + + +# Override these optional params to not comment/modify code during local development. Use with caution. +# Node under test. Set this to occupy exact node. +OPTIONAL_NODE_UNDER_TEST = os.getenv("OPTIONAL_NODE_UNDER_TEST") + +# Node under load. Set this to target load on exact node. +OPTIONAL_NODE_UNDER_LOAD = os.getenv("OPTIONAL_NODE_UNDER_LOAD") + +# Set this to True to disable failover commands. I.E. node which supposed to be stopped will not be actually stopped. 
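+# These values are read once at import time, and str_to_bool treats anything other than
+# "true", "True" or "1" as False; e.g. exporting OPTIONAL_FAILOVER_ENABLED=false before the
+# test run (a hypothetical override) turns the flag below off.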
+OPTIONAL_FAILOVER_ENABLED = str_to_bool(os.getenv("OPTIONAL_FAILOVER_ENABLED", "true")) + +# Set this to True to disable background load. I.E. node which supposed to be stopped will not be actually stopped. +OPTIONAL_BACKGROUND_LOAD_ENABLED = str_to_bool(os.getenv("OPTIONAL_BACKGROUND_LOAD_ENABLED", "true")) + +# Set this to False for disable autouse fixture like node healthcheck during developing time. +OPTIONAL_AUTOUSE_FIXTURES_ENABLED = str_to_bool(os.getenv("OPTIONAL_AUTOUSE_FIXTURES_ENABLED", "true")) + +# Use cache for fixtures with @cachec_fixture decorator +OPTIONAL_CACHE_FIXTURES = str_to_bool(os.getenv("OPTIONAL_CACHE_FIXTURES", "false")) diff --git a/src/frostfs_testlib/resources/s3_acl_grants.py b/src/frostfs_testlib/resources/s3_acl_grants.py new file mode 100644 index 00000000..a716bc51 --- /dev/null +++ b/src/frostfs_testlib/resources/s3_acl_grants.py @@ -0,0 +1,9 @@ +ALL_USERS_GROUP_URI = "http://acs.amazonaws.com/groups/global/AllUsers" +ALL_USERS_GROUP_WRITE_GRANT = {"Grantee": {"Type": "Group", "URI": ALL_USERS_GROUP_URI}, "Permission": "WRITE"} +ALL_USERS_GROUP_READ_GRANT = {"Grantee": {"Type": "Group", "URI": ALL_USERS_GROUP_URI}, "Permission": "READ"} +CANONICAL_USER_FULL_CONTROL_GRANT = {"Grantee": {"Type": "CanonicalUser"}, "Permission": "FULL_CONTROL"} + +# https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl +PRIVATE_GRANTS = [] +PUBLIC_READ_GRANTS = [ALL_USERS_GROUP_READ_GRANT] +PUBLIC_READ_WRITE_GRANTS = [ALL_USERS_GROUP_WRITE_GRANT, ALL_USERS_GROUP_READ_GRANT] diff --git a/src/frostfs_testlib/resources/wellknown_acl.py b/src/frostfs_testlib/resources/wellknown_acl.py new file mode 100644 index 00000000..fe561b33 --- /dev/null +++ b/src/frostfs_testlib/resources/wellknown_acl.py @@ -0,0 +1,11 @@ +# ACLs with final flag +PUBLIC_ACL_F = "1FBFBFFF" +PRIVATE_ACL_F = "1C8C8CCC" +READONLY_ACL_F = "1FBF8CFF" + +# ACLs without final flag set +PUBLIC_ACL = "0FBFBFFF" +INACCESSIBLE_ACL = "40000000" +STICKY_BIT_PUB_ACL = "3FFFFFFF" + +EACL_PUBLIC_READ_WRITE = "eacl-public-read-write" diff --git a/src/frostfs_testlib/shell/__init__.py b/src/frostfs_testlib/shell/__init__.py index 0300ff84..980d1190 100644 --- a/src/frostfs_testlib/shell/__init__.py +++ b/src/frostfs_testlib/shell/__init__.py @@ -1,3 +1,3 @@ from frostfs_testlib.shell.interfaces import CommandOptions, CommandResult, InteractiveInput, Shell from frostfs_testlib.shell.local_shell import LocalShell -from frostfs_testlib.shell.ssh_shell import SSHShell +from frostfs_testlib.shell.ssh_shell import SshConnectionProvider, SSHShell diff --git a/src/frostfs_testlib/shell/command_inspectors.py b/src/frostfs_testlib/shell/command_inspectors.py index 8486f43c..8fe2f34e 100644 --- a/src/frostfs_testlib/shell/command_inspectors.py +++ b/src/frostfs_testlib/shell/command_inspectors.py @@ -7,7 +7,23 @@ class SudoInspector(CommandInspector): If command is already prepended with sudo, then has no effect. """ - def inspect(self, command: str) -> str: + def inspect(self, original_command: str, command: str) -> str: if not command.startswith("sudo"): return f"sudo {command}" return command + + +class SuInspector(CommandInspector): + """Allows to run command as another user via sudo su call + + If command is already prepended with sudo su, then has no effect. 
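+
+    Example (illustrative, with a hypothetical user "service"): the command `ls -la` would be
+    rewritten to `sudo su - service -c "ls -la"`, while a command that already starts with
+    `sudo su` is returned unchanged.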
+ """ + + def __init__(self, user: str) -> None: + self.user = user + + def inspect(self, original_command: str, command: str) -> str: + if not original_command.startswith("sudo su"): + cmd = original_command.replace('"', '\\"').replace("\$", "\\\\\\$") + return f'sudo su - {self.user} -c "{cmd}"' + return original_command diff --git a/src/frostfs_testlib/shell/interfaces.py b/src/frostfs_testlib/shell/interfaces.py index 4c87a78f..a8d33250 100644 --- a/src/frostfs_testlib/shell/interfaces.py +++ b/src/frostfs_testlib/shell/interfaces.py @@ -22,11 +22,12 @@ class CommandInspector(ABC): """Interface of inspector that processes command text before execution.""" @abstractmethod - def inspect(self, command: str) -> str: + def inspect(self, original_command: str, command: str) -> str: """Transforms command text and returns modified command. Args: command: Command to transform with this inspector. + original_command: Untransformed command to transform with this inspector. Depending on type of the inspector it might be required to modify original command Returns: Transformed command text. @@ -47,6 +48,7 @@ class CommandOptions: check: Controls whether to check return code of the command. Set to False to ignore non-zero return codes. no_log: Do not print output to logger if True. + extra_inspectors: Exctra command inspectors to process command """ interactive_inputs: Optional[list[InteractiveInput]] = None @@ -54,12 +56,30 @@ class CommandOptions: timeout: Optional[int] = None check: bool = True no_log: bool = False + extra_inspectors: Optional[list[CommandInspector]] = None def __post_init__(self): if self.timeout is None: self.timeout = Options.get_default_shell_timeout() +@dataclass +class SshCredentials: + """Represents ssh credentials. + + Attributes: + ssh_login: ssh login. + ssh_password: ssh password as plain text (unsecure, for local setup only). + ssh_key_path: path to a ssh key file. + ssh_key_passphrase: passphrase to ssh key file. + """ + + ssh_login: str + ssh_password: Optional[str] = None + ssh_key_path: Optional[str] = None + ssh_key_passphrase: Optional[str] = None + + @dataclass class CommandResult: """Represents a result of a command executed via shell. 
diff --git a/src/frostfs_testlib/shell/local_shell.py b/src/frostfs_testlib/shell/local_shell.py index 12f450af..c0f3b066 100644 --- a/src/frostfs_testlib/shell/local_shell.py +++ b/src/frostfs_testlib/shell/local_shell.py @@ -1,16 +1,18 @@ import logging import subprocess import tempfile +from contextlib import nullcontext from datetime import datetime from typing import IO, Optional import pexpect -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter +from frostfs_testlib.resources.common import MORE_LOG from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell logger = logging.getLogger("frostfs.testlib.shell") -reporter = get_reporter() +step_context = reporter.step if MORE_LOG == "1" else nullcontext class LocalShell(Shell): @@ -24,20 +26,22 @@ class LocalShell(Shell): # If no options were provided, use default options options = options or CommandOptions() - for inspector in self.command_inspectors: - command = inspector.inspect(command) + original_command = command + extra_inspectors = options.extra_inspectors if options.extra_inspectors else [] + for inspector in [*self.command_inspectors, *extra_inspectors]: + command = inspector.inspect(original_command, command) - logger.info(f"Executing command: {command}") - if options.interactive_inputs: - return self._exec_interactive(command, options) - return self._exec_non_interactive(command, options) + with step_context(f"Executing command: {command}"): + if options.interactive_inputs: + return self._exec_interactive(command, options) + return self._exec_non_interactive(command, options) def _exec_interactive(self, command: str, options: CommandOptions) -> CommandResult: start_time = datetime.utcnow() log_file = tempfile.TemporaryFile() # File is reliable cross-platform way to capture output try: - command_process = pexpect.spawn(command, timeout=options.timeout) + command_process = pexpect.spawn(command, timeout=options.timeout, use_poll=True) except (pexpect.ExceptionPexpect, OSError) as exc: raise RuntimeError(f"Command: {command}") from exc @@ -59,8 +63,7 @@ class LocalShell(Shell): if options.check and result.return_code != 0: raise RuntimeError( - f"Command: {command}\nreturn code: {result.return_code}\n" - f"Output: {result.stdout}" + f"Command: {command}\nreturn code: {result.return_code}\n" f"Output: {result.stdout}\n" f"Stderr: {result.stderr}\n" ) return result @@ -91,11 +94,7 @@ class LocalShell(Shell): stderr="", return_code=exc.returncode, ) - raise RuntimeError( - f"Command: {command}\nError:\n" - f"return code: {exc.returncode}\n" - f"output: {exc.output}" - ) from exc + raise RuntimeError(f"Command: {command}\nError with retcode: {exc.returncode}\n Output: {exc.output}") from exc except OSError as exc: raise RuntimeError(f"Command: {command}\nOutput: {exc.strerror}") from exc finally: @@ -129,22 +128,19 @@ class LocalShell(Shell): end_time: datetime, result: Optional[CommandResult], ) -> None: - # TODO: increase logging level if return code is non 0, should be warning at least - logger.info( - f"Command: {command}\n" - f"{'Success:' if result and result.return_code == 0 else 'Error:'}\n" - f"return code: {result.return_code if result else ''} " - f"\nOutput: {result.stdout if result else ''}" - ) + if not result: + logger.warning(f"Command: {command}\n" f"Error: result is None") + return - if result: - elapsed_time = end_time - start_time - command_attachment = ( - f"COMMAND: {command}\n" - f"RETCODE: {result.return_code}\n\n" - 
f"STDOUT:\n{result.stdout}\n" - f"STDERR:\n{result.stderr}\n" - f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}" - ) - with reporter.step(f"COMMAND: {command}"): - reporter.attach(command_attachment, "Command execution.txt") + status, log_method = ("Success", logger.info) if result.return_code == 0 else ("Error", logger.warning) + log_method(f"Command: {command}\n" f"{status} with retcode {result.return_code}\n" f"Output: \n{result.stdout}") + + elapsed_time = end_time - start_time + command_attachment = ( + f"COMMAND: {command}\n" + f"RETCODE: {result.return_code}\n\n" + f"STDOUT:\n{result.stdout}\n" + f"STDERR:\n{result.stderr}\n" + f"Start / End / Elapsed\t {start_time} / {end_time} / {elapsed_time}" + ) + reporter.attach(command_attachment, "Command execution.txt") diff --git a/src/frostfs_testlib/shell/ssh_shell.py b/src/frostfs_testlib/shell/ssh_shell.py index 04d42ee7..3f13dcaf 100644 --- a/src/frostfs_testlib/shell/ssh_shell.py +++ b/src/frostfs_testlib/shell/ssh_shell.py @@ -6,39 +6,119 @@ from functools import lru_cache, wraps from time import sleep from typing import ClassVar, Optional, Tuple -from paramiko import ( - AutoAddPolicy, - Channel, - ECDSAKey, - Ed25519Key, - PKey, - RSAKey, - SSHClient, - SSHException, - ssh_exception, -) +from paramiko import AutoAddPolicy, Channel, ECDSAKey, Ed25519Key, PKey, RSAKey, SSHClient, SSHException, ssh_exception from paramiko.ssh_exception import AuthenticationException -from frostfs_testlib.reporter import get_reporter -from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell +from frostfs_testlib import reporter +from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell, SshCredentials logger = logging.getLogger("frostfs.testlib.shell") -reporter = get_reporter() + + +class SshConnectionProvider: + SSH_CONNECTION_ATTEMPTS: ClassVar[int] = 4 + SSH_ATTEMPTS_INTERVAL: ClassVar[int] = 10 + CONNECTION_TIMEOUT = 60 + + instance = None + connections: dict[str, SSHClient] = {} + creds: dict[str, SshCredentials] = {} + + def __new__(cls): + if not cls.instance: + cls.instance = super(SshConnectionProvider, cls).__new__(cls) + return cls.instance + + def store_creds(self, host: str, ssh_creds: SshCredentials): + self.creds[host] = ssh_creds + + def provide(self, host: str, port: str) -> SSHClient: + if host not in self.creds: + raise RuntimeError(f"Please add credentials for host {host}") + + if host in self.connections: + client = self.connections[host] + if client: + return client + + creds = self.creds[host] + client = self._create_connection(host, port, creds) + self.connections[host] = client + return client + + def drop(self, host: str): + if host in self.connections: + client = self.connections.pop(host) + client.close() + + def drop_all(self): + hosts = list(self.connections.keys()) + for host in hosts: + self.drop(host) + + def _create_connection( + self, + host: str, + port: str, + creds: SshCredentials, + ) -> SSHClient: + for attempt in range(self.SSH_CONNECTION_ATTEMPTS): + connection = SSHClient() + connection.set_missing_host_key_policy(AutoAddPolicy()) + try: + if creds.ssh_key_path: + logger.info( + f"Trying to connect to host {host} as {creds.ssh_login} using SSH key " f"{creds.ssh_key_path} (attempt {attempt})" + ) + connection.connect( + hostname=host, + port=port, + username=creds.ssh_login, + pkey=_load_private_key(creds.ssh_key_path, creds.ssh_key_passphrase), + timeout=self.CONNECTION_TIMEOUT, + ) + 
else: + logger.info(f"Trying to connect to host {host} as {creds.ssh_login} using password " f"(attempt {attempt})") + connection.connect( + hostname=host, + port=port, + username=creds.ssh_login, + password=creds.ssh_password, + timeout=self.CONNECTION_TIMEOUT, + ) + return connection + except AuthenticationException: + connection.close() + logger.exception(f"Can't connect to host {host}") + raise + except ( + SSHException, + ssh_exception.NoValidConnectionsError, + AttributeError, + socket.timeout, + OSError, + ) as exc: + connection.close() + can_retry = attempt + 1 < self.SSH_CONNECTION_ATTEMPTS + if can_retry: + logger.warn(f"Can't connect to host {host}, will retry after {self.SSH_ATTEMPTS_INTERVAL}s. Error: {exc}") + sleep(self.SSH_ATTEMPTS_INTERVAL) + continue + logger.exception(f"Can't connect to host {host}") + raise HostIsNotAvailable(host) from exc class HostIsNotAvailable(Exception): """Raised when host is not reachable via SSH connection.""" - def __init__(self, host: str = None): + def __init__(self, host: Optional[str] = None): msg = f"Host {host} is not available" super().__init__(msg) def log_command(func): @wraps(func) - def wrapper( - shell: "SSHShell", command: str, options: CommandOptions, *args, **kwargs - ) -> CommandResult: + def wrapper(shell: "SSHShell", command: str, options: CommandOptions, *args, **kwargs) -> CommandResult: command_info = command.removeprefix("$ProgressPreference='SilentlyContinue'\n") with reporter.step(command_info): logger.info(f'Execute command "{command}" on "{shell.host}"') @@ -54,7 +134,7 @@ def log_command(func): f"RC:\n {result.return_code}\n" f"STDOUT:\n{textwrap.indent(result.stdout, ' ')}\n" f"STDERR:\n{textwrap.indent(result.stderr, ' ')}\n" - f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}" + f"Start / End / Elapsed\t {start_time} / {end_time} / {elapsed_time}" ) if not options.no_log: @@ -91,9 +171,6 @@ class SSHShell(Shell): # to allow remote command to flush its output buffer DELAY_AFTER_EXIT = 0.2 - SSH_CONNECTION_ATTEMPTS: ClassVar[int] = 3 - CONNECTION_TIMEOUT = 90 - def __init__( self, host: str, @@ -103,31 +180,32 @@ class SSHShell(Shell): private_key_passphrase: Optional[str] = None, port: str = "22", command_inspectors: Optional[list[CommandInspector]] = None, + custom_environment: Optional[dict] = None, ) -> None: super().__init__() + self.connection_provider = SshConnectionProvider() + self.connection_provider.store_creds(host, SshCredentials(login, password, private_key_path, private_key_passphrase)) self.host = host self.port = port - self.login = login - self.password = password - self.private_key_path = private_key_path - self.private_key_passphrase = private_key_passphrase + self.command_inspectors = command_inspectors or [] - self.__connection: Optional[SSHClient] = None + + self.environment = custom_environment @property def _connection(self): - if not self.__connection: - self.__connection = self._create_connection() - return self.__connection + return self.connection_provider.provide(self.host, self.port) def drop(self): - self._reset_connection() + self.connection_provider.drop(self.host) def exec(self, command: str, options: Optional[CommandOptions] = None) -> CommandResult: options = options or CommandOptions() - for inspector in self.command_inspectors: - command = inspector.inspect(command) + original_command = command + extra_inspectors = options.extra_inspectors if options.extra_inspectors else [] + for inspector in [*self.command_inspectors, *extra_inspectors]: + 
command = inspector.inspect(original_command, command) if options.interactive_inputs: result = self._exec_interactive(command, options) @@ -135,16 +213,12 @@ class SSHShell(Shell): result = self._exec_non_interactive(command, options) if options.check and result.return_code != 0: - raise RuntimeError( - f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}" - ) + raise RuntimeError(f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}\nStderr: {result.stderr}\n") return result @log_command def _exec_interactive(self, command: str, options: CommandOptions) -> CommandResult: - stdin, stdout, stderr = self._connection.exec_command( - command, timeout=options.timeout, get_pty=True - ) + stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout, get_pty=True, environment=self.environment) for interactive_input in options.interactive_inputs: input = interactive_input.input if not input.endswith("\n"): @@ -171,7 +245,7 @@ class SSHShell(Shell): @log_command def _exec_non_interactive(self, command: str, options: CommandOptions) -> CommandResult: try: - stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout) + stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout, environment=self.environment) if options.close_stdin: stdin.close() @@ -193,7 +267,7 @@ class SSHShell(Shell): socket.timeout, ) as exc: logger.exception(f"Can't execute command {command} on host: {self.host}") - self._reset_connection() + self.drop() raise HostIsNotAvailable(self.host) from exc def _read_channels( @@ -248,57 +322,3 @@ class SSHShell(Shell): full_stderr = b"".join(stderr_chunks) return (full_stdout.decode(errors="ignore"), full_stderr.decode(errors="ignore")) - - def _create_connection(self, attempts: int = SSH_CONNECTION_ATTEMPTS) -> SSHClient: - for attempt in range(attempts): - connection = SSHClient() - connection.set_missing_host_key_policy(AutoAddPolicy()) - try: - if self.private_key_path: - logger.info( - f"Trying to connect to host {self.host} as {self.login} using SSH key " - f"{self.private_key_path} (attempt {attempt})" - ) - connection.connect( - hostname=self.host, - port=self.port, - username=self.login, - pkey=_load_private_key(self.private_key_path, self.private_key_passphrase), - timeout=self.CONNECTION_TIMEOUT, - ) - else: - logger.info( - f"Trying to connect to host {self.host} as {self.login} using password " - f"(attempt {attempt})" - ) - connection.connect( - hostname=self.host, - port=self.port, - username=self.login, - password=self.password, - timeout=self.CONNECTION_TIMEOUT, - ) - return connection - except AuthenticationException: - connection.close() - logger.exception(f"Can't connect to host {self.host}") - raise - except ( - SSHException, - ssh_exception.NoValidConnectionsError, - AttributeError, - socket.timeout, - OSError, - ) as exc: - connection.close() - can_retry = attempt + 1 < attempts - if can_retry: - logger.warn(f"Can't connect to host {self.host}, will retry. 
Error: {exc}") - continue - logger.exception(f"Can't connect to host {self.host}") - raise HostIsNotAvailable(self.host) from exc - - def _reset_connection(self) -> None: - if self.__connection: - self.__connection.close() - self.__connection = None diff --git a/src/frostfs_testlib/steps/__init__.py b/src/frostfs_testlib/steps/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/frostfs_testlib/steps/acl.py b/src/frostfs_testlib/steps/acl.py new file mode 100644 index 00000000..da407b6f --- /dev/null +++ b/src/frostfs_testlib/steps/acl.py @@ -0,0 +1,176 @@ +import base64 +import json +import logging +import os +import uuid +from time import sleep +from typing import List, Optional, Union + +import base58 + +from frostfs_testlib import reporter +from frostfs_testlib.cli import FrostfsCli +from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC +from frostfs_testlib.resources.common import ASSETS_DIR +from frostfs_testlib.shell import Shell +from frostfs_testlib.storage.dataclasses.acl import EACL_LIFETIME, FROSTFS_CONTRACT_CACHE_TIMEOUT, EACLPubKey, EACLRole, EACLRule +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo +from frostfs_testlib.utils import wallet_utils + +logger = logging.getLogger("NeoLogger") + + +@reporter.step("Get extended ACL") +def get_eacl(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str) -> Optional[str]: + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + try: + result = cli.container.get_eacl(rpc_endpoint=endpoint, cid=cid) + except RuntimeError as exc: + logger.info("Extended ACL table is not set for this container") + logger.info(f"Got exception while getting eacl: {exc}") + return None + if "extended ACL table is not set for this container" in result.stdout: + return None + return result.stdout + + +@reporter.step("Set extended ACL") +def set_eacl( + wallet: WalletInfo, + cid: str, + eacl_table_path: str, + shell: Shell, + endpoint: str, + session_token: Optional[str] = None, +) -> None: + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + cli.container.set_eacl( + rpc_endpoint=endpoint, + cid=cid, + table=eacl_table_path, + await_mode=True, + session=session_token, + ) + + +def _encode_cid_for_eacl(cid: str) -> str: + cid_base58 = base58.b58decode(cid) + return base64.b64encode(cid_base58).decode("utf-8") + + +def create_eacl(cid: str, rules_list: List[EACLRule], shell: Shell) -> str: + table_file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"eacl_table_{str(uuid.uuid4())}.json") + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC) + cli.acl.extended_create(cid=cid, out=table_file_path, rule=rules_list) + + with open(table_file_path, "r") as file: + table_data = file.read() + logger.info(f"Generated eACL:\n{table_data}") + + return table_file_path + + +def form_bearertoken_file( + wallet: WalletInfo, + cid: str, + eacl_rule_list: List[Union[EACLRule, EACLPubKey]], + shell: Shell, + endpoint: str, + sign: Optional[bool] = True, +) -> str: + """ + This function fetches eACL for given on behalf of , + then extends it with filters taken from , signs + with bearer token and writes to file + """ + enc_cid = _encode_cid_for_eacl(cid) if cid else None + file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) + + eacl = get_eacl(wallet, cid, shell, endpoint) + json_eacl = dict() + if eacl: + eacl = eacl.replace("eACL: ", "").split("Signature")[0] + json_eacl = json.loads(eacl) + logger.info(json_eacl) + eacl_result = { + "body": { + "eaclTable": {"containerID": {"value": enc_cid} if cid else 
enc_cid, "records": []}, + "lifetime": {"exp": EACL_LIFETIME, "nbf": "1", "iat": "0"}, + } + } + + assert eacl_rules, "Got empty eacl_records list" + for rule in eacl_rule_list: + op_data = { + "operation": rule.operation.value.upper(), + "action": rule.access.value.upper(), + "filters": rule.filters or [], + "targets": [], + } + + if isinstance(rule.role, EACLRole): + op_data["targets"] = [{"role": rule.role.value.upper()}] + elif isinstance(rule.role, EACLPubKey): + op_data["targets"] = [{"keys": rule.role.keys}] + + eacl_result["body"]["eaclTable"]["records"].append(op_data) + + # Add records from current eACL + if "records" in json_eacl.keys(): + for record in json_eacl["records"]: + eacl_result["body"]["eaclTable"]["records"].append(record) + + with open(file_path, "w", encoding="utf-8") as eacl_file: + json.dump(eacl_result, eacl_file, ensure_ascii=False, indent=4) + + logger.info(f"Got these extended ACL records: {eacl_result}") + if sign: + sign_bearer( + shell=shell, + wallet=wallet, + eacl_rules_file_from=file_path, + eacl_rules_file_to=file_path, + json=True, + ) + return file_path + + +def eacl_rules(access: str, verbs: list, user: str) -> list[str]: + """ + This function creates a list of eACL rules. + Args: + access (str): identifies if the following operation(s) + is allowed or denied + verbs (list): a list of operations to set rules for + user (str): a group of users (user/others) or a wallet of + a certain user for whom rules are set + Returns: + (list): a list of eACL rules + """ + if user not in ("others", "user"): + pubkey = wallet_utils.get_wallet_public_key(user, wallet_password="") + user = f"pubkey:{pubkey}" + + rules = [] + for verb in verbs: + rule = f"{access} {verb} {user}" + rules.append(rule) + return rules + + +def sign_bearer(shell: Shell, wallet: WalletInfo, eacl_rules_file_from: str, eacl_rules_file_to: str, json: bool) -> None: + frostfscli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + frostfscli.util.sign_bearer_token(eacl_rules_file_from, eacl_rules_file_to, json=json) + + +@reporter.step("Wait for eACL cache expired") +def wait_for_cache_expired(): + sleep(FROSTFS_CONTRACT_CACHE_TIMEOUT) + return + + +@reporter.step("Return bearer token in base64 to caller") +def bearer_token_base64_from_file(bearer_path: str) -> str: + with open(bearer_path, "rb") as file: + signed = file.read() + return base64.b64encode(signed).decode("utf-8") diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py new file mode 100644 index 00000000..092b1a3a --- /dev/null +++ b/src/frostfs_testlib/steps/cli/container.py @@ -0,0 +1,351 @@ +import json +import logging +import re +from dataclasses import dataclass +from time import sleep +from typing import Optional, Union + +from frostfs_testlib import reporter +from frostfs_testlib.cli import FrostfsCli +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC +from frostfs_testlib.shell import Shell +from frostfs_testlib.steps.cli.object import put_object, put_object_to_random_node +from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo +from frostfs_testlib.utils import json_utils +from frostfs_testlib.utils.file_utils import generate_file, get_file_hash + +logger = logging.getLogger("NeoLogger") + + +@dataclass +class StorageContainerInfo: + id: str + wallet: WalletInfo + + +class 
StorageContainer: + def __init__( + self, + storage_container_info: StorageContainerInfo, + shell: Shell, + cluster: Cluster, + ) -> None: + self.shell = shell + self.storage_container_info = storage_container_info + self.cluster = cluster + + def get_id(self) -> str: + return self.storage_container_info.id + + def get_wallet(self) -> str: + return self.storage_container_info.wallet + + @reporter.step("Generate new object and put in container") + def generate_object( + self, + size: int, + expire_at: Optional[int] = None, + bearer_token: Optional[str] = None, + endpoint: Optional[str] = None, + ) -> StorageObjectInfo: + with reporter.step(f"Generate object with size {size}"): + file_path = generate_file(size) + file_hash = get_file_hash(file_path) + + container_id = self.get_id() + wallet = self.get_wallet() + with reporter.step(f"Put object with size {size} to container {container_id}"): + if endpoint: + object_id = put_object( + wallet=wallet, + path=file_path, + cid=container_id, + expire_at=expire_at, + shell=self.shell, + endpoint=endpoint, + bearer=bearer_token, + ) + else: + object_id = put_object_to_random_node( + wallet=wallet, + path=file_path, + cid=container_id, + expire_at=expire_at, + shell=self.shell, + cluster=self.cluster, + bearer=bearer_token, + ) + + storage_object = StorageObjectInfo( + container_id, + object_id, + size=size, + wallet=wallet, + file_path=file_path, + file_hash=file_hash, + ) + + return storage_object + + +DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X" +SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X" +REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X" +DEFAULT_EC_PLACEMENT_RULE = "EC 3.1" + + +@reporter.step("Create Container") +def create_container( + wallet: WalletInfo, + shell: Shell, + endpoint: str, + rule: str = DEFAULT_PLACEMENT_RULE, + basic_acl: str = "", + attributes: Optional[dict] = None, + session_token: str = "", + name: Optional[str] = None, + options: Optional[dict] = None, + await_mode: bool = True, + wait_for_creation: bool = True, + nns_zone: Optional[str] = None, + nns_name: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +) -> str: + """ + A wrapper for `frostfs-cli container create` call. + + Args: + wallet (WalletInfo): a wallet on whose behalf a container is created + rule (optional, str): placement rule for container + basic_acl (optional, str): an ACL for container, will be + appended to `--basic-acl` key + attributes (optional, dict): container attributes , will be + appended to `--attributes` key + session_token (optional, str): a path to session token file + session_wallet(optional, str): a path to the wallet which signed + the session token; this parameter makes sense + when paired with `session_token` + shell: executor for cli command + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + options (optional, dict): any other options to pass to the call + name (optional, str): container name attribute + await_mode (bool): block execution until container is persisted + wait_for_creation (): Wait for container shows in container list + timeout: Timeout for the operation. 
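+
+    Example (illustrative sketch; `default_wallet`, `client_shell` and `rpc_endpoint` are
+    assumed to come from the test's fixtures):
+        cid = create_container(default_wallet, client_shell, rpc_endpoint,
+                               rule=SINGLE_PLACEMENT_RULE, name="test-container")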
+ + Returns: + (str): CID of the created container + """ + + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + result = cli.container.create( + rpc_endpoint=endpoint, + policy=rule, + nns_name=nns_name, + nns_zone=nns_zone, + basic_acl=basic_acl, + attributes=attributes, + name=name, + session=session_token, + await_mode=await_mode, + timeout=timeout, + **options or {}, + ) + + cid = _parse_cid(result.stdout) + + logger.info("Container created; waiting until it is persisted in the sidechain") + + if wait_for_creation: + wait_for_container_creation(wallet, cid, shell, endpoint) + + return cid + + +def wait_for_container_creation(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str, attempts: int = 15, sleep_interval: int = 1): + for _ in range(attempts): + containers = list_containers(wallet, shell, endpoint) + if cid in containers: + return + logger.info(f"There is no {cid} in {containers} yet; sleep {sleep_interval} and continue") + sleep(sleep_interval) + raise RuntimeError(f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting") + + +def wait_for_container_deletion(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str, attempts: int = 30, sleep_interval: int = 1): + for _ in range(attempts): + try: + get_container(wallet, cid, shell=shell, endpoint=endpoint) + sleep(sleep_interval) + continue + except Exception as err: + if "container not found" not in str(err): + raise AssertionError(f'Expected "container not found" in error, got\n{err}') + return + raise AssertionError(f"Expected container deleted during {attempts * sleep_interval} sec.") + + +@reporter.step("List Containers") +def list_containers(wallet: WalletInfo, shell: Shell, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT) -> list[str]: + """ + A wrapper for `frostfs-cli container list` call. It returns all the + available containers for the given wallet. + Args: + wallet (WalletInfo): a wallet on whose behalf we list the containers + shell: executor for cli command + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + timeout: Timeout for the operation. + Returns: + (list): list of containers + """ + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + result = cli.container.list(rpc_endpoint=endpoint, timeout=timeout) + return result.stdout.split() + + +@reporter.step("List Objects in container") +def list_objects( + wallet: WalletInfo, + shell: Shell, + container_id: str, + endpoint: str, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +) -> list[str]: + """ + A wrapper for `frostfs-cli container list-objects` call. It returns all the + available objects in container. + Args: + wallet (WalletInfo): a wallet on whose behalf we list the containers objects + shell: executor for cli command + container_id: cid of container + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + timeout: Timeout for the operation. + Returns: + (list): list of containers + """ + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + result = cli.container.list_objects(rpc_endpoint=endpoint, cid=container_id, timeout=timeout) + logger.info(f"Container objects: \n{result}") + return result.stdout.split() + + +@reporter.step("Get Container") +def get_container( + wallet: WalletInfo, + cid: str, + shell: Shell, + endpoint: str, + json_mode: bool = True, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +) -> Union[dict, str]: + """ + A wrapper for `frostfs-cli container get` call. 
It extracts container's + attributes and rearranges them into a more compact view. + Args: + wallet (WalletInfo): path to a wallet on whose behalf we get the container + cid (str): ID of the container to get + shell: executor for cli command + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + json_mode (bool): return container in JSON format + timeout: Timeout for the operation. + Returns: + (dict, str): dict of container attributes + """ + + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + result = cli.container.get(rpc_endpoint=endpoint, cid=cid, json_mode=json_mode, timeout=timeout) + + if not json_mode: + return result.stdout + + container_info = json.loads(result.stdout) + attributes = dict() + for attr in container_info["attributes"]: + attributes[attr["key"]] = attr["value"] + container_info["attributes"] = attributes + container_info["ownerID"] = json_utils.json_reencode(container_info["ownerID"]["value"]) + return container_info + + +@reporter.step("Delete Container") +# TODO: make the error message about a non-found container more user-friendly +def delete_container( + wallet: WalletInfo, + cid: str, + shell: Shell, + endpoint: str, + force: bool = False, + session_token: Optional[str] = None, + await_mode: bool = False, +) -> None: + """ + A wrapper for `frostfs-cli container delete` call. + Args: + await_mode: Block execution until container is removed. + wallet (WalletInfo): path to a wallet on whose behalf we delete the container + cid (str): ID of the container to delete + shell: executor for cli command + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + force (bool): do not check whether container contains locks and remove immediately + session_token: a path to session token file + This function doesn't return anything. + """ + + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + cli.container.delete( + cid=cid, + rpc_endpoint=endpoint, + force=force, + session=session_token, + await_mode=await_mode, + ) + + +def _parse_cid(output: str) -> str: + """ + Parses container ID from a given CLI output. The input string we expect: + container ID: 2tz86kVTDpJxWHrhw3h6PbKMwkLtBEwoqhHQCKTre1FN + awaiting... + container has been persisted on sidechain + We want to take 'container ID' value from the string. 
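+    For the example above the returned value would be
+    "2tz86kVTDpJxWHrhw3h6PbKMwkLtBEwoqhHQCKTre1FN".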
+ + Args: + output (str): CLI output to parse + + Returns: + (str): extracted CID + """ + try: + # taking first line from command's output + first_line = output.split("\n")[0] + except Exception: + first_line = "" + logger.error(f"Got empty output: {output}") + splitted = first_line.split(": ") + if len(splitted) != 2: + raise ValueError(f"no CID was parsed from command output: \t{first_line}") + return splitted[1] + + +@reporter.step("Search for nodes with a container") +def search_nodes_with_container( + wallet: WalletInfo, + cid: str, + shell: Shell, + endpoint: str, + cluster: Cluster, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +) -> list[ClusterNode]: + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + result = cli.container.search_node(rpc_endpoint=endpoint, cid=cid, timeout=timeout) + + pattern = r"[0-9]+(?:\.[0-9]+){3}" + nodes_ip = list(set(re.findall(pattern, result.stdout))) + + with reporter.step(f"nodes ips = {nodes_ip}"): + nodes_list = cluster.get_nodes_by_ip(nodes_ip) + + with reporter.step(f"Return nodes - {nodes_list}"): + return nodes_list diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py new file mode 100644 index 00000000..7f8391d7 --- /dev/null +++ b/src/frostfs_testlib/steps/cli/object.py @@ -0,0 +1,762 @@ +import json +import logging +import os +import re +import uuid +from typing import Any, Optional + +from frostfs_testlib import reporter +from frostfs_testlib.cli import FrostfsCli +from frostfs_testlib.cli.neogo import NeoGo +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE +from frostfs_testlib.resources.common import ASSETS_DIR +from frostfs_testlib.shell import Shell +from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo +from frostfs_testlib.testing import wait_for_success +from frostfs_testlib.utils import json_utils +from frostfs_testlib.utils.cli_utils import parse_netmap_output +from frostfs_testlib.utils.file_utils import TestFile + +logger = logging.getLogger("NeoLogger") + + +@reporter.step("Get object from random node") +def get_object_from_random_node( + wallet: WalletInfo, + cid: str, + oid: str, + shell: Shell, + cluster: Cluster, + bearer: Optional[str] = None, + write_object: Optional[str] = None, + xhdr: Optional[dict] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +) -> str: + """ + GET from FrostFS random storage node + + Args: + wallet: wallet on whose behalf GET is done + cid: ID of Container where we get the Object from + oid: Object ID + shell: executor for cli command + cluster: cluster object + bearer (optional, str): path to Bearer Token file, appends to `--bearer` key + write_object (optional, str): path to downloaded file, appends to `--file` key + no_progress(optional, bool): do not show progress bar + xhdr (optional, dict): Request X-Headers in form of Key=Value + session (optional, dict): path to a JSON-encoded container session token + timeout: Timeout for the operation. 
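+
+    Example (illustrative sketch; `default_wallet`, `client_shell` and `cluster` are assumed
+    fixtures, `cid`/`oid` identify an existing object):
+        file_path = get_object_from_random_node(default_wallet, cid, oid, client_shell, cluster)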
+ Returns: + (str): path to downloaded file + """ + endpoint = cluster.get_random_storage_rpc_endpoint() + return get_object( + wallet, + cid, + oid, + shell, + endpoint, + bearer, + write_object, + xhdr, + no_progress, + session, + timeout, + ) + + +@reporter.step("Get object from {endpoint}") +def get_object( + wallet: WalletInfo, + cid: str, + oid: str, + shell: Shell, + endpoint: str, + bearer: Optional[str] = None, + write_object: Optional[str] = None, + xhdr: Optional[dict] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +) -> TestFile: + """ + GET from FrostFS. + + Args: + wallet (WalletInfo): wallet on whose behalf GET is done + cid (str): ID of Container where we get the Object from + oid (str): Object ID + shell: executor for cli command + bearer: path to Bearer Token file, appends to `--bearer` key + write_object: path to downloaded file, appends to `--file` key + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + no_progress(optional, bool): do not show progress bar + xhdr (optional, dict): Request X-Headers in form of Key=Value + session (optional, dict): path to a JSON-encoded container session token + timeout: Timeout for the operation. + Returns: + (str): path to downloaded file + """ + + if not write_object: + write_object = str(uuid.uuid4()) + test_file = TestFile(os.path.join(ASSETS_DIR, write_object)) + + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + cli.object.get( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + file=test_file, + bearer=bearer, + no_progress=no_progress, + xhdr=xhdr, + session=session, + timeout=timeout, + ) + + return test_file + + +@reporter.step("Get Range Hash from {endpoint}") +def get_range_hash( + wallet: WalletInfo, + cid: str, + oid: str, + range_cut: str, + shell: Shell, + endpoint: str, + bearer: Optional[str] = None, + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +): + """ + GETRANGEHASH of given Object. + + Args: + wallet: wallet on whose behalf GETRANGEHASH is done + cid: ID of Container where we get the Object from + oid: Object ID + shell: executor for cli command + bearer: path to Bearer Token file, appends to `--bearer` key + range_cut: Range to take hash from in the form offset1:length1,..., + value to pass to the `--range` parameter + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + xhdr: Request X-Headers in form of Key=Values + session: Filepath to a JSON- or binary-encoded token of the object RANGEHASH session. + timeout: Timeout for the operation. + Returns: + None + """ + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + result = cli.object.hash( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + range=range_cut, + bearer=bearer, + xhdr=xhdr, + session=session, + timeout=timeout, + ) + + # cutting off output about range offset and length + return result.stdout.split(":")[1].strip() + + +@reporter.step("Put object to random node") +def put_object_to_random_node( + wallet: WalletInfo, + path: str, + cid: str, + shell: Shell, + cluster: Cluster, + bearer: Optional[str] = None, + copies_number: Optional[int] = None, + attributes: Optional[dict] = None, + xhdr: Optional[dict] = None, + expire_at: Optional[int] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +): + """ + PUT of given file to a random storage node. 
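+    The target endpoint is chosen via `cluster.get_random_storage_rpc_endpoint()`, after which
+    the call is delegated to `put_object` with the same arguments.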
+ + Args: + wallet: wallet on whose behalf PUT is done + path: path to file to be PUT + cid: ID of Container where we get the Object from + shell: executor for cli command + cluster: cluster under test + bearer: path to Bearer Token file, appends to `--bearer` key + copies_number: Number of copies of the object to store within the RPC call + attributes: User attributes in form of Key1=Value1,Key2=Value2 + cluster: cluster under test + no_progress: do not show progress bar + expire_at: Last epoch in the life of the object + xhdr: Request X-Headers in form of Key=Value + session: path to a JSON-encoded container session token + timeout: Timeout for the operation. + Returns: + ID of uploaded Object + """ + + endpoint = cluster.get_random_storage_rpc_endpoint() + return put_object( + wallet, + path, + cid, + shell, + endpoint, + bearer, + copies_number, + attributes, + xhdr, + expire_at, + no_progress, + session, + timeout=timeout, + ) + + +@reporter.step("Put object at {endpoint} in container {cid}") +def put_object( + wallet: WalletInfo, + path: str, + cid: str, + shell: Shell, + endpoint: str, + bearer: Optional[str] = None, + copies_number: Optional[int] = None, + attributes: Optional[dict] = None, + xhdr: Optional[dict] = None, + expire_at: Optional[int] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +): + """ + PUT of given file. + + Args: + wallet: wallet on whose behalf PUT is done + path: path to file to be PUT + cid: ID of Container where we get the Object from + shell: executor for cli command + bearer: path to Bearer Token file, appends to `--bearer` key + copies_number: Number of copies of the object to store within the RPC call + attributes: User attributes in form of Key1=Value1,Key2=Value2 + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + no_progress: do not show progress bar + expire_at: Last epoch in the life of the object + xhdr: Request X-Headers in form of Key=Value + session: path to a JSON-encoded container session token + timeout: Timeout for the operation. + Returns: + (str): ID of uploaded Object + """ + + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + result = cli.object.put( + rpc_endpoint=endpoint, + file=path, + cid=cid, + attributes=attributes, + bearer=bearer, + copies_number=copies_number, + expire_at=expire_at, + no_progress=no_progress, + xhdr=xhdr, + session=session, + timeout=timeout, + ) + + # Splitting CLI output to separate lines and taking the penultimate line + id_str = result.stdout.strip().split("\n")[-2] + oid = id_str.split(":")[1] + return oid.strip() + + +@reporter.step("Delete object {cid}/{oid} from {endpoint}") +def delete_object( + wallet: WalletInfo, + cid: str, + oid: str, + shell: Shell, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +): + """ + DELETE an Object. + + Args: + wallet: wallet on whose behalf DELETE is done + cid: ID of Container where we get the Object from + oid: ID of Object we are going to delete + shell: executor for cli command + bearer: path to Bearer Token file, appends to `--bearer` key + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + xhdr: Request X-Headers in form of Key=Value + session: path to a JSON-encoded container session token + timeout: Timeout for the operation. 
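+
+    Example (illustrative sketch; `file_path` is assumed to be a file created with
+    `generate_file`, the remaining names come from the test's fixtures):
+        oid = put_object(default_wallet, file_path, cid, client_shell, rpc_endpoint)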
+ Returns: + (str): Tombstone ID + """ + + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + result = cli.object.delete( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + bearer=bearer, + xhdr=xhdr, + session=session, + timeout=timeout, + ) + + id_str = result.stdout.split("\n")[1] + tombstone = id_str.split(":")[1] + return tombstone.strip() + + +@reporter.step("Get Range") +def get_range( + wallet: WalletInfo, + cid: str, + oid: str, + range_cut: str, + shell: Shell, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +): + """ + GETRANGE an Object. + + Args: + wallet: wallet on whose behalf GETRANGE is done + cid: ID of Container where we get the Object from + oid: ID of Object we are going to request + range_cut: range to take data from in the form offset:length + shell: executor for cli command + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + bearer: path to Bearer Token file, appends to `--bearer` key + xhdr: Request X-Headers in form of Key=Value + session: path to a JSON-encoded container session token + timeout: Timeout for the operation. + Returns: + (str, bytes) - path to the file with range content and content of this file as bytes + """ + test_file = TestFile(os.path.join(ASSETS_DIR, str(uuid.uuid4()))) + + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + cli.object.range( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + range=range_cut, + file=test_file, + bearer=bearer, + xhdr=xhdr, + session=session, + timeout=timeout, + ) + + with open(test_file, "rb") as file: + content = file.read() + return test_file, content + + +@reporter.step("Lock Object") +def lock_object( + wallet: WalletInfo, + cid: str, + oid: str, + shell: Shell, + endpoint: str, + lifetime: Optional[int] = None, + expire_at: Optional[int] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +) -> str: + """ + Locks object in container. + + Args: + address: Address of wallet account. + bearer: File with signed JSON or binary encoded bearer token. + cid: Container ID. + oid: Object ID. + lifetime: Lock lifetime. + expire_at: Lock expiration epoch. + shell: executor for cli command + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + session: Path to a JSON-encoded container session token. + ttl: TTL value in request meta header (default 2). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + xhdr: Dict with request X-Headers. + timeout: Timeout for the operation. 
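+
+    Note: the returned tombstone ID is parsed from the second line of the CLI output.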
+ + Returns: + Lock object ID + """ + + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + result = cli.object.lock( + rpc_endpoint=endpoint, + lifetime=lifetime, + expire_at=expire_at, + address=address, + cid=cid, + oid=oid, + bearer=bearer, + xhdr=xhdr, + session=session, + ttl=ttl, + timeout=timeout, + ) + + # Splitting CLI output to separate lines and taking the penultimate line + id_str = result.stdout.strip().split("\n")[0] + oid = id_str.split(":")[1] + return oid.strip() + + +@reporter.step("Search object") +def search_object( + wallet: WalletInfo, + cid: str, + shell: Shell, + endpoint: str, + bearer: str = "", + filters: Optional[dict] = None, + expected_objects_list: Optional[list] = None, + xhdr: Optional[dict] = None, + session: Optional[str] = None, + phy: bool = False, + root: bool = False, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +) -> list: + """ + SEARCH an Object. + + Args: + wallet: wallet on whose behalf SEARCH is done + cid: ID of Container where we get the Object from + shell: executor for cli command + bearer: path to Bearer Token file, appends to `--bearer` key + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + filters: key=value pairs to filter Objects + expected_objects_list: a list of ObjectIDs to compare found Objects with + xhdr: Request X-Headers in form of Key=Value + session: path to a JSON-encoded container session token + phy: Search physically stored objects. + root: Search for user objects. + timeout: Timeout for the operation. + + Returns: + list of found ObjectIDs + """ + + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + result = cli.object.search( + rpc_endpoint=endpoint, + cid=cid, + bearer=bearer, + xhdr=xhdr, + filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()] if filters else None, + session=session, + phy=phy, + root=root, + timeout=timeout, + ) + + found_objects = re.findall(r"(\w{43,44})", result.stdout) + + if expected_objects_list: + if sorted(found_objects) == sorted(expected_objects_list): + logger.info(f"Found objects list '{found_objects}' " f"is equal for expected list '{expected_objects_list}'") + else: + logger.warning(f"Found object list {found_objects} " f"is not equal to expected list '{expected_objects_list}'") + + return found_objects + + +@reporter.step("Get netmap netinfo") +def get_netmap_netinfo( + wallet: WalletInfo, + shell: Shell, + endpoint: str, + address: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +) -> dict[str, Any]: + """ + Get netmap netinfo output from node + + Args: + wallet (WalletInfo): wallet on whose behalf request is done + shell: executor for cli command + endpoint (optional, str): FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + address: Address of wallet account + ttl: TTL value in request meta header (default 2) + wallet: Path to the wallet or binary key + xhdr: Request X-Headers in form of Key=Value + timeout: Timeout for the operation. 
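+
+    Example (illustrative sketch; `current_epoch` is assumed to be obtained elsewhere):
+        lock_oid = lock_object(default_wallet, cid, oid, client_shell, rpc_endpoint,
+                               expire_at=current_epoch + 10)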
+ + Returns: + (dict): dict of parsed command output + """ + + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + output = cli.netmap.netinfo( + rpc_endpoint=endpoint, + address=address, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + ) + + settings = dict() + + patterns = [ + (re.compile("(.*): (\d+)"), int), + (re.compile("(.*): (false|true)"), bool), + (re.compile("(.*): (\d+\.\d+)"), float), + ] + for pattern, func in patterns: + for setting, value in re.findall(pattern, output.stdout): + settings[setting.lower().strip().replace(" ", "_")] = func(value) + + return settings + + +@reporter.step("Head object") +def head_object( + wallet: WalletInfo, + cid: str, + oid: str, + shell: Shell, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + json_output: bool = True, + is_raw: bool = False, + is_direct: bool = False, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +): + """ + HEAD an Object. + + Args: + wallet (WalletInfo): wallet on whose behalf HEAD is done + cid (str): ID of Container where we get the Object from + oid (str): ObjectID to HEAD + shell: executor for cli command + bearer (optional, str): path to Bearer Token file, appends to `--bearer` key + endpoint(optional, str): FrostFS endpoint to send request to + json_output(optional, bool): return response in JSON format or not; this flag + turns into `--json` key + is_raw(optional, bool): send "raw" request or not; this flag + turns into `--raw` key + is_direct(optional, bool): send request directly to the node or not; this flag + turns into `--ttl 1` key + xhdr (optional, dict): Request X-Headers in form of Key=Value + session (optional, dict): path to a JSON-encoded container session token + timeout: Timeout for the operation. + Returns: + depending on the `json_output` parameter value, the function returns + (dict): HEAD response in JSON format + or + (str): HEAD response as a plain text + """ + + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + result = cli.object.head( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + bearer=bearer, + json_mode=json_output, + raw=is_raw, + ttl=1 if is_direct else None, + xhdr=xhdr, + session=session, + timeout=timeout, + ) + + if not json_output: + return result + + try: + decoded = json.loads(result.stdout) + except Exception as exc: + # If we failed to parse output as JSON, the cause might be + # the plain text string in the beginning of the output. + # Here we cut off first string and try to parse again. 
+ logger.info(f"failed to parse output: {exc}") + logger.info("parsing output in another way") + fst_line_idx = result.stdout.find("\n") + decoded = json.loads(result.stdout[fst_line_idx:]) + + # if response + if "chunks" in decoded.keys(): + logger.info("decoding ec chunks") + return decoded["chunks"] + + # If response is Complex Object header, it has `splitId` key + if "splitId" in decoded.keys(): + return json_utils.decode_split_header(decoded) + + # If response is Last or Linking Object header, + # it has `header` dictionary and non-null `split` dictionary + if "split" in decoded["header"].keys(): + if decoded["header"]["split"]: + return json_utils.decode_linking_object(decoded) + + if decoded["header"]["objectType"] == "STORAGE_GROUP": + return json_utils.decode_storage_group(decoded) + + if decoded["header"]["objectType"] == "TOMBSTONE": + return json_utils.decode_tombstone(decoded) + + return json_utils.decode_simple_header(decoded) + + +@reporter.step("Run neo-go dump-keys") +def neo_go_dump_keys(shell: Shell, wallet: WalletInfo) -> dict: + """ + Run neo-go dump keys command + + Args: + shell: executor for cli command + wallet: wallet path to dump from + Returns: + dict Address:Wallet Key + """ + neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE) + output = neogo.wallet.dump_keys(wallet=wallet).stdout + first_line = "" + try: + # taking first line from command's output contain wallet address + first_line = output.split("\n")[0] + except Exception: + logger.error(f"Got empty output (neo-go dump keys): {output}") + address_id = first_line.split()[0] + # taking second line from command's output contain wallet key + wallet_key = output.split("\n")[1] + return {address_id: wallet_key} + + +@reporter.step("Run neo-go query height") +def neo_go_query_height(shell: Shell, endpoint: str) -> dict: + """ + Run neo-go query height command + + Args: + shell: executor for cli command + endpoint: endpoint to execute + Returns: + dict-> + Latest block: {value} + Validated state: {value} + + """ + neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE) + output = neogo.query.height(rpc_endpoint=endpoint).stdout + first_line = "" + try: + # taking first line from command's output contain the latest block in blockchain + first_line = output.split("\n")[0] + except Exception: + logger.error(f"Got empty output (neo-go query height): {output}") + latest_block = first_line.split(":") + # taking second line from command's output contain wallet key + second_line = output.split("\n")[1] + if second_line != "": + validated_state = second_line.split(":") + return { + latest_block[0].replace(":", ""): int(latest_block[1]), + validated_state[0].replace(":", ""): int(validated_state[1]), + } + return {latest_block[0].replace(":", ""): int(latest_block[1])} + + +@wait_for_success() +@reporter.step("Search object nodes") +def get_object_nodes( + cluster: Cluster, + cid: str, + oid: str, + alive_node: ClusterNode, + bearer: str = "", + xhdr: Optional[dict] = None, + is_direct: bool = False, + verify_presence_all: bool = False, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +) -> list[ClusterNode]: + shell = alive_node.host.get_shell() + endpoint = alive_node.storage_node.get_rpc_endpoint() + wallet = alive_node.storage_node.get_remote_wallet_path() + wallet_config = alive_node.storage_node.get_remote_wallet_config_path() + + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config) + + response = cli.object.nodes( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + bearer=bearer, + ttl=1 if is_direct else None, 
+ json=True,
+ xhdr=xhdr,
+ timeout=timeout,
+ verify_presence_all=verify_presence_all,
+ )
+
+ response_json = json.loads(response.stdout)
+ # Currently, the command will show expected and confirmed nodes.
+ # And we (currently) count only nodes which are both expected and confirmed
+ object_nodes_id = {
+ required_node
+ for data_object in response_json["data_objects"]
+ for required_node in data_object["required_nodes"]
+ if required_node in data_object["confirmed_nodes"]
+ }
+
+ netmap_nodes_list = parse_netmap_output(
+ cli.netmap.snapshot(
+ rpc_endpoint=endpoint,
+ wallet=wallet,
+ ).stdout
+ )
+ netmap_nodes = [
+ netmap_node for object_node in object_nodes_id for netmap_node in netmap_nodes_list if object_node == netmap_node.node_id
+ ]
+
+ object_nodes = [
+ cluster_node
+ for netmap_node in netmap_nodes
+ for cluster_node in cluster.cluster_nodes
+ if netmap_node.node == cluster_node.get_interface(Interfaces.MGMT)
+ ]
+
+ return object_nodes
diff --git a/src/frostfs_testlib/steps/cli/tree.py b/src/frostfs_testlib/steps/cli/tree.py
new file mode 100644
index 00000000..4b0dfb34
--- /dev/null
+++ b/src/frostfs_testlib/steps/cli/tree.py
@@ -0,0 +1,35 @@
+import logging
+from typing import Optional
+
+from frostfs_testlib import reporter
+from frostfs_testlib.cli import FrostfsCli
+from frostfs_testlib.plugins import load_plugin
+from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
+from frostfs_testlib.shell import Shell
+from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
+
+logger = logging.getLogger("NeoLogger")
+
+
+
+@reporter.step("Get Tree List")
+def get_tree_list(
+ wallet: WalletInfo,
+ cid: str,
+ shell: Shell,
+ endpoint: str,
+ timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+) -> None:
+ """
+ A wrapper for `frostfs-cli tree list` call.
+ Args:
+ wallet (WalletInfo): wallet on whose behalf we request the tree list
+ cid (str): ID of the container to list trees for
+ shell: executor for cli command
+ endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
+ timeout: Timeout for the operation.
+ This function doesn't return anything.
+ """
+
+ cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
+ cli.tree.list(cid=cid, rpc_endpoint=endpoint, timeout=timeout)
diff --git a/src/frostfs_testlib/steps/complex_object_actions.py b/src/frostfs_testlib/steps/complex_object_actions.py
new file mode 100644
index 00000000..e1a70881
--- /dev/null
+++ b/src/frostfs_testlib/steps/complex_object_actions.py
@@ -0,0 +1,207 @@
+#!/usr/bin/python3
+
+"""
+ This module contains functions which are used for Large Object assembling:
+ getting the Last Object of the split and getting the Link Object. It is not enough to
+ simply perform a "raw" HEAD request.
+ Therefore, the reliable retrieval of the aforementioned objects must be done
+ this way: send a direct "raw" HEAD request to every Storage Node and return
+ the desired OID on the first non-null response.
+""" + +import logging +from typing import Optional, Tuple + +from frostfs_testlib import reporter +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT +from frostfs_testlib.shell import Shell +from frostfs_testlib.steps.cli.object import head_object +from frostfs_testlib.storage.cluster import Cluster, StorageNode +from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo + +logger = logging.getLogger("NeoLogger") + + +def get_storage_object_chunks( + storage_object: StorageObjectInfo, + shell: Shell, + cluster: Cluster, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +) -> list[str]: + """ + Get complex object split objects ids (no linker object) + + Args: + storage_object: storage_object to get it's chunks + shell: client shell to do cmd requests + cluster: cluster object under test + timeout: Timeout for an operation. + + Returns: + list of object ids of complex object chunks + """ + + with reporter.step(f"Get complex object chunks (f{storage_object.oid})"): + split_object_id = get_link_object( + storage_object.wallet, + storage_object.cid, + storage_object.oid, + shell, + cluster.services(StorageNode), + is_direct=False, + timeout=timeout, + ) + head = head_object( + storage_object.wallet, + storage_object.cid, + split_object_id, + shell, + cluster.default_rpc_endpoint, + timeout=timeout, + ) + + chunks_object_ids = [] + if "split" in head["header"] and "children" in head["header"]["split"]: + chunks_object_ids = head["header"]["split"]["children"] + + return chunks_object_ids + + +def get_complex_object_split_ranges( + storage_object: StorageObjectInfo, + shell: Shell, + cluster: Cluster, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +) -> list[Tuple[int, int]]: + + """ + Get list of split ranges tuples (offset, length) of a complex object + For example if object size if 100 and max object size in system is 30 + the returned list should be + [(0, 30), (30, 30), (60, 30), (90, 10)] + + Args: + storage_object: storage_object to get it's chunks + shell: client shell to do cmd requests + cluster: cluster object under test + timeout: Timeout for an operation. + + Returns: + list of object ids of complex object chunks + """ + + ranges: list = [] + offset = 0 + chunks_ids = get_storage_object_chunks(storage_object, shell, cluster) + for chunk_id in chunks_ids: + head = head_object( + storage_object.wallet, + storage_object.cid, + chunk_id, + shell, + cluster.default_rpc_endpoint, + timeout=timeout, + ) + + length = int(head["header"]["payloadLength"]) + ranges.append((offset, length)) + + offset = offset + length + + return ranges + + +@reporter.step("Get Link Object") +def get_link_object( + wallet: WalletInfo, + cid: str, + oid: str, + shell: Shell, + nodes: list[StorageNode], + bearer: str = "", + is_direct: bool = True, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +): + """ + Args: + wallet (str): path to the wallet on whose behalf the Storage Nodes + are requested + cid (str): Container ID which stores the Large Object + oid (str): Large Object ID + shell: executor for cli command + nodes: list of nodes to do search on + bearer (optional, str): path to Bearer token file + wallet_config (optional, str): path to the frostfs-cli config file + is_direct: send request directly to the node or not; this flag + turns into `--ttl 1` key + timeout: Timeout for an operation. 
+ Returns: + (str): Link Object ID + When no Link Object ID is found after all Storage Nodes polling, + the function throws an error. + """ + for node in nodes: + endpoint = node.get_rpc_endpoint() + try: + resp = head_object( + wallet, + cid, + oid, + shell=shell, + endpoint=endpoint, + is_raw=True, + is_direct=is_direct, + bearer=bearer, + timeout=timeout, + ) + if resp["link"]: + return resp["link"] + except Exception: + logger.info(f"No Link Object found on {endpoint}; continue") + logger.error(f"No Link Object for {cid}/{oid} found among all Storage Nodes") + return None + + +@reporter.step("Get Last Object") +def get_last_object( + wallet: WalletInfo, + cid: str, + oid: str, + shell: Shell, + nodes: list[StorageNode], + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +) -> Optional[str]: + """ + Args: + wallet (str): path to the wallet on whose behalf the Storage Nodes + are requested + cid (str): Container ID which stores the Large Object + oid (str): Large Object ID + shell: executor for cli command + nodes: list of nodes to do search on + timeout: Timeout for an operation. + Returns: + (str): Last Object ID + When no Last Object ID is found after all Storage Nodes polling, + the function throws an error. + """ + for node in nodes: + endpoint = node.get_rpc_endpoint() + try: + resp = head_object( + wallet, + cid, + oid, + shell=shell, + endpoint=endpoint, + is_raw=True, + is_direct=True, + timeout=timeout, + ) + if resp["lastPart"]: + return resp["lastPart"] + except Exception: + logger.info(f"No Last Object found on {endpoint}; continue") + logger.error(f"No Last Object for {cid}/{oid} found among all Storage Nodes") + return None diff --git a/src/frostfs_testlib/steps/epoch.py b/src/frostfs_testlib/steps/epoch.py new file mode 100644 index 00000000..6ec5483a --- /dev/null +++ b/src/frostfs_testlib/steps/epoch.py @@ -0,0 +1,125 @@ +import logging +from time import sleep +from typing import Optional + +from frostfs_testlib import reporter +from frostfs_testlib.cli import FrostfsAdm, FrostfsCli, NeoGo +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE +from frostfs_testlib.resources.common import MORPH_BLOCK_TIME +from frostfs_testlib.shell import Shell +from frostfs_testlib.steps.payment_neogo import get_contract_hash +from frostfs_testlib.storage.cluster import Cluster, StorageNode +from frostfs_testlib.storage.dataclasses.frostfs_services import InnerRing, MorphChain +from frostfs_testlib.testing.test_control import wait_for_success +from frostfs_testlib.utils import datetime_utils, wallet_utils + +logger = logging.getLogger("NeoLogger") + + +@reporter.step("Get epochs from nodes") +def get_epochs_from_nodes(shell: Shell, cluster: Cluster) -> dict[str, int]: + """ + Get current epochs on each node. + + Args: + shell: shell to run commands on. + cluster: cluster under test. + + Returns: + Dict of {node_ip: epoch}. 
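+
+ Example of the returned value (illustrative addresses, not taken from a real cluster):
+ {"10.78.70.11": 42, "10.78.70.12": 42}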
+ """ + epochs_by_node = {} + for node in cluster.services(StorageNode): + epochs_by_node[node.host.config.address] = get_epoch(shell, cluster, node) + return epochs_by_node + + +@reporter.step("Ensure fresh epoch") +def ensure_fresh_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None) -> int: + # ensure new fresh epoch to avoid epoch switch during test session + alive_node = alive_node if alive_node else cluster.services(StorageNode)[0] + current_epoch = get_epoch(shell, cluster, alive_node) + tick_epoch(shell, cluster, alive_node) + epoch = get_epoch(shell, cluster, alive_node) + assert epoch > current_epoch, "Epoch wasn't ticked" + return epoch + + +@reporter.step("Wait up to {timeout} seconds for nodes on cluster to align epochs") +def wait_for_epochs_align(shell: Shell, cluster: Cluster, timeout=60): + @wait_for_success(timeout, 5, None, True) + def check_epochs(): + epochs_by_node = get_epochs_from_nodes(shell, cluster) + assert len(set(epochs_by_node.values())) == 1, f"unaligned epochs found: {epochs_by_node}" + + check_epochs() + + +@reporter.step("Get Epoch") +def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None): + alive_node = alive_node if alive_node else cluster.services(StorageNode)[0] + endpoint = alive_node.get_rpc_endpoint() + wallet_path = alive_node.get_wallet_path() + wallet_config = alive_node.get_wallet_config_path() + + cli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config) + + epoch = cli.netmap.epoch(endpoint, wallet_path, timeout=CLI_DEFAULT_TIMEOUT) + return int(epoch.stdout) + + +@reporter.step("Tick Epoch") +def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None, delta: Optional[int] = None): + """ + Tick epoch using frostfs-adm or NeoGo if frostfs-adm is not available (DevEnv) + Args: + shell: local shell to make queries about current epoch. 
Remote shell will be used to tick new one + cluster: cluster instance under test + alive_node: node to send requests to (first node in cluster by default) + """ + + alive_node = alive_node if alive_node else cluster.services(StorageNode)[0] + remote_shell = alive_node.host.get_shell() + + if "force_transactions" not in alive_node.host.config.attributes: + # If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests) + frostfs_adm = FrostfsAdm( + shell=remote_shell, + frostfs_adm_exec_path=FROSTFS_ADM_EXEC, + config_file=FROSTFS_ADM_CONFIG_PATH, + ) + frostfs_adm.morph.force_new_epoch(delta=delta) + return + + # Otherwise we tick epoch using transaction + cur_epoch = get_epoch(shell, cluster) + + if delta: + next_epoch = cur_epoch + delta + else: + next_epoch = cur_epoch + 1 + + # Use first node by default + ir_node = cluster.services(InnerRing)[0] + # In case if no local_wallet_path is provided, we use wallet_path + ir_wallet_path = ir_node.get_wallet_path() + ir_wallet_pass = ir_node.get_wallet_password() + ir_address = wallet_utils.get_last_address_from_wallet(ir_wallet_path, ir_wallet_pass) + + morph_chain = cluster.services(MorphChain)[0] + morph_endpoint = morph_chain.get_endpoint() + + neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE) + neogo.contract.invokefunction( + wallet=ir_wallet_path, + wallet_password=ir_wallet_pass, + scripthash=get_contract_hash(morph_chain, "netmap.frostfs", shell=shell), + method="newEpoch", + arguments=f"int:{next_epoch}", + multisig_hash=f"{ir_address}:Global", + address=ir_address, + rpc_endpoint=morph_endpoint, + force=True, + gas=1, + ) + sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME)) diff --git a/src/frostfs_testlib/steps/http_gate.py b/src/frostfs_testlib/steps/http_gate.py new file mode 100644 index 00000000..aa4abf29 --- /dev/null +++ b/src/frostfs_testlib/steps/http_gate.py @@ -0,0 +1,370 @@ +import logging +import os +import random +import re +import shutil +import uuid +import zipfile +from typing import Optional +from urllib.parse import quote_plus + +import requests + +from frostfs_testlib import reporter +from frostfs_testlib.cli import GenericCli +from frostfs_testlib.clients.s3.aws_cli_client import command_options +from frostfs_testlib.resources.common import ASSETS_DIR, SIMPLE_OBJECT_SIZE +from frostfs_testlib.shell import Shell +from frostfs_testlib.shell.local_shell import LocalShell +from frostfs_testlib.steps.cli.object import get_object +from frostfs_testlib.steps.storage_policy import get_nodes_without_object +from frostfs_testlib.storage.cluster import ClusterNode, StorageNode +from frostfs_testlib.testing.test_control import retry +from frostfs_testlib.utils.file_utils import TestFile, get_file_hash + +logger = logging.getLogger("NeoLogger") + +local_shell = LocalShell() + + +@reporter.step("Get via HTTP Gate") +def get_via_http_gate( + cid: str, + oid: str, + node: ClusterNode, + request_path: Optional[str] = None, + presigned_url: Optional[str] = None, + timeout: Optional[int] = 300, +): + """ + This function gets given object from HTTP gate + cid: container id to get object from + oid: object id / object key + node: node to make request + request_path: (optional) http request, if ommited - use default [{endpoint}/get/{cid}/{oid}] + """ + + request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}" + if request_path: + request = f"{node.http_gate.get_endpoint()}{request_path}" + + if presigned_url: + request = presigned_url + + response = requests.get(request, stream=True, 
timeout=timeout, verify=False)
+
+ if not response.ok:
+ raise Exception(
+ f"""Failed to get object via HTTP gate:
+ request: {response.request.path_url},
+ response: {response.text},
+ headers: {response.headers},
+ status code: {response.status_code} {response.reason}"""
+ )
+
+ logger.info(f"Request: {request}")
+ _attach_allure_step(request, response.status_code)
+
+ test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}"))
+ with open(test_file, "wb") as file:
+ for chunk in response.iter_content(chunk_size=8192):
+ file.write(chunk)
+
+ return test_file
+
+
+@reporter.step("Get via Zip HTTP Gate")
+def get_via_zip_http_gate(cid: str, prefix: str, node: ClusterNode, timeout: Optional[int] = 300):
+ """
+ This function gets objects under the given prefix from HTTP gate as a ZIP archive and extracts it
+ cid: container id to get objects from
+ prefix: common prefix
+ node: node to make request
+ """
+ request = f"{node.http_gate.get_endpoint()}/zip/{cid}/{prefix}"
+ resp = requests.get(request, stream=True, timeout=timeout, verify=False)
+
+ if not resp.ok:
+ raise Exception(
+ f"""Failed to get object via HTTP gate:
+ request: {resp.request.path_url},
+ response: {resp.text},
+ headers: {resp.headers},
+ status code: {resp.status_code} {resp.reason}"""
+ )
+
+ logger.info(f"Request: {request}")
+ _attach_allure_step(request, resp.status_code)
+
+ test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_archive.zip"))
+ with open(test_file, "wb") as file:
+ shutil.copyfileobj(resp.raw, file)
+
+ with zipfile.ZipFile(test_file, "r") as zip_ref:
+ zip_ref.extractall(ASSETS_DIR)
+
+ return os.path.join(os.getcwd(), ASSETS_DIR, prefix)
+
+
+@reporter.step("Get via HTTP Gate by attribute")
+def get_via_http_gate_by_attribute(
+ cid: str,
+ attribute: dict,
+ node: ClusterNode,
+ request_path: Optional[str] = None,
+ timeout: Optional[int] = 300,
+):
+ """
+ This function gets given object from HTTP gate
+ cid: CID to get object from
+ attribute: {name: value} attribute pair to search by
+ node: node to make request to
+ request_path: (optional) http request path, if omitted - use default [{endpoint}/get_by_attribute/{Key}/{Value}]
+ """
+
+ attr_name = list(attribute.keys())[0]
+ attr_value = quote_plus(str(attribute.get(attr_name)))
+
+ request = f"{node.http_gate.get_endpoint()}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}"
+ if request_path:
+ request = f"{node.http_gate.get_endpoint()}{request_path}"
+
+ resp = requests.get(request, stream=True, timeout=timeout, verify=False)
+
+ if not resp.ok:
+ raise Exception(
+ f"""Failed to get object via HTTP gate:
+ request: {resp.request.path_url},
+ response: {resp.text},
+ headers: {resp.headers},
+ status code: {resp.status_code} {resp.reason}"""
+ )
+
+ logger.info(f"Request: {request}")
+ _attach_allure_step(request, resp.status_code)
+
+ test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{str(uuid.uuid4())}"))
+ with open(test_file, "wb") as file:
+ shutil.copyfileobj(resp.raw, file)
+ return test_file
+
+
+@reporter.step("Upload via HTTP Gate")
+def upload_via_http_gate(cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300) -> str:
+ """
+ This function uploads given object through HTTP gate
+ cid: CID of the container to upload the object to
+ path: File path to upload
+ endpoint: http gate endpoint
+ headers: Object headers
+ """
+ request = f"{endpoint}/upload/{cid}"
+ files = {"upload_file": open(path, "rb")}
+ body = {"filename": path}
+ resp = requests.post(request, files=files, data=body,
headers=headers, timeout=timeout, verify=False)
+
+ if not resp.ok:
+ raise Exception(
+ f"""Failed to upload object via HTTP gate:
+ request: {resp.request.path_url},
+ response: {resp.text},
+ status code: {resp.status_code} {resp.reason}"""
+ )
+
+ logger.info(f"Request: {request}")
+ _attach_allure_step(request, resp.json(), req_type="POST")
+
+ assert resp.json().get("object_id"), f"OID not found in response {resp}"
+
+ return resp.json().get("object_id")
+
+
+@reporter.step("Check if the passed object is large")
+def is_object_large(filepath: str) -> bool:
+ """
+ This function checks passed file size and returns True if file_size > SIMPLE_OBJECT_SIZE
+ filepath: File path to check
+ """
+ file_size = os.path.getsize(filepath)
+ logger.info(f"Size= {file_size}")
+ if file_size > int(SIMPLE_OBJECT_SIZE):
+ return True
+ else:
+ return False
+
+
+@reporter.step("Upload via HTTP Gate using Curl")
+def upload_via_http_gate_curl(
+ cid: str,
+ filepath: str,
+ endpoint: str,
+ headers: Optional[list] = None,
+ error_pattern: Optional[str] = None,
+) -> str:
+ """
+ This function uploads given object through HTTP gate using curl utility.
+ cid: CID of the container to upload the object to
+ filepath: File path to upload
+ headers: Object headers
+ endpoint: http gate endpoint
+ error_pattern: [optional] expected error message from the command
+ """
+ request = f"{endpoint}/upload/{cid}"
+ attributes = ""
+ if headers:
+ # parse attributes
+ attributes = " ".join(headers)
+
+ large_object = is_object_large(filepath)
+ if large_object:
+ # pre-clean
+ local_shell.exec("rm pipe -f")
+ files = f"file=@pipe;filename={os.path.basename(filepath)}"
+ cmd = f"mkfifo pipe;cat {filepath} > pipe & curl -k --no-buffer -F '{files}' {attributes} {request}"
+ output = local_shell.exec(cmd, command_options)
+ # clean up pipe
+ local_shell.exec("rm pipe")
+ else:
+ files = f"file=@{filepath};filename={os.path.basename(filepath)}"
+ cmd = f"curl -k -F '{files}' {attributes} {request}"
+ output = local_shell.exec(cmd)
+
+ if error_pattern:
+ match = error_pattern.casefold() in str(output).casefold()
+ assert match, f"Expected {output} to match {error_pattern}"
+ return ""
+
+ oid_re = re.search(r'"object_id": "(.*)"', output.stdout)
+ if not oid_re:
+ raise AssertionError(f'Could not find "object_id" in {output}')
+ return oid_re.group(1)
+
+
+@retry(max_attempts=3, sleep_interval=1)
+@reporter.step("Get via HTTP Gate using Curl")
+def get_via_http_curl(cid: str, oid: str, node: ClusterNode) -> TestFile:
+ """
+ This function gets given object from HTTP gate using curl utility.
+ cid: CID to get object from + oid: object OID + node: node for request + """ + request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}" + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}")) + + curl = GenericCli("curl", node.host) + curl(f"-k ", f"{request} > {test_file}", shell=local_shell) + + return test_file + + +def _attach_allure_step(request: str, status_code: int, req_type="GET"): + command_attachment = f"REQUEST: '{request}'\n" f"RESPONSE:\n {status_code}\n" + with reporter.step(f"{req_type} Request"): + reporter.attach(command_attachment, f"{req_type} Request") + + +@reporter.step("Try to get object and expect error") +def try_to_get_object_and_expect_error( + cid: str, + oid: str, + node: ClusterNode, + error_pattern: str, +) -> None: + try: + get_via_http_gate(cid=cid, oid=oid, node=node) + raise AssertionError(f"Expected error on getting object with cid: {cid}") + except Exception as err: + match = error_pattern.casefold() in str(err).casefold() + assert match, f"Expected {err} to match {error_pattern}" + + +@reporter.step("Verify object can be get using HTTP header attribute") +def get_object_by_attr_and_verify_hashes( + oid: str, + file_name: str, + cid: str, + attrs: dict, + node: ClusterNode, +) -> None: + got_file_path_http = get_via_http_gate(cid=cid, oid=oid, node=node) + got_file_path_http_attr = get_via_http_gate_by_attribute(cid=cid, attribute=attrs, node=node) + assert_hashes_are_equal(file_name, got_file_path_http, got_file_path_http_attr) + + +def verify_object_hash( + oid: str, + file_name: str, + wallet: str, + cid: str, + shell: Shell, + nodes: list[StorageNode], + request_node: ClusterNode, + object_getter=None, +) -> None: + + nodes_list = get_nodes_without_object( + wallet=wallet, + cid=cid, + oid=oid, + shell=shell, + nodes=nodes, + ) + # for some reason we can face with case when nodes_list is empty due to object resides in all nodes + if nodes_list: + random_node = random.choice(nodes_list) + else: + random_node = random.choice(nodes) + + object_getter = object_getter or get_via_http_gate + + got_file_path = get_object( + wallet=wallet, + cid=cid, + oid=oid, + shell=shell, + endpoint=random_node.get_rpc_endpoint(), + ) + got_file_path_http = object_getter(cid=cid, oid=oid, node=request_node) + + assert_hashes_are_equal(file_name, got_file_path, got_file_path_http) + + +def assert_hashes_are_equal(orig_file_name: str, got_file_1: str, got_file_2: str) -> None: + msg = "Expected hashes are equal for files {f1} and {f2}" + got_file_hash_http = get_file_hash(got_file_1) + assert get_file_hash(got_file_2) == got_file_hash_http, msg.format(f1=got_file_2, f2=got_file_1) + assert get_file_hash(orig_file_name) == got_file_hash_http, msg.format(f1=orig_file_name, f2=got_file_1) + + +def attr_into_header(attrs: dict) -> dict: + return {f"X-Attribute-{_key}": _value for _key, _value in attrs.items()} + + +@reporter.step("Convert each attribute (Key=Value) to the following format: -H 'X-Attribute-Key: Value'") +def attr_into_str_header_curl(attrs: dict) -> list: + headers = [] + for k, v in attrs.items(): + headers.append(f"-H 'X-Attribute-{k}: {v}'") + logger.info(f"[List of Attrs for curl:] {headers}") + return headers + + +@reporter.step("Try to get object via http (pass http_request and optional attributes) and expect error") +def try_to_get_object_via_passed_request_and_expect_error( + cid: str, + oid: str, + node: ClusterNode, + error_pattern: str, + http_request_path: str, + attrs: Optional[dict] = None, +) -> 
None: + try: + if attrs is None: + get_via_http_gate(cid, oid, node, http_request_path) + else: + get_via_http_gate_by_attribute(cid, attrs, node, http_request_path) + raise AssertionError(f"Expected error on getting object with cid: {cid}") + except Exception as err: + match = error_pattern.casefold() in str(err).casefold() + assert match, f"Expected {err} to match {error_pattern}" diff --git a/src/frostfs_testlib/steps/metrics.py b/src/frostfs_testlib/steps/metrics.py new file mode 100644 index 00000000..0d0950aa --- /dev/null +++ b/src/frostfs_testlib/steps/metrics.py @@ -0,0 +1,45 @@ +import re + +from frostfs_testlib import reporter +from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.testing.test_control import wait_for_success + + +@reporter.step("Check metrics result") +@wait_for_success(max_wait_time=300, interval=10) +def check_metrics_counter( + cluster_nodes: list[ClusterNode], + operator: str = "==", + counter_exp: int = 0, + parse_from_command: bool = False, + **metrics_greps: str, +): + counter_act = 0 + for cluster_node in cluster_nodes: + counter_act += get_metrics_value(cluster_node, parse_from_command, **metrics_greps) + assert eval( + f"{counter_act} {operator} {counter_exp}" + ), f"Actual: {counter_act} {operator} Expected: {counter_exp} in nodes: {cluster_nodes}" + + +@reporter.step("Get metrics value from node: {node}") +def get_metrics_value(node: ClusterNode, parse_from_command: bool = False, **metrics_greps: str): + try: + command_result = node.metrics.storage.get_metrics_search_by_greps(**metrics_greps) + if parse_from_command: + metrics_counter = calc_metrics_count_from_stdout(command_result.stdout, **metrics_greps) + else: + metrics_counter = calc_metrics_count_from_stdout(command_result.stdout) + except RuntimeError as e: + metrics_counter = 0 + + return metrics_counter + + +@reporter.step("Parse metrics count and calc sum of result") +def calc_metrics_count_from_stdout(metric_result_stdout: str, command: str = None): + if command: + result = re.findall(rf"{command}\s*([\d.e+-]+)", metric_result_stdout) + else: + result = re.findall(r"}\s*([\d.e+-]+)", metric_result_stdout) + return sum(map(lambda x: int(float(x)), result)) diff --git a/src/frostfs_testlib/steps/network.py b/src/frostfs_testlib/steps/network.py new file mode 100644 index 00000000..6bde2f19 --- /dev/null +++ b/src/frostfs_testlib/steps/network.py @@ -0,0 +1,21 @@ +from frostfs_testlib.shell import CommandOptions +from frostfs_testlib.storage.cluster import ClusterNode + + +class IpHelper: + @staticmethod + def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[tuple]) -> None: + shell = node.host.get_shell() + for ip, table in block_ip: + if not table: + shell.exec(f"ip r a blackhole {ip}") + continue + shell.exec(f"ip r a blackhole {ip} table {table}") + + @staticmethod + def restore_input_traffic_to_node(node: ClusterNode) -> None: + shell = node.host.get_shell() + unlock_ip = shell.exec("ip r l table all | grep blackhole", CommandOptions(check=False)).stdout + + for active_blackhole in unlock_ip.strip().split("\n"): + shell.exec(f"ip r d {active_blackhole}") diff --git a/src/frostfs_testlib/steps/node_management.py b/src/frostfs_testlib/steps/node_management.py new file mode 100644 index 00000000..42b1fc52 --- /dev/null +++ b/src/frostfs_testlib/steps/node_management.py @@ -0,0 +1,292 @@ +import logging +import random +import re +import time +from dataclasses import dataclass +from time import sleep +from typing import Optional + +from frostfs_testlib 
import reporter +from frostfs_testlib.cli import FrostfsAdm, FrostfsCli +from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC +from frostfs_testlib.resources.common import MORPH_BLOCK_TIME +from frostfs_testlib.shell import Shell +from frostfs_testlib.steps.epoch import tick_epoch, wait_for_epochs_align +from frostfs_testlib.storage.cluster import Cluster, StorageNode +from frostfs_testlib.testing.test_control import wait_for_success +from frostfs_testlib.utils import datetime_utils + +logger = logging.getLogger("NeoLogger") + + +@dataclass +class HealthStatus: + network_status: Optional[str] = None + health_status: Optional[str] = None + + @staticmethod + def from_stdout(output: str) -> "HealthStatus": + network, health = None, None + for line in output.split("\n"): + if "Network status" in line: + network = line.split(":")[-1].strip() + if "Health status" in line: + health = line.split(":")[-1].strip() + return HealthStatus(network, health) + + +@reporter.step("Get Locode from random storage node") +def get_locode_from_random_node(cluster: Cluster) -> str: + node = random.choice(cluster.services(StorageNode)) + locode = node.get_un_locode() + logger.info(f"Chosen '{locode}' locode from node {node}") + return locode + + +@reporter.step("Healthcheck for storage node {node}") +def storage_node_healthcheck(node: StorageNode) -> HealthStatus: + """ + The function returns storage node's health status. + Args: + node: storage node for which health status should be retrieved. + Returns: + health status as HealthStatus object. + """ + + host = node.host + service_config = host.get_service_config(node.name) + wallet_path = service_config.attributes["wallet_path"] + wallet_password = service_config.attributes["wallet_password"] + control_endpoint = service_config.attributes["control_endpoint"] + + shell = host.get_shell() + wallet_config_path = f"/tmp/{node.name}-config.yaml" + wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' + shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") + + cli_config = host.get_cli_config("frostfs-cli") + + cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path) + result = cli.control.healthcheck(control_endpoint) + + return HealthStatus.from_stdout(result.stdout) + + +@reporter.step("Set status for {node}") +def storage_node_set_status(node: StorageNode, status: str, retries: int = 0) -> None: + """ + The function sets particular status for given node. + Args: + node: node for which status should be set. + status: online or offline. + retries (optional, int): number of retry attempts if it didn't work from the first time + """ + host = node.host + service_config = host.get_service_config(node.name) + wallet_path = service_config.attributes["wallet_path"] + wallet_password = service_config.attributes["wallet_password"] + control_endpoint = service_config.attributes["control_endpoint"] + + shell = host.get_shell() + wallet_config_path = f"/tmp/{node.name}-config.yaml" + wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' + shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") + + cli_config = host.get_cli_config("frostfs-cli") + + cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path) + cli.control.set_status(control_endpoint, status) + + +@reporter.step("Get netmap snapshot") +def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str: + """ + The function returns string representation of netmap snapshot. 
+ Args: + node: node from which netmap snapshot should be requested. + Returns: + string representation of netmap + """ + + storage_wallet_config = node.get_wallet_config_path() + storage_wallet_path = node.get_wallet_path() + + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, config_file=storage_wallet_config) + return cli.netmap.snapshot(rpc_endpoint=node.get_rpc_endpoint(), wallet=storage_wallet_path).stdout + + +@reporter.step("Get shard list for {node}") +def node_shard_list(node: StorageNode, json: Optional[bool] = None) -> list[str]: + """ + The function returns list of shards for specified storage node. + Args: + node: node for which shards should be returned. + Returns: + list of shards. + """ + host = node.host + service_config = host.get_service_config(node.name) + wallet_path = service_config.attributes["wallet_path"] + wallet_password = service_config.attributes["wallet_password"] + control_endpoint = service_config.attributes["control_endpoint"] + + shell = host.get_shell() + wallet_config_path = f"/tmp/{node.name}-config.yaml" + wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' + shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") + + cli_config = host.get_cli_config("frostfs-cli") + + cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path) + result = cli.shards.list(endpoint=control_endpoint, json_mode=json) + + return re.findall(r"Shard (.*):", result.stdout) + + +@reporter.step("Shard set for {node}") +def node_shard_set_mode(node: StorageNode, shard: list[str], mode: str) -> None: + """ + The function sets mode for specified shard. + Args: + node: node on which shard mode should be set. + """ + host = node.host + service_config = host.get_service_config(node.name) + wallet_path = service_config.attributes["wallet_path"] + wallet_password = service_config.attributes["wallet_password"] + control_endpoint = service_config.attributes["control_endpoint"] + + shell = host.get_shell() + wallet_config_path = f"/tmp/{node.name}-config.yaml" + wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' + shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") + + cli_config = host.get_cli_config("frostfs-cli") + + cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path) + cli.shards.set_mode(endpoint=control_endpoint, mode=mode, id=shard) + + +@reporter.step("Drop object from {node}") +def drop_object(node: StorageNode, cid: str, oid: str) -> None: + """ + The function drops object from specified node. + Args: + node: node from which object should be dropped. 
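+ cid: ID of the container the object belongs to.
+ oid: ID of the object to drop.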
+ """ + host = node.host + service_config = host.get_service_config(node.name) + wallet_path = service_config.attributes["wallet_path"] + wallet_password = service_config.attributes["wallet_password"] + control_endpoint = service_config.attributes["control_endpoint"] + + shell = host.get_shell() + wallet_config_path = f"/tmp/{node.name}-config.yaml" + wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' + shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") + + cli_config = host.get_cli_config("frostfs-cli") + + cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path) + objects = f"{cid}/{oid}" + cli.control.drop_objects(control_endpoint, objects) + + +@reporter.step("Delete data from host for node {node}") +def delete_node_data(node: StorageNode) -> None: + node.stop_service() + node.host.delete_storage_node_data(node.name) + time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME)) + + +@reporter.step("Exclude node {node_to_exclude} from network map") +def exclude_node_from_network_map(node_to_exclude: StorageNode, alive_node: StorageNode, shell: Shell, cluster: Cluster) -> None: + node_netmap_key = node_to_exclude.get_wallet_public_key() + + storage_node_set_status(node_to_exclude, status="offline") + + time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME)) + tick_epoch(shell, cluster) + wait_for_epochs_align(shell, cluster) + + snapshot = get_netmap_snapshot(node=alive_node, shell=shell) + assert node_netmap_key not in snapshot, f"Expected node with key {node_netmap_key} to be absent in network map" + + +@reporter.step("Include node {node_to_include} into network map") +def include_node_to_network_map(node_to_include: StorageNode, alive_node: StorageNode, shell: Shell, cluster: Cluster) -> None: + storage_node_set_status(node_to_include, status="online") + + # Per suggestion of @fyrchik we need to wait for 2 blocks after we set status and after tick epoch. + # First sleep can be omitted after https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/60 complete. 
+ + time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2) + tick_epoch(shell, cluster) + time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2) + + await_node_in_map(node_to_include, shell, alive_node) + + +@reporter.step("Check node {node} in network map") +def check_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None: + alive_node = alive_node or node + + node_netmap_key = node.get_wallet_public_key() + logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}") + + snapshot = get_netmap_snapshot(alive_node, shell) + assert node_netmap_key in snapshot, f"Expected node with key {node_netmap_key} to be in network map" + + +@wait_for_success(300, 15, title="Await node {node} in network map") +def await_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None: + check_node_in_map(node, shell, alive_node) + + +@reporter.step("Check node {node} NOT in network map") +def check_node_not_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None: + alive_node = alive_node or node + + node_netmap_key = node.get_wallet_public_key() + logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}") + + snapshot = get_netmap_snapshot(alive_node, shell) + assert node_netmap_key not in snapshot, f"Expected node with key {node_netmap_key} to be NOT in network map" + + +@reporter.step("Wait for node {node} is ready") +def wait_for_node_to_be_ready(node: StorageNode) -> None: + timeout, attempts = 60, 15 + for _ in range(attempts): + try: + health_check = storage_node_healthcheck(node) + if health_check.health_status == "READY": + return + except Exception as err: + logger.warning(f"Node {node} is not ready:\n{err}") + sleep(timeout) + raise AssertionError(f"Node {node} hasn't gone to the READY state after {timeout * attempts} seconds") + + +@reporter.step("Remove nodes from network map trough cli-adm morph command") +def remove_nodes_from_map_morph(shell: Shell, cluster: Cluster, remove_nodes: list[StorageNode], alive_node: Optional[StorageNode] = None): + """ + Move node to the Offline state in the candidates list and tick an epoch to update the netmap + using frostfs-adm + Args: + shell: local shell to make queries about current epoch. 
Remote shell will be used to tick new one + cluster: cluster instance under test + alive_node: node to send requests to (first node in cluster by default) + remove_nodes: list of nodes which would be removed from map + """ + + alive_node = alive_node if alive_node else remove_nodes[0] + remote_shell = alive_node.host.get_shell() + + node_netmap_keys = list(map(StorageNode.get_wallet_public_key, remove_nodes)) + logger.info(f"Nodes netmap keys are: {' '.join(node_netmap_keys)}") + + if FROSTFS_ADM_EXEC and FROSTFS_ADM_CONFIG_PATH: + # If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests) + frostfsadm = FrostfsAdm(shell=remote_shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH) + frostfsadm.morph.remove_nodes(node_netmap_keys) diff --git a/src/frostfs_testlib/steps/payment_neogo.py b/src/frostfs_testlib/steps/payment_neogo.py new file mode 100644 index 00000000..8e78cca4 --- /dev/null +++ b/src/frostfs_testlib/steps/payment_neogo.py @@ -0,0 +1,147 @@ +import base64 +import json +import logging +import re +import time +from typing import Optional + +from neo3.wallet import utils as neo3_utils +from neo3.wallet import wallet as neo3_wallet + +from frostfs_testlib import reporter +from frostfs_testlib.cli import NeoGo +from frostfs_testlib.resources.cli import NEOGO_EXECUTABLE +from frostfs_testlib.resources.common import FROSTFS_CONTRACT, GAS_HASH, MORPH_BLOCK_TIME +from frostfs_testlib.shell import Shell +from frostfs_testlib.storage.dataclasses.frostfs_services import MorphChain +from frostfs_testlib.utils import converting_utils, datetime_utils, wallet_utils + +logger = logging.getLogger("NeoLogger") + +EMPTY_PASSWORD = "" +TX_PERSIST_TIMEOUT = 15 # seconds +ASSET_POWER_SIDECHAIN = 10**12 + + +def get_nns_contract_hash(morph_chain: MorphChain) -> str: + return morph_chain.rpc_client.get_contract_state(1)["hash"] + + +def get_contract_hash(morph_chain: MorphChain, resolve_name: str, shell: Shell) -> str: + nns_contract_hash = get_nns_contract_hash(morph_chain) + neogo = NeoGo(shell=shell, neo_go_exec_path=NEOGO_EXECUTABLE) + out = neogo.contract.testinvokefunction( + scripthash=nns_contract_hash, + method="resolve", + arguments=f"string:{resolve_name} int:16", + rpc_endpoint=morph_chain.get_endpoint(), + ) + stack_data = json.loads(out.stdout.replace("\n", ""))["stack"][0]["value"] + return bytes.decode(base64.b64decode(stack_data[0]["value"])) + + +def transaction_accepted(morph_chain: MorphChain, tx_id: str): + """ + This function returns True in case of accepted TX. + Args: + tx_id(str): transaction ID + Returns: + (bool) + """ + + try: + for _ in range(0, TX_PERSIST_TIMEOUT): + time.sleep(1) + neogo = NeoGo(shell=morph_chain.host.get_shell(), neo_go_exec_path=NEOGO_EXECUTABLE) + resp = neogo.query.tx(tx_hash=tx_id, rpc_endpoint=morph_chain.get_endpoint()) + if resp is not None: + logger.info(f"TX is accepted in block: {resp}") + return True, resp + except Exception as out: + logger.info(f"request failed with error: {out}") + raise out + return False + + +@reporter.step("Get FrostFS Balance") +def get_balance(shell: Shell, morph_chain: MorphChain, wallet_path: str, wallet_password: str = ""): + """ + This function returns FrostFS balance for given wallet. 
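+ If the wallet contains more than one account, the balance of the last one is returned.
+ The raw contract value is divided by ASSET_POWER_SIDECHAIN (10**12).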
+ """ + with open(wallet_path) as wallet_file: + wallet = neo3_wallet.Wallet.from_json(json.load(wallet_file), password=wallet_password) + acc = wallet.accounts[-1] + payload = [{"type": "Hash160", "value": str(acc.script_hash)}] + try: + resp = morph_chain.rpc_client.invoke_function( + get_contract_hash(morph_chain, "balance.frostfs", shell=shell), "balanceOf", payload + ) + logger.info(f"Got response \n{resp}") + value = int(resp["stack"][0]["value"]) + return value / ASSET_POWER_SIDECHAIN + except Exception as out: + logger.error(f"failed to get wallet balance: {out}") + raise out + + +@reporter.step("Transfer Gas") +def transfer_gas( + shell: Shell, + amount: int, + morph_chain: MorphChain, + wallet_from_path: Optional[str] = None, + wallet_from_password: Optional[str] = None, + address_from: Optional[str] = None, + address_to: Optional[str] = None, + wallet_to_path: Optional[str] = None, + wallet_to_password: Optional[str] = None, +): + """ + This function transfer GAS in main chain from mainnet wallet to + the provided wallet. If the wallet contains more than one address, + the assets will be transferred to the last one. + Args: + shell: Shell instance. + wallet_from_password: Password of the wallet; it is required to decode the wallet + and extract its addresses. + wallet_from_path: Path to chain node wallet. + address_from: The address of the wallet to transfer assets from. + wallet_to_path: The path to the wallet to transfer assets to. + wallet_to_password: The password to the wallet to transfer assets to. + address_to: The address of the wallet to transfer assets to. + amount: Amount of gas to transfer. + """ + wallet_from_path = wallet_from_path or morph_chain.get_wallet_path() + wallet_from_password = ( + wallet_from_password if wallet_from_password is not None else morph_chain.get_wallet_password() + ) + address_from = address_from or wallet_utils.get_last_address_from_wallet(wallet_from_path, wallet_from_password) + address_to = address_to or wallet_utils.get_last_address_from_wallet(wallet_to_path, wallet_to_password) + + neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE) + out = neogo.nep17.transfer( + rpc_endpoint=morph_chain.get_endpoint(), + wallet=wallet_from_path, + wallet_password=wallet_from_password, + amount=amount, + from_address=address_from, + to_address=address_to, + token="GAS", + force=True, + ) + txid = out.stdout.strip().split("\n")[-1] + if len(txid) != 64: + raise Exception("Got no TXID after run the command") + if not transaction_accepted(morph_chain, txid): + raise AssertionError(f"TX {txid} hasn't been processed") + time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME)) + + +@reporter.step("Get Sidechain Balance") +def get_sidechain_balance(morph_chain: MorphChain, address: str): + resp = morph_chain.rpc_client.get_nep17_balances(address=address) + logger.info(f"Got getnep17balances response: {resp}") + for balance in resp["balance"]: + if balance["assethash"] == GAS_HASH: + return float(balance["amount"]) / ASSET_POWER_SIDECHAIN + return float(0) diff --git a/src/frostfs_testlib/steps/s3_helper.py b/src/frostfs_testlib/steps/s3_helper.py new file mode 100644 index 00000000..c3092df7 --- /dev/null +++ b/src/frostfs_testlib/steps/s3_helper.py @@ -0,0 +1,209 @@ +import logging +import os +from datetime import datetime, timedelta +from typing import Optional + +from dateutil.parser import parse + +from frostfs_testlib import reporter +from frostfs_testlib.clients.s3 import BucketContainerResolver, S3ClientWrapper, VersioningStatus +from 
frostfs_testlib.shell import Shell +from frostfs_testlib.steps.cli.container import search_nodes_with_container +from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo + +logger = logging.getLogger("NeoLogger") + + +@reporter.step("Expected all objects are presented in the bucket") +def check_objects_in_bucket( + s3_client: S3ClientWrapper, + bucket: str, + expected_objects: list, + unexpected_objects: Optional[list] = None, +) -> None: + unexpected_objects = unexpected_objects or [] + bucket_objects = s3_client.list_objects(bucket) + assert len(bucket_objects) == len(expected_objects), f"Expected {len(expected_objects)} objects in the bucket" + for bucket_object in expected_objects: + assert bucket_object in bucket_objects, f"Expected object {bucket_object} in objects list {bucket_objects}" + + for bucket_object in unexpected_objects: + assert bucket_object not in bucket_objects, f"Expected object {bucket_object} not in objects list {bucket_objects}" + + +@reporter.step("Try to get object and got error") +def try_to_get_objects_and_expect_error(s3_client: S3ClientWrapper, bucket: str, object_keys: list) -> None: + for obj in object_keys: + try: + s3_client.get_object(bucket, obj) + raise AssertionError(f"Object {obj} found in bucket {bucket}") + except Exception as err: + assert "The specified key does not exist" in str(err), f"Expected error in exception {err}" + + +@reporter.step("Set versioning status to '{status}' for bucket '{bucket}'") +def set_bucket_versioning(s3_client: S3ClientWrapper, bucket: str, status: VersioningStatus): + if status == VersioningStatus.UNDEFINED: + return + + s3_client.put_bucket_versioning(bucket, status=status) + bucket_status = s3_client.get_bucket_versioning_status(bucket) + assert bucket_status == status.value, f"Expected {bucket_status} status. 
Got {status.value}" + + +def object_key_from_file_path(full_path: str) -> str: + return os.path.basename(full_path) + + +def assert_tags(actual_tags: list, expected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None) -> None: + expected_tags = [{"Key": key, "Value": value} for key, value in expected_tags] if expected_tags else [] + unexpected_tags = [{"Key": key, "Value": value} for key, value in unexpected_tags] if unexpected_tags else [] + if expected_tags == []: + assert not actual_tags, f"Expected there is no tags, got {actual_tags}" + assert len(expected_tags) == len(actual_tags) + for tag in expected_tags: + assert tag in actual_tags, f"Tag {tag} must be in {actual_tags}" + for tag in unexpected_tags: + assert tag not in actual_tags, f"Tag {tag} should not be in {actual_tags}" + + +@reporter.step("Expected all tags are presented in object") +def check_tags_by_object( + s3_client: S3ClientWrapper, + bucket: str, + key: str, + expected_tags: list, + unexpected_tags: Optional[list] = None, +) -> None: + actual_tags = s3_client.get_object_tagging(bucket, key) + assert_tags(expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags) + + +@reporter.step("Expected all tags are presented in bucket") +def check_tags_by_bucket( + s3_client: S3ClientWrapper, + bucket: str, + expected_tags: list, + unexpected_tags: Optional[list] = None, +) -> None: + actual_tags = s3_client.get_bucket_tagging(bucket) + assert_tags(expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags) + + +def assert_object_lock_mode( + s3_client: S3ClientWrapper, + bucket: str, + file_name: str, + object_lock_mode: str, + retain_until_date: datetime, + legal_hold_status: str = "OFF", + retain_period: Optional[int] = None, +): + object_dict = s3_client.get_object(bucket, file_name, full_output=True) + assert object_dict.get("ObjectLockMode") == object_lock_mode, f"Expected Object Lock Mode is {object_lock_mode}" + assert ( + object_dict.get("ObjectLockLegalHoldStatus") == legal_hold_status + ), f"Expected Object Lock Legal Hold Status is {legal_hold_status}" + object_retain_date = object_dict.get("ObjectLockRetainUntilDate") + retain_date = parse(object_retain_date) if isinstance(object_retain_date, str) else object_retain_date + if retain_until_date: + assert retain_date.strftime("%Y-%m-%dT%H:%M:%S") == retain_until_date.strftime( + "%Y-%m-%dT%H:%M:%S" + ), f'Expected Object Lock Retain Until Date is {str(retain_until_date.strftime("%Y-%m-%dT%H:%M:%S"))}' + elif retain_period: + last_modify_date = object_dict.get("LastModified") + last_modify = parse(last_modify_date) if isinstance(last_modify_date, str) else last_modify_date + assert ( + retain_date - last_modify + timedelta(seconds=1) + ).days == retain_period, f"Expected retention period is {retain_period} days" + + +def _format_grants_as_strings(grants: list[dict]) -> list: + grantee_format = "{g_type}::{uri}:{permission}" + return set( + [ + grantee_format.format( + g_type=grant.get("Grantee", {}).get("Type", ""), + uri=grant.get("Grantee", {}).get("URI", ""), + permission=grant.get("Permission", ""), + ) + for grant in grants + ] + ) + + +@reporter.step("Verify ACL permissions") +def verify_acl_permissions(actual_acl_grants: list[dict], expected_acl_grants: list[dict], strict: bool = True): + actual_grants = _format_grants_as_strings(actual_acl_grants) + expected_grants = _format_grants_as_strings(expected_acl_grants) + + assert expected_grants <= actual_grants, "Permissions mismatch" + if 
strict: + assert expected_grants == actual_grants, "Extra permissions found, must not be there" + + +@reporter.step("Delete bucket with all objects") +def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str): + versioning_status = s3_client.get_bucket_versioning_status(bucket) + if versioning_status == VersioningStatus.ENABLED.value: + # From versioned bucket we should delete all versions and delete markers of all objects + objects_versions = s3_client.list_objects_versions(bucket) + if objects_versions: + s3_client.delete_object_versions_without_dm(bucket, objects_versions) + objects_delete_markers = s3_client.list_delete_markers(bucket) + if objects_delete_markers: + s3_client.delete_object_versions_without_dm(bucket, objects_delete_markers) + + else: + # From non-versioned bucket it's sufficient to delete objects by key + objects = s3_client.list_objects(bucket) + if objects: + s3_client.delete_objects(bucket, objects) + objects_delete_markers = s3_client.list_delete_markers(bucket) + if objects_delete_markers: + s3_client.delete_object_versions_without_dm(bucket, objects_delete_markers) + + # Delete the bucket itself + s3_client.delete_bucket(bucket) + + +@reporter.step("Search nodes bucket") +def search_nodes_with_bucket( + cluster: Cluster, + bucket_name: str, + wallet: WalletInfo, + shell: Shell, + endpoint: str, + bucket_container_resolver: BucketContainerResolver, +) -> list[ClusterNode]: + cid = None + for cluster_node in cluster.cluster_nodes: + cid = bucket_container_resolver.resolve(cluster_node, bucket_name) + if cid: + break + nodes_list = search_nodes_with_container(wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster) + return nodes_list + + +def get_bytes_relative_to_object(value: int | str, object_size: int = None, part_size: int = None) -> int: + if isinstance(value, int): + return value + + if "part" not in value and "object" not in value: + return int(value) + + if object_size is not None: + value = value.replace("object", str(object_size)) + + if part_size is not None: + value = value.replace("part", str(part_size)) + + return int(eval(value)) + + +def get_range_relative_to_object(rng: str, object_size: int = None, part_size: int = None, int_values: bool = False) -> str | int: + start, end = rng.split(":") + start = get_bytes_relative_to_object(start, object_size, part_size) + end = get_bytes_relative_to_object(end, object_size, part_size) + return (start, end) if int_values else f"bytes {start}-{end}/*" diff --git a/src/frostfs_testlib/steps/session_token.py b/src/frostfs_testlib/steps/session_token.py new file mode 100644 index 00000000..67c556d5 --- /dev/null +++ b/src/frostfs_testlib/steps/session_token.py @@ -0,0 +1,274 @@ +import base64 +import json +import logging +import os +import uuid +from dataclasses import dataclass +from typing import Any, Optional + +from frostfs_testlib import reporter +from frostfs_testlib.cli import FrostfsCli +from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC +from frostfs_testlib.resources.common import ASSETS_DIR +from frostfs_testlib.shell import Shell +from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo +from frostfs_testlib.testing.readable import HumanReadableEnum +from frostfs_testlib.utils import json_utils, wallet_utils + +logger = logging.getLogger("NeoLogger") + +UNRELATED_KEY = "unrelated key in the session" +UNRELATED_OBJECT = "unrelated object in the session" 
+UNRELATED_CONTAINER = "unrelated container in the session" +WRONG_VERB = "wrong verb of the session" +INVALID_SIGNATURE = "invalid signature of the session data" + + +class ObjectVerb(HumanReadableEnum): + PUT = "PUT" + DELETE = "DELETE" + GET = "GET" + RANGEHASH = "RANGEHASH" + RANGE = "RANGE" + HEAD = "HEAD" + SEARCH = "SEARCH" + + +class ContainerVerb(HumanReadableEnum): + CREATE = "PUT" + DELETE = "DELETE" + SETEACL = "SETEACL" + + +@dataclass +class Lifetime: + exp: int = 100000000 + nbf: int = 0 + iat: int = 0 + + +@reporter.step("Generate Session Token") +def generate_session_token( + owner_wallet: WalletInfo, + session_wallet: WalletInfo, + session: dict[str, dict[str, Any]], + tokens_dir: str, + lifetime: Optional[Lifetime] = None, +) -> str: + """ + This function generates session token and writes it to the file. + Args: + owner_wallet: wallet of container owner + session_wallet: wallet to which we grant the access via session token + session: Contains allowed operation with parameters + tokens_dir: Dir for token + lifetime: lifetime options for session + Returns: + The path to the generated session token file + """ + + file_path = os.path.join(tokens_dir, str(uuid.uuid4())) + + pub_key_64 = wallet_utils.get_wallet_public_key(session_wallet.path, session_wallet.password, "base64") + + lifetime = lifetime or Lifetime() + + session_token = { + "body": { + "id": f"{base64.b64encode(uuid.uuid4().bytes).decode('utf-8')}", + "ownerID": {"value": f"{json_utils.encode_for_json(owner_wallet.get_address())}"}, + "lifetime": { + "exp": f"{lifetime.exp}", + "nbf": f"{lifetime.nbf}", + "iat": f"{lifetime.iat}", + }, + "sessionKey": pub_key_64, + } + } + session_token["body"].update(session) + + logger.info(f"Got this Session Token: {session_token}") + with open(file_path, "w", encoding="utf-8") as session_token_file: + json.dump(session_token, session_token_file, ensure_ascii=False, indent=4) + + return file_path + + +@reporter.step("Generate Session Token For Container") +def generate_container_session_token( + owner_wallet: WalletInfo, + session_wallet: WalletInfo, + verb: ContainerVerb, + tokens_dir: str, + lifetime: Optional[Lifetime] = None, + cid: Optional[str] = None, +) -> str: + """ + This function generates session token for ContainerSessionContext + and writes it to the file. It is able to prepare session token file + for a specific container () or for every container (adds + "wildcard" field). + Args: + owner_wallet: wallet of container owner. + session_wallet: wallet to which we grant the access via session token. + verb: verb to grant access to. + lifetime: lifetime options for session. + cid: container ID of the container + Returns: + The path to the generated session token file + """ + session = { + "container": { + "verb": verb.value, + "wildcard": cid is None, + **({"containerID": {"value": f"{json_utils.encode_for_json(cid)}"}} if cid is not None else {}), + }, + } + + return generate_session_token( + owner_wallet=owner_wallet, + session_wallet=session_wallet, + session=session, + tokens_dir=tokens_dir, + lifetime=lifetime, + ) + + +@reporter.step("Generate Session Token For Object") +def generate_object_session_token( + owner_wallet: WalletInfo, + session_wallet: WalletInfo, + oids: list[str], + cid: str, + verb: ObjectVerb, + tokens_dir: str, + lifetime: Optional[Lifetime] = None, +) -> str: + """ + This function generates session token for ObjectSessionContext + and writes it to the file. 
+ Args: + owner_wallet: wallet of container owner + session_wallet: wallet to which we grant the access via session token + cid: container ID of the container + oids: list of objectIDs to put into session + verb: verb to grant access to; Valid verbs are: ObjectVerb. + lifetime: lifetime options for session + Returns: + The path to the generated session token file + """ + session = { + "object": { + "verb": verb.value, + "target": { + "container": {"value": json_utils.encode_for_json(cid)}, + "objects": [{"value": json_utils.encode_for_json(oid)} for oid in oids], + }, + }, + } + + return generate_session_token( + owner_wallet=owner_wallet, + session_wallet=session_wallet, + session=session, + tokens_dir=tokens_dir, + lifetime=lifetime, + ) + + +@reporter.step("Get signed token for container session") +def get_container_signed_token( + owner_wallet: WalletInfo, + user_wallet: WalletInfo, + verb: ContainerVerb, + shell: Shell, + tokens_dir: str, + lifetime: Optional[Lifetime] = None, +) -> str: + """ + Returns signed token file path for static container session + """ + session_token_file = generate_container_session_token( + owner_wallet=owner_wallet, + session_wallet=user_wallet, + verb=verb, + tokens_dir=tokens_dir, + lifetime=lifetime, + ) + return sign_session_token(shell, session_token_file, owner_wallet) + + +@reporter.step("Get signed token for object session") +def get_object_signed_token( + owner_wallet: WalletInfo, + user_wallet: WalletInfo, + cid: str, + storage_objects: list[StorageObjectInfo], + verb: ObjectVerb, + shell: Shell, + tokens_dir: str, + lifetime: Optional[Lifetime] = None, +) -> str: + """ + Returns signed token file path for static object session + """ + storage_object_ids = [storage_object.oid for storage_object in storage_objects] + session_token_file = generate_object_session_token( + owner_wallet=owner_wallet, + session_wallet=user_wallet, + oids=storage_object_ids, + cid=cid, + verb=verb, + tokens_dir=tokens_dir, + lifetime=lifetime, + ) + return sign_session_token(shell, session_token_file, owner_wallet) + + +@reporter.step("Create Session Token") +def create_session_token( + shell: Shell, + owner: str, + wallet: WalletInfo, + rpc_endpoint: str, +) -> str: + """ + Create session token for an object. + Args: + shell: Shell instance. + owner: User that writes the token. + wallet_path: The path to wallet to which we grant the access via session token. + wallet_password: Wallet password. + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + Returns: + The path to the generated session token file. + """ + session_token = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) + frostfscli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + frostfscli.session.create( + rpc_endpoint=rpc_endpoint, + address=owner, + out=session_token, + wallet=wallet.path, + ) + return session_token + + +@reporter.step("Sign Session Token") +def sign_session_token(shell: Shell, session_token_file: str, wallet: WalletInfo) -> str: + """ + This function signs the session token by the given wallet. + + Args: + shell: Shell instance. + session_token_file: The path to the session token file. + wlt: The path to the signing wallet. + + Returns: + The path to the signed token. 
+ """ + signed_token_file = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) + frostfscli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + frostfscli.util.sign_session_token(session_token_file, signed_token_file) + return signed_token_file diff --git a/src/frostfs_testlib/steps/storage_object.py b/src/frostfs_testlib/steps/storage_object.py new file mode 100644 index 00000000..4b4b2a6e --- /dev/null +++ b/src/frostfs_testlib/steps/storage_object.py @@ -0,0 +1,60 @@ +import logging +from time import sleep + +import pytest + +from frostfs_testlib import reporter +from frostfs_testlib.resources.error_patterns import OBJECT_ALREADY_REMOVED +from frostfs_testlib.shell import Shell +from frostfs_testlib.steps.cli.object import delete_object, get_object +from frostfs_testlib.steps.epoch import tick_epoch +from frostfs_testlib.steps.tombstone import verify_head_tombstone +from frostfs_testlib.storage.cluster import Cluster +from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo + +logger = logging.getLogger("NeoLogger") + +CLEANUP_TIMEOUT = 10 + + +@reporter.step("Delete Objects") +def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell, cluster: Cluster) -> None: + """ + Deletes given storage objects. + + Args: + storage_objects: list of objects to delete + shell: executor for cli command + """ + + with reporter.step("Delete objects"): + for storage_object in storage_objects: + storage_object.tombstone = delete_object( + storage_object.wallet, + storage_object.cid, + storage_object.oid, + shell=shell, + endpoint=cluster.default_rpc_endpoint, + ) + verify_head_tombstone( + wallet=storage_object.wallet, + cid=storage_object.cid, + oid_ts=storage_object.tombstone, + oid=storage_object.oid, + shell=shell, + endpoint=cluster.default_rpc_endpoint, + ) + + tick_epoch(shell, cluster) + sleep(CLEANUP_TIMEOUT) + + with reporter.step("Get objects and check errors"): + for storage_object in storage_objects: + with pytest.raises(Exception, match=OBJECT_ALREADY_REMOVED): + get_object( + storage_object.wallet, + storage_object.cid, + storage_object.oid, + shell=shell, + endpoint=cluster.default_rpc_endpoint, + ) diff --git a/src/frostfs_testlib/steps/storage_policy.py b/src/frostfs_testlib/steps/storage_policy.py new file mode 100644 index 00000000..acc113f3 --- /dev/null +++ b/src/frostfs_testlib/steps/storage_policy.py @@ -0,0 +1,158 @@ +#!/usr/bin/python3 + +""" + This module contains keywords which are used for asserting + that storage policies are respected. +""" +import logging + +from frostfs_testlib import reporter +from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND +from frostfs_testlib.shell import Shell +from frostfs_testlib.steps.cli.object import head_object +from frostfs_testlib.steps.complex_object_actions import get_last_object +from frostfs_testlib.storage.cluster import StorageNode +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo +from frostfs_testlib.utils import string_utils + +logger = logging.getLogger("NeoLogger") + + +# TODO: Unused, remove or make use of +@reporter.step("Get Object Copies") +def get_object_copies(complexity: str, wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int: + """ + The function performs requests to all nodes of the container and + finds out if they store a copy of the object. The procedure is + different for simple and complex object, so the function requires + a sign of object complexity. 
+ Args: + complexity (str): the tag of object size and complexity, + [Simple|Complex] + wallet (str): the path to the wallet on whose behalf the + copies are got + cid (str): ID of the container + oid (str): ID of the Object + shell: executor for cli command + Returns: + (int): the number of object copies in the container + """ + return ( + get_simple_object_copies(wallet, cid, oid, shell, nodes) + if complexity == "Simple" + else get_complex_object_copies(wallet, cid, oid, shell, nodes) + ) + + +@reporter.step("Get Simple Object Copies") +def get_simple_object_copies(wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int: + """ + To figure out the number of a simple object copies, only direct + HEAD requests should be made to the every node of the container. + We consider non-empty HEAD response as a stored object copy. + Args: + wallet (str): the path to the wallet on whose behalf the + copies are got + cid (str): ID of the container + oid (str): ID of the Object + shell: executor for cli command + nodes: nodes to search on + Returns: + (int): the number of object copies in the container + """ + copies = 0 + for node in nodes: + try: + response = head_object(wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True) + if response: + logger.info(f"Found object {oid} on node {node}") + copies += 1 + except Exception: + logger.info(f"No {oid} object copy found on {node}, continue") + continue + return copies + + +@reporter.step("Get Complex Object Copies") +def get_complex_object_copies(wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int: + """ + To figure out the number of a complex object copies, we firstly + need to retrieve its Last object. We consider that the number of + complex object copies is equal to the number of its last object + copies. When we have the Last object ID, the task is reduced + to getting simple object copies. + Args: + wallet (str): the path to the wallet on whose behalf the + copies are got + cid (str): ID of the container + oid (str): ID of the Object + shell: executor for cli command + Returns: + (int): the number of object copies in the container + """ + last_oid = get_last_object(wallet, cid, oid, shell, nodes) + assert last_oid, f"No Last Object for {cid}/{oid} found among all Storage Nodes" + return get_simple_object_copies(wallet, cid, last_oid, shell, nodes) + + +@reporter.step("Get Nodes With Object") +def get_nodes_with_object(cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> list[StorageNode]: + """ + The function returns list of nodes which store + the given object. + Args: + cid (str): ID of the container which store the object + oid (str): object ID + shell: executor for cli command + nodes: nodes to find on + Returns: + (list): nodes which store the object + """ + + nodes_list = [] + for node in nodes: + wallet = WalletInfo.from_node(node) + try: + res = head_object( + wallet, + cid, + oid, + shell=shell, + endpoint=node.get_rpc_endpoint(), + is_direct=True, + ) + if res is not None: + logger.info(f"Found object {oid} on node {node}") + nodes_list.append(node) + except Exception: + logger.info(f"No {oid} object copy found on {node}, continue") + continue + return nodes_list + + +@reporter.step("Get Nodes Without Object") +def get_nodes_without_object(wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> list[StorageNode]: + """ + The function returns list of nodes which do not store + the given object. 
+ Args: + wallet (str): the path to the wallet on whose behalf + we request the nodes + cid (str): ID of the container which store the object + oid (str): object ID + shell: executor for cli command + Returns: + (list): nodes which do not store the object + """ + nodes_list = [] + for node in nodes: + try: + res = head_object(wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True) + if res is None: + nodes_list.append(node) + except Exception as err: + if string_utils.is_str_match_pattern(err, OBJECT_NOT_FOUND): + nodes_list.append(node) + else: + raise Exception(f"Got error {err} on head object command") from err + return nodes_list diff --git a/src/frostfs_testlib/steps/tombstone.py b/src/frostfs_testlib/steps/tombstone.py new file mode 100644 index 00000000..27f75d5c --- /dev/null +++ b/src/frostfs_testlib/steps/tombstone.py @@ -0,0 +1,24 @@ +import logging + +from frostfs_testlib import reporter +from frostfs_testlib.shell import Shell +from frostfs_testlib.steps.cli.object import head_object +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo + +logger = logging.getLogger("NeoLogger") + + +@reporter.step("Verify Head Tombstone") +def verify_head_tombstone(wallet: WalletInfo, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str): + header = head_object(wallet, cid, oid_ts, shell=shell, endpoint=endpoint)["header"] + + s_oid = header["sessionToken"]["body"]["object"]["target"]["objects"] + logger.info(f"Header Session OIDs is {s_oid}") + logger.info(f"OID is {oid}") + + assert header["containerID"] == cid, "Tombstone Header CID is wrong" + assert header["ownerID"] == wallet.get_address_from_json(0), "Tombstone Owner ID is wrong" + assert header["objectType"] == "TOMBSTONE", "Header Type isn't Tombstone" + assert header["sessionToken"]["body"]["object"]["verb"] == "DELETE", "Header Session Type isn't DELETE" + assert header["sessionToken"]["body"]["object"]["target"]["container"] == cid, "Header Session ID is wrong" + assert oid in header["sessionToken"]["body"]["object"]["target"]["objects"], "Header Session OID is wrong" diff --git a/src/frostfs_testlib/storage/__init__.py b/src/frostfs_testlib/storage/__init__.py new file mode 100644 index 00000000..cbbef843 --- /dev/null +++ b/src/frostfs_testlib/storage/__init__.py @@ -0,0 +1,15 @@ +from frostfs_testlib.storage.service_registry import ServiceRegistry + +__class_registry = ServiceRegistry() + + +def get_service_registry() -> ServiceRegistry: + """Returns registry with registered classes related to cluster and cluster nodes. + + ServiceClassRegistry is a singleton instance that can be configured with multiple classes that + represents service on the cluster physical node. + + Returns: + Singleton ServiceClassRegistry instance. 
+ """ + return __class_registry diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py new file mode 100644 index 00000000..b67e34d1 --- /dev/null +++ b/src/frostfs_testlib/storage/cluster.py @@ -0,0 +1,329 @@ +import random +import re + +import yaml +from yarl import URL + +from frostfs_testlib import reporter +from frostfs_testlib.hosting import Host, Hosting +from frostfs_testlib.hosting.config import ServiceConfig +from frostfs_testlib.storage import get_service_registry +from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml +from frostfs_testlib.storage.constants import ConfigAttributes +from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode +from frostfs_testlib.storage.dataclasses.metrics import Metrics +from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass +from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces +from frostfs_testlib.storage.service_registry import ServiceRegistry + + +class ClusterNode: + """ + Represents physical node where multiple different services may be located + """ + + class_registry: ServiceRegistry + id: int + host: Host + metrics: Metrics + + def __init__(self, host: Host, id: int) -> None: + self.host = host + self.id = id + self.class_registry = get_service_registry() + self.metrics = Metrics(host=self.host, metrics_endpoint=self.storage_node.get_metrics_endpoint()) + + @property + def host_ip(self): + return self.host.config.address + + def __eq__(self, other): + return self.host.config.address == other.host.config.address + + def __hash__(self): + return id(self.host.config.address) + + def __str__(self): + return self.host.config.address + + def __repr__(self) -> str: + return self.host.config.address + + # for backward compatibility and to not touch other codebase too much + @property + def storage_node(self) -> StorageNode: + return self.service(StorageNode) + + # for backward compatibility and to not touch other codebase too much + @property + def ir_node(self) -> InnerRing: + return self.service(InnerRing) + + # for backward compatibility and to not touch other codebase too much + @property + def morph_chain(self) -> MorphChain: + return self.service(MorphChain) + + # for backward compatibility and to not touch other codebase too much + @property + def http_gate(self) -> HTTPGate: + return self.service(HTTPGate) + + # for backward compatibility and to not touch other codebase too much + @property + def s3_gate(self) -> S3Gate: + return self.service(S3Gate) + + # TODO: Deprecated. Use config with ServiceConfigurationYml interface + def get_config(self, config_file_path: str) -> dict: + shell = self.host.get_shell() + + result = shell.exec(f"cat {config_file_path}") + config_text = result.stdout + + config = yaml.safe_load(config_text) + return config + + # TODO: Deprecated. Use config with ServiceConfigurationYml interface + def save_config(self, new_config: dict, config_file_path: str) -> None: + shell = self.host.get_shell() + + config_str = yaml.dump(new_config) + shell.exec(f"echo '{config_str}' | sudo tee {config_file_path}") + + def config(self, service_type: type[ServiceClass]) -> ServiceConfigurationYml: + return self.service(service_type).config + + def service(self, service_type: type[ServiceClass]) -> ServiceClass: + """ + Get a service cluster node of specified type. 
+ + Args: + service_type: type of the service which should be returned, + for frostfs it can be StorageNode, S3Gate, HttpGate, MorphChain and InnerRing. + + Returns: + service of service_type class. + """ + + service_entry = self.class_registry.get_entry(service_type) + service_name = service_entry["hosting_service_name"] + + pattern = f"{service_name}_{self.id:02}" + config = self.host.get_service_config(pattern) + + return service_type( + self.id, + config.name, + self.host, + ) + + @property + def services(self) -> list[NodeBase]: + svcs: list[NodeBase] = [] + svcs_names_on_node = [svc.name for svc in self.host.config.services] + for entry in self.class_registry._class_mapping.values(): + hosting_svc_name = entry["hosting_service_name"] + pattern = f"{hosting_svc_name}_{self.id:02}" + if pattern in svcs_names_on_node: + config = self.host.get_service_config(pattern) + svcs.append( + entry["cls"]( + self.id, + config.name, + self.host, + ) + ) + + return svcs + + def get_all_interfaces(self) -> dict[str, str]: + return self.host.config.interfaces + + def get_interface(self, interface: Interfaces) -> str: + return self.host.config.interfaces[interface.value] + + def get_data_interfaces(self) -> list[str]: + return [ip_address for name_interface, ip_address in self.host.config.interfaces.items() if "data" in name_interface] + + def get_data_interface(self, search_interface: str) -> list[str]: + return [self.host.config.interfaces[interface] for interface in self.host.config.interfaces.keys() if search_interface == interface] + + def get_internal_interfaces(self) -> list[str]: + return [ip_address for name_interface, ip_address in self.host.config.interfaces.items() if "internal" in name_interface] + + def get_internal_interface(self, search_internal: str) -> list[str]: + return [self.host.config.interfaces[interface] for interface in self.host.config.interfaces.keys() if search_internal == interface] + + +class Cluster: + """ + This class represents a Cluster object for the whole storage based on provided hosting + """ + + default_rpc_endpoint: str + default_s3_gate_endpoint: str + default_http_gate_endpoint: str + + def __init__(self, hosting: Hosting) -> None: + self._hosting = hosting + + self.class_registry = get_service_registry() + self.default_rpc_endpoint = self.services(StorageNode)[0].get_rpc_endpoint() + self.default_s3_gate_endpoint = self.services(S3Gate)[0].get_endpoint() + self.default_http_gate_endpoint = self.services(HTTPGate)[0].get_endpoint() + + @property + def hosts(self) -> list[Host]: + """ + Returns list of Hosts + """ + return self._hosting.hosts + + # for backward compatibility and to not touch other codebase too much + @property + def storage_nodes(self) -> list[StorageNode]: + return self.services(StorageNode) + + # for backward compatibility and to not touch other codebase too much + @property + def ir_nodes(self) -> list[InnerRing]: + return self.services(InnerRing) + + # for backward compatibility and to not touch other codebase too much + @property + def s3_gates(self) -> list[S3Gate]: + return self.services(S3Gate) + + @property + def http_gates(self) -> list[HTTPGate]: + return self.services(HTTPGate) + + @property + def morph_chain(self) -> list[MorphChain]: + return self.services(MorphChain) + + def nodes(self, services: list[ServiceClass]) -> list[ClusterNode]: + """ + Resolve which cluster nodes hosting the specified services. + + Args: + services: list of services to resolve hosting cluster nodes. 
+
+        Returns:
+            list of cluster nodes which host specified services.
+        """
+
+        cluster_nodes = set()
+        for service in services:
+            cluster_nodes.update([node for node in self.cluster_nodes if node.service(type(service)) == service])
+
+        return list(cluster_nodes)
+
+    def node(self, service: ServiceClass) -> ClusterNode:
+        """
+        Resolve single cluster node hosting the specified service.
+
+        Args:
+            service: service instance to resolve the hosting cluster node for.
+
+        Returns:
+            cluster node which hosts the specified service.
+        """
+
+        nodes = [node for node in self.cluster_nodes if node.service(type(service)) == service]
+        if not nodes:
+            raise RuntimeError(f"Cannot find service {service} on any node")
+
+        return nodes[0]
+
+    def services(self, service_type: type[ServiceClass]) -> list[ServiceClass]:
+        """
+        Get all services in a cluster of specified type.
+
+        Args:
+            service_type: type of the services which should be returned,
+            for frostfs it can be StorageNode, S3Gate, HttpGate, MorphChain and InnerRing.
+
+        Returns:
+            list of services of service_type class.
+        """
+
+        service = self.class_registry.get_entry(service_type)
+        service_name = service["hosting_service_name"]
+        cls: type[NodeBase] = service["cls"]
+
+        pattern = rf"{service_name}_\d*$"
+        configs = self.hosting.find_service_configs(pattern)
+
+        found_nodes = []
+        for config in configs:
+            # config.name is something like s3-gate01. Cut last digits to know service type
+            service_type = re.findall(r"(.*)_\d+", config.name)[0]
+            # exclude unsupported services
+            if service_type != service_name:
+                continue
+
+            found_nodes.append(
+                cls(
+                    self._get_id(config.name),
+                    config.name,
+                    self.hosting.get_host_by_service(config.name),
+                )
+            )
+        return found_nodes
+
+    @property
+    def cluster_nodes(self) -> list[ClusterNode]:
+        """
+        Returns list of Cluster Nodes
+        """
+
+        return [ClusterNode(host, id) for id, host in enumerate(self.hosts, start=1)]
+
+    @property
+    def hosting(self) -> Hosting:
+        return self._hosting
+
+    def _create_wallet_config(self, service: ServiceConfig) -> None:
+        wallet_path = service.attributes[ConfigAttributes.LOCAL_WALLET_CONFIG]
+        wallet_password = service.attributes[ConfigAttributes.WALLET_PASSWORD]
+        with open(wallet_path, "w") as file:
+            yaml.dump({"password": wallet_password}, file)
+
+    def create_wallet_configs(self, hosting: Hosting) -> None:
+        configs = hosting.find_service_configs(".*")
+        for config in configs:
+            if ConfigAttributes.LOCAL_WALLET_CONFIG in config.attributes:
+                self._create_wallet_config(config)
+
+    def is_local_devenv(self) -> bool:
+        if len(self.hosting.hosts) == 1:
+            host = self.hosting.hosts[0]
+            if host.config.address == "localhost" and host.config.plugin_name == "docker":
+                return True
+        return False
+
+    def _get_id(self, node_name) -> int:
+        pattern = r"\d*$"
+
+        matches = re.search(pattern, node_name)
+        if not matches:
+            raise RuntimeError(f"Can't parse Id of the node {node_name}")
+        return int(matches.group())
+
+    def get_random_storage_rpc_endpoint(self) -> str:
+        return random.choice(self.get_storage_rpc_endpoints())
+
+    def get_storage_rpc_endpoints(self) -> list[str]:
+        nodes: list[StorageNode] = self.services(StorageNode)
+        return [node.get_rpc_endpoint() for node in nodes]
+
+    def get_morph_endpoints(self) -> list[str]:
+        nodes: list[MorphChain] = self.services(MorphChain)
+        return [node.get_endpoint() for node in nodes]
+
+    def get_nodes_by_ip(self, ips: list[str]) -> list[ClusterNode]:
+        cluster_nodes = [node for node in self.cluster_nodes if 
URL(node.morph_chain.get_endpoint()).host in ips] + with reporter.step(f"Return cluster nodes - {cluster_nodes}"): + return cluster_nodes diff --git a/src/frostfs_testlib/storage/configuration/interfaces.py b/src/frostfs_testlib/storage/configuration/interfaces.py new file mode 100644 index 00000000..b2bc6833 --- /dev/null +++ b/src/frostfs_testlib/storage/configuration/interfaces.py @@ -0,0 +1,65 @@ +from abc import ABC, abstractmethod +from typing import Any + + +class ServiceConfigurationYml(ABC): + """ + Class to manipulate yml configuration for service + """ + + def _find_option(self, key: str, data: dict): + tree = key.split(":") + current = data + for node in tree: + if isinstance(current, list) and len(current) - 1 >= int(node): + current = current[int(node)] + continue + + if node not in current: + return None + + current = current[node] + + return current + + def _set_option(self, key: str, value: Any, data: dict): + tree = key.split(":") + current = data + for node in tree[:-1]: + if isinstance(current, list) and len(current) - 1 >= int(node): + current = current[int(node)] + continue + + if node not in current: + current[node] = {} + + current = current[node] + + current[tree[-1]] = value + + @abstractmethod + def get(self, key: str) -> str: + """ + Get parameter value from current configuration + + Args: + key: key of the parameter in yaml format like 'storage:shard:default:resync_metabase' + + Returns: + value of the parameter + """ + + @abstractmethod + def set(self, values: dict[str, Any]): + """ + Sets parameters to configuration + + Args: + values: dict where key is the key of the parameter in yaml format like 'storage:shard:default:resync_metabase' and value is the value of the option to set + """ + + @abstractmethod + def revert(self): + """ + Revert changes + """ diff --git a/src/frostfs_testlib/storage/configuration/service_configuration.py b/src/frostfs_testlib/storage/configuration/service_configuration.py new file mode 100644 index 00000000..fddd64a0 --- /dev/null +++ b/src/frostfs_testlib/storage/configuration/service_configuration.py @@ -0,0 +1,88 @@ +import os +import re +from typing import Any + +import yaml + +from frostfs_testlib import reporter +from frostfs_testlib.shell.interfaces import CommandOptions, Shell +from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml + + +def extend_dict(extend_me: dict, extend_by: dict): + if isinstance(extend_by, dict): + for k, v in extend_by.items(): + if k in extend_me: + extend_dict(extend_me.get(k), v) + else: + extend_me[k] = v + else: + extend_me += extend_by + + +class ServiceConfiguration(ServiceConfigurationYml): + def __init__(self, service_name: str, shell: Shell, config_dir: str, main_config_path: str) -> None: + self.service_name = service_name + self.shell = shell + self.main_config_path = main_config_path + self.confd_path = os.path.join(config_dir, "conf.d") + self.custom_file = os.path.join(self.confd_path, "99_changes.yml") + + def _path_exists(self, path: str) -> bool: + return not self.shell.exec(f"test -e {path}", options=CommandOptions(check=False)).return_code + + def _get_config_files(self): + config_files = [self.main_config_path] + + if self._path_exists(self.confd_path): + files = self.shell.exec(f"find {self.confd_path} -type f").stdout.strip().split() + # Sorting files in backwards order from latest to first one + config_files.extend(sorted(files, key=lambda x: -int(re.findall("^\d+", os.path.basename(x))[0]))) + + return config_files + + def 
_get_configuration(self, config_files: list[str]) -> dict: + if not config_files: + return [{}] + + splitter = "+++++" + files_str = " ".join(config_files) + all_content = self.shell.exec( + f"echo Getting config files; for file in {files_str}; do (echo {splitter}; sudo cat ${{file}}); done" + ).stdout + files_content = all_content.split("+++++")[1:] + files_data = [yaml.safe_load(file_content) for file_content in files_content] + + mergedData = {} + for data in files_data: + extend_dict(mergedData, data) + + return mergedData + + def get(self, key: str) -> str | Any: + with reporter.step(f"Get {key} configuration value for {self.service_name}"): + config_files = self._get_config_files() + configuration = self._get_configuration(config_files) + result = self._find_option(key, configuration) + return result + + def set(self, values: dict[str, Any]): + with reporter.step(f"Change configuration for {self.service_name}"): + if not self._path_exists(self.confd_path): + self.shell.exec(f"mkdir {self.confd_path}") + + if self._path_exists(self.custom_file): + data = self._get_configuration([self.custom_file]) + else: + data = {} + + for key, value in values.items(): + self._set_option(key, value, data) + + content = yaml.dump(data) + self.shell.exec(f"echo '{content}' | sudo tee {self.custom_file}") + self.shell.exec(f"chmod 777 {self.custom_file}") + + def revert(self): + with reporter.step(f"Revert changed options for {self.service_name}"): + self.shell.exec(f"rm -rf {self.custom_file}") diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py new file mode 100644 index 00000000..2e492083 --- /dev/null +++ b/src/frostfs_testlib/storage/constants.py @@ -0,0 +1,30 @@ +class ConfigAttributes: + SERVICE_NAME = "systemd_service_name" + WALLET_PASSWORD = "wallet_password" + WALLET_PATH = "wallet_path" + WALLET_CONFIG = "wallet_config" + CONFIG_DIR = "service_config_dir" + CONFIG_PATH = "config_path" + WORKING_DIR = "working_dir" + SHARD_CONFIG_PATH = "shard_config_path" + LOGGER_CONFIG_PATH = "logger_config_path" + LOCAL_WALLET_PATH = "local_wallet_path" + LOCAL_WALLET_CONFIG = "local_wallet_config_path" + REMOTE_WALLET_CONFIG = "remote_wallet_config_path" + ENDPOINT_DATA_0 = "endpoint_data0" + ENDPOINT_DATA_1 = "endpoint_data1" + ENDPOINT_DATA_0_NS = "endpoint_data0_namespace" + ENDPOINT_INTERNAL = "endpoint_internal0" + ENDPOINT_PROMETHEUS = "endpoint_prometheus" + ENDPOINT_PPROF = "endpoint_pprof" + CONTROL_ENDPOINT = "control_endpoint" + UN_LOCODE = "un_locode" + + +class PlacementRule: + DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X" + SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X" + REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X" + REP_1_FOR_2_NODES_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 2 FROM * AS X" + DEFAULT_EC_PLACEMENT_RULE = "EC 3.1" + EC_1_1_FOR_2_NODES_PLACEMENT_RULE = "EC 1.1 IN X CBF 1 SELECT 2 FROM * AS X" diff --git a/src/frostfs_testlib/storage/controllers/__init__.py b/src/frostfs_testlib/storage/controllers/__init__.py new file mode 100644 index 00000000..65268f45 --- /dev/null +++ b/src/frostfs_testlib/storage/controllers/__init__.py @@ -0,0 +1,4 @@ +from frostfs_testlib.storage.controllers.background_load_controller import BackgroundLoadController +from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController +from frostfs_testlib.storage.controllers.disk_controller import DiskController, DiskInfo +from 
frostfs_testlib.storage.controllers.shards_watcher import ShardsWatcher diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py new file mode 100644 index 00000000..56282825 --- /dev/null +++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py @@ -0,0 +1,225 @@ +import copy +from datetime import datetime + +import frostfs_testlib.resources.optionals as optionals +from frostfs_testlib import reporter +from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner +from frostfs_testlib.load.load_config import EndpointSelectionStrategy, LoadParams, LoadScenario, LoadType +from frostfs_testlib.load.load_report import LoadReport +from frostfs_testlib.load.load_verifiers import LoadVerifier +from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode +from frostfs_testlib.testing.parallel import parallel +from frostfs_testlib.testing.test_control import run_optionally + + +class BackgroundLoadController: + k6_dir: str + load_params: LoadParams + original_load_params: LoadParams + verification_params: LoadParams + cluster_nodes: list[ClusterNode] + nodes_under_load: list[ClusterNode] + load_counter: int + load_summaries: dict + endpoints: list[str] + runner: ScenarioRunner + started: bool + load_reporters: list[LoadReport] + + def __init__( + self, + k6_dir: str, + load_params: LoadParams, + cluster_nodes: list[ClusterNode], + nodes_under_load: list[ClusterNode], + runner: ScenarioRunner, + ) -> None: + self.k6_dir = k6_dir + self.original_load_params = load_params + self.load_params = copy.deepcopy(self.original_load_params) + self.cluster_nodes = cluster_nodes + self.nodes_under_load = nodes_under_load + self.load_counter = 1 + self.runner = runner + self.started = False + self.load_reporters = [] + if load_params.endpoint_selection_strategy is None: + raise RuntimeError("endpoint_selection_strategy should not be None") + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED, []) + def _get_endpoints(self, load_type: LoadType, endpoint_selection_strategy: EndpointSelectionStrategy): + all_endpoints = { + LoadType.gRPC: { + EndpointSelectionStrategy.ALL: list( + set( + endpoint + for node_under_load in self.nodes_under_load + for endpoint in node_under_load.service(StorageNode).get_all_rpc_endpoint() + ) + ), + EndpointSelectionStrategy.FIRST: list( + set(node_under_load.service(StorageNode).get_rpc_endpoint() for node_under_load in self.nodes_under_load) + ), + }, + # for some reason xk6 appends http protocol on its own + LoadType.S3: { + EndpointSelectionStrategy.ALL: list( + set( + endpoint + for node_under_load in self.nodes_under_load + for endpoint in node_under_load.service(S3Gate).get_all_endpoints() + ) + ), + EndpointSelectionStrategy.FIRST: list( + set(node_under_load.service(S3Gate).get_endpoint() for node_under_load in self.nodes_under_load) + ), + }, + } + + return all_endpoints[load_type][endpoint_selection_strategy] + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + @reporter.step("Init k6 instances") + def init_k6(self): + self.endpoints = self._get_endpoints(self.load_params.load_type, self.load_params.endpoint_selection_strategy) + self.runner.init_k6_instances(self.load_params, self.endpoints, self.k6_dir) + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + @reporter.step("Prepare load instances") + def prepare(self): + 
self.runner.prepare(self.load_params, self.cluster_nodes, self.nodes_under_load, self.k6_dir) + self.init_k6() + + def append_reporter(self, load_report: LoadReport): + self.load_reporters.append(load_report) + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + def start(self): + with reporter.step(f"Start load on nodes {self.nodes_under_load}"): + self.runner.start() + self.started = True + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + @reporter.step("Stop load") + def stop(self): + self.runner.stop() + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED, True) + def is_running(self) -> bool: + return self.runner.is_running + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + @reporter.step("Reset load") + def _reset_for_consequent_load(self): + """This method is required if we want to run multiple loads during test run. + Raise load counter by 1 and append it to load_id + """ + self.load_counter += 1 + self.load_params = copy.deepcopy(self.original_load_params) + self.load_params.set_id(f"{self.load_params.load_id}_{self.load_counter}") + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + @reporter.step("Startup load") + def startup(self): + self.prepare() + self.preset() + self.start() + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + def preset(self): + self.runner.preset() + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + @reporter.step("Stop and get results of load") + def teardown(self): + if not self.started: + return + + self.stop() + self.load_summaries = self._get_results() + self.started = False + + start_time = min(self._get_start_times()) + end_time = max(self._get_end_times()) + + for load_report in self.load_reporters: + load_report.set_start_time(start_time) + load_report.set_end_time(end_time) + load_report.add_summaries(self.load_summaries) + + def _get_start_times(self) -> list[datetime]: + futures = parallel([k6.get_start_time for k6 in self.runner.get_k6_instances()]) + return [future.result() for future in futures] + + def _get_end_times(self) -> list[datetime]: + futures = parallel([k6.get_end_time for k6 in self.runner.get_k6_instances()]) + return [future.result() for future in futures] + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + @reporter.step("Run post-load verification") + def verify(self): + try: + load_issues = self._collect_load_issues() + if self.load_params.verify: + load_issues.extend(self._run_verify_scenario()) + + assert not load_issues, "\n".join(load_issues) + finally: + self._reset_for_consequent_load() + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + @reporter.step("Collect load issues") + def _collect_load_issues(self): + verifier = LoadVerifier(self.load_params) + return verifier.collect_load_issues(self.load_summaries) + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + def wait_until_finish(self, soft_timeout: int = 0): + self.runner.wait_until_finish(soft_timeout) + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + @reporter.step("Verify loaded objects") + def _run_verify_scenario(self) -> list[str]: + self.verification_params = LoadParams( + verify_clients=self.load_params.verify_clients, + scenario=LoadScenario.VERIFY, + read_from=self.load_params.read_from, + registry_file=self.load_params.registry_file, + verify_time=self.load_params.verify_time, + custom_registry=self.load_params.custom_registry, + load_type=self.load_params.load_type, + load_id=self.load_params.load_id, + 
vu_init_time=0, + working_dir=self.load_params.working_dir, + endpoint_selection_strategy=self.load_params.endpoint_selection_strategy, + k6_process_allocation_strategy=self.load_params.k6_process_allocation_strategy, + setup_timeout=self.load_params.setup_timeout, + ) + + if self.verification_params.custom_registry: + self.verification_params.registry_file = self.load_params.custom_registry + + if self.verification_params.verify_time is None: + raise RuntimeError("verify_time should not be none") + + self.runner.init_k6_instances(self.verification_params, self.endpoints, self.k6_dir) + with reporter.step("Run verify scenario"): + self.runner.start() + self.runner.wait_until_finish() + + with reporter.step("Collect verify issues"): + verification_summaries = self._get_results() + verifier = LoadVerifier(self.load_params) + return verifier.collect_verify_issues(self.load_summaries, verification_summaries) + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + def _get_results(self) -> dict: + with reporter.step(f"Get {self.load_params.scenario.value} scenario results"): + return self.runner.get_results() + + def __str__(self) -> str: + return self.load_params.__str__() + + def __repr__(self) -> str: + return repr(self.load_params) diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py new file mode 100644 index 00000000..51aaefbb --- /dev/null +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -0,0 +1,543 @@ +import itertools +import logging +import time +from datetime import datetime, timezone +from typing import TypeVar + +import frostfs_testlib.resources.optionals as optionals +from frostfs_testlib import reporter +from frostfs_testlib.cli import FrostfsAdm, FrostfsCli +from frostfs_testlib.cli.netmap_parser import NetmapParser +from frostfs_testlib.healthcheck.interfaces import Healthcheck +from frostfs_testlib.hosting.interfaces import HostStatus +from frostfs_testlib.plugins import load_all +from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC +from frostfs_testlib.resources.common import MORPH_BLOCK_TIME +from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider +from frostfs_testlib.steps.network import IpHelper +from frostfs_testlib.steps.node_management import include_node_to_network_map, remove_nodes_from_map_morph +from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode +from frostfs_testlib.storage.controllers.disk_controller import DiskController +from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass +from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeStatus +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo +from frostfs_testlib.testing import parallel +from frostfs_testlib.testing.test_control import retry, run_optionally, wait_for_success +from frostfs_testlib.utils.datetime_utils import parse_time + +logger = logging.getLogger("NeoLogger") + + +class StateManager: + def __init__(self, cluster_state_controller: "ClusterStateController") -> None: + self.csc = cluster_state_controller + + +StateManagerClass = TypeVar("StateManagerClass", bound=StateManager) + + +class ClusterStateController: + def __init__(self, shell: Shell, cluster: Cluster, healthcheck: Healthcheck) -> None: + self.stopped_nodes: list[ClusterNode] = [] + self.detached_disks: dict[str, DiskController] = 
{} + self.dropped_traffic: set[ClusterNode] = set() + self.excluded_from_netmap: list[StorageNode] = [] + self.stopped_services: set[NodeBase] = set() + self.cluster = cluster + self.healthcheck = healthcheck + self.shell = shell + self.suspended_services: dict[str, list[ClusterNode]] = {} + self.nodes_with_modified_interface: list[ClusterNode] = [] + self.managers: list[StateManagerClass] = [] + + # TODO: move all functionality to managers + managers = set(load_all(group="frostfs.testlib.csc_managers")) + for manager in managers: + self.managers.append(manager(self)) + + def manager(self, manager_type: type[StateManagerClass]) -> StateManagerClass: + for manager in self.managers: + # Subclasses here for the future if we have overriding subclasses of base interface + if issubclass(type(manager), manager_type): + return manager + + def _get_stopped_by_node(self, node: ClusterNode) -> set[NodeBase]: + stopped_by_node = [svc for svc in self.stopped_services if svc.host == node.host] + return set(stopped_by_node) + + def _get_stopped_by_type(self, service_type: type[ServiceClass]) -> set[ServiceClass]: + stopped_by_type = [svc for svc in self.stopped_services if isinstance(svc, service_type)] + return set(stopped_by_type) + + def _from_stopped_nodes(self, service_type: type[ServiceClass]) -> set[ServiceClass]: + stopped_on_nodes = set([node.service(service_type) for node in self.stopped_nodes]) + return set(stopped_on_nodes) + + def _get_online(self, service_type: type[ServiceClass]) -> set[ServiceClass]: + stopped_svc = self._get_stopped_by_type(service_type).union(self._from_stopped_nodes(service_type)) + online_svc = set(self.cluster.services(service_type)) - stopped_svc + return online_svc + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Stop host of node {node}") + def stop_node_host(self, node: ClusterNode, mode: str): + # Drop ssh connection for this node before shutdown + provider = SshConnectionProvider() + provider.drop(node.host_ip) + + self.stopped_nodes.append(node) + with reporter.step(f"Stop host {node.host.config.address}"): + node.host.stop_host(mode=mode) + self._wait_for_host_offline(node) + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Shutdown whole cluster") + def shutdown_cluster(self, mode: str, reversed_order: bool = False): + nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes + + # Drop all ssh connections before shutdown + provider = SshConnectionProvider() + provider.drop_all() + + for node in nodes: + with reporter.step(f"Stop host {node.host.config.address}"): + self.stopped_nodes.append(node) + node.host.stop_host(mode=mode) + + for node in nodes: + self._wait_for_host_offline(node) + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Start host of node {node}") + def start_node_host(self, node: ClusterNode, startup_healthcheck: bool = True): + with reporter.step(f"Start host {node.host.config.address}"): + node.host.start_host() + self._wait_for_host_online(node) + self.stopped_nodes.remove(node) + if startup_healthcheck: + self.wait_startup_healthcheck() + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Start stopped hosts") + def start_stopped_hosts(self, reversed_order: bool = False): + if not self.stopped_nodes: + return + + nodes = reversed(self.stopped_nodes) if reversed_order else self.stopped_nodes + for node in nodes: + with reporter.step(f"Start host {node.host.config.address}"): + node.host.start_host() + 
self.stopped_services.difference_update(self._get_stopped_by_node(node)) + + self.stopped_nodes = [] + with reporter.step("Wait for all nodes to go online"): + parallel(self._wait_for_host_online, self.cluster.cluster_nodes) + + self.wait_after_storage_startup() + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Detach disk {device} at {mountpoint} on node {node}") + def detach_disk(self, node: StorageNode, device: str, mountpoint: str): + disk_controller = self._get_disk_controller(node, device, mountpoint) + self.detached_disks[disk_controller.id] = disk_controller + disk_controller.detach() + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Attach disk {device} at {mountpoint} on node {node}") + def attach_disk(self, node: StorageNode, device: str, mountpoint: str): + disk_controller = self._get_disk_controller(node, device, mountpoint) + disk_controller.attach() + self.detached_disks.pop(disk_controller.id, None) + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Restore detached disks") + def restore_disks(self): + for disk_controller in self.detached_disks.values(): + disk_controller.attach() + self.detached_disks = {} + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Stop all {service_type} services") + def stop_services_of_type(self, service_type: type[ServiceClass], mask: bool = True): + services = self.cluster.services(service_type) + self.stopped_services.update(services) + parallel([service.stop_service for service in services], mask=mask) + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Start all {service_type} services") + def start_services_of_type(self, service_type: type[ServiceClass]): + services = self.cluster.services(service_type) + parallel([service.start_service for service in services]) + self.stopped_services.difference_update(set(services)) + + if service_type == StorageNode: + self.wait_after_storage_startup() + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Send sighup to all {service_type} services") + def sighup_services_of_type(self, service_type: type[ServiceClass]): + services = self.cluster.services(service_type) + parallel([service.send_signal_to_service for service in services], signal="SIGHUP") + + if service_type == StorageNode: + self.wait_after_storage_startup() + + @wait_for_success(600, 60) + def wait_s3gate(self, s3gate: S3Gate): + with reporter.step(f"Wait for {s3gate} reconnection"): + result = s3gate.get_metric("frostfs_s3_gw_pool_current_nodes") + assert 'address="127.0.0.1' in result.stdout, "S3Gate should connect to local storage node" + + @reporter.step("Wait for S3Gates reconnection to local storage") + def wait_s3gates(self): + online_s3gates = self._get_online(S3Gate) + if online_s3gates: + parallel(self.wait_s3gate, online_s3gates) + + @reporter.step("Wait for cluster startup healtcheck") + def wait_startup_healthcheck(self): + nodes = self.cluster.nodes(self._get_online(StorageNode)) + parallel(self.healthcheck.startup_healthcheck, nodes) + + @reporter.step("Wait for storage reconnection to the system") + def wait_after_storage_startup(self): + self.wait_startup_healthcheck() + self.wait_s3gates() + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Start all stopped services") + def start_all_stopped_services(self): + stopped_storages = self._get_stopped_by_type(StorageNode) + parallel([service.start_service for service in self.stopped_services]) + 
self.stopped_services.clear() + + if stopped_storages: + self.wait_after_storage_startup() + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Stop {service_type} service on {node}") + def stop_service_of_type(self, node: ClusterNode, service_type: ServiceClass, mask: bool = True): + service = node.service(service_type) + service.stop_service(mask) + self.stopped_services.add(service) + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Send sighup to {service_type} service on {node}") + def sighup_service_of_type(self, node: ClusterNode, service_type: ServiceClass): + service = node.service(service_type) + service.send_signal_to_service("SIGHUP") + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Start {service_type} service on {node}") + def start_service_of_type(self, node: ClusterNode, service_type: ServiceClass): + service = node.service(service_type) + service.start_service() + self.stopped_services.discard(service) + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Start all stopped {service_type} services") + def start_stopped_services_of_type(self, service_type: ServiceClass): + stopped_svc = self._get_stopped_by_type(service_type) + if not stopped_svc: + return + + parallel([svc.start_service for svc in stopped_svc]) + self.stopped_services.difference_update(stopped_svc) + + if service_type == StorageNode: + self.wait_after_storage_startup() + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Restart {service_type} service on {node}") + def restart_service_of_type(self, node: ClusterNode, service_type: ServiceClass): + service = node.service(service_type) + service.restart_service() + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Restart all {service_type} services") + def restart_services_of_type(self, service_type: type[ServiceClass]): + services = self.cluster.services(service_type) + parallel([service.restart_service for service in services]) + + if service_type == StorageNode: + self.wait_after_storage_startup() + + # TODO: Deprecated + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Stop storage service on {node}") + def stop_storage_service(self, node: ClusterNode, mask: bool = True): + self.stop_service_of_type(node, StorageNode, mask) + + # TODO: Deprecated + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Start storage service on {node}") + def start_storage_service(self, node: ClusterNode): + self.start_service_of_type(node, StorageNode) + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Suspend {process_name} service in {node}") + def suspend_service(self, process_name: str, node: ClusterNode): + node.host.wait_success_suspend_process(process_name) + if self.suspended_services.get(process_name): + self.suspended_services[process_name].append(node) + else: + self.suspended_services[process_name] = [node] + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Resume {process_name} service in {node}") + def resume_service(self, process_name: str, node: ClusterNode): + node.host.wait_success_resume_process(process_name) + if self.suspended_services.get(process_name) and node in self.suspended_services[process_name]: + self.suspended_services[process_name].remove(node) + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Start suspend processes services") + def resume_suspended_services(self): + for process_name, 
list_nodes in self.suspended_services.items(): + [node.host.wait_success_resume_process(process_name) for node in list_nodes] + self.suspended_services = {} + + @reporter.step("Drop traffic to {node}, nodes - {block_nodes}") + def drop_traffic(self, node: ClusterNode, wakeup_timeout: int, name_interface: str, block_nodes: list[ClusterNode] = None) -> None: + interfaces_tables = self._parse_interfaces(block_nodes, name_interface) + IpHelper.drop_input_traffic_to_node(node, interfaces_tables) + time.sleep(wakeup_timeout) + self.dropped_traffic.add(node) + + @reporter.step("Start traffic to {node}") + def restore_traffic(self, node: ClusterNode) -> None: + IpHelper.restore_input_traffic_to_node(node=node) + self.dropped_traffic.discard(node) + + @reporter.step("Restore blocked nodes") + def restore_all_traffic(self): + if not self.dropped_traffic: + return + parallel(self._restore_traffic_to_node, self.dropped_traffic) + self.dropped_traffic.clear() + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Hard reboot host {node} via magic SysRq option") + def panic_reboot_host(self, node: ClusterNode, wait_for_return: bool = True, startup_healthcheck: bool = True): + shell = node.host.get_shell() + shell.exec('sudo sh -c "echo 1 > /proc/sys/kernel/sysrq"') + + options = CommandOptions(close_stdin=True, timeout=1, check=False) + shell.exec('sudo sh -c "echo b > /proc/sysrq-trigger"', options) + + # Drop ssh connection for this node + provider = SshConnectionProvider() + provider.drop(node.host_ip) + + if wait_for_return: + # Let the things to be settled + # A little wait here to prevent ssh stuck during panic + time.sleep(10) + self._wait_for_host_online(node) + if startup_healthcheck: + self.wait_startup_healthcheck() + + @reporter.step("Down {interface} to {nodes}") + def down_interface(self, nodes: list[ClusterNode], interface: str): + for node in nodes: + node.host.down_interface(interface=interface) + assert node.host.check_state(interface=interface) == "DOWN" + self.nodes_with_modified_interface.append(node) + + @reporter.step("Up {interface} to {nodes}") + def up_interface(self, nodes: list[ClusterNode], interface: str): + for node in nodes: + node.host.up_interface(interface=interface) + assert node.host.check_state(interface=interface) == "UP" + if node in self.nodes_with_modified_interface: + self.nodes_with_modified_interface.remove(node) + + @reporter.step("Restore interface") + def restore_interfaces(self): + for node in self.nodes_with_modified_interface: + dict_interfaces = node.host.config.interfaces.keys() + for name_interface in dict_interfaces: + if "mgmt" not in name_interface: + node.host.up_interface(interface=name_interface) + + @reporter.step("Get node time") + def get_node_date(self, node: ClusterNode) -> datetime: + shell = node.host.get_shell() + return datetime.strptime(shell.exec('date +"%Y-%m-%d %H:%M:%S"').stdout.strip(), "%Y-%m-%d %H:%M:%S") + + @reporter.step("Set time on nodes in {in_date}") + def change_date_on_all_nodes(self, cluster: Cluster, in_date: datetime) -> None: + parallel(self.change_node_date, cluster.cluster_nodes, in_date=in_date) + + @reporter.step("Set time on {node} to {in_date}") + def change_node_date(self, node: ClusterNode, in_date: datetime) -> None: + shell = node.host.get_shell() + in_date_frmt = in_date.strftime("%Y-%m-%d %H:%M:%S") + shell.exec(f"timedatectl set-time '{in_date_frmt}'") + node_time = self.get_node_date(node) + + with reporter.step(f"Verify difference between {node_time} and {in_date} is less than a 
minute"): + assert (node_time - in_date).total_seconds() < 60 + + @reporter.step("Restore time on nodes") + def restore_date_on_all_nodes(self, cluster: Cluster) -> None: + parallel(self.restore_node_date, cluster.cluster_nodes) + + @reporter.step("Restore time on {node}") + def restore_node_date(self, node: ClusterNode) -> None: + shell = node.host.get_shell() + now_time = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S") + + with reporter.step(f"Set {now_time} time"): + shell.exec(f"timedatectl set-time '{now_time}'") + + @reporter.step("Set MaintenanceModeAllowed - {status}") + def set_maintenance_mode_allowed(self, status: str, cluster_node: ClusterNode) -> None: + frostfs_adm = FrostfsAdm( + shell=cluster_node.host.get_shell(), frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH + ) + frostfs_adm.morph.set_config(set_key_value=f"MaintenanceModeAllowed={status}") + + @reporter.step("Set node status to {status} in CSC") + def set_node_status(self, cluster_node: ClusterNode, wallet: WalletInfo, status: NodeStatus, await_tick: bool = True) -> None: + rpc_endpoint = cluster_node.storage_node.get_rpc_endpoint() + control_endpoint = cluster_node.service(StorageNode).get_control_endpoint() + + frostfs_adm, frostfs_cli, frostfs_cli_remote = self._get_cli(self.shell, wallet, cluster_node) + node_netinfo = NetmapParser.netinfo(frostfs_cli.netmap.netinfo(rpc_endpoint).stdout) + + if node_netinfo.maintenance_mode_allowed == "false": + with reporter.step("Enable maintenance mode"): + frostfs_adm.morph.set_config("MaintenanceModeAllowed=true") + + with reporter.step(f"Set node status to {status} using FrostfsCli"): + frostfs_cli_remote.control.set_status(control_endpoint, status.value) + + if not await_tick: + return + + with reporter.step("Tick 2 epoch with 2 block await."): + for _ in range(2): + frostfs_adm.morph.force_new_epoch() + time.sleep(parse_time(MORPH_BLOCK_TIME) * 2) + + self.await_node_status(status, wallet, cluster_node) + + @wait_for_success(80, 8, title="Wait for node status become {status}") + def await_node_status(self, status: NodeStatus, wallet: WalletInfo, cluster_node: ClusterNode, checker_node: ClusterNode = None): + frostfs_cli = FrostfsCli(self.shell, FROSTFS_CLI_EXEC, wallet.config_path) + if not checker_node: + checker_node = cluster_node + netmap = NetmapParser.snapshot_all_nodes(frostfs_cli.netmap.snapshot(checker_node.storage_node.get_rpc_endpoint()).stdout) + netmap = [node for node in netmap if cluster_node.get_interface(Interfaces.MGMT) == node.node] + if status == NodeStatus.OFFLINE: + assert ( + cluster_node.get_interface(Interfaces.MGMT) not in netmap + ), f"{cluster_node.get_interface(Interfaces.MGMT)} not in Offline" + else: + assert netmap[0].node_status == status, f"Node status should be '{status}', but was '{netmap[0].node_status}'" + + def remove_node_from_netmap(self, removes_nodes: list[StorageNode]) -> None: + alive_storage = list(set(self.cluster.storage_nodes) - set(removes_nodes))[0] + remove_nodes_from_map_morph(self.shell, self.cluster, removes_nodes, alive_storage) + self.excluded_from_netmap.extend(removes_nodes) + + def include_node_to_netmap(self, include_node: StorageNode, alive_node: StorageNode): + include_node_to_network_map(include_node, alive_node, self.shell, self.cluster) + self.excluded_from_netmap.pop(self.excluded_from_netmap.index(include_node)) + + def include_all_excluded_nodes(self): + if not self.excluded_from_netmap: + return + alive_node = list(set(self.cluster.storage_nodes) - 
set(self.excluded_from_netmap))[0] + if not alive_node: + return + + for exclude_node in self.excluded_from_netmap.copy(): + self.include_node_to_netmap(exclude_node, alive_node) + + def _get_cli( + self, local_shell: Shell, local_wallet: WalletInfo, cluster_node: ClusterNode + ) -> tuple[FrostfsAdm, FrostfsCli, FrostfsCli]: + # TODO Move to service config + host = cluster_node.host + service_config = host.get_service_config(cluster_node.storage_node.name) + wallet_path = service_config.attributes["wallet_path"] + wallet_password = service_config.attributes["wallet_password"] + + shell = host.get_shell() + wallet_config_path = f"/tmp/{cluster_node.storage_node.name}-config.yaml" + wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' + shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") + + frostfs_adm = FrostfsAdm(shell=shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH) + frostfs_cli = FrostfsCli(local_shell, FROSTFS_CLI_EXEC, local_wallet.config_path) + frostfs_cli_remote = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config_path) + return frostfs_adm, frostfs_cli, frostfs_cli_remote + + def _get_disk_controller(self, node: StorageNode, device: str, mountpoint: str) -> DiskController: + disk_controller_id = DiskController.get_id(node, device) + if disk_controller_id in self.detached_disks.keys(): + disk_controller = self.detached_disks[disk_controller_id] + else: + disk_controller = DiskController(node, device, mountpoint) + + return disk_controller + + @reporter.step("Restore traffic {node}") + def _restore_traffic_to_node(self, node): + IpHelper.restore_input_traffic_to_node(node) + + def _parse_interfaces(self, nodes: list[ClusterNode], name_interface: str) -> list[tuple]: + interfaces_and_tables = set() + for node in nodes: + shell = node.host.get_shell() + lines = shell.exec(f"ip r l table all | grep '{name_interface}'").stdout.splitlines() + + ips = [] + tables = [] + + for line in lines: + if "src" not in line or "table local" in line: + continue + parts = line.split() + ips.append(parts[-1]) + if "table" in line: + tables.append(parts[parts.index("table") + 1]) + tables.append(None) + + [interfaces_and_tables.add((ip, table)) for ip, table in itertools.product(ips, tables)] + + return interfaces_and_tables + + @reporter.step("Ping node") + def _ping_host(self, node: ClusterNode): + options = CommandOptions(check=False) + return self.shell.exec(f"ping {node.host.config.address} -c 1", options).return_code + + @retry(max_attempts=60, sleep_interval=10, expected_result=HostStatus.ONLINE, title="Waiting for {node} to go online") + def _wait_for_host_online(self, node: ClusterNode): + try: + ping_result = self._ping_host(node) + if ping_result != 0: + return HostStatus.OFFLINE + return node.host.get_host_status() + except Exception as err: + logger.warning(f"Host ping fails with error {err}") + return HostStatus.OFFLINE + + @retry(max_attempts=60, sleep_interval=10, expected_result=HostStatus.OFFLINE, title="Waiting for {node} to go offline") + def _wait_for_host_offline(self, node: ClusterNode): + try: + ping_result = self._ping_host(node) + if ping_result == 0: + return HostStatus.ONLINE + return node.host.get_host_status() + except Exception as err: + logger.warning(f"Host ping fails with error {err}") + return HostStatus.ONLINE + + @reporter.step("Get contract by domain - {domain_name}") + def get_domain_contracts(self, cluster_node: ClusterNode, domain_name: str): + frostfs_adm = 
FrostfsAdm(shell=cluster_node.host.get_shell(), frostfs_adm_exec_path=FROSTFS_ADM_EXEC) + return frostfs_adm.morph.dump_hashes(cluster_node.morph_chain.get_http_endpoint(), domain_name).stdout diff --git a/src/frostfs_testlib/storage/controllers/disk_controller.py b/src/frostfs_testlib/storage/controllers/disk_controller.py new file mode 100644 index 00000000..c2aa85ce --- /dev/null +++ b/src/frostfs_testlib/storage/controllers/disk_controller.py @@ -0,0 +1,41 @@ +from frostfs_testlib.hosting.interfaces import DiskInfo +from frostfs_testlib.shell import CommandOptions +from frostfs_testlib.storage.cluster import StorageNode +from frostfs_testlib.testing.test_control import wait_for_success + + +class DiskController: + def __init__(self, node: StorageNode, device: str, mountpoint: str) -> None: + self.node: StorageNode = node + self.device: str = device + self.device_by_label: str + self.mountpoint: str = mountpoint.strip() + self.disk_info: DiskInfo = DiskInfo() + self.id = self.get_id(node, device) + + shell = node.host.get_shell() + cmd = f"sudo udevadm info -n {device} | egrep \"S:.*label\" | awk '{{print $2}}'" + self.device_by_label = f"/dev/{shell.exec(cmd).stdout.strip()}" + + @wait_for_success(60, 3, False) + def _wait_until_detached(self): + return self.node.host.is_disk_attached(self.device, self.disk_info) + + @wait_for_success(60, 3, True) + def _wait_until_attached(self): + return self.node.host.is_disk_attached(self.device, self.disk_info) + + def detach(self): + self.disk_info = self.node.host.detach_disk(self.device) + self._wait_until_detached() + + def attach(self): + self.node.host.attach_disk(self.device, self.disk_info) + self._wait_until_attached() + remote_shell = self.node.host.get_shell() + remote_shell.exec(f"sudo umount -l {self.device}", options=CommandOptions(check=False)) + remote_shell.exec(f"sudo mount {self.device_by_label} {self.mountpoint}") + + @staticmethod + def get_id(node: StorageNode, device: str): + return f"{node.host.config.address} - {device}" diff --git a/src/frostfs_testlib/storage/controllers/shards_watcher.py b/src/frostfs_testlib/storage/controllers/shards_watcher.py new file mode 100644 index 00000000..50174066 --- /dev/null +++ b/src/frostfs_testlib/storage/controllers/shards_watcher.py @@ -0,0 +1,117 @@ +import json +from typing import Any + +from frostfs_testlib.cli.frostfs_cli.shards import FrostfsCliShards +from frostfs_testlib.shell.interfaces import CommandResult +from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.testing.test_control import wait_for_success + + +class ShardsWatcher: + def __init__(self, node_under_test: ClusterNode) -> None: + self.shards_snapshots: list[dict[str, Any]] = [] + self.storage_node = node_under_test.storage_node + self.take_shards_snapshot() + + def take_shards_snapshot(self) -> None: + snapshot = self.get_shards_snapshot() + self.shards_snapshots.append(snapshot) + + def get_shards_snapshot(self) -> dict[str, Any]: + shards_snapshot: dict[str, Any] = {} + + shards = self.get_shards() + for shard in shards: + shards_snapshot[shard["shard_id"]] = shard + + return shards_snapshot + + def _get_current_snapshot(self) -> dict[str, Any]: + return self.shards_snapshots[-1] + + def _get_previous_snapshot(self) -> dict[str, Any]: + return self.shards_snapshots[-2] + + def _is_shard_present(self, shard_id) -> bool: + snapshot = self._get_current_snapshot() + return shard_id in snapshot + + def get_shards_with_new_errors(self) -> dict[str, Any]: + current_snapshot = 
self._get_current_snapshot() + previous_snapshot = self._get_previous_snapshot() + shards_with_new_errors: dict[str, Any] = {} + for shard_id, shard in previous_snapshot.items(): + if current_snapshot[shard_id]["error_count"] > shard["error_count"]: + shards_with_new_errors[shard_id] = current_snapshot[shard_id] + + return shards_with_new_errors + + def get_shards_with_errors(self) -> dict[str, Any]: + snapshot = self.get_shards_snapshot() + shards_with_errors: dict[str, Any] = {} + for shard_id, shard in snapshot.items(): + if shard["error_count"] > 0: + shards_with_errors[shard_id] = shard + + return shards_with_errors + + def get_shard_status(self, shard_id: str): # -> Any: + snapshot = self.get_shards_snapshot() + + assert shard_id in snapshot, f"Shard {shard_id} is missing: {snapshot}" + + return snapshot[shard_id]["mode"] + + @wait_for_success(60, 2) + def await_for_all_shards_status(self, status: str) -> None: + snapshot = self.get_shards_snapshot() + + for shard_id in snapshot: + assert snapshot[shard_id]["mode"] == status, f"Shard {shard_id} have wrong shard status" + + @wait_for_success(60, 2) + def await_for_shard_status(self, shard_id: str, status: str) -> None: + assert self.get_shard_status(shard_id) == status + + @wait_for_success(60, 2) + def await_for_shard_have_new_errors(self, shard_id: str) -> None: + self.take_shards_snapshot() + assert self._is_shard_present(shard_id) + shards_with_new_errors = self.get_shards_with_new_errors() + + assert shard_id in shards_with_new_errors, f"Expected shard {shard_id} to have new errors, but haven't {self.shards_snapshots[-1]}" + + @wait_for_success(300, 5) + def await_for_shards_have_no_new_errors(self) -> None: + self.take_shards_snapshot() + shards_with_new_errors = self.get_shards_with_new_errors() + assert len(shards_with_new_errors) == 0 + + def get_shards(self) -> dict[str, Any]: + shards_cli = FrostfsCliShards( + self.storage_node.host.get_shell(), + self.storage_node.host.get_cli_config("frostfs-cli").exec_path, + ) + + response = shards_cli.list( + endpoint=self.storage_node.get_control_endpoint(), + wallet=self.storage_node.get_remote_wallet_path(), + wallet_password=self.storage_node.get_wallet_password(), + json_mode=True, + ) + + return json.loads(response.stdout.split(">", 1)[1]) + + def set_shard_mode(self, shard_id: str, mode: str, clear_errors: bool = True) -> CommandResult: + shards_cli = FrostfsCliShards( + self.storage_node.host.get_shell(), + self.storage_node.host.get_cli_config("frostfs-cli").exec_path, + ) + return shards_cli.set_mode( + endpoint=self.storage_node.get_control_endpoint(), + wallet=self.storage_node.get_remote_wallet_path(), + wallet_password=self.storage_node.get_wallet_password(), + mode=mode, + id=[shard_id], + clear_errors=clear_errors, + ) diff --git a/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py b/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py new file mode 100644 index 00000000..f0b2a215 --- /dev/null +++ b/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py @@ -0,0 +1,62 @@ +from typing import Any + +from frostfs_testlib import reporter +from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController, StateManager +from frostfs_testlib.storage.dataclasses.node_base import ServiceClass +from frostfs_testlib.testing import parallel + + +class ConfigStateManager(StateManager): + def __init__(self, 
cluster_state_controller: ClusterStateController) -> None: + super().__init__(cluster_state_controller) + self.services_with_changed_config: set[tuple[ClusterNode, ServiceClass]] = set() + self.cluster = self.csc.cluster + + @reporter.step("Change configuration for {service_type} on all nodes") + def set_on_all_nodes(self, service_type: type[ServiceClass], values: dict[str, Any], sighup: bool = False): + services = self.cluster.services(service_type) + nodes = self.cluster.nodes(services) + self.services_with_changed_config.update([(node, service_type) for node in nodes]) + + if not sighup: + self.csc.stop_services_of_type(service_type) + + parallel([node.config(service_type).set for node in nodes], values=values) + if not sighup: + self.csc.start_services_of_type(service_type) + else: + self.csc.sighup_services_of_type(service_type) + + @reporter.step("Change configuration for {service_type} on {node}") + def set_on_node(self, node: ClusterNode, service_type: type[ServiceClass], values: dict[str, Any]): + self.services_with_changed_config.add((node, service_type)) + + self.csc.stop_service_of_type(node, service_type) + node.config(service_type).set(values) + self.csc.start_service_of_type(node, service_type) + + @reporter.step("Revert all configuration changes") + def revert_all(self, sighup: bool = False): + if not self.services_with_changed_config: + return + + parallel(self._revert_svc, self.services_with_changed_config, sighup) + self.services_with_changed_config.clear() + + if not sighup: + self.csc.start_all_stopped_services() + + # TODO: parallel can't have multiple parallel_items :( + @reporter.step("Revert all configuration {node_and_service}") + def _revert_svc(self, node_and_service: tuple[ClusterNode, ServiceClass], sighup: bool = False): + node, service_type = node_and_service + service = node.service(service_type) + + if not sighup: + self.csc.stop_service_of_type(node, service_type) + + node.config(service_type).revert() + + if sighup: + service.send_signal_to_service("SIGHUP") diff --git a/src/frostfs_testlib/storage/dataclasses/__init__.py b/src/frostfs_testlib/storage/dataclasses/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/frostfs_testlib/storage/dataclasses/acl.py b/src/frostfs_testlib/storage/dataclasses/acl.py new file mode 100644 index 00000000..362dee99 --- /dev/null +++ b/src/frostfs_testlib/storage/dataclasses/acl.py @@ -0,0 +1,100 @@ +import logging +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Union + +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo +from frostfs_testlib.testing.readable import HumanReadableEnum +from frostfs_testlib.utils import wallet_utils + +logger = logging.getLogger("NeoLogger") +EACL_LIFETIME = 100500 +FROSTFS_CONTRACT_CACHE_TIMEOUT = 30 + + +class EACLOperation(HumanReadableEnum): + PUT = "put" + GET = "get" + HEAD = "head" + GET_RANGE = "getrange" + GET_RANGE_HASH = "getrangehash" + SEARCH = "search" + DELETE = "delete" + + +class EACLAccess(HumanReadableEnum): + ALLOW = "allow" + DENY = "deny" + + +class EACLRole(HumanReadableEnum): + OTHERS = "others" + USER = "user" + SYSTEM = "system" + + +class EACLHeaderType(HumanReadableEnum): + REQUEST = "req" # Filter request headers + OBJECT = "obj" # Filter object headers + SERVICE = "SERVICE" # Filter service headers. 
These are not processed by FrostFS nodes and exist for service use only + + +class EACLMatchType(HumanReadableEnum): + STRING_EQUAL = "=" # Return true if strings are equal + STRING_NOT_EQUAL = "!=" # Return true if strings are different + + +@dataclass +class EACLFilter: + header_type: EACLHeaderType = EACLHeaderType.REQUEST + match_type: EACLMatchType = EACLMatchType.STRING_EQUAL + key: Optional[str] = None + value: Optional[str] = None + + def to_dict(self) -> Dict[str, Any]: + return { + "headerType": self.header_type, + "matchType": self.match_type, + "key": self.key, + "value": self.value, + } + + +@dataclass +class EACLFilters: + filters: Optional[List[EACLFilter]] = None + + def __str__(self): + return ",".join( + [f"{filter.header_type.value}:" f"{filter.key}{filter.match_type.value}{filter.value}" for filter in self.filters] + if self.filters + else [] + ) + + +@dataclass +class EACLPubKey: + keys: Optional[List[str]] = None + + +@dataclass +class EACLRule: + operation: Optional[EACLOperation] = None + access: Optional[EACLAccess] = None + role: Optional[Union[EACLRole, WalletInfo]] = None + filters: Optional[EACLFilters] = None + + def to_dict(self) -> Dict[str, Any]: + return { + "Operation": self.operation, + "Access": self.access, + "Role": self.role, + "Filters": self.filters or [], + } + + def __str__(self): + role = "" + if isinstance(self.role, EACLRole): + role = self.role.value + if isinstance(self.role, WalletInfo): + role = f"pubkey:{wallet_utils.get_wallet_public_key(self.role.path, self.role.password)}" + return f'{self.access.value} {self.operation.value} {self.filters or ""} {role}' diff --git a/src/frostfs_testlib/storage/dataclasses/ape.py b/src/frostfs_testlib/storage/dataclasses/ape.py new file mode 100644 index 00000000..11994356 --- /dev/null +++ b/src/frostfs_testlib/storage/dataclasses/ape.py @@ -0,0 +1,154 @@ +import logging +from dataclasses import dataclass +from enum import Enum +from typing import Optional + +from frostfs_testlib.testing.readable import HumanReadableEnum +from frostfs_testlib.utils import string_utils + +logger = logging.getLogger("NeoLogger") +EACL_LIFETIME = 100500 +FROSTFS_CONTRACT_CACHE_TIMEOUT = 30 + + +class ObjectOperations(HumanReadableEnum): + PUT = "object.put" + PATCH = "object.patch" + GET = "object.get" + HEAD = "object.head" + GET_RANGE = "object.range" + GET_RANGE_HASH = "object.hash" + SEARCH = "object.search" + DELETE = "object.delete" + WILDCARD_ALL = "object.*" + + @staticmethod + def get_all(): + return [op for op in ObjectOperations if op != ObjectOperations.WILDCARD_ALL] + + +class ContainerOperations(HumanReadableEnum): + PUT = "container.put" + GET = "container.get" + LIST = "container.list" + DELETE = "container.delete" + WILDCARD_ALL = "container.*" + + @staticmethod + def get_all(): + return [op for op in ObjectOperations if op != ObjectOperations.WILDCARD_ALL] + + +@dataclass +class Operations: + GET_CONTAINER = "GetContainer" + PUT_CONTAINER = "PutContainer" + DELETE_CONTAINER = "DeleteContainer" + LIST_CONTAINER = "ListContainers" + GET_OBJECT = "GetObject" + DELETE_OBJECT = "DeleteObject" + HASH_OBJECT = "HashObject" + RANGE_OBJECT = "RangeObject" + SEARCH_OBJECT = "SearchObject" + HEAD_OBJECT = "HeadObject" + PUT_OBJECT = "PutObject" + PATCH_OBJECT = "PatchObject" + + +class Verb(HumanReadableEnum): + ALLOW = "allow" + DENY = "deny" + + +class Role(HumanReadableEnum): + OWNER = "owner" + IR = "ir" + CONTAINER = "container" + OTHERS = "others" + + +class ConditionType(HumanReadableEnum): + RESOURCE = 
"ResourceCondition" + REQUEST = "RequestCondition" + + +# See https://git.frostfs.info/TrueCloudLab/policy-engine/src/branch/master/schema/native/consts.go#L40-L53 +class ConditionKey(HumanReadableEnum): + ROLE = '"\\$Actor:role"' + PUBLIC_KEY = '"\\$Actor:publicKey"' + OBJECT_TYPE = '"\\$Object:objectType"' + OBJECT_ID = '"\\$Object:objectID"' + + +class MatchType(HumanReadableEnum): + EQUAL = "=" + NOT_EQUAL = "!=" + + +@dataclass +class Condition: + condition_key: ConditionKey | str + condition_value: str + condition_type: ConditionType = ConditionType.REQUEST + match_type: MatchType = MatchType.EQUAL + + def as_string(self): + key = self.condition_key.value if isinstance(self.condition_key, ConditionKey) else self.condition_key + value = self.condition_value.value if isinstance(self.condition_value, Enum) else self.condition_value + + return f"{self.condition_type.value}:{key}{self.match_type.value}{value}" + + @staticmethod + def by_role(*args, **kwargs) -> "Condition": + return Condition(ConditionKey.ROLE, *args, **kwargs) + + @staticmethod + def by_key(*args, **kwargs) -> "Condition": + return Condition(ConditionKey.PUBLIC_KEY, *args, **kwargs) + + @staticmethod + def by_object_type(*args, **kwargs) -> "Condition": + return Condition(ConditionKey.OBJECT_TYPE, *args, **kwargs) + + @staticmethod + def by_object_id(*args, **kwargs) -> "Condition": + return Condition(ConditionKey.OBJECT_ID, *args, **kwargs) + + +class Rule: + def __init__( + self, + access: Verb, + operations: list[ObjectOperations] | ObjectOperations, + conditions: list[Condition] | Condition = None, + chain_id: Optional[str] = None, + ) -> None: + self.access = access + self.operations = operations + + if not conditions: + self.conditions = [] + elif isinstance(conditions, Condition): + self.conditions = [conditions] + else: + self.conditions = conditions + + if not isinstance(self.conditions, list): + raise RuntimeError("Conditions must be a list") + + if not operations: + self.operations = [] + elif isinstance(operations, (ObjectOperations, ContainerOperations)): + self.operations = [operations] + else: + self.operations = operations + + if not isinstance(self.operations, list): + raise RuntimeError("Operations must be a list") + + self.chain_id = chain_id if chain_id else string_utils.unique_name("chain-id-") + + def as_string(self): + conditions = " ".join([cond.as_string() for cond in self.conditions]) + operations = " ".join([op.value for op in self.operations]) + return f"{self.access.value} {operations} {conditions} *" diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py new file mode 100644 index 00000000..4f5c3489 --- /dev/null +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -0,0 +1,183 @@ +import yaml + +from frostfs_testlib.blockchain import RPCClient +from frostfs_testlib.storage.constants import ConfigAttributes +from frostfs_testlib.storage.dataclasses.node_base import NodeBase +from frostfs_testlib.storage.dataclasses.shard import Shard + + +class InnerRing(NodeBase): + """ + Class represents inner ring node in a cluster + + Inner ring node is not always the same as physical host (or physical node, if you will): + It can be service running in a container or on physical host + For testing perspective, it's not relevant how it is actually running, + since frostfs network will still treat it as "node" + """ + + def service_healthcheck(self) -> bool: + health_metric = "frostfs_ir_ir_health" + output = 
self.host.get_shell().exec(f"curl -s localhost:6662 | grep {health_metric} | sed 1,2d").stdout + return health_metric in output + + def get_netmap_cleaner_threshold(self) -> str: + config_file = self.get_remote_config_path() + contents = self.host.get_shell().exec(f"cat {config_file}").stdout + + config = yaml.safe_load(contents) + value = config["netmap_cleaner"]["threshold"] + + return value + + +class S3Gate(NodeBase): + """ + Class represents S3 gateway in a cluster + """ + + def get_endpoint(self) -> str: + return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0) + + def get_ns_endpoint(self, ns_name: str) -> str: + return self._get_attribute(f"{ConfigAttributes.ENDPOINT_DATA_0}_namespace").format(namespace=ns_name) + + def get_all_endpoints(self) -> list[str]: + return [ + self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0), + self._get_attribute(ConfigAttributes.ENDPOINT_DATA_1), + ] + + def get_ns_endpoint(self, ns_name: str) -> str: + return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0_NS).format(namespace=ns_name) + + def service_healthcheck(self) -> bool: + health_metric = "frostfs_s3_gw_state_health" + output = self.host.get_shell().exec(f"curl -s localhost:8086 | grep {health_metric} | sed 1,2d").stdout + return health_metric in output + + @property + def label(self) -> str: + return f"{self.name}: {self.get_endpoint()}" + + +class HTTPGate(NodeBase): + """ + Class represents HTTP gateway in a cluster + """ + + def get_endpoint(self) -> str: + return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0) + + def service_healthcheck(self) -> bool: + health_metric = "frostfs_http_gw_state_health" + output = self.host.get_shell().exec(f"curl -s localhost:5662 | grep {health_metric} | sed 1,2d").stdout + return health_metric in output + + @property + def label(self) -> str: + return f"{self.name}: {self.get_endpoint()}" + + +class MorphChain(NodeBase): + """ + Class represents side-chain aka morph-chain consensus node in a cluster + + Consensus node is not always the same as physical host (or physical node, if you will): + It can be service running in a container or on physical host + For testing perspective, it's not relevant how it is actually running, + since frostfs network will still treat it as "node" + """ + + rpc_client: RPCClient + + def construct(self): + self.rpc_client = RPCClient(self.get_endpoint()) + + def get_endpoint(self) -> str: + return self._get_attribute(ConfigAttributes.ENDPOINT_INTERNAL) + + def service_healthcheck(self) -> bool: + # TODO Rework in 1.3 Release when metrics for each service will be available + return True + + @property + def label(self) -> str: + return f"{self.name}: {self.get_endpoint()}" + + def get_http_endpoint(self) -> str: + return self._get_attribute("http_endpoint") + + +class StorageNode(NodeBase): + """ + Class represents storage node in a storage cluster + + Storage node is not always the same as physical host: + It can be service running in a container or on physical host (or physical node, if you will): + For testing perspective, it's not relevant how it is actually running, + since frostfs network will still treat it as "node" + """ + + def get_rpc_endpoint(self) -> str: + return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0) + + def get_all_rpc_endpoint(self) -> list[str]: + return [ + self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0), + self._get_attribute(ConfigAttributes.ENDPOINT_DATA_1), + ] + + def service_healthcheck(self) -> bool: + health_metric = "frostfs_node_state_health" + output = 
self.host.get_shell().exec(f"curl -s localhost:6672 | grep {health_metric} | sed 1,2d").stdout + return health_metric in output + + # TODO: Deprecated. Use new approach with config + def get_shard_config_path(self) -> str: + return self._get_attribute(ConfigAttributes.SHARD_CONFIG_PATH) + + # TODO: Deprecated. Use new approach with config + def get_shards_config(self) -> tuple[str, dict]: + return self.get_config(self.get_shard_config_path()) + + def get_shards(self) -> list[Shard]: + shards = self.config.get("storage:shard") + + if not shards: + raise RuntimeError(f"Cannot get shards information for {self.name} on {self.host.config.address}") + + if "default" in shards: + shards.pop("default") + return [Shard.from_object(shard) for shard in shards.values()] + + def get_control_endpoint(self) -> str: + return self._get_attribute(ConfigAttributes.CONTROL_ENDPOINT) + + def get_un_locode(self): + return self._get_attribute(ConfigAttributes.UN_LOCODE) + + def get_data_directory(self) -> str: + return self.host.get_data_directory(self.name) + + def delete_blobovnicza(self): + self.host.delete_blobovnicza(self.name) + + def delete_fstree(self): + self.host.delete_fstree(self.name) + + def delete_file(self, file_path: str) -> None: + self.host.delete_file(file_path) + + def is_file_exist(self, file_path) -> bool: + return self.host.is_file_exist(file_path) + + def delete_metabase(self): + self.host.delete_metabase(self.name) + + def delete_write_cache(self): + self.host.delete_write_cache(self.name) + + @property + def label(self) -> str: + return f"{self.name}: {self.get_rpc_endpoint()}" diff --git a/src/frostfs_testlib/storage/dataclasses/metrics.py b/src/frostfs_testlib/storage/dataclasses/metrics.py new file mode 100644 index 00000000..89690151 --- /dev/null +++ b/src/frostfs_testlib/storage/dataclasses/metrics.py @@ -0,0 +1,80 @@ +import time +from functools import wraps +from typing import Callable + +import pytest + +from frostfs_testlib.hosting import Host +from frostfs_testlib.shell.interfaces import CommandResult + + +class Metrics: + def __init__(self, host: Host, metrics_endpoint: str) -> None: + self.storage = StorageMetrics(host, metrics_endpoint) + + +class StorageMetrics: + """ + Class represents storage metrics in a cluster + """ + + def __init__(self, host: Host, metrics_endpoint: str) -> None: + self.host = host + self.metrics_endpoint = metrics_endpoint + + def get_metrics_search_by_greps(self, **greps) -> CommandResult: + """ + Get a metrics, search by: cid, metric_type, shard_id etc. + Args: + greps: dict of grep-command-name and value + for example get_metrics_search_by_greps(command='container_objects_total', cid='123456') + Return: + result of metrics + """ + shell = self.host.get_shell() + additional_greps = " |grep ".join([grep_command for grep_command in greps.values()]) + result = shell.exec(f"curl -s {self.metrics_endpoint} | grep {additional_greps}") + return result + + def get_all_metrics(self) -> CommandResult: + shell = self.host.get_shell() + result = shell.exec(f"curl -s {self.metrics_endpoint}") + return result + + +def wait_until_metric_result_is_stable( + relative_deviation: float = None, absolute_deviation: int = None, max_attempts: int = 10, sleep_interval: int = 30 +): + """ + A decorator function that repeatedly calls the decorated function until its result stabilizes + within a specified relative tolerance or until the maximum number of attempts is reached. 
+ + This decorator is useful for scenarios where a function returns a metric or value that may fluctuate + over time, and you want to ensure that the result has stabilized before proceeding. + """ + + def decorator(func: Callable): + @wraps(func) + def wrapper(*args, **kwargs): + last_result = None + for _ in range(max_attempts): + # first function call + first_result = func(*args, **kwargs) + + # waiting before the second call + time.sleep(sleep_interval) + + # second function call + last_result = func(*args, **kwargs) + + # checking value stability + if first_result == pytest.approx(last_result, rel=relative_deviation, abs=absolute_deviation): + return last_result + + # if stability is not achieved, return the last value + if last_result is not None: + return last_result + + return wrapper + + return decorator diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py new file mode 100644 index 00000000..5c8b7233 --- /dev/null +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -0,0 +1,224 @@ +from abc import abstractmethod +from dataclasses import dataclass +from datetime import datetime, timezone +from typing import Optional, TypedDict, TypeVar + +import yaml +from dateutil import parser + +from frostfs_testlib import reporter +from frostfs_testlib.hosting.config import ServiceConfig +from frostfs_testlib.hosting.interfaces import Host +from frostfs_testlib.shell.interfaces import CommandResult +from frostfs_testlib.storage.configuration.service_configuration import ServiceConfiguration, ServiceConfigurationYml +from frostfs_testlib.storage.constants import ConfigAttributes +from frostfs_testlib.testing.readable import HumanReadableABC +from frostfs_testlib.utils import wallet_utils + + +@dataclass +class NodeBase(HumanReadableABC): + """ + Represents a node of some underlying service + """ + + id: str + name: str + host: Host + _process_name: str + + def __init__(self, id, name, host) -> None: + self.id = id + self.name = name + self.host = host + self.construct() + + def construct(self): + pass + + def __eq__(self, other): + return self.name == other.name + + def __hash__(self): + return id(self.name) + + def __str__(self): + return self.label + + def __repr__(self) -> str: + return self.label + + @property + def label(self) -> str: + return self.name + + def get_service_systemctl_name(self) -> str: + return self._get_attribute(ConfigAttributes.SERVICE_NAME) + + def get_process_name(self) -> str: + return self._process_name + + def start_service(self): + with reporter.step(f"Unmask {self.name} service on {self.host.config.address}"): + self.host.unmask_service(self.name) + + with reporter.step(f"Start {self.name} service on {self.host.config.address}"): + self.host.start_service(self.name) + + def send_signal_to_service(self, signal: str): + with reporter.step(f"Send -{signal} signal to {self.name} service on {self.host.config.address}"): + self.host.send_signal_to_service(self.name, signal) + + @abstractmethod + def service_healthcheck(self) -> bool: + """Service healthcheck.""" + + # TODO: Migrate to sub-class Metrcis (not yet exists :)) + def get_metric(self, metric: str) -> CommandResult: + shell = self.host.get_shell() + result = shell.exec(f"curl -s {self.get_metrics_endpoint()} | grep -e '^{metric}'") + return result + + def get_metrics_endpoint(self) -> str: + return self._get_attribute(ConfigAttributes.ENDPOINT_PROMETHEUS) + + def get_pprof_endpoint(self) -> str: + return 
self._get_attribute(ConfigAttributes.ENDPOINT_PPROF) + + def stop_service(self, mask: bool = True): + if mask: + with reporter.step(f"Mask {self.name} service on {self.host.config.address}"): + self.host.mask_service(self.name) + + with reporter.step(f"Stop {self.name} service on {self.host.config.address}"): + self.host.stop_service(self.name) + + def restart_service(self): + with reporter.step(f"Restart {self.name} service on {self.host.config.address}"): + self.host.restart_service(self.name) + + def get_wallet_password(self) -> str: + return self._get_attribute(ConfigAttributes.WALLET_PASSWORD) + + def get_wallet_path(self) -> str: + return self._get_attribute( + ConfigAttributes.LOCAL_WALLET_PATH, + ConfigAttributes.WALLET_PATH, + ) + + def get_remote_wallet_path(self) -> str: + """ + Returns node wallet file path located on remote host + """ + return self._get_attribute( + ConfigAttributes.WALLET_PATH, + ) + + def get_remote_config_path(self) -> str: + """ + Returns node config file path located on remote host + """ + return self._get_attribute( + ConfigAttributes.CONFIG_PATH, + ) + + def get_remote_wallet_config_path(self) -> str: + """ + Returns node config file path located on remote host + """ + return self._get_attribute( + ConfigAttributes.REMOTE_WALLET_CONFIG, + ) + + def get_wallet_config_path(self) -> str: + return self._get_attribute( + ConfigAttributes.LOCAL_WALLET_CONFIG, + ConfigAttributes.WALLET_CONFIG, + ) + + def get_logger_config_path(self) -> str: + """ + Returns config path for logger located on remote host + """ + config_attributes = self.host.get_service_config(self.name) + return ( + self._get_attribute(ConfigAttributes.LOGGER_CONFIG_PATH) + if ConfigAttributes.LOGGER_CONFIG_PATH in config_attributes.attributes + else None + ) + + def get_working_dir_path(self) -> Optional[str]: + """ + Returns working directory path located on remote host + """ + config_attributes = self.host.get_service_config(self.name) + return self._get_attribute(ConfigAttributes.WORKING_DIR) if ConfigAttributes.WORKING_DIR in config_attributes.attributes else None + + @property + def config_dir(self) -> str: + return self._get_attribute(ConfigAttributes.CONFIG_DIR) + + @property + def main_config_path(self) -> str: + return self._get_attribute(ConfigAttributes.CONFIG_PATH) + + @property + def config(self) -> ServiceConfigurationYml: + return ServiceConfiguration(self.name, self.host.get_shell(), self.config_dir, self.main_config_path) + + # TODO: Deprecated. Use config with ServiceConfigurationYml interface + def get_config(self, config_file_path: Optional[str] = None) -> tuple[str, dict]: + if config_file_path is None: + config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH) + + shell = self.host.get_shell() + + result = shell.exec(f"cat {config_file_path}") + config_text = result.stdout + + config = yaml.safe_load(config_text) + return config_file_path, config + + # TODO: Deprecated. 
Use config with ServiceConfigurationYml interface + def save_config(self, new_config: dict, config_file_path: Optional[str] = None) -> None: + if config_file_path is None: + config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH) + + shell = self.host.get_shell() + + config_str = yaml.dump(new_config) + shell.exec(f"echo '{config_str}' | sudo tee {config_file_path}") + + def get_wallet_public_key(self): + storage_wallet_path = self.get_wallet_path() + storage_wallet_pass = self.get_wallet_password() + return wallet_utils.get_wallet_public_key(storage_wallet_path, storage_wallet_pass) + + def _get_attribute(self, attribute_name: str, default_attribute_name: Optional[str] = None) -> str: + config = self.host.get_service_config(self.name) + + if attribute_name not in config.attributes: + if default_attribute_name is None: + raise RuntimeError(f"Service {self.name} has no {attribute_name} in config and fallback attribute isn't set either") + + return config.attributes[default_attribute_name] + + return config.attributes[attribute_name] + + def _get_service_config(self) -> ServiceConfig: + return self.host.get_service_config(self.name) + + def get_service_uptime(self, service: str) -> datetime: + result = self.host.get_shell().exec(f"systemctl show {service} --property ActiveEnterTimestamp | cut -d '=' -f 2") + start_time = parser.parse(result.stdout.strip()) + current_time = datetime.now(tz=timezone.utc) + active_time = current_time - start_time + return active_time + + +ServiceClass = TypeVar("ServiceClass", bound=NodeBase) + + +class NodeClassDict(TypedDict): + hosting_service_name: str + cls: type[NodeBase] diff --git a/src/frostfs_testlib/storage/dataclasses/object_size.py b/src/frostfs_testlib/storage/dataclasses/object_size.py new file mode 100644 index 00000000..0429c789 --- /dev/null +++ b/src/frostfs_testlib/storage/dataclasses/object_size.py @@ -0,0 +1,13 @@ +from dataclasses import dataclass + + +@dataclass +class ObjectSize: + name: str + value: int + + def __str__(self) -> str: + return self.name + + def __repr__(self) -> str: + return self.__str__() diff --git a/src/frostfs_testlib/storage/dataclasses/policy.py b/src/frostfs_testlib/storage/dataclasses/policy.py new file mode 100644 index 00000000..872ee05e --- /dev/null +++ b/src/frostfs_testlib/storage/dataclasses/policy.py @@ -0,0 +1,13 @@ +from dataclasses import dataclass + + +@dataclass +class PlacementPolicy: + name: str + value: str + + def __str__(self) -> str: + return self.name + + def __repr__(self) -> str: + return self.__str__() diff --git a/src/frostfs_testlib/storage/dataclasses/shard.py b/src/frostfs_testlib/storage/dataclasses/shard.py new file mode 100644 index 00000000..bebdbf5f --- /dev/null +++ b/src/frostfs_testlib/storage/dataclasses/shard.py @@ -0,0 +1,92 @@ +from dataclasses import dataclass + +from configobj import ConfigObj + +SHARD_PREFIX = "FROSTFS_STORAGE_SHARD_" +BLOBSTOR_PREFIX = "_BLOBSTOR_" + + +@dataclass +class Blobstor: + path: str + path_type: str + + def __eq__(self, other) -> bool: + if not isinstance(other, self.__class__): + raise RuntimeError(f"Only two {self.__class__.__name__} instances can be compared") + return self.path == other.path and self.path_type == other.path_type + + def __hash__(self): + return hash((self.path, self.path_type)) + + @staticmethod + def from_config_object(section: ConfigObj, shard_id: str, blobstor_id: str): + var_prefix = f"{SHARD_PREFIX}{shard_id}{BLOBSTOR_PREFIX}{blobstor_id}" + return Blobstor(section.get(f"{var_prefix}_PATH"), 
section.get(f"{var_prefix}_TYPE")) + + +@dataclass +class Shard: + blobstor: list[Blobstor] + metabase: str + writecache: str + pilorama: str + + def __eq__(self, other) -> bool: + if not isinstance(other, self.__class__): + raise RuntimeError(f"Only two {self.__class__.__name__} instances can be compared") + return ( + set(self.blobstor) == set(other.blobstor) + and self.metabase == other.metabase + and self.writecache == other.writecache + and self.pilorama == other.pilorama + ) + + def __hash__(self): + return hash((self.metabase, self.writecache)) + + @staticmethod + def _get_blobstor_count_from_section(config_object: ConfigObj, shard_id: int): + pattern = f"{SHARD_PREFIX}{shard_id}{BLOBSTOR_PREFIX}" + blobstors = {key[: len(pattern) + 2] for key in config_object.keys() if pattern in key} + return len(blobstors) + + @staticmethod + def from_config_object(config_object: ConfigObj, shard_id: int): + var_prefix = f"{SHARD_PREFIX}{shard_id}" + + blobstor_count = Shard._get_blobstor_count_from_section(config_object, shard_id) + blobstors = [Blobstor.from_config_object(config_object, shard_id, blobstor_id) for blobstor_id in range(blobstor_count)] + + write_cache_enabled = config_object.as_bool(f"{var_prefix}_WRITECACHE_ENABLED") + + return Shard( + blobstors, + config_object.get(f"{var_prefix}_METABASE_PATH"), + config_object.get(f"{var_prefix}_WRITECACHE_PATH") if write_cache_enabled else "", + ) + + @staticmethod + def from_object(shard): + metabase = shard["metabase"]["path"] if "path" in shard["metabase"] else shard["metabase"] + writecache_enabled = True + if "enabled" in shard["writecache"]: + writecache_enabled = shard["writecache"]["enabled"] + + writecache = shard["writecache"]["path"] if "path" in shard["writecache"] else shard["writecache"] + if not writecache_enabled: + writecache = "" + + # Currently due to issue we need to check if pilorama exists in keys + # TODO: make pilorama mandatory after fix + if shard.get("pilorama"): + pilorama = shard["pilorama"]["path"] if "path" in shard["pilorama"] else shard["pilorama"] + else: + pilorama = None + + return Shard( + blobstor=[Blobstor(path=blobstor["path"], path_type=blobstor["type"]) for blobstor in shard["blobstor"]], + metabase=metabase, + writecache=writecache, + pilorama=pilorama, + ) diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py new file mode 100644 index 00000000..4c303fcd --- /dev/null +++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py @@ -0,0 +1,127 @@ +import re +from dataclasses import dataclass +from typing import Optional + +from pydantic import BaseModel, Field, field_validator + +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo +from frostfs_testlib.testing.readable import HumanReadableEnum + + +@dataclass +class ObjectRef: + cid: str + oid: str + + +@dataclass +class LockObjectInfo(ObjectRef): + lifetime: Optional[int] = None + expire_at: Optional[int] = None + + +@dataclass +class StorageObjectInfo(ObjectRef): + size: Optional[int] = None + wallet: Optional[WalletInfo] = None + file_path: Optional[str] = None + file_hash: Optional[str] = None + attributes: Optional[list[dict[str, str]]] = None + tombstone: Optional[str] = None + locks: Optional[list[LockObjectInfo]] = None + + +class NodeStatus(HumanReadableEnum): + MAINTENANCE: str = "maintenance" + ONLINE: str = "online" + OFFLINE: str = "offline" + + +@dataclass +class NodeNetmapInfo: + node_id: str = None + node_status: NodeStatus = 
None + node_data_ips: list[str] = None + cluster_name: str = None + continent: str = None + country: str = None + country_code: str = None + external_address: list[str] = None + location: str = None + node: str = None + price: int = None + sub_div: str = None + sub_div_code: int = None + un_locode: str = None + role: str = None + + +class Interfaces(HumanReadableEnum): + DATA_O: str = "data0" + DATA_1: str = "data1" + MGMT: str = "mgmt" + INTERNAL_0: str = "internal0" + INTERNAL_1: str = "internal1" + + +@dataclass +class NodeNetInfo: + epoch: str = None + network_magic: str = None + time_per_block: str = None + container_fee: str = None + epoch_duration: str = None + inner_ring_candidate_fee: str = None + maximum_object_size: str = None + maximum_count_of_data_shards: str = None + maximum_count_of_parity_shards: str = None + withdrawal_fee: str = None + homomorphic_hashing_disabled: str = None + maintenance_mode_allowed: str = None + + +class Attributes(BaseModel): + cluster_name: str = Field(alias="ClusterName") + continent: str = Field(alias="Continent") + country: str = Field(alias="Country") + country_code: str = Field(alias="CountryCode") + external_addr: list[str] = Field(alias="ExternalAddr") + location: str = Field(alias="Location") + node: str = Field(alias="Node") + subdiv: str = Field(alias="SubDiv") + subdiv_code: str = Field(alias="SubDivCode") + un_locode: str = Field(alias="UN-LOCODE") + role: str = Field(alias="role") + + @field_validator("external_addr", mode="before") + @classmethod + def convert_external_addr(cls, value: str) -> list[str]: + return [f"{ip}:{port}" for ip, port in re.findall(r"/ip4/([\d\.]+)/(?:tcp|tls)/(\d+)", value)] + + +class NodeInfo(BaseModel): + public_key: str = Field(alias="publicKey") + addresses: list[str] = Field(alias="addresses") + state: str = Field(alias="state") + attributes: Attributes = Field(alias="attributes") + + @field_validator("addresses", mode="before") + @classmethod + def convert_external_addr(cls, value: str) -> list[str]: + return [f"{ip}:{port}" for ip, port in re.findall(r"/ip4/([\d\.]+)/(?:tcp|tls)/(\d+)", ",".join(value))] + + +@dataclass +class Chunk: + def __init__(self, object_id: str, required_nodes: list, confirmed_nodes: list, ec_parent_object_id: str, ec_index: int) -> None: + self.object_id = object_id + self.required_nodes = required_nodes + self.confirmed_nodes = confirmed_nodes + self.ec_parent_object_id = ec_parent_object_id + self.ec_index = ec_index + + def __str__(self) -> str: + return self.object_id + + def __repr__(self) -> str: + return self.object_id diff --git a/src/frostfs_testlib/storage/dataclasses/wallet.py b/src/frostfs_testlib/storage/dataclasses/wallet.py new file mode 100644 index 00000000..d053d294 --- /dev/null +++ b/src/frostfs_testlib/storage/dataclasses/wallet.py @@ -0,0 +1,91 @@ +import json +import logging +import os +from dataclasses import dataclass +from typing import Optional + +import yaml + +from frostfs_testlib import reporter +from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG, DEFAULT_WALLET_PASS +from frostfs_testlib.shell import Shell +from frostfs_testlib.storage.cluster import NodeBase +from frostfs_testlib.utils.wallet_utils import get_last_address_from_wallet, init_wallet + +logger = logging.getLogger("frostfs.testlib.utils") + + +@dataclass +class WalletInfo: + path: str + password: str = DEFAULT_WALLET_PASS + config_path: str = DEFAULT_WALLET_CONFIG + + @staticmethod + def from_node(node: NodeBase): + wallet_path = node.get_wallet_path() + 
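To make the netmap models defined just above more concrete, here is a small parsing sketch; the payload is a hypothetical, trimmed example of the node-info JSON shape these pydantic models expect, not real cluster data:

from frostfs_testlib.storage.dataclasses.storage_object_info import NodeInfo

# hypothetical payload; field names follow the aliases declared on Attributes/NodeInfo
sample = {
    "publicKey": "02aabbccddeeff",
    "addresses": ["/ip4/10.78.70.11/tcp/8080", "/ip4/10.78.70.11/tls/8082"],
    "state": "ONLINE",
    "attributes": {
        "ClusterName": "cluster-1",
        "Continent": "Europe",
        "Country": "Finland",
        "CountryCode": "FI",
        "ExternalAddr": "/ip4/185.70.1.1/tcp/8080",
        "Location": "Helsinki",
        "Node": "10.78.70.11",
        "SubDiv": "Uusimaa",
        "SubDivCode": "18",
        "UN-LOCODE": "FI HEL",
        "role": "alphabet",
    },
}

node_info = NodeInfo(**sample)
print(node_info.addresses)                 # ['10.78.70.11:8080', '10.78.70.11:8082']
print(node_info.attributes.external_addr)  # ['185.70.1.1:8080']

The field validators convert each "/ip4/<ip>/tcp|tls/<port>" multiaddr into a plain "ip:port" string.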
wallet_password = node.get_wallet_password() + wallet_config_file = os.path.join(ASSETS_DIR, os.path.basename(node.get_wallet_config_path())) + with open(wallet_config_file, "w") as file: + file.write(yaml.dump({"wallet": wallet_path, "password": wallet_password})) + + return WalletInfo(wallet_path, wallet_password, wallet_config_file) + + def get_address(self) -> str: + """ + Extracts the last address from wallet via neo3 lib. + + Returns: + The address of the wallet. + """ + return get_last_address_from_wallet(self.path, self.password) + + def get_address_from_json(self, account_id: int = 0) -> str: + """ + Extracts address of the given account id from wallet using json lookup. + (Useful if neo3 fails for some reason and can't be used). + + Args: + account_id: id of the account to get address. + + Returns: + address string. + """ + with open(self.path, "r") as wallet: + wallet_json = json.load(wallet) + assert abs(account_id) + 1 <= len(wallet_json["accounts"]), f"There is no index '{account_id}' in wallet: {wallet_json}" + + return wallet_json["accounts"][account_id]["address"] + + +class WalletFactory: + def __init__(self, wallets_dir: str, shell: Shell) -> None: + self.shell = shell + self.wallets_dir = wallets_dir + + def create_wallet(self, file_name: str, password: Optional[str] = None) -> WalletInfo: + """ + Creates new default wallet. + + Args: + file_name: output wallet file name. + password: wallet password. + + Returns: + WalletInfo object of new wallet. + """ + + if password is None: + password = "" + + base_path = os.path.join(self.wallets_dir, file_name) + wallet_path = f"{base_path}.json" + wallet_config_path = f"{base_path}.yaml" + init_wallet(wallet_path, password) + + with open(wallet_config_path, "w") as config_file: + config_file.write(f'wallet: {wallet_path}\npassword: "{password}"') + + reporter.attach(wallet_path, os.path.basename(wallet_path)) + + return WalletInfo(wallet_path, password, wallet_config_path) diff --git a/src/frostfs_testlib/storage/grpc_operations/__init__.py b/src/frostfs_testlib/storage/grpc_operations/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py b/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py new file mode 100644 index 00000000..d9f94b28 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py @@ -0,0 +1,15 @@ +from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli +from frostfs_testlib.storage.grpc_operations import implementations, interfaces, interfaces_wrapper + + +class CliClientWrapper(interfaces_wrapper.GrpcClientWrapper): + def __init__(self, cli: FrostfsCli) -> None: + self.cli = cli + self.object: interfaces.ObjectInterface = implementations.ObjectOperations(self.cli) + self.container: interfaces.ContainerInterface = implementations.ContainerOperations(self.cli) + self.netmap: interfaces.NetmapInterface = implementations.NetmapOperations(self.cli) + self.ape_manager: interfaces.ApeManagerInterface = implementations.ApeManagerOperations(self.cli) + + +class RpcClientWrapper(interfaces_wrapper.GrpcClientWrapper): + pass # The next series diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py b/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py new file mode 100644 index 00000000..df820fa9 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py @@ -0,0 +1,5 @@ +from .ape_manager import ApeManagerOperations +from .chunks 
import ChunksOperations +from .container import ContainerOperations +from .netmap import NetmapOperations +from .object import ObjectOperations diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/ape_manager.py b/src/frostfs_testlib/storage/grpc_operations/implementations/ape_manager.py new file mode 100644 index 00000000..070d8a6c --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/ape_manager.py @@ -0,0 +1,79 @@ +from typing import Optional + +from frostfs_testlib import reporter +from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT + + +class ApeManagerOperations: + def __init__(self, cli: FrostfsCli): + self.cli = cli + + @reporter.step("Add ape rule") + def add( + self, + rpc_endpoint: str, + chain_id: Optional[str] = None, + chain_id_hex: Optional[str] = None, + path: Optional[str] = None, + rule: Optional[str] | Optional[list[str]] = None, + target_name: Optional[str] = None, + target_type: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ): + return self.cli.ape_manager.add( + rpc_endpoint=rpc_endpoint, + chain_id=chain_id, + chain_id_hex=chain_id_hex, + path=path, + rule=rule, + target_name=target_name, + target_type=target_type, + wallet=wallet, + address=address, + timeout=timeout, + ) + + @reporter.step("Get list APE rules") + def list( + self, + rpc_endpoint: str, + target_name: Optional[str] = None, + target_type: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ): + return self.cli.ape_manager.list( + rpc_endpoint=rpc_endpoint, + target_name=target_name, + target_type=target_type, + wallet=wallet, + address=address, + timeout=timeout, + ) + + @reporter.step("Remove APE rule") + def remove( + self, + rpc_endpoint: str, + chain_id: Optional[str] = None, + chain_id_hex: Optional[str] = None, + target_name: Optional[str] = None, + target_type: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ): + return self.cli.ape_manager.remove( + rpc_endpoint=rpc_endpoint, + chain_id=chain_id, + chain_id_hex=chain_id_hex, + target_name=target_name, + target_type=target_type, + wallet=wallet, + address=address, + timeout=timeout, + ) diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py new file mode 100644 index 00000000..0d787e24 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py @@ -0,0 +1,165 @@ +import json +from typing import Optional + +from frostfs_testlib import reporter +from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT +from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.controllers.shards_watcher import ShardsWatcher +from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, Interfaces, NodeNetmapInfo +from frostfs_testlib.storage.grpc_operations import interfaces +from frostfs_testlib.testing.test_control import wait_for_success +from frostfs_testlib.utils.cli_utils import parse_netmap_output + + +class ChunksOperations(interfaces.ChunksInterface): + def __init__(self, cli: FrostfsCli) -> None: + self.cli = cli + + @reporter.step("Search 
node without chunks") + def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]: + if not endpoint: + endpoint = cluster.default_rpc_endpoint + netmap = parse_netmap_output(self.cli.netmap.snapshot(endpoint, timeout=CLI_DEFAULT_TIMEOUT).stdout) + chunks_node_key = [] + for chunk in chunks: + chunks_node_key.extend(chunk.confirmed_nodes) + for node_info in netmap.copy(): + if node_info.node_id in chunks_node_key and node_info in netmap: + netmap.remove(node_info) + result = [] + for node_info in netmap: + for cluster_node in cluster.cluster_nodes: + if node_info.node == cluster_node.get_interface(Interfaces.MGMT): + result.append(cluster_node) + return result + + @reporter.step("Search node with chunk {chunk}") + def get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]: + netmap = parse_netmap_output(self.cli.netmap.snapshot(cluster.default_rpc_endpoint, timeout=CLI_DEFAULT_TIMEOUT).stdout) + for node_info in netmap: + if node_info.node_id in chunk.confirmed_nodes: + for cluster_node in cluster.cluster_nodes: + if cluster_node.get_interface(Interfaces.MGMT) == node_info.node: + return (cluster_node, node_info) + + @wait_for_success(300, 5, fail_testcase=None) + @reporter.step("Search shard with chunk {chunk}") + def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str: + oid_path = f"{chunk.object_id[0]}/{chunk.object_id[1]}/{chunk.object_id[2]}/{chunk.object_id[3]}" + node_shell = node.storage_node.host.get_shell() + shards_watcher = ShardsWatcher(node) + + with reporter.step("Search object file"): + for shard_id, shard_info in shards_watcher.shards_snapshots[-1].items(): + check_dir = node_shell.exec(f" [ -d {shard_info['blobstor'][1]['path']}/{oid_path} ] && echo 1 || echo 0").stdout + if "1" in check_dir.strip(): + return shard_id + + @reporter.step("Get all chunks") + def get_all( + self, + rpc_endpoint: str, + cid: str, + oid: str, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + trace: bool = True, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> list[Chunk]: + object_nodes = self.cli.object.nodes( + rpc_endpoint=rpc_endpoint, + cid=cid, + address=address, + bearer=bearer, + generate_key=generate_key, + oid=oid, + trace=trace, + root=root, + verify_presence_all=verify_presence_all, + json=json, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + ) + return self._parse_object_nodes(object_nodes.stdout.split("\n")[0]) + + @reporter.step("Get last parity chunk") + def get_parity( + self, + rpc_endpoint: str, + cid: str, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + oid: Optional[str] = None, + trace: bool = True, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> Chunk: + object_nodes = self.cli.object.nodes( + rpc_endpoint=rpc_endpoint, + cid=cid, + address=address, + bearer=bearer, + generate_key=generate_key, + oid=oid, + trace=trace, + root=root, + verify_presence_all=verify_presence_all, + json=json, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + ) + return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[-1] + + @reporter.step("Get first data chunk") + def get_first_data( + self, + rpc_endpoint: str, 
+ cid: str, + oid: Optional[str] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + trace: bool = True, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> Chunk: + object_nodes = self.cli.object.nodes( + rpc_endpoint=rpc_endpoint, + cid=cid, + address=address, + bearer=bearer, + generate_key=generate_key, + oid=oid, + trace=trace, + root=root, + verify_presence_all=verify_presence_all, + json=json, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + ) + return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[0] + + def _parse_object_nodes(self, object_nodes: str) -> list[Chunk]: + parse_result = json.loads(object_nodes) + if parse_result.get("errors"): + raise RuntimeError(", ".join(parse_result["errors"])) + return [Chunk(**chunk) for chunk in parse_result["data_objects"]] diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py new file mode 100644 index 00000000..afdf6cb2 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py @@ -0,0 +1,338 @@ +import json +import logging +import re +from time import sleep +from typing import List, Optional, Union + +from frostfs_testlib import reporter +from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli +from frostfs_testlib.clients.s3 import BucketContainerResolver +from frostfs_testlib.plugins import load_plugin +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT +from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.grpc_operations import interfaces +from frostfs_testlib.utils import json_utils + +logger = logging.getLogger("NeoLogger") + + +class ContainerOperations(interfaces.ContainerInterface): + def __init__(self, cli: FrostfsCli) -> None: + self.cli = cli + + @reporter.step("Create Container") + def create( + self, + endpoint: str, + nns_zone: Optional[str] = None, + nns_name: Optional[str] = None, + address: Optional[str] = None, + attributes: Optional[dict] = None, + basic_acl: Optional[str] = None, + await_mode: bool = False, + disable_timestamp: bool = False, + force: bool = False, + trace: bool = False, + name: Optional[str] = None, + nonce: Optional[str] = None, + policy: Optional[str] = None, + session: Optional[str] = None, + subnet: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + """ + A wrapper for `frostfs-cli container create` call. 
+ + Args: + wallet (WalletInfo): a wallet on whose behalf a container is created + rule (optional, str): placement rule for container + basic_acl (optional, str): an ACL for container, will be + appended to `--basic-acl` key + attributes (optional, dict): container attributes , will be + appended to `--attributes` key + session_token (optional, str): a path to session token file + session_wallet(optional, str): a path to the wallet which signed + the session token; this parameter makes sense + when paired with `session_token` + shell: executor for cli command + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + options (optional, dict): any other options to pass to the call + name (optional, str): container name attribute + await_mode (bool): block execution until container is persisted + wait_for_creation (): Wait for container shows in container list + timeout: Timeout for the operation. + + Returns: + (str): CID of the created container + """ + result = self.cli.container.create( + rpc_endpoint=endpoint, + policy=policy, + nns_zone=nns_zone, + nns_name=nns_name, + address=address, + attributes=attributes, + basic_acl=basic_acl, + await_mode=await_mode, + disable_timestamp=disable_timestamp, + force=force, + trace=trace, + name=name, + nonce=nonce, + session=session, + subnet=subnet, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + ) + + cid = self._parse_cid(result.stdout) + + logger.info("Container created; waiting until it is persisted in the sidechain") + + return cid + + @reporter.step("List Containers") + def list( + self, + endpoint: str, + name: Optional[str] = None, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + owner: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + **params, + ) -> List[str]: + """ + A wrapper for `frostfs-cli container list` call. It returns all the + available containers for the given wallet. + Args: + shell: executor for cli command + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + timeout: Timeout for the operation. + Returns: + (list): list of containers + """ + result = self.cli.container.list( + rpc_endpoint=endpoint, + name=name, + address=address, + generate_key=generate_key, + owner=owner, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + **params, + ) + return result.stdout.split() + + @reporter.step("List Objects in container") + def list_objects( + self, + endpoint: str, + cid: str, + bearer: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + trace: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> List[str]: + """ + A wrapper for `frostfs-cli container list-objects` call. It returns all the + available objects in container. + Args: + container_id: cid of container + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + timeout: Timeout for the operation. 
+ Returns: + (list): list of containers + """ + result = self.cli.container.list_objects( + rpc_endpoint=endpoint, + cid=cid, + bearer=bearer, + wallet=wallet, + address=address, + generate_key=generate_key, + trace=trace, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + ) + logger.info(f"Container objects: \n{result}") + return result.stdout.split() + + @reporter.step("Delete container") + def delete( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + await_mode: bool = False, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + force: bool = False, + trace: bool = False, + ): + return self.cli.container.delete( + rpc_endpoint=endpoint, + cid=cid, + address=address, + await_mode=await_mode, + session=session, + ttl=ttl, + xhdr=xhdr, + force=force, + trace=trace, + ).stdout + + @reporter.step("Get container") + def get( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + await_mode: bool = False, + to: Optional[str] = None, + json_mode: bool = True, + trace: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> Union[dict, str]: + result = self.cli.container.get( + rpc_endpoint=endpoint, + cid=cid, + address=address, + generate_key=generate_key, + await_mode=await_mode, + to=to, + json_mode=json_mode, + trace=trace, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + ) + container_info = json.loads(result.stdout) + attributes = dict() + for attr in container_info["attributes"]: + attributes[attr["key"]] = attr["value"] + container_info["attributes"] = attributes + container_info["ownerID"] = json_utils.json_reencode(container_info["ownerID"]["value"]) + return container_info + + @reporter.step("Get eacl container") + def get_eacl( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + await_mode: bool = False, + json_mode: bool = True, + trace: bool = False, + to: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ): + return self.cli.container.get_eacl( + rpc_endpoint=endpoint, + cid=cid, + address=address, + generate_key=generate_key, + await_mode=await_mode, + to=to, + session=session, + ttl=ttl, + xhdr=xhdr, + timeout=CLI_DEFAULT_TIMEOUT, + ).stdout + + @reporter.step("Get nodes container") + def nodes( + self, + endpoint: str, + cid: str, + cluster: Cluster, + address: Optional[str] = None, + ttl: Optional[int] = None, + from_file: Optional[str] = None, + trace: bool = False, + short: Optional[bool] = True, + xhdr: Optional[dict] = None, + generate_key: Optional[bool] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> List[ClusterNode]: + result = self.cli.container.search_node( + rpc_endpoint=endpoint, + cid=cid, + address=address, + ttl=ttl, + from_file=from_file, + trace=trace, + short=short, + xhdr=xhdr, + generate_key=generate_key, + timeout=timeout, + ).stdout + + pattern = r"[0-9]+(?:\.[0-9]+){3}" + nodes_ip = list(set(re.findall(pattern, result))) + + with reporter.step(f"nodes ips = {nodes_ip}"): + nodes_list = cluster.get_nodes_by_ip(nodes_ip) + + with reporter.step(f"Return nodes - {nodes_list}"): + return nodes_list + + @reporter.step("Resolve container by name") + def resolve_container_by_name(name: str, node: ClusterNode): + resolver_cls = load_plugin("frostfs.testlib.bucket_cid_resolver", 
node.host.config.product) + resolver: BucketContainerResolver = resolver_cls() + return resolver.resolve(node, name) + + @reporter.step("Wait create container, with list") + def wait_creation(self, cid: str, endpoint: str, attempts: int = 15, sleep_interval: int = 1): + for _ in range(attempts): + containers = self.list(endpoint) + if cid in containers: + return + logger.info(f"There is no {cid} in {containers} yet; sleep {sleep_interval} and continue") + sleep(sleep_interval) + raise RuntimeError(f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting") + + def _parse_cid(self, output: str) -> str: + """ + Parses container ID from a given CLI output. The input string we expect: + container ID: 2tz86kVTDpJxWHrhw3h6PbKMwkLtBEwoqhHQCKTre1FN + awaiting... + container has been persisted on sidechain + We want to take 'container ID' value from the string. + + Args: + output (str): CLI output to parse + + Returns: + (str): extracted CID + """ + try: + # taking first line from command's output + first_line = output.split("\n")[0] + except Exception: + first_line = "" + logger.error(f"Got empty output: {output}") + splitted = first_line.split(": ") + if len(splitted) != 2: + raise ValueError(f"no CID was parsed from command output: \t{first_line}") + return splitted[1] diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py b/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py new file mode 100644 index 00000000..76ee69a3 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py @@ -0,0 +1,171 @@ +import json as module_json +from typing import List, Optional + +from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli +from frostfs_testlib.cli.netmap_parser import NetmapParser +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT +from frostfs_testlib.storage.dataclasses.storage_object_info import NodeInfo, NodeNetInfo, NodeNetmapInfo + +from .. import interfaces + + +class NetmapOperations(interfaces.NetmapInterface): + def __init__(self, cli: FrostfsCli) -> None: + self.cli = cli + + def epoch( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + trace: Optional[bool] = True, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> int: + """ + Get current epoch number. + """ + output = ( + self.cli.netmap.epoch( + rpc_endpoint=rpc_endpoint, + wallet=wallet, + address=address, + generate_key=generate_key, + ttl=ttl, + trace=trace, + xhdr=xhdr, + timeout=timeout, + ) + .stdout.split("Trace ID")[0] + .strip() + ) + + return int(output) + + def netinfo( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + trace: Optional[bool] = True, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> NodeNetInfo: + """ + Get target node info. 
+ """ + output = ( + self.cli.netmap.netinfo( + rpc_endpoint=rpc_endpoint, + wallet=wallet, + address=address, + generate_key=generate_key, + ttl=ttl, + trace=trace, + xhdr=xhdr, + timeout=timeout, + ) + .stdout.split("Trace ID")[0] + .strip() + ) + + return NetmapParser.netinfo(output) + + def nodeinfo( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + json: bool = True, + ttl: Optional[int] = None, + trace: Optional[bool] = True, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> NodeInfo: + """ + Get target node info. + """ + output = ( + self.cli.netmap.nodeinfo( + rpc_endpoint=rpc_endpoint, + wallet=wallet, + address=address, + generate_key=generate_key, + json=json, + ttl=ttl, + trace=trace, + xhdr=xhdr, + timeout=timeout, + ) + .stdout.split("Trace ID")[0] + .strip() + ) + + return NetmapParser.node_info(module_json.loads(output)) + + def snapshot( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + trace: Optional[bool] = True, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> List[NodeNetmapInfo]: + """ + Get target node info. + """ + output = ( + self.cli.netmap.snapshot( + rpc_endpoint=rpc_endpoint, + wallet=wallet, + address=address, + generate_key=generate_key, + ttl=ttl, + trace=trace, + xhdr=xhdr, + timeout=timeout, + ) + .stdout.split("Trace ID")[0] + .strip() + ) + + return NetmapParser.snapshot_all_nodes(output) + + def snapshot_one_node( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + trace: Optional[bool] = True, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> List[NodeNetmapInfo]: + """ + Get target one node info. 
+ """ + output = ( + self.cli.netmap.snapshot( + rpc_endpoint=rpc_endpoint, + wallet=wallet, + address=address, + generate_key=generate_key, + ttl=ttl, + trace=trace, + xhdr=xhdr, + timeout=timeout, + ) + .stdout.split("Trace ID")[0] + .strip() + ) + + return NetmapParser.snapshot_one_node(output, rpc_endpoint) diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py new file mode 100644 index 00000000..be8a4701 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py @@ -0,0 +1,708 @@ +import json +import logging +import os +import re +import uuid +from typing import Any, Optional + +from frostfs_testlib import reporter, utils +from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT +from frostfs_testlib.resources.common import ASSETS_DIR +from frostfs_testlib.shell.interfaces import CommandResult +from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces +from frostfs_testlib.storage.grpc_operations import interfaces +from frostfs_testlib.storage.grpc_operations.implementations.chunks import ChunksOperations +from frostfs_testlib.testing.test_control import wait_for_success +from frostfs_testlib.utils import cli_utils, file_utils + +logger = logging.getLogger("NeoLogger") + + +class ObjectOperations(interfaces.ObjectInterface): + def __init__(self, cli: FrostfsCli) -> None: + self.cli = cli + self.chunks: interfaces.ChunksInterface = ChunksOperations(self.cli) + + @reporter.step("Delete object") + def delete( + self, + cid: str, + oid: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + """ + DELETE an Object. + + Args: + cid: ID of Container where we get the Object from + oid: ID of Object we are going to delete + bearer: path to Bearer Token file, appends to `--bearer` key + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + xhdr: Request X-Headers in form of Key=Value + session: path to a JSON-encoded container session token + timeout: Timeout for the operation. + Returns: + (str): Tombstone ID + """ + result = self.cli.object.delete( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + bearer=bearer, + xhdr=xhdr, + session=session, + timeout=timeout, + ) + + id_str = result.stdout.split("\n")[1] + tombstone = id_str.split(":")[1] + return tombstone.strip() + + @reporter.step("Get object") + def get( + self, + cid: str, + oid: str, + endpoint: str, + bearer: Optional[str] = None, + write_object: Optional[str] = None, + xhdr: Optional[dict] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> file_utils.TestFile: + """ + GET from FrostFS. + + Args: + cid (str): ID of Container where we get the Object from + oid (str): Object ID + bearer: path to Bearer Token file, appends to `--bearer` key + write_object: path to downloaded file, appends to `--file` key + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + no_progress(optional, bool): do not show progress bar + xhdr (optional, dict): Request X-Headers in form of Key=Value + session (optional, dict): path to a JSON-encoded container session token + timeout: Timeout for the operation. 
+ Returns: + (str): path to downloaded file + """ + if not write_object: + write_object = str(uuid.uuid4()) + test_file = file_utils.TestFile(os.path.join(ASSETS_DIR, write_object)) + + self.cli.object.get( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + file=test_file, + bearer=bearer, + no_progress=no_progress, + xhdr=xhdr, + session=session, + timeout=timeout, + ) + + return test_file + + @reporter.step("Get object from random node") + def get_from_random_node( + self, + cid: str, + oid: str, + cluster: Cluster, + bearer: Optional[str] = None, + write_object: Optional[str] = None, + xhdr: Optional[dict] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + """ + GET from FrostFS random storage node + + Args: + cid: ID of Container where we get the Object from + oid: Object ID + cluster: cluster object + bearer (optional, str): path to Bearer Token file, appends to `--bearer` key + write_object (optional, str): path to downloaded file, appends to `--file` key + no_progress(optional, bool): do not show progress bar + xhdr (optional, dict): Request X-Headers in form of Key=Value + session (optional, dict): path to a JSON-encoded container session token + timeout: Timeout for the operation. + Returns: + (str): path to downloaded file + """ + endpoint = cluster.get_random_storage_rpc_endpoint() + return self.get( + cid, + oid, + endpoint, + bearer, + write_object, + xhdr, + no_progress, + session, + timeout, + ) + + @reporter.step("Get hash object") + def hash( + self, + rpc_endpoint: str, + cid: str, + oid: str, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + range: Optional[str] = None, + salt: Optional[str] = None, + ttl: Optional[int] = None, + session: Optional[str] = None, + hash_type: Optional[str] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + """ + Get object hash. + + Args: + address: Address of wallet account. + bearer: File with signed JSON or binary encoded bearer token. + cid: Container ID. + generate_key: Generate new private key. + oid: Object ID. + range: Range to take hash from in the form offset1:length1,... + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + salt: Salt in hex format. + ttl: TTL value in request meta header (default 2). + session: Filepath to a JSON- or binary-encoded token of the object RANGEHASH session. + hash_type: Hash type. Either 'sha256' or 'tz' (default "sha256"). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + xhdr: Dict with request X-Headers. + timeout: Timeout for the operation (default 15s). + + Returns: + Command's result. + """ + result = self.cli.object.hash( + rpc_endpoint=rpc_endpoint, + cid=cid, + oid=oid, + address=address, + bearer=bearer, + generate_key=generate_key, + range=range, + salt=salt, + ttl=ttl, + xhdr=xhdr, + session=session, + hash_type=hash_type, + timeout=timeout, + ) + + if range: + # Cut off the range and return only hash + return result.stdout.split(":")[1].strip() + + return result.stdout + + @reporter.step("Head object") + def head( + self, + cid: str, + oid: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + json_output: bool = True, + is_raw: bool = False, + is_direct: bool = False, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> CommandResult | Any: + """ + HEAD an Object. 
+ + Args: + cid (str): ID of Container where we get the Object from + oid (str): ObjectID to HEAD + bearer (optional, str): path to Bearer Token file, appends to `--bearer` key + endpoint(optional, str): FrostFS endpoint to send request to + json_output(optional, bool): return response in JSON format or not; this flag + turns into `--json` key + is_raw(optional, bool): send "raw" request or not; this flag + turns into `--raw` key + is_direct(optional, bool): send request directly to the node or not; this flag + turns into `--ttl 1` key + xhdr (optional, dict): Request X-Headers in form of Key=Value + session (optional, dict): path to a JSON-encoded container session token + timeout: Timeout for the operation. + Returns: + depending on the `json_output` parameter value, the function returns + (dict): HEAD response in JSON format + or + (str): HEAD response as a plain text + """ + result = self.cli.object.head( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + bearer=bearer, + json_mode=json_output, + raw=is_raw, + ttl=1 if is_direct else None, + xhdr=xhdr, + session=session, + timeout=timeout, + ) + + if not json_output: + return result + + try: + decoded = json.loads(result.stdout) + except Exception as exc: + # If we failed to parse output as JSON, the cause might be + # the plain text string in the beginning of the output. + # Here we cut off first string and try to parse again. + logger.info(f"failed to parse output: {exc}") + logger.info("parsing output in another way") + fst_line_idx = result.stdout.find("\n") + decoded = json.loads(result.stdout[fst_line_idx:]) + + # if response + if "chunks" in decoded.keys(): + logger.info("decoding ec chunks") + return decoded["chunks"] + + # If response is Complex Object header, it has `splitId` key + if "splitId" in decoded.keys(): + logger.info("decoding split header") + return utils.json_utils.decode_split_header(decoded) + + # If response is Last or Linking Object header, + # it has `header` dictionary and non-null `split` dictionary + if "split" in decoded["header"].keys(): + if decoded["header"]["split"]: + logger.info("decoding linking object") + return utils.json_utils.decode_linking_object(decoded) + + if decoded["header"]["objectType"] == "STORAGE_GROUP": + logger.info("decoding storage group") + return utils.json_utils.decode_storage_group(decoded) + + if decoded["header"]["objectType"] == "TOMBSTONE": + logger.info("decoding tombstone") + return utils.json_utils.decode_tombstone(decoded) + + logger.info("decoding simple header") + return utils.json_utils.decode_simple_header(decoded) + + @reporter.step("Lock Object") + def lock( + self, + cid: str, + oid: str, + endpoint: str, + lifetime: Optional[int] = None, + expire_at: Optional[int] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + """ + Locks object in container. + + Args: + address: Address of wallet account. + bearer: File with signed JSON or binary encoded bearer token. + cid: Container ID. + oid: Object ID. + lifetime: Lock lifetime. + expire_at: Lock expiration epoch. + shell: executor for cli command + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + session: Path to a JSON-encoded container session token. + ttl: TTL value in request meta header (default 2). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + xhdr: Dict with request X-Headers. 
+ timeout: Timeout for the operation. + + Returns: + Lock object ID + """ + result = self.cli.object.lock( + rpc_endpoint=endpoint, + lifetime=lifetime, + expire_at=expire_at, + address=address, + cid=cid, + oid=oid, + bearer=bearer, + xhdr=xhdr, + session=session, + ttl=ttl, + timeout=timeout, + ) + + # Splitting CLI output to separate lines and taking the penultimate line + id_str = result.stdout.strip().split("\n")[0] + oid = id_str.split(":")[1] + return oid.strip() + + @reporter.step("Put object") + def put( + self, + path: str, + cid: str, + endpoint: str, + bearer: Optional[str] = None, + copies_number: Optional[int] = None, + attributes: Optional[dict] = None, + xhdr: Optional[dict] = None, + expire_at: Optional[int] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + """ + PUT of given file. + + Args: + path: path to file to be PUT + cid: ID of Container where we get the Object from + bearer: path to Bearer Token file, appends to `--bearer` key + copies_number: Number of copies of the object to store within the RPC call + attributes: User attributes in form of Key1=Value1,Key2=Value2 + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + no_progress: do not show progress bar + expire_at: Last epoch in the life of the object + xhdr: Request X-Headers in form of Key=Value + session: path to a JSON-encoded container session token + timeout: Timeout for the operation. + Returns: + (str): ID of uploaded Object + """ + result = self.cli.object.put( + rpc_endpoint=endpoint, + file=path, + cid=cid, + attributes=attributes, + bearer=bearer, + copies_number=copies_number, + expire_at=expire_at, + no_progress=no_progress, + xhdr=xhdr, + session=session, + timeout=timeout, + ) + + # Splitting CLI output to separate lines and taking the penultimate line + id_str = result.stdout.strip().split("\n")[-2] + oid = id_str.split(":")[1] + return oid.strip() + + @reporter.step("Patch object") + def patch( + self, + cid: str, + oid: str, + endpoint: str, + ranges: list[str] = None, + payloads: list[str] = None, + new_attrs: Optional[str] = None, + replace_attrs: bool = False, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + trace: bool = False, + ) -> str: + """ + PATCH an object. 
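For illustration, a hedged sketch of how this method might be called (the container/object IDs, endpoint, ranges and payload paths are invented, and `objects` stands for an already constructed ObjectOperations instance):

    # Hypothetical values for illustration only.
    patched_oid = objects.patch(
        cid="EXAMPLE-CID",
        oid="EXAMPLE-OID",
        endpoint="storage01.example.com:8080",
        ranges=["0:64", "128:32"],                            # offset:length pairs to replace
        payloads=["/tmp/patch_part_1", "/tmp/patch_part_2"],  # one payload file per range
        new_attrs="patched=true",
        replace_attrs=False,
    )
    # Returns the ID of the patched object.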
+ + Args: + cid: ID of Container where we get the Object from + oid: Object ID + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + ranges: An array of ranges in which to replace data in the format [offset1:length1, offset2:length2] + payloads: An array of file paths to be applied in each range + new_attrs: Attributes to be changed in the format "key1=value1,key2=value2" + replace_attrs: Replace all attributes completely with new ones specified in new_attrs + bearer: Path to Bearer Token file, appends to `--bearer` key + xhdr: Request X-Headers in form of Key=Value + session: Path to a JSON-encoded container session token + timeout: Timeout for the operation + trace: Generate trace ID and print it + Returns: + (str): ID of patched Object + """ + result = self.cli.object.patch( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + range=ranges, + payload=payloads, + new_attrs=new_attrs, + replace_attrs=replace_attrs, + bearer=bearer, + xhdr=xhdr, + session=session, + timeout=timeout, + trace=trace, + ) + return result.stdout.split(":")[1].strip() + + @reporter.step("Put object to random node") + def put_to_random_node( + self, + path: str, + cid: str, + cluster: Cluster, + bearer: Optional[str] = None, + copies_number: Optional[int] = None, + attributes: Optional[dict] = None, + xhdr: Optional[dict] = None, + expire_at: Optional[int] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + """ + PUT of given file to a random storage node. + + Args: + path: path to file to be PUT + cid: ID of Container where we get the Object from + cluster: cluster under test + bearer: path to Bearer Token file, appends to `--bearer` key + copies_number: Number of copies of the object to store within the RPC call + attributes: User attributes in form of Key1=Value1,Key2=Value2 + cluster: cluster under test + no_progress: do not show progress bar + expire_at: Last epoch in the life of the object + xhdr: Request X-Headers in form of Key=Value + session: path to a JSON-encoded container session token + timeout: Timeout for the operation. + Returns: + ID of uploaded Object + """ + endpoint = cluster.get_random_storage_rpc_endpoint() + return self.put( + path, + cid, + endpoint, + bearer, + copies_number, + attributes, + xhdr, + expire_at, + no_progress, + session, + timeout=timeout, + ) + + @reporter.step("Get Range") + def range( + self, + cid: str, + oid: str, + range_cut: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> tuple[file_utils.TestFile, bytes]: + """ + GETRANGE an Object. + + Args: + wallet: wallet on whose behalf GETRANGE is done + cid: ID of Container where we get the Object from + oid: ID of Object we are going to request + range_cut: range to take data from in the form offset:length + shell: executor for cli command + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + bearer: path to Bearer Token file, appends to `--bearer` key + xhdr: Request X-Headers in form of Key=Value + session: path to a JSON-encoded container session token + timeout: Timeout for the operation. 
+ Returns: + (str, bytes) - path to the file with range content and content of this file as bytes + """ + test_file = file_utils.TestFile(os.path.join(ASSETS_DIR, str(uuid.uuid4()))) + + self.cli.object.range( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + range=range_cut, + file=test_file, + bearer=bearer, + xhdr=xhdr, + session=session, + timeout=timeout, + ) + + with open(test_file, "rb") as file: + content = file.read() + return test_file, content + + @reporter.step("Search object") + def search( + self, + cid: str, + endpoint: str, + bearer: str = "", + oid: Optional[str] = None, + filters: Optional[dict] = None, + expected_objects_list: Optional[list] = None, + xhdr: Optional[dict] = None, + session: Optional[str] = None, + phy: bool = False, + root: bool = False, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + ttl: Optional[int] = None, + ) -> list: + """ + SEARCH an Object. + + Args: + wallet: wallet on whose behalf SEARCH is done + cid: ID of Container where we get the Object from + shell: executor for cli command + bearer: path to Bearer Token file, appends to `--bearer` key + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + filters: key=value pairs to filter Objects + expected_objects_list: a list of ObjectIDs to compare found Objects with + xhdr: Request X-Headers in form of Key=Value + session: path to a JSON-encoded container session token + phy: Search physically stored objects. + root: Search for user objects. + timeout: Timeout for the operation. + + Returns: + list of found ObjectIDs + """ + result = self.cli.object.search( + rpc_endpoint=endpoint, + cid=cid, + bearer=bearer, + oid=oid, + xhdr=xhdr, + filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()] if filters else None, + session=session, + phy=phy, + root=root, + address=address, + generate_key=generate_key, + ttl=ttl, + timeout=timeout, + ) + + found_objects = re.findall(r"(\w{43,44})", result.stdout) + + if expected_objects_list: + if sorted(found_objects) == sorted(expected_objects_list): + logger.info(f"Found objects list '{found_objects}' " f"is equal for expected list '{expected_objects_list}'") + else: + logger.warning(f"Found object list {found_objects} " f"is not equal to expected list '{expected_objects_list}'") + + return found_objects + + @wait_for_success() + @reporter.step("Search object nodes") + def nodes( + self, + cluster: Cluster, + cid: str, + oid: str, + alive_node: ClusterNode, + bearer: str = "", + xhdr: Optional[dict] = None, + is_direct: bool = False, + verify_presence_all: bool = False, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> list[ClusterNode]: + endpoint = alive_node.storage_node.get_rpc_endpoint() + + response = self.cli.object.nodes( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + bearer=bearer, + ttl=1 if is_direct else None, + json=True, + xhdr=xhdr, + timeout=timeout, + verify_presence_all=verify_presence_all, + ) + + response_json = json.loads(response.stdout) + # Currently, the command will show expected and confirmed nodes. 
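To make the filtering that follows easier to read, here is a hedged sketch of the response shape it assumes (all IDs and node keys are invented):

    response_json = {
        "data_objects": [
            {
                "object_id": "EXAMPLE-OID",
                "required_nodes": ["node-key-a", "node-key-b"],
                "confirmed_nodes": ["node-key-a"],
            }
        ]
    }
    # With input like this, only "node-key-a" is collected, because it appears in both
    # required_nodes and confirmed_nodes of the same data object.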
+ # And we (currently) count only nodes which are both expected and confirmed + object_nodes_id = { + required_node + for data_object in response_json["data_objects"] + for required_node in data_object["required_nodes"] + if required_node in data_object["confirmed_nodes"] + } + + netmap_nodes_list = cli_utils.parse_netmap_output( + self.cli.netmap.snapshot( + rpc_endpoint=endpoint, + ).stdout + ) + netmap_nodes = [ + netmap_node for object_node in object_nodes_id for netmap_node in netmap_nodes_list if object_node == netmap_node.node_id + ] + + object_nodes = [ + cluster_node + for netmap_node in netmap_nodes + for cluster_node in cluster.cluster_nodes + if netmap_node.node == cluster_node.get_interface(Interfaces.MGMT) + ] + + return object_nodes + + @reporter.step("Search parts of object") + def parts( + self, + cid: str, + oid: str, + alive_node: ClusterNode, + bearer: str = "", + xhdr: Optional[dict] = None, + is_direct: bool = False, + verify_presence_all: bool = False, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> list[str]: + endpoint = alive_node.storage_node.get_rpc_endpoint() + response = self.cli.object.nodes( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + bearer=bearer, + ttl=1 if is_direct else None, + json=True, + xhdr=xhdr, + timeout=timeout, + verify_presence_all=verify_presence_all, + ) + response_json = json.loads(response.stdout) + return [data_object["object_id"] for data_object in response_json["data_objects"]] diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py new file mode 100644 index 00000000..379bbe0e --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py @@ -0,0 +1,5 @@ +from .ape_manager import ApeManagerInterface +from .chunks import ChunksInterface +from .container import ContainerInterface +from .netmap import NetmapInterface +from .object import ObjectInterface diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/ape_manager.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/ape_manager.py new file mode 100644 index 00000000..5b198bc0 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/ape_manager.py @@ -0,0 +1,48 @@ +from abc import ABC, abstractmethod +from typing import Optional + +from frostfs_testlib.shell.interfaces import CommandResult + + +class ApeManagerInterface(ABC): + @abstractmethod + def add( + self, + rpc_endpoint: str, + chain_id: Optional[str] = None, + chain_id_hex: Optional[str] = None, + path: Optional[str] = None, + rule: Optional[str] | Optional[list[str]] = None, + target_name: Optional[str] = None, + target_type: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + pass + + @abstractmethod + def list( + self, + rpc_endpoint: str, + target_name: Optional[str] = None, + target_type: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + pass + + @abstractmethod + def remove( + self, + rpc_endpoint: str, + chain_id: Optional[str] = None, + chain_id_hex: Optional[str] = None, + target_name: Optional[str] = None, + target_type: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + pass diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/chunks.py 
b/src/frostfs_testlib/storage/grpc_operations/interfaces/chunks.py new file mode 100644 index 00000000..986b938e --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/chunks.py @@ -0,0 +1,79 @@ +from abc import ABC, abstractmethod +from typing import Optional + +from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo + + +class ChunksInterface(ABC): + @abstractmethod + def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]: + pass + + @abstractmethod + def get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]: + pass + + @abstractmethod + def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str: + pass + + @abstractmethod + def get_all( + self, + rpc_endpoint: str, + cid: str, + oid: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + trace: bool = False, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> list[Chunk]: + pass + + @abstractmethod + def get_parity( + self, + rpc_endpoint: str, + cid: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + oid: Optional[str] = None, + trace: bool = False, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> Chunk: + pass + + @abstractmethod + def get_first_data( + self, + rpc_endpoint: str, + cid: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + oid: Optional[str] = None, + trace: bool = False, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> Chunk: + pass diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py new file mode 100644 index 00000000..397f7b25 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py @@ -0,0 +1,129 @@ +from abc import ABC, abstractmethod +from typing import List, Optional + +from frostfs_testlib.storage.cluster import Cluster, ClusterNode + + +class ContainerInterface(ABC): + @abstractmethod + def create( + self, + endpoint: str, + nns_zone: Optional[str] = None, + nns_name: Optional[str] = None, + address: Optional[str] = None, + attributes: Optional[dict] = None, + basic_acl: Optional[str] = None, + await_mode: bool = False, + disable_timestamp: bool = False, + force: bool = False, + trace: bool = False, + name: Optional[str] = None, + nonce: Optional[str] = None, + policy: Optional[str] = None, + session: Optional[str] = None, + subnet: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> str: + """ + Create a new container and register it in the FrostFS. + It will be stored in the sidechain when the Inner Ring accepts it. 
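A hedged usage sketch for this interface, relying on the CLI-backed ContainerOperations implementation shown earlier (the endpoint, placement policy and container name are illustrative, and `cli` is assumed to be an already configured FrostfsCli instance):

    from frostfs_testlib.storage.grpc_operations.implementations.container import ContainerOperations

    containers = ContainerOperations(cli)

    cid = containers.create(
        endpoint="storage01.example.com:8080",           # illustrative RPC endpoint
        policy="REP 2 IN X CBF 1 SELECT 2 FROM * AS X",  # illustrative placement policy
        name="test-container",
        await_mode=True,
    )
    containers.wait_creation(cid, endpoint="storage01.example.com:8080")
    assert cid in containers.list(endpoint="storage01.example.com:8080")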
+ """ + raise NotImplementedError("No implemethed method create") + + @abstractmethod + def delete( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + await_mode: bool = False, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + force: bool = False, + trace: bool = False, + ) -> List[str]: + """ + Delete an existing container. + Only the owner of the container has permission to remove the container. + """ + raise NotImplementedError("No implemethed method delete") + + @abstractmethod + def get( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + await_mode: bool = False, + to: Optional[str] = None, + json_mode: bool = True, + trace: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> List[str]: + """Get container field info.""" + raise NotImplementedError("No implemethed method get") + + @abstractmethod + def get_eacl( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + await_mode: bool = False, + json_mode: bool = True, + trace: bool = False, + to: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> List[str]: + """Get extended ACL table of container.""" + raise NotImplementedError("No implemethed method get-eacl") + + @abstractmethod + def list( + self, + endpoint: str, + name: Optional[str] = None, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + trace: bool = False, + owner: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + **params, + ) -> List[str]: + """List all created containers.""" + raise NotImplementedError("No implemethed method list") + + @abstractmethod + def nodes( + self, + endpoint: str, + cid: str, + cluster: Cluster, + address: Optional[str] = None, + ttl: Optional[int] = None, + from_file: Optional[str] = None, + trace: bool = False, + short: Optional[bool] = True, + xhdr: Optional[dict] = None, + generate_key: Optional[bool] = None, + timeout: Optional[str] = None, + ) -> List[ClusterNode]: + """Show the nodes participating in the container in the current epoch.""" + raise NotImplementedError("No implemethed method nodes") + + @abstractmethod + def wait_creation(self, cid: str, endpoint: str, attempts: Optional[str], sleep_interval: Optional[int]) -> None: + raise NotImplementedError("No implemented method wait_creation") diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py new file mode 100644 index 00000000..3fdc98ab --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py @@ -0,0 +1,89 @@ +from abc import ABC, abstractmethod +from typing import List, Optional + +from frostfs_testlib.storage.dataclasses.storage_object_info import NodeInfo, NodeNetInfo, NodeNetmapInfo + + +class NetmapInterface(ABC): + @abstractmethod + def epoch( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + trace: Optional[bool] = False, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> int: + """ + Get current epoch number. 
+ """ + raise NotImplementedError("No implemethed method epoch") + + @abstractmethod + def netinfo( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> NodeNetInfo: + """ + Get target node info. + """ + raise NotImplementedError("No implemethed method netinfo") + + @abstractmethod + def nodeinfo( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> NodeInfo: + """ + Get target node info. + """ + raise NotImplementedError("No implemethed method nodeinfo") + + @abstractmethod + def snapshot( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> List[NodeNetmapInfo]: + """ + Get target node info. + """ + raise NotImplementedError("No implemethed method snapshot") + + @abstractmethod + def snapshot_one_node( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> List[NodeNetmapInfo]: + """ + Get target one node info. + """ + raise NotImplementedError("No implemethed method snapshot") diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/object.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/object.py new file mode 100644 index 00000000..550c461c --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/object.py @@ -0,0 +1,223 @@ +from abc import ABC, abstractmethod +from typing import Any, List, Optional + +from frostfs_testlib.shell.interfaces import CommandResult +from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.utils import file_utils + +from .chunks import ChunksInterface + + +class ObjectInterface(ABC): + def __init__(self) -> None: + self.chunks: ChunksInterface + + @abstractmethod + def delete( + self, + cid: str, + oid: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def get( + self, + cid: str, + oid: str, + endpoint: str, + bearer: Optional[str] = None, + write_object: Optional[str] = None, + xhdr: Optional[dict] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> file_utils.TestFile: + pass + + @abstractmethod + def get_from_random_node( + self, + cid: str, + oid: str, + cluster: Cluster, + bearer: Optional[str] = None, + write_object: Optional[str] = None, + xhdr: Optional[dict] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def hash( + self, + endpoint: str, + cid: str, + oid: str, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + range: Optional[str] = None, + salt: Optional[str] = None, + ttl: Optional[int] = None, + session: Optional[str] = None, + hash_type: Optional[str] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> 
str: + pass + + @abstractmethod + def head( + self, + cid: str, + oid: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + json_output: bool = True, + is_raw: bool = False, + is_direct: bool = False, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult | Any: + pass + + @abstractmethod + def lock( + self, + cid: str, + oid: str, + endpoint: str, + lifetime: Optional[int] = None, + expire_at: Optional[int] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def put( + self, + path: str, + cid: str, + endpoint: str, + bearer: Optional[str] = None, + copies_number: Optional[int] = None, + attributes: Optional[dict] = None, + xhdr: Optional[dict] = None, + expire_at: Optional[int] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def patch( + self, + cid: str, + oid: str, + endpoint: str, + ranges: Optional[list[str]] = None, + payloads: Optional[list[str]] = None, + new_attrs: Optional[str] = None, + replace_attrs: bool = False, + bearer: Optional[str] = None, + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = None, + trace: bool = False, + ) -> str: + pass + + @abstractmethod + def put_to_random_node( + self, + path: str, + cid: str, + cluster: Cluster, + bearer: Optional[str] = None, + copies_number: Optional[int] = None, + attributes: Optional[dict] = None, + xhdr: Optional[dict] = None, + expire_at: Optional[int] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def range( + self, + cid: str, + oid: str, + range_cut: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> tuple[file_utils.TestFile, bytes]: + pass + + @abstractmethod + def search( + self, + cid: str, + endpoint: str, + bearer: str = "", + oid: Optional[str] = None, + filters: Optional[dict] = None, + expected_objects_list: Optional[list] = None, + xhdr: Optional[dict] = None, + session: Optional[str] = None, + phy: bool = False, + root: bool = False, + timeout: Optional[str] = None, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + ttl: Optional[int] = None, + ) -> List: + pass + + @abstractmethod + def nodes( + self, + cluster: Cluster, + cid: str, + oid: str, + alive_node: ClusterNode, + bearer: str = "", + xhdr: Optional[dict] = None, + is_direct: bool = False, + verify_presence_all: bool = False, + timeout: Optional[str] = None, + ) -> List[ClusterNode]: + pass + + @abstractmethod + def parts( + self, + cid: str, + oid: str, + alive_node: ClusterNode, + bearer: str = "", + xhdr: Optional[dict] = None, + is_direct: bool = False, + verify_presence_all: bool = False, + timeout: Optional[str] = None, + ) -> List[str]: + pass diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py b/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py new file mode 100644 index 00000000..5edc99f9 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py @@ -0,0 +1,14 @@ +from abc import ABC + +from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli + +from . 
import interfaces + + +class GrpcClientWrapper(ABC): + def __init__(self) -> None: + self.cli: FrostfsCli + self.object: interfaces.ObjectInterface + self.container: interfaces.ContainerInterface + self.netmap: interfaces.NetmapInterface + self.ape_manager: interfaces.ApeManagerInterface diff --git a/src/frostfs_testlib/storage/service_registry.py b/src/frostfs_testlib/storage/service_registry.py new file mode 100644 index 00000000..3154dc73 --- /dev/null +++ b/src/frostfs_testlib/storage/service_registry.py @@ -0,0 +1,21 @@ +from frostfs_testlib.storage.dataclasses.node_base import NodeBase, NodeClassDict, ServiceClass + + +class ServiceRegistry: + _class_mapping: dict[str, NodeClassDict] = {} + + def get_entry(self, service_type: type[ServiceClass]) -> NodeClassDict: + key = service_type.__name__ + + if key not in self._class_mapping: + raise RuntimeError( + f"Unregistered service type requested: {key}. At this moment registered services are: {self._class_mapping.keys()}" + ) + + return self._class_mapping[key] + + def register_service(self, service_name: str, service_class: type[NodeBase]): + self._class_mapping[service_class.__name__] = { + "cls": service_class, + "hosting_service_name": service_name, + } diff --git a/src/frostfs_testlib/testing/__init__.py b/src/frostfs_testlib/testing/__init__.py new file mode 100644 index 00000000..34839721 --- /dev/null +++ b/src/frostfs_testlib/testing/__init__.py @@ -0,0 +1,2 @@ +from frostfs_testlib.testing.parallel import parallel +from frostfs_testlib.testing.test_control import expect_not_raises, run_optionally, wait_for_success diff --git a/src/frostfs_testlib/testing/cluster_test_base.py b/src/frostfs_testlib/testing/cluster_test_base.py new file mode 100644 index 00000000..50c8eb6c --- /dev/null +++ b/src/frostfs_testlib/testing/cluster_test_base.py @@ -0,0 +1,43 @@ +import time +from typing import Optional + +from frostfs_testlib import reporter +from frostfs_testlib.resources.common import MORPH_BLOCK_TIME +from frostfs_testlib.shell import Shell +from frostfs_testlib.steps import epoch +from frostfs_testlib.storage.cluster import Cluster +from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode +from frostfs_testlib.utils import datetime_utils + + +# To skip adding every mandatory singleton dependency to EACH test function +class ClusterTestBase: + shell: Shell + cluster: Cluster + + @reporter.step("Tick {epochs_to_tick} epochs, wait {wait_block} block") + def tick_epochs( + self, + epochs_to_tick: int, + alive_node: Optional[StorageNode] = None, + wait_block: int = None, + ): + for _ in range(epochs_to_tick): + self.tick_epoch(alive_node, wait_block) + + def tick_epoch(self, alive_node: Optional[StorageNode] = None, wait_block: int = None, delta: Optional[int] = None): + epoch.tick_epoch(self.shell, self.cluster, alive_node=alive_node, delta=delta) + if wait_block: + self.wait_for_blocks(wait_block) + + def wait_for_epochs_align(self): + epoch.wait_for_epochs_align(self.shell, self.cluster) + + def get_epoch(self): + return epoch.get_epoch(self.shell, self.cluster) + + def ensure_fresh_epoch(self): + return epoch.ensure_fresh_epoch(self.shell, self.cluster) + + def wait_for_blocks(self, blocks_count: int = 1): + time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * blocks_count) diff --git a/src/frostfs_testlib/testing/parallel.py b/src/frostfs_testlib/testing/parallel.py new file mode 100644 index 00000000..6c4f6e05 --- /dev/null +++ b/src/frostfs_testlib/testing/parallel.py @@ -0,0 +1,148 @@ +import itertools 
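Stepping back to the ServiceRegistry added above, a minimal usage sketch (the service name string is illustrative; StorageNode is the testlib class imported below):

    from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
    from frostfs_testlib.storage.service_registry import ServiceRegistry

    registry = ServiceRegistry()
    # Map the NodeBase subclass to the hosting service name it is deployed under.
    registry.register_service("frostfs-storage", StorageNode)

    entry = registry.get_entry(StorageNode)
    assert entry == {"cls": StorageNode, "hosting_service_name": "frostfs-storage"}
    # get_entry() raises RuntimeError for types that were never registered.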
+import traceback
+from concurrent.futures import Future, ThreadPoolExecutor
+from contextlib import contextmanager
+from typing import Callable, Collection, Optional, Union
+
+MAX_WORKERS = 50
+
+
+@contextmanager
+def parallel_workers_limit(workers_count: int):
+    global MAX_WORKERS
+    original_value = MAX_WORKERS
+    MAX_WORKERS = workers_count
+    try:
+        yield
+    finally:
+        MAX_WORKERS = original_value
+
+
+def parallel(
+    fn: Union[Callable, list[Callable]],
+    parallel_items: Optional[Collection] = None,
+    *args,
+    **kwargs,
+) -> list[Future]:
+    """Parallel execution of a selected function or a list of functions using ThreadPoolExecutor.
+    Also checks the exceptions of each thread.
+
+    Args:
+        fn: function(s) to run. Can work in 2 modes:
+            1. If you have a dedicated function with some items to process in parallel,
+               like you do with executor.map(fn, parallel_items), pass this function as fn.
+            2. If you need to process each item with its own method, like you do
+               with executor.submit(fn, args, kwargs), pass a list of methods here.
+               See examples in runners.py in this repo.
+        parallel_items: items to iterate on (should be None in case of 2nd mode).
+        args: any other args required in target function(s);
+            if any arg is itertools.cycle, it will be iterated before passing to a new thread.
+        kwargs: any other kwargs required in target function(s);
+            if any kwarg is itertools.cycle, it will be iterated before passing to a new thread.
+
+    Returns:
+        list of futures.
+    """
+
+    if callable(fn):
+        if not parallel_items:
+            raise RuntimeError("Parallel items should not be none when fn is callable.")
+        futures = _run_by_items(fn, parallel_items, *args, **kwargs)
+    elif isinstance(fn, list):
+        futures = _run_by_fn_list(fn, *args, **kwargs)
+    else:
+        raise RuntimeError("Nothing to run. fn should be either callable or list of callables.")
+
+    # Check for exceptions
+    exceptions = [future.exception() for future in futures if future.exception()]
+    if exceptions:
+        # Prettify exceptions from the parallel run with all underlying stack traces.
+        # For example, we had 3 RuntimeError exceptions during parallel.
This format will give us something like + # + # RuntimeError: The following exceptions occured during parallel run: + # 1) Exception one text + # 2) Exception two text + # 3) Exception three text + # TRACES: + # ==== 1 ==== + # Traceback (most recent call last): + # File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run + # result = self.fn(*self.args, **self.kwargs) + # File "frostfs_testcases/pytest_tests/testsuites/object/test_object_tombstone.py", line 17, in check_service + # raise RuntimeError(f"Exception one text") + # RuntimeError: Exception one text + # + # ==== 2 ==== + # Traceback (most recent call last): + # File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run + # result = self.fn(*self.args, **self.kwargs) + # File "frostfs_testcases/pytest_tests/testsuites/object/test_object_tombstone.py", line 17, in check_service + # raise RuntimeError(f"Exception two text") + # RuntimeError: Exception two text + # + # ==== 3 ==== + # Traceback (most recent call last): + # File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run + # result = self.fn(*self.args, **self.kwargs) + # File "frostfs_testcases/pytest_tests/testsuites/object/test_object_tombstone.py", line 17, in check_service + # raise RuntimeError(f"Exception three text") + # RuntimeError: Exception three text + short_summary = "\n".join([f"{i}) {str(e)}" for i, e in enumerate(exceptions, 1)]) + stack_traces = "\n".join( + [f"==== {i} ====\n{''.join(traceback.TracebackException.from_exception(e).format())}" for i, e in enumerate(exceptions, 1)] + ) + message = f"{short_summary}\nTRACES:\n{stack_traces}" + raise RuntimeError(f"The following exceptions occured during parallel run:\n{message}") + return futures + + +def _run_by_fn_list(fn_list: list[Callable], *args, **kwargs) -> list[Future]: + if not len(fn_list): + return [] + if not all([callable(f) for f in fn_list]): + raise RuntimeError("fn_list should contain only callables") + + futures: list[Future] = [] + + with ThreadPoolExecutor(max_workers=min(len(fn_list), MAX_WORKERS)) as executor: + for fn in fn_list: + task_args = _get_args(*args) + task_kwargs = _get_kwargs(**kwargs) + + futures.append(executor.submit(fn, *task_args, **task_kwargs)) + + return futures + + +def _run_by_items(fn: Callable, parallel_items: Collection, *args, **kwargs) -> list[Future]: + futures: list[Future] = [] + + with ThreadPoolExecutor(max_workers=min(len(parallel_items), MAX_WORKERS)) as executor: + for item in parallel_items: + task_args = _get_args(*args) + task_kwargs = _get_kwargs(**kwargs) + task_args.insert(0, item) + + futures.append(executor.submit(fn, *task_args, **task_kwargs)) + + return futures + + +def _get_kwargs(**kwargs): + actkwargs = {} + for key, arg in kwargs.items(): + if isinstance(arg, itertools.cycle): + actkwargs[key] = next(arg) + else: + actkwargs[key] = arg + return actkwargs + + +def _get_args(*args): + actargs = [] + for arg in args: + if isinstance(arg, itertools.cycle): + actargs.append(next(arg)) + else: + actargs.append(arg) + return actargs diff --git a/src/frostfs_testlib/testing/readable.py b/src/frostfs_testlib/testing/readable.py new file mode 100644 index 00000000..80f11695 --- /dev/null +++ b/src/frostfs_testlib/testing/readable.py @@ -0,0 +1,36 @@ +from abc import ABCMeta +from enum import Enum + + +class HumanReadableEnum(Enum): + def __str__(self): + return self._name_ + + def __repr__(self): + return self._name_ + + +class HumanReadableABCMeta(ABCMeta): + def __str__(cls): + if "__repr_name__" in 
cls.__dict__: + return cls.__dict__["__repr_name__"] + return cls.__name__ + + def __repr__(cls): + if "__repr_name__" in cls.__dict__: + return cls.__dict__["__repr_name__"] + return cls.__name__ + + +class HumanReadableABC(metaclass=HumanReadableABCMeta): + @classmethod + def __str__(cls): + if "__repr_name__" in cls.__dict__: + return cls.__dict__["__repr_name__"] + return type(cls).__name__ + + @classmethod + def __repr__(cls): + if "__repr_name__" in cls.__dict__: + return cls.__dict__["__repr_name__"] + return type(cls).__name__ diff --git a/src/frostfs_testlib/testing/test_control.py b/src/frostfs_testlib/testing/test_control.py new file mode 100644 index 00000000..bc38208e --- /dev/null +++ b/src/frostfs_testlib/testing/test_control.py @@ -0,0 +1,222 @@ +import inspect +import logging +import os +from functools import wraps +from time import sleep, time +from typing import Any + +import yaml +from _pytest.outcomes import Failed +from pytest import fail + +from frostfs_testlib import reporter +from frostfs_testlib.resources.common import ASSETS_DIR +from frostfs_testlib.utils.func_utils import format_by_args + +logger = logging.getLogger("NeoLogger") + +# TODO: we may consider deprecating some methods here and use tenacity instead + + +class expect_not_raises: + """ + Decorator/Context manager check that some action, method or test does not raise exceptions + + Useful to set proper state of failed test cases in allure + + Example: + def do_stuff(): + raise Exception("Fail") + + def test_yellow(): <- this test is marked yellow (Test Defect) in allure + do_stuff() + + def test_red(): <- this test is marked red (Failed) in allure + with expect_not_raises(): + do_stuff() + + @expect_not_raises() + def test_also_red(): <- this test is also marked red (Failed) in allure + do_stuff() + """ + + def __enter__(self): + pass + + def __exit__(self, exception_type, exception_value, exception_traceback): + if exception_value: + fail(str(exception_value)) + + def __call__(self, func): + @wraps(func) + def impl(*a, **kw): + with expect_not_raises(): + func(*a, **kw) + + return impl + + +def retry(max_attempts: int, sleep_interval: int = 1, expected_result: Any = None, title: str = None): + """ + Decorator to wait for some conditions/functions to pass successfully. + This is useful if you don't know exact time when something should pass successfully and do not + want to use sleep(X) with too big X. + + Be careful though, wrapped function should only check the state of something, not change it. + """ + + assert max_attempts >= 1, "Cannot apply retry decorator with max_attempts < 1" + + def wrapper(func): + def call(func, *a, **kw): + last_exception = None + for _ in range(max_attempts): + try: + actual_result = func(*a, **kw) + if expected_result is not None: + assert expected_result == actual_result + return actual_result + except Exception as ex: + logger.debug(ex) + last_exception = ex + sleep(sleep_interval) + except Failed as ex: + logger.debug(ex) + last_exception = ex + sleep(sleep_interval) + + # timeout exceeded with no success, raise last_exception + if last_exception is not None: + raise last_exception + + @wraps(func) + def impl(*a, **kw): + if title is not None: + with reporter.step(format_by_args(func, title, *a, **kw)): + return call(func, *a, **kw) + + return call(func, *a, **kw) + + return impl + + return wrapper + + +def run_optionally(enabled: bool, mock_value: Any = True): + """ + Decorator to run something conditionally. 
+ MUST be placed after @pytest.fixture and before @allure decorators. + + Args: + enabled: if true, decorated func will be called as usual. if false the decorated func will be skipped and mock_value will be returned. + mock_value: the value to be returned when decorated func is skipped. + """ + + def deco(func): + @wraps(func) + def func_impl(*a, **kw): + if enabled: + return func(*a, **kw) + return mock_value + + @wraps(func) + def gen_impl(*a, **kw): + if enabled: + yield from func(*a, **kw) + return + yield mock_value + + return gen_impl if inspect.isgeneratorfunction(func) else func_impl + + return deco + + +def cached_fixture(enabled: bool): + """ + Decorator to cache fixtures. + MUST be placed after @pytest.fixture and before @allure decorators. + + Args: + enabled: if true, decorated func will be cached. + """ + + def deco(func): + @wraps(func) + def func_impl(*a, **kw): + # TODO: *a and *kw should be parsed to some kind of hashsum and used in filename to prevent cache load from different parameters + cache_file = os.path.join(ASSETS_DIR, f"fixture_cache_{func.__name__}.yml") + + if enabled and os.path.exists(cache_file): + with open(cache_file, "r") as cache_input: + return yaml.load(cache_input, Loader=yaml.Loader) + + result = func(*a, **kw) + + if enabled: + with open(cache_file, "w") as cache_output: + yaml.dump(result, cache_output) + return result + + # TODO: cache yielding fixtures + @wraps(func) + def gen_impl(*a, **kw): + raise NotImplementedError("Not implemented for yielding fixtures") + + return gen_impl if inspect.isgeneratorfunction(func) else func_impl + + return deco + + +def wait_for_success( + max_wait_time: int = 60, + interval: int = 1, + expected_result: Any = None, + fail_testcase: bool = False, + fail_message: str = "", + title: str = None, +): + """ + Decorator to wait for some conditions/functions to pass successfully. + This is useful if you don't know exact time when something should pass successfully and do not + want to use sleep(X) with too big X. + + Be careful though, wrapped function should only check the state of something, not change it. + """ + + def wrapper(func): + def call(func, *a, **kw): + start = int(round(time())) + last_exception = None + while start + max_wait_time >= int(round(time())): + try: + actual_result = func(*a, **kw) + if expected_result is not None: + assert expected_result == actual_result, fail_message + return actual_result + except Exception as ex: + logger.debug(ex) + last_exception = ex + sleep(interval) + except Failed as ex: + logger.debug(ex) + last_exception = ex + sleep(interval) + + if fail_testcase: + fail(str(last_exception)) + + # timeout exceeded with no success, raise last_exception + if last_exception is not None: + raise last_exception + + @wraps(func) + def impl(*a, **kw): + if title is not None: + with reporter.step(format_by_args(func, title, *a, **kw)): + return call(func, *a, **kw) + + return call(func, *a, **kw) + + return impl + + return wrapper diff --git a/src/frostfs_testlib/utils/__init__.py b/src/frostfs_testlib/utils/__init__.py index fbc4a8f7..4acc5b13 100644 --- a/src/frostfs_testlib/utils/__init__.py +++ b/src/frostfs_testlib/utils/__init__.py @@ -1,3 +1,7 @@ +""" +Idea of utils is to have small utilitary functions which are not dependent of anything. 
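+For example, string_utils.random_string(8) or datetime_utils.parse_time("30s") can be called on their own, without any cluster or framework setup (illustrative usage).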
+""" + import frostfs_testlib.utils.converting_utils import frostfs_testlib.utils.datetime_utils import frostfs_testlib.utils.json_utils diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py new file mode 100644 index 00000000..87872967 --- /dev/null +++ b/src/frostfs_testlib/utils/cli_utils.py @@ -0,0 +1,209 @@ +#!/usr/bin/python3.10 + +# TODO: This file is deprecated and all code which uses these calls should be refactored to use shell classes + +""" +Helper functions to use with `frostfs-cli`, `neo-go` and other CLIs. +""" +import csv +import json +import logging +import re +import sys +from contextlib import suppress +from datetime import datetime +from io import StringIO +from textwrap import shorten +from typing import Any, Optional, Union + +import pexpect + +from frostfs_testlib import reporter +from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetmapInfo + +logger = logging.getLogger("NeoLogger") +COLOR_GREEN = "\033[92m" +COLOR_OFF = "\033[0m" + + +def _run_with_passwd(cmd: str) -> str: + child = pexpect.spawn(cmd) + child.delaybeforesend = 1 + child.expect(".*") + child.sendline("\r") + if sys.platform == "darwin": + child.expect(pexpect.EOF) + cmd = child.before + else: + child.wait() + cmd = child.read() + return cmd.decode() + + +def _configure_aws_cli(cmd: str, key_id: str, access_key: str, region: str, out_format: str = "json") -> str: + child = pexpect.spawn(cmd) + child.delaybeforesend = 1 + + child.expect("AWS Access Key ID.*") + child.sendline(key_id) + + child.expect("AWS Secret Access Key.*") + child.sendline(access_key) + + child.expect("Default region name.*") + child.sendline(region) + + child.expect("Default output format.*") + child.sendline(out_format) + + child.wait() + cmd = child.read() + # child.expect(pexpect.EOF) + # cmd = child.before + return cmd.decode() + + +def _attach_allure_log(cmd: str, output: str, return_code: int, start_time: datetime, end_time: datetime) -> None: + command_attachment = ( + f"COMMAND: '{cmd}'\n" + f"OUTPUT:\n {output}\n" + f"RC: {return_code}\n" + f"Start / End / Elapsed\t {start_time} / {end_time} / {end_time - start_time}" + ) + with reporter.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'): + reporter.attach(command_attachment, "Command execution") + + +def log_command_execution(cmd: str, output: Union[str, dict], params: Optional[dict] = None, **kwargs) -> None: + logger.info(f"{cmd}: {output}") + + if not params: + params = {} + + if params.get("Body") and len(params.get("Body")) > 1000: + params["Body"] = "" + + output_params = params + + try: + json_params = json.dumps(params, indent=4, sort_keys=True, default=str) + except TypeError as err: + logger.warning(f"Failed to serialize '{cmd}' request parameters:\n{params}\nException: {err}") + else: + output_params = json_params + + output = json.dumps(output, indent=4, sort_keys=True, default=str) + + command_execution = f"COMMAND: '{cmd}'\n" f"URL: {kwargs['endpoint']}\n" f"PARAMS:\n{output_params}\n" f"OUTPUT:\n{output}\n" + aws_command = _convert_request_to_aws_cli_command(cmd, params, **kwargs) + + reporter.attach(command_execution, "Command execution") + reporter.attach(aws_command, "AWS CLI Command") + + +def _convert_request_to_aws_cli_command(command: str, params: dict, **kwargs) -> str: + overriden_names = [_convert_json_name_to_aws_cli(name) for name in kwargs.keys()] + command = command.replace("_", "-") + options = [] + + for name, value in params.items(): + name = 
_convert_json_name_to_aws_cli(name) + + # To override parameters for AWS CLI + if name in overriden_names: + continue + + if option := _create_option(name, value): + options.append(option) + + for name, value in kwargs.items(): + name = _convert_json_name_to_aws_cli(name) + if option := _create_option(name, value): + options.append(option) + + options = " ".join(options) + api = "s3api" if "s3" in kwargs["endpoint"] else "iam" + return f"aws --no-verify-ssl --no-paginate {api} {command} {options}" + + +def _convert_json_name_to_aws_cli(name: str) -> str: + specific_names = {"CORSConfiguration": "cors-configuration"} + + if aws_cli_name := specific_names.get(name): + return aws_cli_name + return re.sub(r"([a-z])([A-Z])", r"\1 \2", name).lower().replace(" ", "-").replace("_", "-") + + +def _create_option(name: str, value: Any) -> str | None: + if isinstance(value, bool) and value: + return f"--{name}" + + if isinstance(value, dict): + value = json.dumps(value, indent=4, sort_keys=True, default=str) + return f"--{name} '{value}'" + + if value: + return f"--{name} {value}" + + return None + + +def parse_netmap_output(output: str) -> list[NodeNetmapInfo]: + """ + The code will parse each line and return each node as dataclass. + """ + netmap_nodes = output.split("Node ")[1:] + dataclasses_netmap = [] + result_netmap = {} + + regexes = { + "node_id": r"\d+: (?P\w+)", + "node_data_ips": r"(?P/ip4/.+?)$", + "node_status": r"(?PONLINE|OFFLINE)", + "cluster_name": r"ClusterName: (?P\w+)", + "continent": r"Continent: (?P\w+)", + "country": r"Country: (?P\w+)", + "country_code": r"CountryCode: (?P\w+)", + "external_address": r"ExternalAddr: (?P/ip[4].+?)$", + "location": r"Location: (?P\w+.*)", + "node": r"Node: (?P\d+\.\d+\.\d+\.\d+)", + "price": r"Price: (?P\d+)", + "sub_div": r"SubDiv: (?P.*)", + "sub_div_code": r"SubDivCode: (?P\w+)", + "un_locode": r"UN-LOCODE: (?P\w+.*)", + "role": r"role: (?P\w+)", + } + + for node in netmap_nodes: + for key, regex in regexes.items(): + search_result = re.search(regex, node, flags=re.MULTILINE) + if key == "node_data_ips": + result_netmap[key] = search_result[key].strip().split(" ") + continue + if key == "external_address": + result_netmap[key] = search_result[key].strip().split(",") + continue + if search_result == None: + result_netmap[key] = None + continue + result_netmap[key] = search_result[key].strip() + + dataclasses_netmap.append(NodeNetmapInfo(**result_netmap)) + + return dataclasses_netmap + + +def parse_cmd_table(output: str, delimiter="|") -> list[dict[str, str]]: + parsing_output = [] + reader = csv.reader(StringIO(output.strip()), delimiter=delimiter) + iter_reader = iter(reader) + header_row = next(iter_reader) + for row in iter_reader: + table = {} + for i in range(len(row)): + header = header_row[i].strip().lower().replace(" ", "_") + value = row[i].strip().lower() + if header: + table[header] = value + parsing_output.append(table) + return parsing_output diff --git a/src/frostfs_testlib/utils/converting_utils.py b/src/frostfs_testlib/utils/converting_utils.py index 24b77aef..273d9b46 100644 --- a/src/frostfs_testlib/utils/converting_utils.py +++ b/src/frostfs_testlib/utils/converting_utils.py @@ -1,10 +1,23 @@ import base64 import binascii import json +from typing import Tuple import base58 +def calc_unit(value: float, skip_units: int = 0) -> Tuple[float, str]: + units = ["B", "KiB", "MiB", "GiB", "TiB"] + + for unit in units[skip_units:]: + if value < 1024: + return value, unit + + value = value / 1024.0 + + return value, unit + + def 
str_to_ascii_hex(input: str) -> str: b = binascii.hexlify(input.encode()) return str(b)[2:-1] diff --git a/src/frostfs_testlib/utils/datetime_utils.py b/src/frostfs_testlib/utils/datetime_utils.py index a357d8a8..830178f9 100644 --- a/src/frostfs_testlib/utils/datetime_utils.py +++ b/src/frostfs_testlib/utils/datetime_utils.py @@ -10,6 +10,9 @@ def parse_time(value: str) -> int: Returns: Number of seconds in the parsed time interval. """ + if value is None: + return 0 + value = value.lower() for suffix in ["s", "sec"]: diff --git a/src/frostfs_testlib/utils/env_utils.py b/src/frostfs_testlib/utils/env_utils.py new file mode 100644 index 00000000..3fdebe11 --- /dev/null +++ b/src/frostfs_testlib/utils/env_utils.py @@ -0,0 +1,29 @@ +import logging +import re + +from frostfs_testlib import reporter + +logger = logging.getLogger("NeoLogger") + + +@reporter.step("Read environment.properties") +def read_env_properties(file_path: str) -> dict: + with open(file_path, "r") as file: + raw_content = file.read() + + env_properties = {} + for line in raw_content.split("\n"): + m = re.match("(.*?)=(.*)", line) + if not m: + logger.warning(f"Could not parse env property from {line}") + continue + key, value = m.group(1), m.group(2) + env_properties[key] = value + return env_properties + + +@reporter.step("Update data in environment.properties") +def save_env_properties(file_path: str, env_data: dict) -> None: + with open(file_path, "a+") as env_file: + for env, env_value in env_data.items(): + env_file.write(f"{env}={env_value}\n") diff --git a/src/frostfs_testlib/utils/failover_utils.py b/src/frostfs_testlib/utils/failover_utils.py new file mode 100644 index 00000000..5c4d52fa --- /dev/null +++ b/src/frostfs_testlib/utils/failover_utils.py @@ -0,0 +1,198 @@ +import logging +from dataclasses import dataclass +from time import sleep +from typing import Optional + +from frostfs_testlib import reporter +from frostfs_testlib.resources.common import SERVICE_MAX_STARTUP_TIME +from frostfs_testlib.shell import Shell +from frostfs_testlib.steps.cli.object import neo_go_dump_keys +from frostfs_testlib.steps.node_management import storage_node_healthcheck +from frostfs_testlib.steps.storage_policy import get_nodes_with_object +from frostfs_testlib.storage.cluster import Cluster, ClusterNode, NodeBase, StorageNode +from frostfs_testlib.storage.dataclasses.frostfs_services import MorphChain +from frostfs_testlib.storage.dataclasses.node_base import ServiceClass +from frostfs_testlib.testing.test_control import wait_for_success +from frostfs_testlib.utils.datetime_utils import parse_time + +logger = logging.getLogger("NeoLogger") + + +@reporter.step("Check and return status of given service") +def service_status(service: str, shell: Shell) -> str: + return shell.exec(f"sudo systemctl is-active {service}").stdout.rstrip() + + +@dataclass +class TopCommand: + """ + This class using `from_stdout` helps to parse result from `top command`, could return result only for one PID + pid: Process PID + output: stdout result from TOP command + """ + + pid: Optional[str] = None + user: Optional[str] = None + pr: Optional[str] = None + ni: Optional[str] = None + virt: Optional[str] = None + res: Optional[str] = None + shr: Optional[str] = None + status: Optional[str] = None + cpu_percent: Optional[str] = None + mem_percent: Optional[str] = None + time: Optional[str] = None + cmd: Optional[str] = None + STATUS_RUNNING = "R" + STATUS_SLEEP = "S" + STATUS_ZOMBIE = "Z" + STATUS_UNSLEEP = "D" + STATUS_TRACED = "T" + + @staticmethod + 
def from_stdout(output: str, requested_pid: int) -> "TopCommand": + list_var = [None for i in range(12)] + for line in output.split("\n"): + if str(requested_pid) in line: + list_var = line.split() + return TopCommand( + pid=list_var[0], + user=list_var[1], + pr=list_var[2], + ni=list_var[3], + virt=list_var[4], + res=list_var[5], + shr=list_var[6], + status=list_var[7], + cpu_percent=list_var[8], + mem_percent=list_var[9], + time=list_var[10], + cmd=list_var[11], + ) + + +@reporter.step("Run `top` command with specified PID") +def service_status_top(service: str, shell: Shell) -> TopCommand: + pid = service_pid(service, shell) + output = shell.exec(f"sudo top -b -n 1 -p {pid}").stdout + return TopCommand.from_stdout(output, pid) + + +@reporter.step("Restart service n times with sleep") +def multiple_restart( + service_type: type[NodeBase], + node: ClusterNode, + count: int = 5, + sleep_interval: int = 2, +): + service_systemctl_name = node.service(service_type).get_service_systemctl_name() + service_name = node.service(service_type).name + for _ in range(count): + node.host.restart_service(service_name) + logger.info(f"Restart {service_systemctl_name}; sleep {sleep_interval} seconds and continue") + sleep(sleep_interval) + + +@wait_for_success(60, 5, title="Wait for services become {expected_status} on node {cluster_node}") +def check_services_status(cluster_node: ClusterNode, service_list: list[ServiceClass], expected_status: str): + cmd = "" + for service in service_list: + cmd += f' sudo systemctl status {service.get_service_systemctl_name()} --lines=0 | grep "Active:";' + result = cluster_node.host.get_shell().exec(cmd).stdout.rstrip() + statuses = list() + for line in result.split("\n"): + status_substring = line.split() + statuses.append(status_substring[1]) + unique_statuses = list(set(statuses)) + assert ( + len(unique_statuses) == 1 and expected_status in unique_statuses + ), f"Requested status={expected_status} not found in requested services={service_list}, list of statuses={result}" + + +@wait_for_success(60, 5, title="Wait for {service} become active") +def wait_service_in_desired_state(service: str, shell: Shell, expected_status: Optional[str] = "active"): + real_status = service_status(service=service, shell=shell) + assert ( + expected_status == real_status + ), f"Service {service}: expected status= {expected_status}, real status {real_status}" + + +@wait_for_success(parse_time(SERVICE_MAX_STARTUP_TIME), 1, title="Wait for {service_type} passes healtcheck on {node}") +def service_type_healthcheck( + service_type: type[NodeBase], + node: ClusterNode, +): + service = node.service(service_type) + assert ( + service.service_healthcheck() + ), f"Healthcheck failed for {service.get_service_systemctl_name()}, IP={node.host_ip}" + + +@reporter.step("Kill by process name") +def kill_by_service_name(service_type: type[NodeBase], node: ClusterNode): + service_systemctl_name = node.service(service_type).get_service_systemctl_name() + pid = service_pid(service_systemctl_name, node.host.get_shell()) + node.host.get_shell().exec(f"sudo kill -9 {pid}") + + +@reporter.step("Suspend {service}") +def suspend_service(shell: Shell, service: str): + shell.exec(f"sudo kill -STOP {service_pid(service, shell)}") + + +@reporter.step("Resume {service}") +def resume_service(shell: Shell, service: str): + shell.exec(f"sudo kill -CONT {service_pid(service, shell)}") + + +# retry mechanism cause when the task has been started recently '0' PID could be returned +@wait_for_success(10, 1, title="Get 
{service} pid") +def service_pid(service: str, shell: Shell) -> int: + output = shell.exec(f"systemctl show --property MainPID {service}").stdout.rstrip() + splitted = output.split("=") + PID = int(splitted[1]) + assert PID > 0, f"Service {service} has invalid PID={PID}" + return PID + + +@reporter.step("Wrapper for neo-go dump keys command") +def dump_keys(shell: Shell, node: ClusterNode) -> dict: + host = node.host + service_config = host.get_service_config(node.service(MorphChain).name) + wallet = service_config.attributes["wallet_path"] + return neo_go_dump_keys(shell=shell, wallet=wallet) + + +@reporter.step("Wait for object replication") +def wait_object_replication( + cid: str, + oid: str, + expected_copies: int, + shell: Shell, + nodes: list[StorageNode], + sleep_interval: int = 15, + attempts: int = 20, +) -> list[StorageNode]: + nodes_with_object = [] + for _ in range(attempts): + nodes_with_object = get_nodes_with_object(cid, oid, shell=shell, nodes=nodes) + if len(nodes_with_object) >= expected_copies: + return nodes_with_object + sleep(sleep_interval) + raise AssertionError( + f"Expected {expected_copies} copies of object, but found {len(nodes_with_object)}. " + f"Waiting time {sleep_interval * attempts}" + ) + + +def is_all_storage_nodes_returned(cluster: Cluster) -> bool: + with reporter.step("Run health check for all storage nodes"): + for node in cluster.services(StorageNode): + try: + health_check = storage_node_healthcheck(node) + except Exception as err: + logger.warning(f"Node healthcheck fails with error {err}") + return False + if health_check.health_status != "READY" or health_check.network_status != "ONLINE": + return False + return True diff --git a/src/frostfs_testlib/utils/file_keeper.py b/src/frostfs_testlib/utils/file_keeper.py new file mode 100644 index 00000000..a5670cc1 --- /dev/null +++ b/src/frostfs_testlib/utils/file_keeper.py @@ -0,0 +1,48 @@ +from concurrent.futures import ThreadPoolExecutor + +from frostfs_testlib import reporter +from frostfs_testlib.storage.dataclasses.node_base import NodeBase + + +class FileKeeper: + """This class is responsible to make backup copy of modified file and restore when required (mostly after the test)""" + + files_to_restore: dict[NodeBase, list[str]] = {} + + @reporter.step("Adding {file_to_restore} from node {node} to restore list") + def add(self, node: NodeBase, file_to_restore: str): + if node in self.files_to_restore and file_to_restore in self.files_to_restore[node]: + # Already added + return + + if node not in self.files_to_restore: + self.files_to_restore[node] = [] + + if file_to_restore not in self.files_to_restore[node]: + self.files_to_restore[node].append(file_to_restore) + + shell = node.host.get_shell() + shell.exec(f"cp {file_to_restore} {file_to_restore}.bak") + + @reporter.step("Restore files") + def restore_files(self): + nodes = self.files_to_restore.keys() + if not nodes: + return + + with ThreadPoolExecutor(max_workers=len(nodes)) as executor: + results = executor.map(self._restore_files_on_node, nodes) + + self.files_to_restore.clear() + + for _ in results: + # Iterate through results for exception check if any + pass + + @reporter.step("Restore files on node {node}") + def _restore_files_on_node(self, node: NodeBase): + shell = node.host.get_shell() + for file_to_restore in self.files_to_restore[node]: + with reporter.step(f"Restore file {file_to_restore} on node {node}"): + shell.exec(f"cp {file_to_restore}.bak {file_to_restore}") + shell.exec(f"rm {file_to_restore}.bak") diff --git 
a/src/frostfs_testlib/utils/file_utils.py b/src/frostfs_testlib/utils/file_utils.py new file mode 100644 index 00000000..8839d7f0 --- /dev/null +++ b/src/frostfs_testlib/utils/file_utils.py @@ -0,0 +1,218 @@ +import hashlib +import logging +import os +import uuid +from typing import Any, Optional + +from frostfs_testlib import reporter +from frostfs_testlib.resources.common import ASSETS_DIR +from frostfs_testlib.utils import string_utils + +logger = logging.getLogger("NeoLogger") + + +class TestFile(os.PathLike): + def __init__(self, path: str): + self.path = path + + def __del__(self): + logger.debug(f"Removing file {self.path}") + if os.path.exists(self.path): + os.remove(self.path) + + def __str__(self): + return self.path + + def __repr__(self): + return self.path + + def __fspath__(self): + return self.path + + +def ensure_directory(path): + directory = os.path.dirname(path) + + if not os.path.exists(directory): + os.makedirs(directory) + + +def ensure_directory_opener(path, flags): + ensure_directory(path) + return os.open(path, flags) + + +# TODO: Do not add {size} to title yet, since it produces dynamic info in top level steps +# Use object_size dt in future as argument +@reporter.step("Generate file") +def generate_file(size: int, file_name: Optional[str] = None) -> TestFile: + """Generates a binary file with the specified size in bytes. + + Args: + size: Size in bytes, can be declared as 6e+6 for example. + + Returns: + The path to the generated file. + """ + + if file_name is None: + file_name = string_utils.unique_name("object-") + + test_file = TestFile(os.path.join(ASSETS_DIR, file_name)) + with open(test_file, "wb", opener=ensure_directory_opener) as file: + file.write(os.urandom(size)) + logger.info(f"File with size {size} bytes has been generated: {test_file}") + + return test_file + + +# TODO: Do not add {size} to title yet, since it produces dynamic info in top level steps +# Use object_size dt in future as argument +@reporter.step("Generate file with content") +def generate_file_with_content( + size: int, + file_path: Optional[str | TestFile] = None, + content: Optional[str] = None, +) -> TestFile: + """Creates a new file with specified content. + + Args: + file_path: Path to the file that should be created. If not specified, then random file + path will be generated. + content: Content that should be stored in the file. If not specified, then random binary + content will be generated. + + Returns: + Path to the generated file. + """ + mode = "w+" + if content is None: + content = os.urandom(size) + mode = "wb" + + test_file = None + if not file_path: + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))) + elif isinstance(file_path, TestFile): + test_file = file_path + else: + test_file = TestFile(file_path) + + with open(test_file, mode, opener=ensure_directory_opener) as file: + file.write(content) + + return test_file + + +@reporter.step("Get File Hash") +def get_file_hash(file_path: str | TestFile, len: Optional[int] = None, offset: Optional[int] = None) -> str: + """Generates hash for the specified file. + + Args: + file_path: Path to the file to generate hash for. + len: How many bytes to read. + offset: Position to start reading from. + + Returns: + Hash of the file as hex-encoded string. 
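+ + Example (illustrative, hypothetical path): + get_file_hash("/tmp/object.bin", len=1024) returns the SHA-256 hex digest of the first 1024 bytes.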
+ """ + file_hash = hashlib.sha256() + with open(file_path, "rb") as out: + if len and not offset: + file_hash.update(out.read(len)) + elif len and offset: + out.seek(offset, 0) + file_hash.update(out.read(len)) + elif offset and not len: + out.seek(offset, 0) + file_hash.update(out.read()) + else: + file_hash.update(out.read()) + return file_hash.hexdigest() + + +@reporter.step("Concatenation set of files to one file") +def concat_files(file_paths: list[str | TestFile], resulting_file_path: Optional[str | TestFile] = None) -> TestFile: + """Concatenates several files into a single file. + + Args: + file_paths: Paths to the files to concatenate. + resulting_file_path: Path to the file where concatenated content should be stored. + + Returns: + Path to the resulting file. + """ + + test_file = None + if not resulting_file_path: + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))) + elif isinstance(resulting_file_path, TestFile): + test_file = resulting_file_path + else: + test_file = TestFile(resulting_file_path) + + with open(test_file, "wb", opener=ensure_directory_opener) as f: + for file in file_paths: + with open(file, "rb") as part_file: + f.write(part_file.read()) + return test_file + + +@reporter.step("Split file to {parts} parts") +def split_file(file_path: str | TestFile, parts: int) -> list[TestFile]: + """Splits specified file into several specified number of parts. + + Each part is saved under name `{original_file}_part_{i}`. + + Args: + file_path: Path to the file that should be split. + parts: Number of parts the file should be split into. + + Returns: + Paths to the part files. + """ + with open(file_path, "rb") as file: + content = file.read() + + content_size = len(content) + chunk_size = int((content_size + parts) / parts) + + part_id = 1 + part_file_paths = [] + for content_offset in range(0, content_size + 1, chunk_size): + part_file_name = f"{file_path}_part_{part_id}" + part_file_paths.append(TestFile(part_file_name)) + with open(part_file_name, "wb") as out_file: + out_file.write(content[content_offset : content_offset + chunk_size]) + part_id += 1 + + return part_file_paths + + +@reporter.step("Get file content") +def get_file_content(file_path: str | TestFile, content_len: Optional[int] = None, mode: str = "r", offset: Optional[int] = None) -> Any: + """Returns content of specified file. + + Args: + file_path: Path to the file. + content_len: Limit of content length. If None, then entire file content is returned; + otherwise only the first content_len bytes of the content are returned. + mode: Mode of opening the file. + offset: Position to start reading from. + + Returns: + Content of the specified file. 
+ """ + with open(file_path, mode) as file: + if content_len and not offset: + content = file.read(content_len) + elif content_len and offset: + file.seek(offset, 0) + content = file.read(content_len) + elif offset and not content_len: + file.seek(offset, 0) + content = file.read() + else: + content = file.read() + + return content diff --git a/src/frostfs_testlib/utils/func_utils.py b/src/frostfs_testlib/utils/func_utils.py new file mode 100644 index 00000000..0e22d4af --- /dev/null +++ b/src/frostfs_testlib/utils/func_utils.py @@ -0,0 +1,58 @@ +import collections +import inspect +import sys +from typing import Callable + + +def format_by_args(__func: Callable, __title: str, *a, **kw) -> str: + params = _func_parameters(__func, *a, **kw) + args = list(map(lambda x: _represent(x), a)) + + return __title.format(*args, **params) + + +# These 2 functions are copied from allure_commons._allure +# Duplicate it here in order to be independent of allure and make some adjustments. +def _represent(item): + if isinstance(item, str): + return item + elif isinstance(item, (bytes, bytearray)): + return repr(type(item)) + else: + return repr(item) + + +def _func_parameters(func, *args, **kwargs): + parameters = {} + arg_spec = inspect.getfullargspec(func) + arg_order = list(arg_spec.args) + args_dict = dict(zip(arg_spec.args, args)) + + if arg_spec.defaults: + kwargs_defaults_dict = dict(zip(arg_spec.args[-len(arg_spec.defaults) :], arg_spec.defaults)) + parameters.update(kwargs_defaults_dict) + + if arg_spec.varargs: + arg_order.append(arg_spec.varargs) + varargs = args[len(arg_spec.args) :] + parameters.update({arg_spec.varargs: varargs} if varargs else {}) + + if arg_spec.args and arg_spec.args[0] in ["cls", "self"]: + args_dict.pop(arg_spec.args[0], None) + + if kwargs: + if sys.version_info < (3, 7): + # Sort alphabetically as old python versions does + # not preserve call order for kwargs. + arg_order.extend(sorted(list(kwargs.keys()))) + else: + # Keep py3.7 behaviour to preserve kwargs order + arg_order.extend(list(kwargs.keys())) + parameters.update(kwargs) + + parameters.update(args_dict) + + items = parameters.items() + sorted_items = sorted(map(lambda kv: (kv[0], _represent(kv[1])), items), key=lambda x: arg_order.index(x[0])) + + return collections.OrderedDict(sorted_items) diff --git a/src/frostfs_testlib/utils/string_utils.py b/src/frostfs_testlib/utils/string_utils.py index a80192cf..acbca92f 100644 --- a/src/frostfs_testlib/utils/string_utils.py +++ b/src/frostfs_testlib/utils/string_utils.py @@ -1,11 +1,29 @@ +import itertools import random import re import string +from datetime import datetime ONLY_ASCII_LETTERS = string.ascii_letters DIGITS_AND_ASCII_LETTERS = string.ascii_letters + string.digits NON_DIGITS_AND_LETTERS = string.punctuation +# if unique_name is called multiple times within the same microsecond, append 0-4 to the name so it surely unique +FUSE = itertools.cycle(range(5)) + + +def unique_name(prefix: str = "", postfix: str = ""): + """ + Generate unique short name of anything with prefix. 
+ This should be unique in scope of multiple runs + + Args: + prefix: prefix for unique name generation + Returns: + unique name string + """ + return f"{prefix}{hex(int(datetime.now().timestamp() * 1000000))}{next(FUSE)}{postfix}" + def random_string(length: int = 5, source: str = ONLY_ASCII_LETTERS): """ diff --git a/src/frostfs_testlib/utils/version_utils.py b/src/frostfs_testlib/utils/version_utils.py new file mode 100644 index 00000000..06760851 --- /dev/null +++ b/src/frostfs_testlib/utils/version_utils.py @@ -0,0 +1,90 @@ +import logging +import re +from functools import lru_cache + +from frostfs_testlib import reporter +from frostfs_testlib.cli import FrostfsAdm, FrostfsCli +from frostfs_testlib.hosting import Host, Hosting +from frostfs_testlib.resources.cli import FROSTFS_ADM_EXEC, FROSTFS_AUTHMATE_EXEC, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE +from frostfs_testlib.shell import Shell +from frostfs_testlib.testing.parallel import parallel + +logger = logging.getLogger("NeoLogger") + + +@reporter.step("Get local binaries versions") +def get_local_binaries_versions(shell: Shell) -> dict[str, str]: + versions = {} + + for binary in [NEOGO_EXECUTABLE, FROSTFS_AUTHMATE_EXEC]: + out = shell.exec(f"{binary} --version").stdout + versions[binary] = parse_version(out) + + frostfs_cli = FrostfsCli(shell, FROSTFS_CLI_EXEC) + versions[FROSTFS_CLI_EXEC] = parse_version(frostfs_cli.version.get().stdout) + + try: + frostfs_adm = FrostfsAdm(shell, FROSTFS_ADM_EXEC) + versions[FROSTFS_ADM_EXEC] = parse_version(frostfs_adm.version.get().stdout) + except RuntimeError: + logger.info(f"{FROSTFS_ADM_EXEC} not installed") + + out = shell.exec("aws --version").stdout + out_lines = out.split("\n") + versions["AWS"] = out_lines[0] if out_lines else "Unknown" + logger.info(f"Local binaries version: {out_lines[0]}") + + return versions + + +@reporter.step("Collect binaries versions from host") +def parallel_binary_verions(host: Host) -> dict[str, str]: + versions_by_host = {} + + binary_path_by_name = { + **{ + svc.name[:-3]: { + "exec_path": svc.attributes.get("exec_path"), + "param": svc.attributes.get("custom_version_parameter", "--version"), + } + for svc in host.config.services + if svc.attributes.get("exec_path") and svc.attributes.get("requires_version_check", "true") == "true" + }, + **{ + cli.name: {"exec_path": cli.exec_path, "param": cli.attributes.get("custom_version_parameter", "--version")} + for cli in host.config.clis + if cli.attributes.get("requires_version_check", "true") == "true" + }, + } + + shell = host.get_shell() + versions_at_host = {} + for binary_name, binary in binary_path_by_name.items(): + binary_path = binary["exec_path"] + try: + result = shell.exec(f"{binary_path} {binary['param']}") + version = parse_version(result.stdout) or parse_version(result.stderr) or "Unknown" + versions_at_host[binary_name] = version.strip() + except Exception as exc: + logger.error(f"Cannot get version for {binary_path} because of\n{exc}") + versions_at_host[binary_name] = "Unknown" + versions_by_host[host.config.address] = versions_at_host + return versions_by_host + + +@lru_cache +def get_remote_binaries_versions(hosting: Hosting) -> dict[str, dict[str, str]]: + versions_by_host: dict[str, dict[str, str]] = {} + + with reporter.step("Get remote binaries versions"): + future_binary_verions = parallel(parallel_binary_verions, parallel_items=hosting.hosts) + + for future in future_binary_verions: + versions_by_host.update(future.result()) + + return versions_by_host + + +def parse_version(version_output: 
str) -> str: + version = re.search(r"(?<=version[:=])\s?[\"\']?v?(.+)", version_output, re.IGNORECASE) + return version.group(1).strip("\"'\n\t ") if version else version_output diff --git a/src/frostfs_testlib/utils/wallet_utils.py b/src/frostfs_testlib/utils/wallet_utils.py index 0c5ab1a5..d2b42293 100644 --- a/src/frostfs_testlib/utils/wallet_utils.py +++ b/src/frostfs_testlib/utils/wallet_utils.py @@ -9,6 +9,16 @@ from neo3.wallet import wallet as neo3_wallet logger = logging.getLogger("frostfs.testlib.utils") +def __fix_wallet_schema(wallet: dict) -> None: + # Temporary function to fix wallets that do not conform to the schema + # TODO: get rid of it once issue is solved + if "name" not in wallet: + wallet["name"] = None + for account in wallet["accounts"]: + if "extra" not in account: + account["extra"] = None + + def init_wallet(wallet_path: str, wallet_password: str): """ Create new wallet and new account. @@ -33,29 +43,15 @@ def get_last_address_from_wallet(wallet_path: str, wallet_password: str): Returns: The address for the wallet. """ - with open(wallet_path) as wallet_file: - wallet = neo3_wallet.Wallet.from_json(json.load(wallet_file), password=wallet_password) + wallet = load_wallet(wallet_path, wallet_password) address = wallet.accounts[-1].address logger.info(f"got address: {address}") return address def get_wallet_public_key(wallet_path: str, wallet_password: str, format: str = "hex") -> str: - def __fix_wallet_schema(wallet: dict) -> None: - # Temporary function to fix wallets that do not conform to the schema - # TODO: get rid of it once issue is solved - if "name" not in wallet: - wallet["name"] = None - for account in wallet["accounts"]: - if "extra" not in account: - account["extra"] = None - - # Get public key from wallet file - with open(wallet_path, "r") as file: - wallet_content = json.load(file) - __fix_wallet_schema(wallet_content) - wallet_from_json = neo3_wallet.Wallet.from_json(wallet_content, password=wallet_password) - public_key_hex = str(wallet_from_json.accounts[0].public_key) + wallet = load_wallet(wallet_path, wallet_password) + public_key_hex = str(wallet.accounts[0].public_key) # Convert public key to specified format if format == "hex": @@ -69,7 +65,9 @@ def get_wallet_public_key(wallet_path: str, wallet_password: str, format: str = raise ValueError(f"Invalid public key format: {format}") -def load_wallet(path: str, passwd: str = "") -> neo3_wallet.Wallet: - with open(path, "r") as wallet_file: - wlt_data = wallet_file.read() - return neo3_wallet.Wallet.from_json(json.loads(wlt_data), password=passwd) +def load_wallet(wallet_path: str, wallet_password: str) -> neo3_wallet.Wallet: + with open(wallet_path) as wallet_file: + wallet_content = json.load(wallet_file) + + __fix_wallet_schema(wallet_content) + return neo3_wallet.Wallet.from_json(wallet_content, password=wallet_password) diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 00000000..ea6d681e --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,5 @@ +import os +import sys + +app_dir = os.path.join(os.getcwd(), "src") +sys.path.insert(0, app_dir) diff --git a/tests/helpers.py b/tests/helpers.py index 83910026..b7776fd0 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -14,11 +14,7 @@ def format_error_details(error: Exception) -> str: Returns: String containing exception details. 
""" - detail_lines = traceback.format_exception( - etype=type(error), - value=error, - tb=error.__traceback__, - ) + detail_lines = traceback.format_exception(error) return "".join(detail_lines) diff --git a/tests/test_dataclasses.py b/tests/test_dataclasses.py new file mode 100644 index 00000000..677aed45 --- /dev/null +++ b/tests/test_dataclasses.py @@ -0,0 +1,33 @@ +from typing import Any + +import pytest + +from frostfs_testlib.clients import AwsCliClient, Boto3ClientWrapper +from frostfs_testlib.storage.dataclasses.acl import EACLRole +from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode +from frostfs_testlib.storage.dataclasses.object_size import ObjectSize + + +class TestDataclassesStr: + """Here we are testing important classes string representation.""" + + @pytest.mark.parametrize( + "obj, expected", + [ + (Boto3ClientWrapper, "Boto3 client"), + (AwsCliClient, "AWS CLI"), + (ObjectSize("simple", 1), "simple"), + (ObjectSize("simple", 10), "simple"), + (ObjectSize("complex", 5000), "complex"), + (ObjectSize("complex", 5555), "complex"), + (StorageNode, "StorageNode"), + (MorphChain, "MorphChain"), + (S3Gate, "S3Gate"), + (HTTPGate, "HTTPGate"), + (InnerRing, "InnerRing"), + (EACLRole.OTHERS, "OTHERS"), + ], + ) + def test_classes_string_representation(self, obj: Any, expected: str): + assert f"{obj}" == expected + assert repr(obj) == expected diff --git a/tests/test_hosting.py b/tests/test_hosting.py index 14be8c55..39580cb0 100644 --- a/tests/test_hosting.py +++ b/tests/test_hosting.py @@ -15,6 +15,7 @@ class TestHosting(TestCase): HOST1 = { "address": HOST1_ADDRESS, "plugin_name": HOST1_PLUGIN, + "healthcheck_plugin_name": "basic", "attributes": HOST1_ATTRIBUTES, "clis": HOST1_CLIS, "services": HOST1_SERVICES, @@ -32,6 +33,7 @@ class TestHosting(TestCase): HOST2 = { "address": HOST2_ADDRESS, "plugin_name": HOST2_PLUGIN, + "healthcheck_plugin_name": "basic", "attributes": HOST2_ATTRIBUTES, "clis": HOST2_CLIS, "services": HOST2_SERVICES, @@ -52,18 +54,14 @@ class TestHosting(TestCase): self.assertEqual(host1.config.plugin_name, self.HOST1_PLUGIN) self.assertDictEqual(host1.config.attributes, self.HOST1_ATTRIBUTES) self.assertListEqual(host1.config.clis, [CLIConfig(**cli) for cli in self.HOST1_CLIS]) - self.assertListEqual( - host1.config.services, [ServiceConfig(**service) for service in self.HOST1_SERVICES] - ) + self.assertListEqual(host1.config.services, [ServiceConfig(**service) for service in self.HOST1_SERVICES]) host2 = hosting.get_host_by_address(self.HOST2_ADDRESS) self.assertEqual(host2.config.address, self.HOST2_ADDRESS) self.assertEqual(host2.config.plugin_name, self.HOST2_PLUGIN) self.assertDictEqual(host2.config.attributes, self.HOST2_ATTRIBUTES) self.assertListEqual(host2.config.clis, [CLIConfig(**cli) for cli in self.HOST2_CLIS]) - self.assertListEqual( - host2.config.services, [ServiceConfig(**service) for service in self.HOST2_SERVICES] - ) + self.assertListEqual(host2.config.services, [ServiceConfig(**service) for service in self.HOST2_SERVICES]) def test_get_host_by_service(self): hosting = Hosting() @@ -104,9 +102,7 @@ class TestHosting(TestCase): services = hosting.find_service_configs(rf"^{self.SERVICE_NAME_PREFIX}") self.assertEqual(len(services), 2) for service in services: - self.assertEqual( - service.name[: len(self.SERVICE_NAME_PREFIX)], self.SERVICE_NAME_PREFIX - ) + self.assertEqual(service.name[: len(self.SERVICE_NAME_PREFIX)], self.SERVICE_NAME_PREFIX) service1 = 
hosting.find_service_configs(self.SERVICE1["name"]) self.assertEqual(len(service1), 1) diff --git a/tests/test_load_config.py b/tests/test_load_config.py new file mode 100644 index 00000000..fbeb587d --- /dev/null +++ b/tests/test_load_config.py @@ -0,0 +1,798 @@ +from dataclasses import Field, dataclass, fields, is_dataclass +from typing import Any, get_args + +import pytest + +from frostfs_testlib.load.load_config import EndpointSelectionStrategy, LoadParams, LoadScenario, LoadType, Preset, ReadFrom +from frostfs_testlib.load.runners import DefaultRunner +from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME +from frostfs_testlib.storage.controllers.background_load_controller import BackgroundLoadController + + +@dataclass +class MetaTestField: + field: Field + field_type: type + instance: Any + + +class TestLoadConfig: + @pytest.fixture + def set_empty(self, request: pytest.FixtureRequest): + # Workaround for verify + if "param" in request.__dict__ and request.param: + return request.param + + return False + + @pytest.fixture + def load_type(self, request: pytest.FixtureRequest): + # Workaround for verify + if "param" in request.__dict__ and request.param: + return request.param + + return None + + @pytest.fixture + def load_params(self, load_type: LoadType, set_empty: bool, request: pytest.FixtureRequest): + load_scenario = request.param + return self._get_filled_load_params(load_type, load_scenario, set_empty) + + def test_load_params_only_load_type_required(self): + load_params = LoadParams(load_type=LoadType.S3) + expected = "s3" + assert repr(load_params) == expected + assert f"{load_params}" == expected + + def test_load_params_init_time(self): + load_params = LoadParams(load_type=LoadType.S3) + vus = 100 + + load_params.vu_init_time = BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME + # Used in time calculations + load_params.readers = vus + load_params.writers = vus + load_params.preallocated_readers = vus + load_params.preallocated_writers = vus + + # Not used in time calculations + load_params.deleters = vus + load_params.preallocated_deleters = vus + + expected = vus * 4 * BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME + actual = load_params.get_init_time() + assert actual == expected, "Incorrect time for get_init_time()" + + def test_load_params_initially_have_all_values_none(self): + load_params = LoadParams(load_type=LoadType.S3) + self._check_all_values_none(load_params, ["load_type", "scenario"]) + + def test_preset_initially_have_all_values_none(self): + preset = Preset() + self._check_all_values_none(preset) + + @pytest.mark.parametrize("load_params", [LoadScenario.S3_CAR], indirect=True) + def test_string_representation_s3_car(self, load_params: LoadParams): + load_params.object_size = 524288 + expected = "s3_car 512 MiB, write_rate=10, read_rate=9, delete_rate=11, preallocated_writers=20, preallocated_readers=20, preallocated_deleters=21" + assert f"{load_params}" == expected + assert repr(load_params) == expected + + @pytest.mark.parametrize("load_params", [LoadScenario.gRPC], indirect=True) + def test_string_representation_grpc(self, load_params: LoadParams): + load_params.object_size = 512 + expected = "grpc 512 KiB, writers=7, readers=7, deleters=8" + assert f"{load_params}" == expected + assert repr(load_params) == expected + + @pytest.mark.parametrize("load_params", [LoadScenario.gRPC], indirect=True) + def test_load_controller_string_representation(self, load_params: LoadParams): + load_params.endpoint_selection_strategy = 
EndpointSelectionStrategy.ALL + load_params.object_size = 512 + background_load_controller = BackgroundLoadController("tmp", load_params, None, None, DefaultRunner(None)) + expected = "grpc 512 KiB, writers=7, readers=7, deleters=8" + assert f"{background_load_controller}" == expected + assert repr(background_load_controller) == expected + + def test_load_set_id_changes_fields(self): + load_params = LoadParams(load_type=LoadType.S3) + load_params.preset = Preset() + load_params.read_from = ReadFrom["REGISTRY"] + load_params.working_dir = "/tmp" + load_params.set_id("test_id") + + assert load_params.registry_file == "/tmp/test_id_registry.bolt" + assert load_params.preset.pregen_json == "/tmp/test_id_prepare.json" + assert load_params.load_id == "test_id" + + # No other values should be changed + self._check_all_values_none( + load_params, + [ + "load_type", + "working_dir", + "load_id", + "registry_file", + "preset", + "scenario", + "read_from", + ], + ) + self._check_all_values_none(load_params.preset, ["pregen_json", "scenario"]) + + @pytest.mark.parametrize("load_params", [LoadScenario.gRPC], indirect=True) + def test_argument_parsing_for_grpc_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '11'", + "--acl 'acl'", + "--preload_obj '13'", + "--retry '24'", + "--rule 'rule' --rule 'rule_2'", + "--out 'pregen_json'", + "--workers '7'", + "--containers '16'", + "--policy 'container_placement_policy' --policy 'container_placement_policy_2'", + "--ignore-errors", + "--sleep '19'", + "--local", + ] + expected_env_vars = { + "DURATION": 9, + "WRITE_OBJ_SIZE": 11, + "REGISTRY_FILE": "registry_file", + "K6_MIN_ITERATION_DURATION": "min_iteration_duration", + "K6_SETUP_TIMEOUT": "setup_timeout", + "WRITERS": 7, + "READERS": 7, + "DELETERS": 8, + "READ_AGE": 8, + "STREAMING": 9, + "K6_OUT": "output", + "PREGEN_JSON": "pregen_json", + "PREPARE_LOCALLY": True, + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params", [LoadScenario.gRPC_CAR], indirect=True) + def test_argument_parsing_for_grpc_car_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '11'", + "--preload_obj '13'", + "--retry '24'", + "--rule 'rule' --rule 'rule_2'", + "--out 'pregen_json'", + "--workers '7'", + "--containers '16'", + "--policy 'container_placement_policy' --policy 'container_placement_policy_2'", + "--ignore-errors", + "--sleep '19'", + "--local", + "--acl 'acl'", + ] + expected_env_vars = { + "DURATION": 9, + "WRITE_OBJ_SIZE": 11, + "K6_OUT": "output", + "REGISTRY_FILE": "registry_file", + "K6_MIN_ITERATION_DURATION": "min_iteration_duration", + "K6_SETUP_TIMEOUT": "setup_timeout", + "MAX_WRITERS": 11, + "MAX_READERS": 11, + "MAX_DELETERS": 12, + "PRE_ALLOC_DELETERS": 21, + "PRE_ALLOC_READERS": 20, + "PRE_ALLOC_WRITERS": 20, + "PREGEN_JSON": "pregen_json", + "TIME_UNIT": "time_unit", + "WRITE_RATE": 10, + "READ_RATE": 9, + "READ_AGE": 8, + "DELETE_RATE": 11, + "STREAMING": 9, + "PREPARE_LOCALLY": True, + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params", [LoadScenario.S3], indirect=True) + def test_argument_parsing_for_s3_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '11'", + "--preload_obj '13'", + "--no-verify-ssl", + "--out 'pregen_json'", + "--workers '7'", + "--buckets '13'", + "--location 's3_location' 
--location 's3_location_2'", + "--ignore-errors", + "--sleep '19'", + "--acl 'acl'", + ] + expected_env_vars = { + "DURATION": 9, + "WRITE_OBJ_SIZE": 11, + "REGISTRY_FILE": "registry_file", + "K6_OUT": "output", + "K6_MIN_ITERATION_DURATION": "min_iteration_duration", + "K6_SETUP_TIMEOUT": "setup_timeout", + "WRITERS": 7, + "READERS": 7, + "DELETERS": 8, + "READ_AGE": 8, + "STREAMING": 9, + "NO_VERIFY_SSL": True, + "PREGEN_JSON": "pregen_json", + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params", [LoadScenario.S3_CAR], indirect=True) + def test_argument_parsing_for_s3_car_scenario_with_stringed_time(self, load_params: LoadParams): + load_params.load_time = "2d3h5min" + expected_preset_args = [ + "--size '11'", + "--preload_obj '13'", + "--no-verify-ssl", + "--out 'pregen_json'", + "--workers '7'", + "--buckets '13'", + "--location 's3_location' --location 's3_location_2'", + "--ignore-errors", + "--sleep '19'", + "--acl 'acl'", + ] + expected_env_vars = { + "DURATION": 183900, + "WRITE_OBJ_SIZE": 11, + "REGISTRY_FILE": "registry_file", + "K6_OUT": "output", + "K6_MIN_ITERATION_DURATION": "min_iteration_duration", + "K6_SETUP_TIMEOUT": "setup_timeout", + "NO_VERIFY_SSL": True, + "MAX_WRITERS": 11, + "MAX_READERS": 11, + "MAX_DELETERS": 12, + "PRE_ALLOC_DELETERS": 21, + "PRE_ALLOC_READERS": 20, + "PRE_ALLOC_WRITERS": 20, + "PREGEN_JSON": "pregen_json", + "TIME_UNIT": "time_unit", + "WRITE_RATE": 10, + "READ_RATE": 9, + "READ_AGE": 8, + "STREAMING": 9, + "DELETE_RATE": 11, + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params", [LoadScenario.S3_CAR], indirect=True) + def test_argument_parsing_for_s3_car_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '11'", + "--preload_obj '13'", + "--no-verify-ssl", + "--out 'pregen_json'", + "--workers '7'", + "--buckets '13'", + "--location 's3_location' --location 's3_location_2'", + "--ignore-errors", + "--sleep '19'", + "--acl 'acl'", + ] + expected_env_vars = { + "DURATION": 9, + "WRITE_OBJ_SIZE": 11, + "REGISTRY_FILE": "registry_file", + "K6_OUT": "output", + "K6_MIN_ITERATION_DURATION": "min_iteration_duration", + "K6_SETUP_TIMEOUT": "setup_timeout", + "NO_VERIFY_SSL": True, + "MAX_WRITERS": 11, + "MAX_READERS": 11, + "MAX_DELETERS": 12, + "PRE_ALLOC_DELETERS": 21, + "PRE_ALLOC_READERS": 20, + "PRE_ALLOC_WRITERS": 20, + "PREGEN_JSON": "pregen_json", + "TIME_UNIT": "time_unit", + "WRITE_RATE": 10, + "READ_RATE": 9, + "READ_AGE": 8, + "STREAMING": 9, + "DELETE_RATE": 11, + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params", [LoadScenario.HTTP], indirect=True) + def test_argument_parsing_for_http_scenario(self, load_params: LoadParams): + load_params.preset.local = False + expected_preset_args = [ + "--no-verify-ssl", + "--size '11'", + "--preload_obj '13'", + "--retry '24'", + "--rule 'rule' --rule 'rule_2'", + "--out 'pregen_json'", + "--workers '7'", + "--containers '16'", + "--policy 'container_placement_policy' --policy 'container_placement_policy_2'", + "--ignore-errors", + "--sleep '19'", + "--acl 'acl'", + ] + expected_env_vars = { + "DURATION": 9, + "WRITE_OBJ_SIZE": 11, + "K6_OUT": "output", + "NO_VERIFY_SSL": True, + "REGISTRY_FILE": "registry_file", + 
"K6_MIN_ITERATION_DURATION": "min_iteration_duration", + "K6_SETUP_TIMEOUT": "setup_timeout", + "WRITERS": 7, + "READERS": 7, + "DELETERS": 8, + "READ_AGE": 8, + "STREAMING": 9, + "PREGEN_JSON": "pregen_json", + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params", [LoadScenario.LOCAL], indirect=True) + def test_argument_parsing_for_local_scenario(self, load_params: LoadParams): + load_params.preset.local = False + expected_preset_args = [ + "--size '11'", + "--preload_obj '13'", + "--retry '24'", + "--rule 'rule' --rule 'rule_2'", + "--out 'pregen_json'", + "--workers '7'", + "--containers '16'", + "--policy 'container_placement_policy' --policy 'container_placement_policy_2'", + "--ignore-errors", + "--sleep '19'", + "--acl 'acl'", + ] + expected_env_vars = { + "CONFIG_DIR": "config_dir", + "CONFIG_FILE": "config_file", + "DURATION": 9, + "WRITE_OBJ_SIZE": 11, + "K6_OUT": "output", + "REGISTRY_FILE": "registry_file", + "K6_MIN_ITERATION_DURATION": "min_iteration_duration", + "K6_SETUP_TIMEOUT": "setup_timeout", + "WRITERS": 7, + "READERS": 7, + "DELETERS": 8, + "READ_AGE": 8, + "STREAMING": 9, + "MAX_TOTAL_SIZE_GB": 17, + "PREGEN_JSON": "pregen_json", + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize( + "input, value, params", + [ + (["A C ", " B"], ["A C", "B"], [f"--policy 'A C' --policy 'B'"]), + (" A ", ["A"], ["--policy 'A'"]), + (" A , B ", ["A , B"], ["--policy 'A , B'"]), + ([" A", "B "], ["A", "B"], ["--policy 'A' --policy 'B'"]), + (None, None, []), + ], + ) + def test_grpc_list_parsing_formatter(self, input, value, params): + load_params = LoadParams(LoadType.gRPC) + load_params.preset = Preset() + load_params.preset.container_placement_policy = input + assert load_params.preset.container_placement_policy == value + + self._check_preset_params(load_params, params) + + @pytest.mark.parametrize( + "input, value, params", + [ + (["A C ", " B"], ["A C", "B"], [f"--location 'A C' --location 'B'"]), + (" A ", ["A"], ["--location 'A'"]), + (" A , B ", ["A , B"], ["--location 'A , B'"]), + ([" A", "B "], ["A", "B"], ["--location 'A' --location 'B'"]), + (None, None, []), + ], + ) + def test_s3_list_parsing_formatter(self, input, value, params): + load_params = LoadParams(LoadType.S3) + load_params.preset = Preset() + load_params.preset.s3_location = input + assert load_params.preset.s3_location == value + + self._check_preset_params(load_params, params) + + @pytest.mark.parametrize( + "load_type, input, value, params", + [ + (LoadType.gRPC, ["A C ", " B"], ["A C", "B"], [f"--rule 'A C' --rule 'B'"]), + (LoadType.gRPC, " A ", ["A"], ["--rule 'A'"]), + (LoadType.gRPC, " A , B ", ["A , B"], ["--rule 'A , B'"]), + (LoadType.gRPC, [" A", "B "], ["A", "B"], ["--rule 'A' --rule 'B'"]), + (LoadType.gRPC, None, None, []), + (LoadType.S3, ["A C ", " B"], ["A C", "B"], []), + (LoadType.S3, None, None, []), + ], + ) + def test_ape_list_parsing_formatter(self, load_type, input, value, params): + load_params = LoadParams(load_type) + load_params.preset = Preset() + load_params.preset.rule = input + assert load_params.preset.rule == value + + self._check_preset_params(load_params, params) + + @pytest.mark.parametrize("load_params, load_type", [(LoadScenario.VERIFY, LoadType.S3)], indirect=True) + def test_argument_parsing_for_s3_verify_scenario(self, load_params: LoadParams): + 
expected_env_vars = { + "CLIENTS": 14, + "REGISTRY_FILE": "registry_file", + "K6_SETUP_TIMEOUT": "setup_timeout", + "NO_VERIFY_SSL": True, + "TIME_LIMIT": 11, + } + + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params, load_type", [(LoadScenario.VERIFY, LoadType.gRPC)], indirect=True) + def test_argument_parsing_for_grpc_verify_scenario(self, load_params: LoadParams): + expected_env_vars = { + "CLIENTS": 14, + "REGISTRY_FILE": "registry_file", + "K6_SETUP_TIMEOUT": "setup_timeout", + "NO_VERIFY_SSL": True, + "TIME_LIMIT": 11, + } + + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.gRPC, True)], indirect=True) + def test_empty_argument_parsing_for_grpc_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '0'", + "--preload_obj '0'", + "--retry '0'", + "--rule ''", + "--out ''", + "--workers '0'", + "--containers '0'", + "--policy ''", + "--sleep '0'", + "--acl ''", + ] + expected_env_vars = { + "DURATION": 0, + "WRITE_OBJ_SIZE": 0, + "REGISTRY_FILE": "", + "K6_OUT": "", + "K6_MIN_ITERATION_DURATION": "", + "K6_SETUP_TIMEOUT": "", + "WRITERS": 0, + "READERS": 0, + "DELETERS": 0, + "READ_AGE": 0, + "STREAMING": 0, + "PREGEN_JSON": "", + "PREPARE_LOCALLY": False, + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.gRPC_CAR, True)], indirect=True) + def test_empty_argument_parsing_for_grpc_car_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '0'", + "--preload_obj '0'", + "--retry '0'", + "--rule ''", + "--out ''", + "--workers '0'", + "--containers '0'", + "--policy ''", + "--sleep '0'", + "--acl ''", + ] + expected_env_vars = { + "DURATION": 0, + "WRITE_OBJ_SIZE": 0, + "REGISTRY_FILE": "", + "K6_OUT": "", + "K6_MIN_ITERATION_DURATION": "", + "K6_SETUP_TIMEOUT": "", + "MAX_WRITERS": 0, + "MAX_READERS": 0, + "MAX_DELETERS": 0, + "PRE_ALLOC_DELETERS": 0, + "PRE_ALLOC_READERS": 0, + "PRE_ALLOC_WRITERS": 0, + "PREGEN_JSON": "", + "TIME_UNIT": "", + "WRITE_RATE": 0, + "READ_RATE": 0, + "DELETE_RATE": 0, + "READ_AGE": 0, + "STREAMING": 0, + "PREPARE_LOCALLY": False, + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.S3, True)], indirect=True) + def test_empty_argument_parsing_for_s3_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '0'", + "--preload_obj '0'", + "--out ''", + "--workers '0'", + "--buckets '0'", + "--location ''", + "--sleep '0'", + "--acl ''", + ] + expected_env_vars = { + "DURATION": 0, + "WRITE_OBJ_SIZE": 0, + "REGISTRY_FILE": "", + "K6_OUT": "", + "K6_MIN_ITERATION_DURATION": "", + "K6_SETUP_TIMEOUT": "", + "WRITERS": 0, + "READERS": 0, + "DELETERS": 0, + "READ_AGE": 0, + "STREAMING": 0, + "NO_VERIFY_SSL": False, + "PREGEN_JSON": "", + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.S3_CAR, True)], indirect=True) + def test_empty_argument_parsing_for_s3_car_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '0'", + "--preload_obj '0'", + "--out ''", + "--workers '0'", + "--buckets '0'", + "--location ''", + "--sleep '0'", + "--acl ''", 
+ ] + expected_env_vars = { + "DURATION": 0, + "WRITE_OBJ_SIZE": 0, + "REGISTRY_FILE": "", + "K6_OUT": "", + "K6_MIN_ITERATION_DURATION": "", + "K6_SETUP_TIMEOUT": "", + "NO_VERIFY_SSL": False, + "MAX_WRITERS": 0, + "MAX_READERS": 0, + "MAX_DELETERS": 0, + "PRE_ALLOC_DELETERS": 0, + "PRE_ALLOC_READERS": 0, + "PRE_ALLOC_WRITERS": 0, + "PREGEN_JSON": "", + "TIME_UNIT": "", + "WRITE_RATE": 0, + "READ_RATE": 0, + "DELETE_RATE": 0, + "READ_AGE": 0, + "STREAMING": 0, + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.HTTP, True)], indirect=True) + def test_empty_argument_parsing_for_http_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '0'", + "--preload_obj '0'", + "--retry '0'", + "--rule ''", + "--out ''", + "--workers '0'", + "--containers '0'", + "--policy ''", + "--sleep '0'", + "--acl ''", + ] + expected_env_vars = { + "DURATION": 0, + "WRITE_OBJ_SIZE": 0, + "NO_VERIFY_SSL": False, + "REGISTRY_FILE": "", + "K6_OUT": "", + "K6_MIN_ITERATION_DURATION": "", + "K6_SETUP_TIMEOUT": "", + "WRITERS": 0, + "READERS": 0, + "DELETERS": 0, + "READ_AGE": 0, + "STREAMING": 0, + "PREGEN_JSON": "", + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.LOCAL, True)], indirect=True) + def test_empty_argument_parsing_for_local_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '0'", + "--preload_obj '0'", + "--retry '0'", + "--rule ''", + "--out ''", + "--workers '0'", + "--containers '0'", + "--policy ''", + "--sleep '0'", + "--acl ''", + ] + expected_env_vars = { + "CONFIG_DIR": "", + "CONFIG_FILE": "", + "DURATION": 0, + "WRITE_OBJ_SIZE": 0, + "REGISTRY_FILE": "", + "K6_OUT": "", + "K6_MIN_ITERATION_DURATION": "", + "K6_SETUP_TIMEOUT": "", + "MAX_TOTAL_SIZE_GB": 0, + "WRITERS": 0, + "READERS": 0, + "DELETERS": 0, + "READ_AGE": 0, + "STREAMING": 0, + "PREGEN_JSON": "", + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize( + "load_params, load_type, set_empty", + [(LoadScenario.VERIFY, LoadType.S3, True)], + indirect=True, + ) + def test_empty_argument_parsing_for_s3_verify_scenario(self, load_params: LoadParams): + expected_env_vars = { + "CLIENTS": 0, + "REGISTRY_FILE": "", + "K6_SETUP_TIMEOUT": "", + "NO_VERIFY_SSL": False, + "TIME_LIMIT": 0, + } + + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize( + "load_params, load_type, set_empty", + [(LoadScenario.VERIFY, LoadType.gRPC, True)], + indirect=True, + ) + def test_empty_argument_parsing_for_grpc_verify_scenario(self, load_params: LoadParams): + expected_env_vars = { + "CLIENTS": 0, + "REGISTRY_FILE": "", + "K6_SETUP_TIMEOUT": "", + "NO_VERIFY_SSL": False, + "TIME_LIMIT": 0, + } + + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize( + "load_params, load_type", + [(LoadScenario.gRPC, LoadType.gRPC)], + indirect=True, + ) + @pytest.mark.parametrize( + "load_time, expected_seconds", + [ + (300, 300), + ("2d3h45min", 186300), + ("1d6h", 108000), + ("1d", 86400), + ("1d1min", 86460), + ("2h", 7200), + ("2h2min", 7320), + ], + ) + def test_convert_time_to_seconds(self, load_params: LoadParams, load_time: str | int, expected_seconds: int): + load_params.load_time = 
load_time + assert load_params.load_time == expected_seconds + + def _check_preset_params(self, load_params: LoadParams, expected_preset_args: list[str]): + preset_parameters = load_params.get_preset_arguments() + assert sorted(preset_parameters) == sorted(expected_preset_args) + + def _check_env_vars(self, load_params: LoadParams, expected_env_vars: dict[str, str]): + env_vars = load_params.get_k6_vars() + assert env_vars == expected_env_vars + + def _check_all_values_none(self, dataclass, skip_fields=None): + if skip_fields is None: + skip_fields = [] + + dataclass_fields = [field for field in fields(dataclass) if field.name not in skip_fields] + for field in dataclass_fields: + value = getattr(dataclass, field.name) + assert value is None, f"{field.name} is not None" + + def _check_all_values_not_none(self, dataclass, skip_fields=None): + if skip_fields is None: + skip_fields = [] + + dataclass_fields = [field for field in fields(dataclass) if field.name not in skip_fields] + for field in dataclass_fields: + value = getattr(dataclass, field.name) + assert value is not None, f"{field.name} is not None" + + def _get_filled_load_params(self, load_type: LoadType, load_scenario: LoadScenario, set_emtpy: bool = False) -> LoadParams: + load_type_map = { + LoadScenario.S3: LoadType.S3, + LoadScenario.S3_CAR: LoadType.S3, + LoadScenario.gRPC: LoadType.gRPC, + LoadScenario.gRPC_CAR: LoadType.gRPC, + LoadScenario.LOCAL: LoadType.gRPC, + LoadScenario.HTTP: LoadType.HTTP, + } + load_type = load_type_map[load_scenario] if not load_type else load_type + + load_params = LoadParams(load_type) + load_params.scenario = load_scenario + load_params.preset = Preset() + + meta_fields = self._get_meta_fields(load_params) + for field in meta_fields: + if getattr(field.instance, field.field.name) is None and load_params.scenario in field.field.metadata["applicable_scenarios"]: + value_to_set_map = { + int: 0 if set_emtpy else len(field.field.name), + float: 0 if set_emtpy else len(field.field.name), + str: "" if set_emtpy else field.field.name, + list[str]: "" if set_emtpy else [field.field.name, f"{field.field.name}_2"], + bool: False if set_emtpy else True, + } + value_to_set = value_to_set_map[field.field_type] + setattr(field.instance, field.field.name, value_to_set) + + return load_params + + def _get_actual_field_type(self, field: Field) -> type: + return get_args(field.type)[0] if len(get_args(field.type)) else get_args(field.type) + + def _get_meta_fields(self, instance): + data_fields = fields(instance) + fields_with_data = [MetaTestField(field, self._get_actual_field_type(field), instance) for field in data_fields if field.metadata] + + for field in data_fields: + actual_field_type = self._get_actual_field_type(field) + if is_dataclass(actual_field_type) and getattr(instance, field.name): + fields_with_data += self._get_meta_fields(getattr(instance, field.name)) + + return fields_with_data or [] diff --git a/tests/test_ssh_shell.py b/tests/test_ssh_shell.py index 4d1c0fd0..ecd8c3cc 100644 --- a/tests/test_ssh_shell.py +++ b/tests/test_ssh_shell.py @@ -1,50 +1,68 @@ import os -from unittest import SkipTest, TestCase + +import pytest from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput -from frostfs_testlib.shell.ssh_shell import SSHShell +from frostfs_testlib.shell.ssh_shell import SshConnectionProvider, SSHShell from helpers import format_error_details, get_output_lines -def init_shell() -> SSHShell: - host = os.getenv("SSH_SHELL_HOST") +def get_shell(host: str): port = 
os.getenv("SSH_SHELL_PORT", "22") login = os.getenv("SSH_SHELL_LOGIN") - private_key_path = os.getenv("SSH_SHELL_PRIVATE_KEY_PATH") - private_key_passphrase = os.getenv("SSH_SHELL_PRIVATE_KEY_PASSPHRASE") + + password = os.getenv("SSH_SHELL_PASSWORD", "") + private_key_path = os.getenv("SSH_SHELL_PRIVATE_KEY_PATH", "") + private_key_passphrase = os.getenv("SSH_SHELL_PRIVATE_KEY_PASSPHRASE", "") if not all([host, login, private_key_path, private_key_passphrase]): # TODO: in the future we might use https://pypi.org/project/mock-ssh-server, # at the moment it is not suitable for us because of its issues with stdin - raise SkipTest("SSH connection is not configured") + pytest.skip("SSH connection is not configured") return SSHShell( host=host, port=port, login=login, + password=password, private_key_path=private_key_path, private_key_passphrase=private_key_passphrase, ) -class TestSSHShellInteractive(TestCase): - @classmethod - def setUpClass(cls): - cls.shell = init_shell() +@pytest.fixture(scope="module") +def shell() -> SSHShell: + return get_shell(host=os.getenv("SSH_SHELL_HOST")) - def test_command_with_one_prompt(self): + +@pytest.fixture(scope="module") +def shell_same_host() -> SSHShell: + return get_shell(host=os.getenv("SSH_SHELL_HOST")) + + +@pytest.fixture(scope="module") +def shell_another_host() -> SSHShell: + return get_shell(host=os.getenv("SSH_SHELL_HOST_2")) + + +@pytest.fixture(scope="function", autouse=True) +def reset_connection(): + provider = SshConnectionProvider() + provider.drop_all() + + +class TestSSHShellInteractive: + def test_command_with_one_prompt(self, shell: SSHShell): script = "password = input('Password: '); print('\\n' + password)" inputs = [InteractiveInput(prompt_pattern="Password", input="test")] - result = self.shell.exec( - f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs) - ) + result = shell.exec(f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs)) - self.assertEqual(0, result.return_code) - self.assertEqual(["Password: test", "test"], get_output_lines(result)) - self.assertEqual("", result.stderr) + assert result.return_code == 0 + assert ["Password: test", "test"] == get_output_lines(result) + assert not result.stderr - def test_command_with_several_prompts(self): + def test_command_with_several_prompts(self, shell: SSHShell): script = ( "input1 = input('Input1: '); print('\\n' + input1); " "input2 = input('Input2: '); print('\\n' + input2)" @@ -54,86 +72,132 @@ class TestSSHShellInteractive(TestCase): InteractiveInput(prompt_pattern="Input2", input="test2"), ] - result = self.shell.exec( - f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs) - ) + result = shell.exec(f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs)) - self.assertEqual(0, result.return_code) - self.assertEqual( - ["Input1: test1", "test1", "Input2: test2", "test2"], get_output_lines(result) - ) - self.assertEqual("", result.stderr) + assert result.return_code == 0 + assert ["Input1: test1", "test1", "Input2: test2", "test2"] == get_output_lines(result) + assert not result.stderr - def test_invalid_command_with_check(self): + def test_invalid_command_with_check(self, shell: SSHShell): script = "invalid script" inputs = [InteractiveInput(prompt_pattern=".*", input="test")] - with self.assertRaises(RuntimeError) as raised: - self.shell.exec(f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs)) + with pytest.raises(RuntimeError) as raised: + shell.exec(f'python3 -c "{script}"', 
CommandOptions(interactive_inputs=inputs)) - error = format_error_details(raised.exception) - self.assertIn("SyntaxError", error) - self.assertIn("return code: 1", error) + error = format_error_details(raised.value) + assert "SyntaxError" in error + assert "return code: 1" in error - def test_invalid_command_without_check(self): + def test_invalid_command_without_check(self, shell: SSHShell): script = "invalid script" inputs = [InteractiveInput(prompt_pattern=".*", input="test")] - result = self.shell.exec( + result = shell.exec( f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs, check=False), ) - self.assertIn("SyntaxError", result.stdout) - self.assertEqual(1, result.return_code) + assert "SyntaxError" in result.stdout + assert result.return_code == 1 - def test_non_existing_binary(self): + def test_non_existing_binary(self, shell: SSHShell): inputs = [InteractiveInput(prompt_pattern=".*", input="test")] - with self.assertRaises(RuntimeError) as raised: - self.shell.exec("not-a-command", CommandOptions(interactive_inputs=inputs)) + with pytest.raises(RuntimeError) as raised: + shell.exec("not-a-command", CommandOptions(interactive_inputs=inputs)) - error = format_error_details(raised.exception) - self.assertIn("return code: 127", error) + error = format_error_details(raised.value) + assert "return code: 127" in error -class TestSSHShellNonInteractive(TestCase): - @classmethod - def setUpClass(cls): - cls.shell = init_shell() - - def test_correct_command(self): +class TestSSHShellNonInteractive: + def test_correct_command(self, shell: SSHShell): script = "print('test')" - result = self.shell.exec(f'python3 -c "{script}"') + result = shell.exec(f'python3 -c "{script}"') - self.assertEqual(0, result.return_code) - self.assertEqual("test", result.stdout.strip()) - self.assertEqual("", result.stderr) + assert result.return_code == 0 + assert result.stdout.strip() == "test" + assert not result.stderr - def test_invalid_command_with_check(self): + def test_invalid_command_with_check(self, shell: SSHShell): script = "invalid script" - with self.assertRaises(RuntimeError) as raised: - self.shell.exec(f'python3 -c "{script}"') + with pytest.raises(RuntimeError) as raised: + shell.exec(f'python3 -c "{script}"') - error = format_error_details(raised.exception) - self.assertIn("Error", error) - self.assertIn("return code: 1", error) + error = format_error_details(raised.value) + assert "Error" in error + assert "return code: 1" in error - def test_invalid_command_without_check(self): + def test_invalid_command_without_check(self, shell: SSHShell): script = "invalid script" - result = self.shell.exec(f'python3 -c "{script}"', CommandOptions(check=False)) + result = shell.exec(f'python3 -c "{script}"', CommandOptions(check=False)) - self.assertEqual(1, result.return_code) + assert result.return_code == 1 # TODO: we have inconsistency with local shell here, the local shell captures error info # in stdout while ssh shell captures it in stderr - self.assertIn("Error", result.stderr) + assert "Error" in result.stderr - def test_non_existing_binary(self): - with self.assertRaises(RuntimeError) as exc: - self.shell.exec("not-a-command") + def test_non_existing_binary(self, shell: SSHShell): + with pytest.raises(RuntimeError) as raised: + shell.exec("not-a-command") - error = format_error_details(exc.exception) - self.assertIn("Error", error) - self.assertIn("return code: 127", error) + error = format_error_details(raised.value) + assert "Error" in error + assert "return code: 127" in error + 
+ +class TestSSHShellConnection: + def test_connection_provider_is_singleton(self): + provider = SshConnectionProvider() + provider2 = SshConnectionProvider() + assert id(provider) == id(provider2) + + def test_connection_provider_has_creds(self, shell: SSHShell): + provider = SshConnectionProvider() + assert len(provider.creds) == 1 + assert len(provider.connections) == 0 + + def test_connection_provider_has_only_one_connection(self, shell: SSHShell): + provider = SshConnectionProvider() + assert len(provider.connections) == 0 + shell.exec("echo 1") + assert len(provider.connections) == 1 + shell.exec("echo 2") + assert len(provider.connections) == 1 + shell.drop() + assert len(provider.connections) == 0 + + def test_connection_same_host(self, shell: SSHShell, shell_same_host: SSHShell): + provider = SshConnectionProvider() + assert len(provider.connections) == 0 + + shell.exec("echo 1") + assert len(provider.connections) == 1 + + shell_same_host.exec("echo 2") + assert len(provider.connections) == 1 + + shell.drop() + assert len(provider.connections) == 0 + + shell.exec("echo 3") + assert len(provider.connections) == 1 + + def test_connection_another_host(self, shell: SSHShell, shell_another_host: SSHShell): + provider = SshConnectionProvider() + assert len(provider.connections) == 0 + + shell.exec("echo 1") + assert len(provider.connections) == 1 + + shell_another_host.exec("echo 2") + assert len(provider.connections) == 2 + + shell.drop() + assert len(provider.connections) == 1 + + shell_another_host.drop() + assert len(provider.connections) == 0
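The TestSSHShellConnection cases above only exercise the observable contract of SshConnectionProvider: it behaves as a process-wide singleton, records credentials per target host when a shell is created, lazily opens at most one connection per host on exec, and releases connections via drop and drop_all. The snippet below is a minimal, self-contained sketch of that caching contract, written only to illustrate what the assertions check; it is not the frostfs-testlib implementation, and every name other than SshConnectionProvider, creds, connections, drop, and drop_all (for example FakeShell and provide) is hypothetical.

```python
# Minimal sketch (NOT the frostfs-testlib code) of the caching contract that
# TestSSHShellConnection asserts: a singleton provider that keeps per-host
# credentials and at most one live connection per host.
from __future__ import annotations


class SshConnectionProvider:
    _instance: "SshConnectionProvider | None" = None

    def __new__(cls) -> "SshConnectionProvider":
        # Singleton: every call returns the same object
        # (see test_connection_provider_is_singleton).
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance.creds = {}        # host -> credentials
            cls._instance.connections = {}  # host -> live "connection"
        return cls._instance

    def store_creds(self, host: str, creds: dict) -> None:
        self.creds[host] = creds

    def provide(self, host: str) -> object:
        # Reuse the cached connection, so two shells pointing at the same host
        # still account for a single entry in `connections`.
        if host not in self.connections:
            self.connections[host] = object()  # stand-in for a real SSH transport
        return self.connections[host]

    def drop(self, host: str) -> None:
        self.connections.pop(host, None)

    def drop_all(self) -> None:
        self.connections.clear()


class FakeShell:
    """Hypothetical stand-in for SSHShell, just enough to mirror the test flow."""

    def __init__(self, host: str):
        self.host = host
        SshConnectionProvider().store_creds(host, {"host": host})

    def exec(self, command: str) -> None:
        SshConnectionProvider().provide(self.host)  # connection is opened lazily

    def drop(self) -> None:
        SshConnectionProvider().drop(self.host)


if __name__ == "__main__":
    provider = SshConnectionProvider()
    first, second = FakeShell("host-a"), FakeShell("host-a")
    first.exec("echo 1")
    second.exec("echo 2")
    assert len(provider.connections) == 1  # same host -> one cached connection
    first.drop()
    assert not provider.connections
```

Under this reading, the autouse reset_connection fixture in the diff relies on drop_all() in exactly this way: it clears the cached connections before each test so every case starts from an empty cache regardless of what previous tests opened.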