Compare commits

1 commit

SHA1: fcde457bf4
Message: Change s3 auth func

    Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>

Date: 2023-06-29 11:48:06 +03:00

145 changed files with 3277 additions and 11013 deletions

@@ -1,109 +0,0 @@
hosts:
  - address: localhost
    hostname: localhost
    attributes:
      sudo_shell: false
    plugin_name: docker
    healthcheck_plugin_name: basic
    attributes:
      skip_readiness_check: True
      force_transactions: True
    services:
      - name: frostfs-storage_01
        attributes:
          container_name: s01
          config_path: /etc/frostfs/storage/config.yml
          wallet_path: ../frostfs-dev-env/services/storage/wallet01.json
          local_wallet_config_path: ./TemporaryDir/empty-password.yml
          local_wallet_path: ../frostfs-dev-env/services/storage/wallet01.json
          wallet_password: ""
          volume_name: storage_storage_s01
          endpoint_data0: s01.frostfs.devenv:8080
          control_endpoint: s01.frostfs.devenv:8081
          un_locode: "RU MOW"
      - name: frostfs-storage_02
        attributes:
          container_name: s02
          config_path: /etc/frostfs/storage/config.yml
          wallet_path: ../frostfs-dev-env/services/storage/wallet02.json
          local_wallet_config_path: ./TemporaryDir/empty-password.yml
          local_wallet_path: ../frostfs-dev-env/services/storage/wallet02.json
          wallet_password: ""
          volume_name: storage_storage_s02
          endpoint_data0: s02.frostfs.devenv:8080
          control_endpoint: s02.frostfs.devenv:8081
          un_locode: "RU LED"
      - name: frostfs-storage_03
        attributes:
          container_name: s03
          config_path: /etc/frostfs/storage/config.yml
          wallet_path: ../frostfs-dev-env/services/storage/wallet03.json
          local_wallet_config_path: ./TemporaryDir/empty-password.yml
          local_wallet_path: ../frostfs-dev-env/services/storage/wallet03.json
          wallet_password: ""
          volume_name: storage_storage_s03
          endpoint_data0: s03.frostfs.devenv:8080
          control_endpoint: s03.frostfs.devenv:8081
          un_locode: "SE STO"
      - name: frostfs-storage_04
        attributes:
          container_name: s04
          config_path: /etc/frostfs/storage/config.yml
          wallet_path: ../frostfs-dev-env/services/storage/wallet04.json
          local_wallet_config_path: ./TemporaryDir/empty-password.yml
          local_wallet_path: ../frostfs-dev-env/services/storage/wallet04.json
          wallet_password: ""
          volume_name: storage_storage_s04
          endpoint_data0: s04.frostfs.devenv:8080
          control_endpoint: s04.frostfs.devenv:8081
          un_locode: "FI HEL"
      - name: frostfs-s3_01
        attributes:
          container_name: s3_gate
          config_path: ../frostfs-dev-env/services/s3_gate/.s3.env
          wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json
          local_wallet_config_path: ./TemporaryDir/password-s3.yml
          local_wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json
          wallet_password: "s3"
          endpoint_data0: https://s3.frostfs.devenv:8080
      - name: frostfs-http_01
        attributes:
          container_name: http_gate
          config_path: ../frostfs-dev-env/services/http_gate/.http.env
          wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json
          local_wallet_config_path: ./TemporaryDir/password-other.yml
          local_wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json
          wallet_password: "one"
          endpoint_data0: http://http.frostfs.devenv
      - name: frostfs-ir_01
        attributes:
          container_name: ir01
          config_path: ../frostfs-dev-env/services/ir/.ir.env
          wallet_path: ../frostfs-dev-env/services/ir/az.json
          local_wallet_config_path: ./TemporaryDir/password-other.yml
          local_wallet_path: ../frostfs-dev-env/services/ir/az.json
          wallet_password: "one"
      - name: neo-go_01
        attributes:
          container_name: morph_chain
          config_path: ../frostfs-dev-env/services/morph_chain/protocol.privnet.yml
          wallet_path: ../frostfs-dev-env/services/morph_chain/node-wallet.json
          local_wallet_config_path: ./TemporaryDir/password-other.yml
          local_wallet_path: ../frostfs-dev-env/services/morph_chain/node-wallet.json
          wallet_password: "one"
          endpoint_internal0: http://morph-chain.frostfs.devenv:30333
      - name: main-chain_01
        attributes:
          container_name: main_chain
          config_path: ../frostfs-dev-env/services/chain/protocol.privnet.yml
          wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json
          local_wallet_config_path: ./TemporaryDir/password-other.yml
          local_wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json
          wallet_password: "one"
          endpoint_internal0: http://main-chain.frostfs.devenv:30333
      - name: coredns_01
        attributes:
          container_name: coredns
    clis:
      - name: frostfs-cli
        exec_path: frostfs-cli


@@ -1,21 +0,0 @@
name: DCO action
on: [pull_request]

jobs:
  dco:
    name: DCO
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Setup Go
        uses: actions/setup-go@v3
        with:
          go-version: '1.21'

      - name: Run commit format checker
        uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
        with:
          from: 'origin/${{ github.event.pull_request.base.ref }}'

.github/CODEOWNERS (vendored, new file, 1 addition)

@@ -0,0 +1 @@
* @aprasolova @vdomnich-yadro @dansingjulia @yadro-vavdeev @abereziny

.github/workflows/dco.yml (vendored, new file, 21 additions)

@@ -0,0 +1,21 @@
name: DCO check
on:
  pull_request:
    branches:
      - master

jobs:
  commits_check_job:
    runs-on: ubuntu-latest
    name: Commits Check
    steps:
      - name: Get PR Commits
        id: 'get-pr-commits'
        uses: tim-actions/get-pr-commits@master
        with:
          token: ${{ secrets.GITHUB_TOKEN }}

      - name: DCO Check
        uses: tim-actions/dco@master
        with:
          commits: ${{ steps.get-pr-commits.outputs.commits }}

.gitignore (vendored, 2 deletions)

@@ -1,7 +1,6 @@
# ignore IDE files
.vscode
.idea
-venv.*

# ignore temp files under any path
.DS_Store

@@ -11,4 +10,3 @@ venv.*
/dist
/build
*.egg-info
-wallet_config.yml


@@ -1 +0,0 @@
* @JuliaKovshova @abereziny @d.zayakin @anikeev-yadro @anurindm @ylukoyan @i.niyazov


@@ -3,8 +3,8 @@
First, thank you for contributing! We love and encourage pull requests from
everyone. Please follow the guidelines:

-- Check the open [issues](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/issues) and
-  [pull requests](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/pulls) for existing
+- Check the open [issues](https://github.com/TrueCloudLab/frostfs-testlib/issues) and
+  [pull requests](https://github.com/TrueCloudLab/frostfs-testlib/pulls) for existing
  discussions.

- Open an issue first, to discuss a new feature or enhancement.

@@ -26,8 +26,8 @@ Start by forking the `frostfs-testlib` repository, make changes in a branch and
send a pull request. We encourage pull requests to discuss code changes. Here
are the steps in details:

-### Set up your Git Repository
-Fork [FrostFS testlib upstream](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/forks) source
+### Set up your GitHub Repository
+Fork [FrostFS testlib upstream](https://github.com/TrueCloudLab/frostfs-testlib/fork) source
repository to your own personal repository. Copy the URL of your fork and clone it:

```shell
@@ -37,7 +37,7 @@ $ git clone <url of your fork>
### Set up git remote as ``upstream``
```shell
$ cd frostfs-testlib
-$ git remote add upstream https://git.frostfs.info/TrueCloudLab/frostfs-testlib
+$ git remote add upstream https://github.com/TrueCloudLab/frostfs-testlib
$ git fetch upstream
```

@@ -63,9 +63,9 @@ $ git checkout -b feature/123-something_awesome
```

### Test your changes
-Before submitting any changes to the library, please, make sure that linter and all unit tests are passing. To run the tests, please, use the following command:
+Before submitting any changes to the library, please, make sure that all unit tests are passing. To run the tests, please, use the following command:
```shell
-$ make validation
+$ python -m unittest discover --start-directory tests
```

To enable tests that interact with SSH server, please, setup SSH server and set the following environment variables before running the tests:

@@ -99,8 +99,8 @@ $ git push origin feature/123-something_awesome
```

### Create a Pull Request
-Pull requests can be created via Git. Refer to [this
-document](https://docs.codeberg.org/collaborating/pull-requests-and-git-flow/) for
+Pull requests can be created via GitHub. Refer to [this
+document](https://help.github.com/articles/creating-a-pull-request/) for
detailed steps on how to create a pull request. After a Pull Request gets peer
reviewed and approved, it will be merged.


@@ -1,11 +1,8 @@
SHELL := /bin/bash
PYTHON_VERSION := 3.10
-VENV_NAME := frostfs-testlib
-VENV_DIR := venv.${VENV_NAME}
+VENV_DIR := venv.frostfs-testlib
current_dir := $(shell pwd)
-DIRECTORIES := $(sort $(dir $(wildcard ../frostfs-testlib-plugin-*/ ../*-testcases/)))
-FROM_VENV := . ${VENV_DIR}/bin/activate &&

venv: create requirements paths precommit
	@echo Ready

@@ -16,35 +13,15 @@ precommit:
paths:
	@echo Append paths for project
-	@echo Virtual environment: ${current_dir}/${VENV_DIR}
-	@rm -rf ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
-	@touch ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
-	@echo ${current_dir}/src | tee ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
+	@echo Virtual environment: ${VENV_DIR}
+	@sudo rm -rf ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
+	@sudo touch ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
+	@echo ${current_dir}/src/frostfs_testlib_frostfs_testlib | sudo tee ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth

-create: ${VENV_DIR}
-
-${VENV_DIR}:
-	@echo Create virtual environment ${current_dir}/${VENV_DIR}
-	virtualenv --python=python${PYTHON_VERSION} --prompt=${VENV_NAME} ${VENV_DIR}
+create:
+	@echo Create virtual environment for
+	virtualenv --python=python${PYTHON_VERSION} --prompt=frostfs-testlib ${VENV_DIR}

requirements:
	@echo Isntalling pip requirements
	. ${VENV_DIR}/bin/activate && pip install -Ur requirements.txt

-#### VALIDATION SECTION ####
-lint: create requirements
-	${FROM_VENV} pylint --disable R,C,W ./src
-
-unit_test:
-	@echo Starting unit tests
-	${FROM_VENV} python -m pytest tests
-
-.PHONY: lint_dependent $(DIRECTORIES)
-lint_dependent: $(DIRECTORIES)
-
-$(DIRECTORIES):
-	@echo checking dependent repo $@
-	$(MAKE) validation -C $@
-
-validation: lint unit_test lint_dependent


@@ -92,4 +92,4 @@ The library provides the following primary components:
## Contributing

-Any contributions to the library should conform to the [contribution guideline](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/src/branch/master/CONTRIBUTING.md).
+Any contributions to the library should conform to the [contribution guideline](https://github.com/TrueCloudLab/frostfs-testlib/blob/master/CONTRIBUTING.md).


@@ -18,17 +18,17 @@ keywords = ["frostfs", "test"]
dependencies = [
    "allure-python-commons>=2.13.2",
    "docker>=4.4.0",
-    "pyyaml==6.0.1",
+    "importlib_metadata>=5.0; python_version < '3.10'",
    "neo-mamba==1.0.0",
    "paramiko>=2.10.3",
    "pexpect>=4.8.0",
-    "requests==2.28.1",
+    "requests>=2.28.0",
    "docstring_parser>=0.15",
    "testrail-api>=1.12.0",
    "pytest==7.1.2",
    "tenacity==8.0.1",
-    "boto3==1.35.30",
-    "boto3-stubs[essential]==1.35.30",
+    "boto3==1.16.33",
+    "boto3-stubs[essential]==1.16.33",
]

requires-python = ">=3.10"

@@ -36,7 +36,7 @@ requires-python = ">=3.10"
dev = ["black", "bumpver", "isort", "pre-commit"]

[project.urls]
-Homepage = "https://git.frostfs.info/TrueCloudLab/frostfs-testlib"
+Homepage = "https://github.com/TrueCloudLab/frostfs-testlib"

[project.entry-points."frostfs.testlib.reporter"]
allure = "frostfs_testlib.reporter.allure_handler:AllureHandler"

@@ -44,33 +44,13 @@ allure = "frostfs_testlib.reporter.allure_handler:AllureHandler"
[project.entry-points."frostfs.testlib.hosting"]
docker = "frostfs_testlib.hosting.docker_host:DockerHost"

-[project.entry-points."frostfs.testlib.healthcheck"]
-basic = "frostfs_testlib.healthcheck.basic_healthcheck:BasicHealthcheck"
-
-[project.entry-points."frostfs.testlib.csc_managers"]
-config = "frostfs_testlib.storage.controllers.state_managers.config_state_manager:ConfigStateManager"
-
-[project.entry-points."frostfs.testlib.services"]
-frostfs-storage = "frostfs_testlib.storage.dataclasses.frostfs_services:StorageNode"
-frostfs-s3 = "frostfs_testlib.storage.dataclasses.frostfs_services:S3Gate"
-frostfs-http = "frostfs_testlib.storage.dataclasses.frostfs_services:HTTPGate"
-neo-go = "frostfs_testlib.storage.dataclasses.frostfs_services:MorphChain"
-frostfs-ir = "frostfs_testlib.storage.dataclasses.frostfs_services:InnerRing"
-
-[project.entry-points."frostfs.testlib.credentials_providers"]
-authmate = "frostfs_testlib.credentials.authmate_s3_provider:AuthmateS3CredentialsProvider"
-wallet_factory = "frostfs_testlib.credentials.wallet_factory_provider:WalletFactoryProvider"
-
-[project.entry-points."frostfs.testlib.bucket_cid_resolver"]
-frostfs = "frostfs_testlib.s3.curl_bucket_resolver:CurlBucketContainerResolver"
-
[tool.isort]
profile = "black"
src_paths = ["src", "tests"]
-line_length = 140
+line_length = 100

[tool.black]
-line-length = 140
+line-length = 100
target-version = ["py310"]

[tool.bumpver]

@@ -84,12 +64,3 @@ push = false
[tool.bumpver.file_patterns]
"pyproject.toml" = ['current_version = "{version}"', 'version = "{version}"']
"src/frostfs_testlib/__init__.py" = ["{version}"]
-
-[tool.pytest.ini_options]
-filterwarnings = [
-    "ignore:Blowfish has been deprecated:cryptography.utils.CryptographyDeprecationWarning",
-]
-testpaths = ["tests"]
-
-[project.entry-points.pytest11]
-testlib = "frostfs_testlib"


@@ -1,5 +1,6 @@
allure-python-commons==2.13.2
docker==4.4.0
+importlib_metadata==5.0.0
neo-mamba==1.0.0
paramiko==2.10.3
pexpect==4.8.0

@@ -8,15 +9,14 @@ docstring_parser==0.15
testrail-api==1.12.0
tenacity==8.0.1
pytest==7.1.2
-boto3==1.35.30
-boto3-stubs[essential]==1.35.30
+boto3==1.16.33
+boto3-stubs[essential]==1.16.33

# Dev dependencies
black==22.8.0
bumpver==2022.1118
isort==5.12.0
pre-commit==2.20.0
-pylint==2.17.4

# Packaging dependencies
build==0.8.0


@@ -1,4 +1 @@
__version__ = "2.0.1"
-
-from .fixtures import configure_testlib, hosting, temp_directory
-from .hooks import pytest_collection_modifyitems


@@ -1,5 +1,5 @@
from frostfs_testlib.analytics import test_case
from frostfs_testlib.analytics.test_case import TestCasePriority
from frostfs_testlib.analytics.test_collector import TestCase, TestCaseCollector
-from frostfs_testlib.analytics.test_exporter import TСExporter
+from frostfs_testlib.analytics.test_exporter import TestExporter
from frostfs_testlib.analytics.testrail_exporter import TestrailExporter


@@ -6,7 +6,6 @@ from docstring_parser.google import DEFAULT_SECTIONS, Section, SectionType
DEFAULT_SECTIONS.append(Section("Steps", "steps", SectionType.MULTIPLE))
-
class TestCase:
    """
    Test case object implementation for use in collector and exporters

@@ -107,9 +106,7 @@
        # Read test_case suite and section name from test class if possible and get test function from class
        if test.cls:
            suite_name = test.cls.__dict__.get("__test_case_suite_name__", suite_name)
-            suite_section_name = test.cls.__dict__.get(
-                "__test_case_suite_section__", suite_section_name
-            )
+            suite_section_name = test.cls.__dict__.get("__test_case_suite_section__", suite_section_name)
            test_function = test.cls.__dict__[test.originalname]
        else:
            # If no test class, read test function from module

@@ -120,9 +117,7 @@
        test_case_title = test_function.__dict__.get("__test_case_title__", None)
        test_case_priority = test_function.__dict__.get("__test_case_priority__", None)
        suite_name = test_function.__dict__.get("__test_case_suite_name__", suite_name)
-        suite_section_name = test_function.__dict__.get(
-            "__test_case_suite_section__", suite_section_name
-        )
+        suite_section_name = test_function.__dict__.get("__test_case_suite_section__", suite_section_name)

        # Parce test_steps if they define in __doc__
        doc_string = parse(test_function.__doc__, style=DocstringStyle.GOOGLE)

@@ -130,9 +125,7 @@
        if doc_string.short_description:
            test_case_description = doc_string.short_description
        if doc_string.long_description:
-            test_case_description = (
-                f"{doc_string.short_description}\r\n{doc_string.long_description}"
-            )
+            test_case_description = f"{doc_string.short_description}\r\n{doc_string.long_description}"

        if doc_string.meta:
            for meta in doc_string.meta:

@@ -147,19 +140,17 @@
            test_case_params = test_case_call_spec.id
        # Format title with params
        if test_case_title:
-            test_case_title = self.__format_string_with_params__(
-                test_case_title, test_case_call_spec.params
-            )
+            test_case_title = self.__format_string_with_params__(test_case_title,test_case_call_spec.params)
        # Format steps with params
        if test_case_steps:
            for key, value in test_case_steps.items():
-                value = self.__format_string_with_params__(value, test_case_call_spec.params)
+                value = self.__format_string_with_params__(value,test_case_call_spec.params)
                test_case_steps[key] = value

        # If there is set basic test case attributes create TestCase and return
        if test_case_id and test_case_title and suite_name and suite_name:
            test_case = TestCase(
-                uuid_id=test_case_id,
+                id=test_case_id,
                title=test_case_title,
                description=test_case_description,
                priority=test_case_priority,


@@ -3,8 +3,7 @@ from abc import ABC, abstractmethod
from frostfs_testlib.analytics.test_collector import TestCase

-# TODO: REMOVE ME
-class TСExporter(ABC):
+class TestExporter(ABC):
    test_cases_cache = []
    test_suites_cache = []

@@ -47,7 +46,9 @@ class TСExporter(ABC):
        """

    @abstractmethod
-    def update_test_case(self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section) -> None:
+    def update_test_case(
+        self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section
+    ) -> None:
        """
        Update test case in TMS
        """

@@ -59,11 +60,13 @@ class TСExporter(ABC):
        for test_case in test_cases:
            test_suite = self.get_or_create_test_suite(test_case.suite_name)
-            test_section = self.get_or_create_suite_section(test_suite, test_case.suite_section_name)
+            test_section = self.get_or_create_suite_section(
+                test_suite, test_case.suite_section_name
+            )
            test_case_in_tms = self.search_test_case_id(test_case.id)
            steps = [{"content": value, "expected": " "} for key, value in test_case.steps.items()]

            if test_case_in_tms:
-                self.update_test_case(test_case, test_case_in_tms, test_suite, test_section)
+                self.update_test_case(test_case, test_case_in_tms)
            else:
-                self.create_test_case(test_case, test_suite, test_section)
+                self.create_test_case(test_case)


@@ -1,10 +1,10 @@
from testrail_api import TestRailAPI

from frostfs_testlib.analytics.test_collector import TestCase
-from frostfs_testlib.analytics.test_exporter import TСExporter
+from frostfs_testlib.analytics.test_exporter import TestExporter

-class TestrailExporter(TСExporter):
+class TestrailExporter(TestExporter):
    def __init__(
        self,
        tr_url: str,

@@ -62,13 +62,19 @@ class TestrailExporter(TСExporter):
        It's help do not call TMS each time then we search test case
        """
        for test_suite in self.test_suites_cache:
-            self.test_cases_cache.extend(self.api.cases.get_cases(self.tr_project_id, suite_id=test_suite["id"]))
+            self.test_cases_cache.extend(
+                self.api.cases.get_cases(self.tr_project_id, suite_id=test_suite["id"])
+            )

    def search_test_case_id(self, test_case_id: str) -> object:
        """
        Find test cases in TestRail (cache) by ID
        """
-        test_cases = [test_case for test_case in self.test_cases_cache if test_case["custom_autotest_name"] == test_case_id]
+        test_cases = [
+            test_case
+            for test_case in self.test_cases_cache
+            if test_case["custom_autotest_name"] == test_case_id
+        ]

        if len(test_cases) > 1:
            raise RuntimeError(f"Too many results found in test rail for id {test_case_id}")

@@ -81,7 +87,9 @@
        """
        Get suite name with exact name from Testrail or create if not exist
        """
-        test_rail_suites = [suite for suite in self.test_suites_cache if suite["name"] == test_suite_name]
+        test_rail_suites = [
+            suite for suite in self.test_suites_cache if suite["name"] == test_suite_name
+        ]

        if not test_rail_suites:
            test_rail_suite = self.api.suites.add_suite(

@@ -94,13 +102,17 @@
        elif len(test_rail_suites) == 1:
            return test_rail_suites.pop()
        else:
-            raise RuntimeError(f"Too many results found in test rail for suite name {test_suite_name}")
+            raise RuntimeError(
+                f"Too many results found in test rail for suite name {test_suite_name}"
+            )

    def get_or_create_suite_section(self, test_rail_suite, section_name) -> object:
        """
        Get suite section with exact name from Testrail or create new one if not exist
        """
-        test_rail_sections = [section for section in test_rail_suite["sections"] if section["name"] == section_name]
+        test_rail_sections = [
+            section for section in test_rail_suite["sections"] if section["name"] == section_name
+        ]

        if not test_rail_sections:
            test_rail_section = self.api.sections.add_section(

@@ -116,7 +128,9 @@
        elif len(test_rail_sections) == 1:
            return test_rail_sections.pop()
        else:
-            raise RuntimeError(f"Too many results found in test rail for section name {section_name}")
+            raise RuntimeError(
+                f"Too many results found in test rail for section name {section_name}"
+            )

    def prepare_request_body(self, test_case: TestCase, test_suite, test_suite_section) -> dict:
        """

@@ -150,7 +164,9 @@
        self.api.cases.add_case(**request_body)

-    def update_test_case(self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section) -> None:
+    def update_test_case(
+        self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section
+    ) -> None:
        """
        Update test case in Testrail
        """


@@ -1,5 +1,4 @@
from frostfs_testlib.cli.frostfs_adm import FrostfsAdm
from frostfs_testlib.cli.frostfs_authmate import FrostfsAuthmate
from frostfs_testlib.cli.frostfs_cli import FrostfsCli
-from frostfs_testlib.cli.generic_cli import GenericCli
from frostfs_testlib.cli.neogo import NeoGo, NetworkType


@@ -27,7 +27,11 @@ class FrostfsAdmMorph(CliCommand):
        """
        return self._execute(
            "morph deposit-notary",
-            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            },
        )

    def dump_balances(

@@ -52,7 +56,11 @@
        """
        return self._execute(
            "morph dump-balances",
-            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            },
        )

    def dump_config(self, rpc_endpoint: str) -> CommandResult:
@@ -66,23 +74,11 @@
        """
        return self._execute(
            "morph dump-config",
-            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
-        )
-
-    def set_config(self, set_key_value: str, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None) -> CommandResult:
-        """Add/update global config value in the FrostFS network.
-
-        Args:
-            set_key_value: key1=val1 [key2=val2 ...]
-            alphabet_wallets: Path to alphabet wallets dir
-            rpc_endpoint: N3 RPC node endpoint
-
-        Returns:
-            Command's result.
-        """
-        return self._execute(
-            f"morph set-config {set_key_value}",
-            **{param: param_value for param, param_value in locals().items() if param not in ["self", "set_key_value"]},
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            },
        )
    def dump_containers(

@@ -105,10 +101,14 @@
        """
        return self._execute(
            "morph dump-containers",
-            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            },
        )

-    def dump_hashes(self, rpc_endpoint: str, domain: Optional[str] = None) -> CommandResult:
+    def dump_hashes(self, rpc_endpoint: str) -> CommandResult:
        """Dump deployed contract hashes.

        Args:

@@ -119,11 +119,15 @@
        """
        return self._execute(
            "morph dump-hashes",
-            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            },
        )

    def force_new_epoch(
-        self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None, delta: Optional[int] = None
+        self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None
    ) -> CommandResult:
        """Create new FrostFS epoch event in the side chain.
@@ -136,7 +140,11 @@
        """
        return self._execute(
            "morph force-new-epoch",
-            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            },
        )

    def generate_alphabet(

@@ -157,7 +165,11 @@
        """
        return self._execute(
            "morph generate-alphabet",
-            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            },
        )

    def generate_storage_wallet(

@@ -180,7 +192,11 @@
        """
        return self._execute(
            "morph generate-storage-wallet",
-            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            },
        )

    def init(

@@ -203,7 +219,7 @@
            container_alias_fee: Container alias fee (default 500).
            container_fee: Container registration fee (default 1000).
            contracts: Path to archive with compiled FrostFS contracts
-                (default fetched from latest git release).
+                (default fetched from latest github release).
            epoch_duration: Amount of side chain blocks in one FrostFS epoch (default 240).
            homomorphic_disabled: Disable object homomorphic hashing.
            local_dump: Path to the blocks dump file.

@@ -216,7 +232,11 @@
        """
        return self._execute(
            "morph init",
-            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            },
        )
    def refill_gas(

@@ -239,7 +259,11 @@
        """
        return self._execute(
            "morph refill-gas",
-            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            },
        )

    def restore_containers(

@@ -262,7 +286,11 @@
        """
        return self._execute(
            "morph restore-containers",
-            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            },
        )

    def set_policy(

@@ -312,7 +340,7 @@
        Args:
            alphabet_wallets: Path to alphabet wallets dir.
            contracts: Path to archive with compiled FrostFS contracts
-                (default fetched from latest git release).
+                (default fetched from latest github release).
            rpc_endpoint: N3 RPC node endpoint.

        Returns:

@@ -320,13 +348,17 @@
        """
        return self._execute(
            "morph update-contracts",
-            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            },
        )

    def remove_nodes(
        self, node_netmap_keys: list[str], rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None
    ) -> CommandResult:
-        """Move node to the Offline state in the candidates list
+        """ Move node to the Offline state in the candidates list
        and tick an epoch to update the netmap using frostfs-adm

        Args:
@@ -342,124 +374,9 @@
        return self._execute(
            f"morph remove-nodes {' '.join(node_netmap_keys)}",
-            **{param: param_value for param, param_value in locals().items() if param not in ["self", "node_netmap_keys"]},
-        )
-
-    def add_rule(
-        self,
-        chain_id: str,
-        target_name: str,
-        target_type: str,
-        rule: Optional[list[str]] = None,
-        path: Optional[str] = None,
-        chain_id_hex: Optional[bool] = None,
-        chain_name: Optional[str] = None,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """Drop objects from the node's local storage
-
-        Args:
-            chain-id: Assign ID to the parsed chain
-            chain-id-hex: Flag to parse chain ID as hex
-            path: Path to encoded chain in JSON or binary format
-            rule: Rule statement
-            target-name: Resource name in APE resource name format
-            target-type: Resource type(container/namespace)
-            timeout: Timeout for an operation (default 15s)
-            wallet: Path to the wallet or binary key
-
-        Returns:
-            Command`s result.
-        """
-        return self._execute(
-            "morph ape add-rule-chain",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def get_rule(
-        self,
-        chain_id: str,
-        target_name: str,
-        target_type: str,
-        chain_id_hex: Optional[bool] = None,
-        chain_name: Optional[str] = None,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """Drop objects from the node's local storage
-
-        Args:
-            chain-id string Chain id
-            chain-id-hex Flag to parse chain ID as hex
-            target-name string Resource name in APE resource name format
-            target-type string Resource type(container/namespace)
-            timeout duration Timeout for an operation (default 15s)
-            wallet string Path to the wallet or binary key
-
-        Returns:
-            Command`s result.
-        """
-        return self._execute(
-            "morph ape get-rule-chain",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def list_rules(
-        self,
-        target_type: str,
-        target_name: Optional[str] = None,
-        rpc_endpoint: Optional[str] = None,
-        chain_name: Optional[str] = None,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """Drop objects from the node's local storage
-
-        Args:
-            target-name: Resource name in APE resource name format
-            target-type: Resource type(container/namespace)
-            timeout: Timeout for an operation (default 15s)
-            wallet: Path to the wallet or binary key
-
-        Returns:
-            Command`s result.
-        """
-        return self._execute(
-            "morph ape list-rule-chains",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def remove_rule(
-        self,
-        chain_id: str,
-        target_name: str,
-        target_type: str,
-        all: Optional[bool] = None,
-        chain_name: Optional[str] = None,
-        chain_id_hex: Optional[bool] = None,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """Drop objects from the node's local storage
-
-        Args:
-            all: Remove all chains
-            chain-id: Assign ID to the parsed chain
-            chain-id-hex: Flag to parse chain ID as hex
-            target-name: Resource name in APE resource name format
-            target-type: Resource type(container/namespace)
-            timeout: Timeout for an operation (default 15s)
-            wallet: Path to the wallet or binary key
-
-        Returns:
-            Command`s result.
-        """
-        return self._execute(
-            "morph ape rm-rule-chain",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self", "node_netmap_keys"]
+            },
        )


@@ -6,8 +6,8 @@ from frostfs_testlib.shell import Shell
class FrostfsAuthmate:
-    secret: FrostfsAuthmateSecret
-    version: FrostfsAuthmateVersion
+    secret: Optional[FrostfsAuthmateSecret] = None
+    version: Optional[FrostfsAuthmateVersion] = None

    def __init__(self, shell: Shell, frostfs_authmate_exec_path: str):
        self.secret = FrostfsAuthmateSecret(shell, frostfs_authmate_exec_path)


@@ -44,6 +44,7 @@ class FrostfsAuthmateSecret(CliCommand):
        wallet: str,
        wallet_password: str,
        peer: str,
+        bearer_rules: str,
        gate_public_key: Union[str, list[str]],
        address: Optional[str] = None,
        container_id: Optional[str] = None,


@@ -22,7 +22,7 @@ class FrostfsCliACL(CliCommand):
        Well-known system object headers start with '$Object:' prefix.
        User defined headers start without prefix.
        Read more about filter keys at:
-        https://git.frostfs.info/TrueCloudLab/frostfs-api/src/branch/master/proto-docs/acl.md#message-eaclrecord-filter
+        http://github.com/TrueCloudLab/frostfs-api/blob/master/proto-docs/acl.md#message-eaclrecordfilter
        Match is '=' for matching and '!=' for non-matching filter.
        Value is a valid unicode string corresponding to object or request header value.


@@ -1,70 +0,0 @@
from typing import Optional

from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult


class FrostfsCliApeManager(CliCommand):
    """Operations with APE manager."""

    def add(
        self,
        rpc_endpoint: str,
        chain_id: Optional[str] = None,
        chain_id_hex: Optional[str] = None,
        path: Optional[str] = None,
        rule: Optional[str] | Optional[list[str]] = None,
        target_name: Optional[str] = None,
        target_type: Optional[str] = None,
        wallet: Optional[str] = None,
        address: Optional[str] = None,
        timeout: Optional[str] = None,
    ) -> CommandResult:
        """Add rule chain for a target."""

        return self._execute(
            "ape-manager add",
            **{param: value for param, value in locals().items() if param not in ["self"]},
        )

    def list(
        self,
        rpc_endpoint: str,
        target_name: Optional[str] = None,
        target_type: Optional[str] = None,
        wallet: Optional[str] = None,
        address: Optional[str] = None,
        timeout: Optional[str] = None,
    ) -> CommandResult:
        """Generate APE override by target and APE chains. Util command.

        Generated APE override can be dumped to a file in JSON format that is passed to
        "create" command.
        """

        return self._execute(
            "ape-manager list",
            **{param: value for param, value in locals().items() if param not in ["self"]},
        )

    def remove(
        self,
        rpc_endpoint: str,
        chain_id: Optional[str] = None,
        chain_id_hex: Optional[str] = None,
        target_name: Optional[str] = None,
        target_type: Optional[str] = None,
        wallet: Optional[str] = None,
        address: Optional[str] = None,
        timeout: Optional[str] = None,
    ) -> CommandResult:
        """Generate APE override by target and APE chains. Util command.

        Generated APE override can be dumped to a file in JSON format that is passed to
        "create" command.
        """

        return self._execute(
            "ape-manager remove",
            **{param: value for param, value in locals().items() if param not in ["self"]},
        )


@@ -1,54 +0,0 @@
from typing import Optional

from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult


class FrostfsCliBearer(CliCommand):
    def create(
        self,
        rpc_endpoint: str,
        out: str,
        issued_at: Optional[str] = None,
        expire_at: Optional[str] = None,
        not_valid_before: Optional[str] = None,
        ape: Optional[str] = None,
        eacl: Optional[str] = None,
        owner: Optional[str] = None,
        json: Optional[bool] = False,
        impersonate: Optional[bool] = False,
        wallet: Optional[str] = None,
        address: Optional[str] = None,
    ) -> CommandResult:
        """Create bearer token.

        All epoch flags can be specified relative to the current epoch with the +n syntax.
        In this case --rpc-endpoint flag should be specified and the epoch in bearer token
        is set to current epoch + n.
        """
        return self._execute(
            "bearer create",
            **{param: value for param, value in locals().items() if param not in ["self"]},
        )

    def generate_ape_override(
        self,
        chain_id: Optional[str] = None,
        chain_id_hex: Optional[str] = None,
        cid: Optional[str] = None,
        output: Optional[str] = None,
        path: Optional[str] = None,
        rule: Optional[str] = None,
        wallet: Optional[str] = None,
        address: Optional[str] = None,
    ) -> CommandResult:
        """Generate APE override by target and APE chains. Util command.

        Generated APE override can be dumped to a file in JSON format that is passed to
        "create" command.
        """
        return self._execute(
            "bearer generate-ape-override",
            **{param: value for param, value in locals().items() if param not in ["self"]},
        )


@@ -2,16 +2,12 @@ from typing import Optional
from frostfs_testlib.cli.frostfs_cli.accounting import FrostfsCliAccounting
from frostfs_testlib.cli.frostfs_cli.acl import FrostfsCliACL
-from frostfs_testlib.cli.frostfs_cli.ape_manager import FrostfsCliApeManager
-from frostfs_testlib.cli.frostfs_cli.bearer import FrostfsCliBearer
from frostfs_testlib.cli.frostfs_cli.container import FrostfsCliContainer
-from frostfs_testlib.cli.frostfs_cli.control import FrostfsCliControl
from frostfs_testlib.cli.frostfs_cli.netmap import FrostfsCliNetmap
from frostfs_testlib.cli.frostfs_cli.object import FrostfsCliObject
from frostfs_testlib.cli.frostfs_cli.session import FrostfsCliSession
from frostfs_testlib.cli.frostfs_cli.shards import FrostfsCliShards
from frostfs_testlib.cli.frostfs_cli.storagegroup import FrostfsCliStorageGroup
-from frostfs_testlib.cli.frostfs_cli.tree import FrostfsCliTree
from frostfs_testlib.cli.frostfs_cli.util import FrostfsCliUtil
from frostfs_testlib.cli.frostfs_cli.version import FrostfsCliVersion
from frostfs_testlib.shell import Shell

@@ -28,7 +24,6 @@ class FrostfsCli:
    storagegroup: FrostfsCliStorageGroup
    util: FrostfsCliUtil
    version: FrostfsCliVersion
-    control: FrostfsCliControl

    def __init__(self, shell: Shell, frostfs_cli_exec_path: str, config_file: Optional[str] = None):
        self.accounting = FrostfsCliAccounting(shell, frostfs_cli_exec_path, config=config_file)

@@ -41,7 +36,3 @@
        self.storagegroup = FrostfsCliStorageGroup(shell, frostfs_cli_exec_path, config=config_file)
        self.util = FrostfsCliUtil(shell, frostfs_cli_exec_path, config=config_file)
        self.version = FrostfsCliVersion(shell, frostfs_cli_exec_path, config=config_file)
-        self.tree = FrostfsCliTree(shell, frostfs_cli_exec_path, config=config_file)
-        self.control = FrostfsCliControl(shell, frostfs_cli_exec_path, config=config_file)
-        self.bearer = FrostfsCliBearer(shell, frostfs_cli_exec_path, config=config_file)
-        self.ape_manager = FrostfsCliApeManager(shell, frostfs_cli_exec_path, config=config_file)


@@ -8,16 +8,12 @@ class FrostfsCliContainer(CliCommand):
    def create(
        self,
        rpc_endpoint: str,
-        wallet: Optional[str] = None,
-        nns_zone: Optional[str] = None,
-        nns_name: Optional[str] = None,
+        wallet: str,
        address: Optional[str] = None,
        attributes: Optional[dict] = None,
        basic_acl: Optional[str] = None,
        await_mode: bool = False,
        disable_timestamp: bool = False,
-        force: bool = False,
-        trace: bool = False,
        name: Optional[str] = None,
        nonce: Optional[str] = None,
        policy: Optional[str] = None,
@@ -39,8 +35,6 @@ class FrostfsCliContainer(CliCommand):
            basic_acl: Hex encoded basic ACL value or keywords like 'public-read-write',
                'private', 'eacl-public-read' (default "private").
            disable_timestamp: Disable timestamp container attribute.
-            force: Skip placement validity check.
-            trace: Generate trace ID and print it.
            name: Container name attribute.
            nonce: UUIDv4 nonce value for container.
            policy: QL-encoded or JSON-encoded placement policy or path to file with it.

@@ -51,8 +45,6 @@ class FrostfsCliContainer(CliCommand):
            wallet: WIF (NEP-2) string or path to the wallet or binary key.
            xhdr: Dict with request X-Headers.
            timeout: Timeout for the operation (default 15s).
-            nns_zone: Container nns zone attribute.
-            nns_name: Container nns name attribute.

        Returns:
            Command's result.
@@ -65,15 +57,15 @@
    def delete(
        self,
        rpc_endpoint: str,
+        wallet: str,
        cid: str,
-        wallet: Optional[str] = None,
        address: Optional[str] = None,
        await_mode: bool = False,
        session: Optional[str] = None,
        ttl: Optional[int] = None,
        xhdr: Optional[dict] = None,
        force: bool = False,
-        trace: bool = False,
+        timeout: Optional[str] = None,
    ) -> CommandResult:
        """
        Delete an existing container.
@@ -83,13 +75,13 @@
            address: Address of wallet account.
            await_mode: Block execution until container is removed.
            cid: Container ID.
-            trace: Generate trace ID and print it.
            force: Do not check whether container contains locks and remove immediately.
            rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
            session: Path to a JSON-encoded container session token.
            ttl: TTL value in request meta header (default 2).
            wallet: WIF (NEP-2) string or path to the wallet or binary key.
            xhdr: Dict with request X-Headers.
+            timeout: Timeout for the operation (default 15s).

        Returns:
            Command's result.
@@ -103,14 +95,12 @@
    def get(
        self,
        rpc_endpoint: str,
+        wallet: str,
        cid: str,
-        wallet: Optional[str] = None,
        address: Optional[str] = None,
-        generate_key: Optional[bool] = None,
        await_mode: bool = False,
        to: Optional[str] = None,
        json_mode: bool = False,
-        trace: bool = False,
        ttl: Optional[int] = None,
        xhdr: Optional[dict] = None,
        timeout: Optional[str] = None,
@@ -123,14 +113,12 @@
            await_mode: Block execution until container is removed.
            cid: Container ID.
            json_mode: Print or dump container in JSON format.
-            trace: Generate trace ID and print it.
            rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
            to: Path to dump encoded container.
            ttl: TTL value in request meta header (default 2).
            wallet: WIF (NEP-2) string or path to the wallet or binary key.
            xhdr: Dict with request X-Headers.
            timeout: Timeout for the operation (default 15s).
-            generate_key: Generate a new private key.

        Returns:
            Command's result.
@@ -143,10 +131,9 @@
    def get_eacl(
        self,
        rpc_endpoint: str,
+        wallet: str,
        cid: str,
-        wallet: Optional[str] = None,
        address: Optional[str] = None,
-        generate_key: Optional[bool] = None,
        await_mode: bool = False,
        to: Optional[str] = None,
        session: Optional[str] = None,
@@ -163,14 +150,11 @@
            cid: Container ID.
            rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
            to: Path to dump encoded container.
-            json_mode: Print or dump container in JSON format.
-            trace: Generate trace ID and print it.
            session: Path to a JSON-encoded container session token.
            ttl: TTL value in request meta header (default 2).
            wallet: WIF (NEP-2) string or path to the wallet or binary key.
            xhdr: Dict with request X-Headers.
            timeout: Timeout for the operation (default 15s).
-            generate_key: Generate a new private key.

        Returns:
            Command's result.
@@ -184,10 +168,8 @@
    def list(
        self,
        rpc_endpoint: str,
-        name: Optional[str] = None,
-        wallet: Optional[str] = None,
+        wallet: str,
        address: Optional[str] = None,
-        generate_key: Optional[bool] = None,
        owner: Optional[str] = None,
        ttl: Optional[int] = None,
        xhdr: Optional[dict] = None,
@@ -199,15 +181,12 @@
        Args:
            address: Address of wallet account.
-            name: List containers by the attribute name.
            owner: Owner of containers (omit to use owner from private key).
            rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
            ttl: TTL value in request meta header (default 2).
            wallet: WIF (NEP-2) string or path to the wallet or binary key.
            xhdr: Dict with request X-Headers.
-            trace: Generate trace ID and print it.
            timeout: Timeout for the operation (default 15s).
-            generate_key: Generate a new private key.

        Returns:
            Command's result.
@@ -220,12 +199,9 @@
    def list_objects(
        self,
        rpc_endpoint: str,
+        wallet: str,
        cid: str,
-        bearer: Optional[str] = None,
-        wallet: Optional[str] = None,
        address: Optional[str] = None,
-        generate_key: Optional[bool] = None,
-        trace: bool = False,
        ttl: Optional[int] = None,
        xhdr: Optional[dict] = None,
        timeout: Optional[str] = None,
@@ -236,14 +212,11 @@
        Args:
            address: Address of wallet account.
            cid: Container ID.
-            bearer: File with signed JSON or binary encoded bearer token.
            rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
            ttl: TTL value in request meta header (default 2).
            wallet: WIF (NEP-2) string or path to the wallet or binary key.
            xhdr: Dict with request X-Headers.
-            trace: Generate trace ID and print it.
            timeout: Timeout for the operation (default 15s).
-            generate_key: Generate a new private key.

        Returns:
            Command's result.
@@ -253,12 +226,11 @@
            **{param: value for param, value in locals().items() if param not in ["self"]},
        )

-    # TODO Deprecated method with 0.42
    def set_eacl(
        self,
        rpc_endpoint: str,
+        wallet: str,
        cid: str,
-        wallet: Optional[str] = None,
        address: Optional[str] = None,
        await_mode: bool = False,
        table: Optional[str] = None,
@@ -294,12 +266,11 @@ class FrostfsCliContainer(CliCommand):
     def search_node(
         self,
         rpc_endpoint: str,
+        wallet: str,
         cid: str,
-        wallet: Optional[str] = None,
         address: Optional[str] = None,
         ttl: Optional[int] = None,
         from_file: Optional[str] = None,
-        trace: bool = False,
         short: Optional[bool] = True,
         xhdr: Optional[dict] = None,
         generate_key: Optional[bool] = None,
@@ -317,9 +288,8 @@ class FrostfsCliContainer(CliCommand):
             from_file: File path with encoded container.
             timeout: Timeout for the operation (default 15s).
             short: Shorten the output of node information.
-            trace: Generate trace ID and print it.
             xhdr: Dict with request X-Headers.
             generate_key: Generate a new private key.
         Returns:
@@ -328,5 +298,9 @@ class FrostfsCliContainer(CliCommand):
         return self._execute(
             f"container nodes {from_str}",
-            **{param: value for param, value in locals().items() if param not in ["self", "from_file", "from_str"]},
+            **{
+                param: value
+                for param, value in locals().items()
+                if param not in ["self", "from_file", "from_str"]
+            },
         )
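
For reviewers, a minimal usage sketch of the resulting container API (it assumes the FrostfsCli facade exposes the container group, as it does elsewhere in this library; exec path, endpoint, and wallet paths are placeholders):

from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.shell import LocalShell

# On this side of the diff `wallet` is required again and the
# generate_key/trace options are gone.
cli = FrostfsCli(LocalShell(), "frostfs-cli", config_file="/tmp/cli-config.yml")
result = cli.container.list(
    rpc_endpoint="s01.frostfs.devenv:8080",
    wallet="/path/to/wallet.json",
)
container_ids = result.stdout.strip().split()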

View file

@ -1,232 +0,0 @@
from typing import Optional
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class FrostfsCliControl(CliCommand):
def set_status(
self,
endpoint: str,
status: str,
wallet: Optional[str] = None,
force: Optional[bool] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Set status of the storage node in FrostFS network map
Args:
wallet: Path to the wallet or binary key
address: Address of wallet account
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
force: Force turning to local maintenance
status: New netmap status keyword ('online', 'offline', 'maintenance')
timeout: Timeout for an operation (default 15s)
Returns:
Command's result.
"""
return self._execute(
"control set-status",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def healthcheck(
self,
endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Health check for FrostFS storage nodes
Args:
wallet: Path to the wallet or binary key
address: Address of wallet account
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
timeout: Timeout for an operation (default 15s)
Returns:
Command's result.
"""
return self._execute(
"control healthcheck",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def drop_objects(
self,
endpoint: str,
objects: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
wallet: Path to the wallet or binary key
address: Address of wallet account
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
objects: List of object addresses to be removed in string format
timeout: Timeout for an operation (default 15s)
Returns:
Command's result.
"""
return self._execute(
"control drop-objects",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def add_rule(
self,
endpoint: str,
chain_id: str,
target_name: str,
target_type: str,
rule: Optional[list[str]] = None,
path: Optional[str] = None,
chain_id_hex: Optional[bool] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
address: Address of wallet account
chain-id: Assign ID to the parsed chain
chain-id-hex: Flag to parse chain ID as hex
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
path: Path to encoded chain in JSON or binary format
rule: Rule statement
target-name: Resource name in APE resource name format
target-type: Resource type(container/namespace)
timeout: Timeout for an operation (default 15s)
wallet: Path to the wallet or binary key
Returns:
Command`s result.
"""
return self._execute(
"control add-rule",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def get_rule(
self,
endpoint: str,
chain_id: str,
target_name: str,
target_type: str,
chain_id_hex: Optional[bool] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
address string Address of wallet account
chain-id string Chain id
chain-id-hex Flag to parse chain ID as hex
endpoint string Remote node control address (as 'multiaddr' or '<host>:<port>')
target-name string Resource name in APE resource name format
target-type string Resource type(container/namespace)
timeout duration Timeout for an operation (default 15s)
wallet string Path to the wallet or binary key
Returns:
Command`s result.
"""
return self._execute(
"control get-rule",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def list_rules(
self,
endpoint: str,
target_name: str,
target_type: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
address: Address of wallet account
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
target-name: Resource name in APE resource name format
target-type: Resource type(container/namespace)
timeout: Timeout for an operation (default 15s)
wallet: Path to the wallet or binary key
Returns:
Command`s result.
"""
return self._execute(
"control list-rules",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def list_targets(
self,
endpoint: str,
chain_name: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
address: Address of wallet account
chain-name: Chain name(ingress|s3)
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
timeout: Timeout for an operation (default 15s)
wallet: Path to the wallet or binary key
Returns:
Command`s result.
"""
return self._execute(
"control list-targets",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def remove_rule(
self,
endpoint: str,
chain_id: str,
target_name: str,
target_type: str,
all: Optional[bool] = None,
chain_id_hex: Optional[bool] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
address: Address of wallet account
all: Remove all chains
chain-id: Assign ID to the parsed chain
chain-id-hex: Flag to parse chain ID as hex
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
target-name: Resource name in APE resource name format
target-type: Resource type(container/namespace)
timeout: Timeout for an operation (default 15s)
wallet: Path to the wallet or binary key
Returns:
Command`s result.
"""
return self._execute(
"control remove-rule",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
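
Since the whole wrapper is removed in this compare, here is a minimal sketch of how these control-plane helpers were driven. The constructor wiring, chain ID, target, and rule text are illustrative assumptions, not part of this diff:

from frostfs_testlib.cli.frostfs_cli.control import FrostfsCliControl  # module path assumed
from frostfs_testlib.shell import LocalShell

# CliCommand-style (shell, exec path) construction assumed.
control = FrostfsCliControl(LocalShell(), "frostfs-cli")
endpoint = "s01.frostfs.devenv:8081"  # control endpoint, placeholder

control.add_rule(
    endpoint=endpoint,
    chain_id="allow-put",               # illustrative chain ID
    target_name="mycontainer",          # illustrative APE target
    target_type="container",
    rule=["allow Object.Put *"],        # illustrative rule statement
    wallet="/path/to/wallet.json",
)
rules = control.list_rules(endpoint=endpoint, target_name="mycontainer", target_type="container")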

View file

@@ -8,7 +8,7 @@ class FrostfsCliNetmap(CliCommand):
     def epoch(
         self,
         rpc_endpoint: str,
-        wallet: Optional[str] = None,
+        wallet: str,
         address: Optional[str] = None,
         generate_key: bool = False,
         ttl: Optional[int] = None,
@@ -38,7 +38,7 @@ class FrostfsCliNetmap(CliCommand):
     def netinfo(
         self,
         rpc_endpoint: str,
-        wallet: Optional[str] = None,
+        wallet: str,
         address: Optional[str] = None,
         generate_key: bool = False,
         ttl: Optional[int] = None,
@@ -68,7 +68,7 @@ class FrostfsCliNetmap(CliCommand):
     def nodeinfo(
         self,
         rpc_endpoint: str,
-        wallet: Optional[str] = None,
+        wallet: str,
         address: Optional[str] = None,
         generate_key: bool = False,
         json: bool = False,
@@ -100,7 +100,7 @@ class FrostfsCliNetmap(CliCommand):
     def snapshot(
         self,
         rpc_endpoint: str,
-        wallet: Optional[str] = None,
+        wallet: str,
         address: Optional[str] = None,
         generate_key: bool = False,
         ttl: Optional[int] = None,

View file

@@ -8,12 +8,11 @@ class FrostfsCliObject(CliCommand):
     def delete(
         self,
         rpc_endpoint: str,
+        wallet: str,
         cid: str,
         oid: str,
-        wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
-        generate_key: Optional[bool] = None,
         session: Optional[str] = None,
         ttl: Optional[int] = None,
         xhdr: Optional[dict] = None,
@@ -26,7 +25,6 @@ class FrostfsCliObject(CliCommand):
             address: Address of wallet account.
             bearer: File with signed JSON or binary encoded bearer token.
             cid: Container ID.
-            generate_key: Generate new private key.
             oid: Object ID.
             rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
             session: Filepath to a JSON- or binary-encoded token of the object DELETE session.
@@ -46,12 +44,11 @@ class FrostfsCliObject(CliCommand):
     def get(
         self,
         rpc_endpoint: str,
+        wallet: str,
         cid: str,
         oid: str,
-        wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
-        generate_key: Optional[bool] = None,
         file: Optional[str] = None,
         header: Optional[str] = None,
         no_progress: bool = False,
@@ -69,7 +66,6 @@ class FrostfsCliObject(CliCommand):
             bearer: File with signed JSON or binary encoded bearer token.
             cid: Container ID.
             file: File to write object payload to. Default: stdout.
-            generate_key: Generate new private key.
             header: File to write header to. Default: stdout.
             no_progress: Do not show progress bar.
             oid: Object ID.
@@ -92,12 +88,11 @@ class FrostfsCliObject(CliCommand):
     def hash(
         self,
         rpc_endpoint: str,
+        wallet: str,
         cid: str,
         oid: str,
-        wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
-        generate_key: Optional[bool] = None,
         range: Optional[str] = None,
         salt: Optional[str] = None,
         ttl: Optional[int] = None,
@@ -113,7 +108,6 @@ class FrostfsCliObject(CliCommand):
             address: Address of wallet account.
             bearer: File with signed JSON or binary encoded bearer token.
             cid: Container ID.
-            generate_key: Generate new private key.
             oid: Object ID.
             range: Range to take hash from in the form offset1:length1,...
             rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
@@ -130,18 +124,19 @@ class FrostfsCliObject(CliCommand):
         """
         return self._execute(
             "object hash",
-            **{param: value for param, value in locals().items() if param not in ["self", "params"]},
+            **{
+                param: value for param, value in locals().items() if param not in ["self", "params"]
+            },
         )
     def head(
         self,
         rpc_endpoint: str,
+        wallet: str,
         cid: str,
         oid: str,
-        wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
-        generate_key: Optional[bool] = None,
         file: Optional[str] = None,
         json_mode: bool = False,
         main_only: bool = False,
@@ -160,7 +155,6 @@ class FrostfsCliObject(CliCommand):
             bearer: File with signed JSON or binary encoded bearer token.
             cid: Container ID.
             file: File to write object payload to. Default: stdout.
-            generate_key: Generate new private key.
             json_mode: Marshal output in JSON.
             main_only: Return only main fields.
             oid: Object ID.
@@ -184,14 +178,13 @@ class FrostfsCliObject(CliCommand):
     def lock(
         self,
         rpc_endpoint: str,
+        wallet: str,
         cid: str,
         oid: str,
-        wallet: Optional[str] = None,
         lifetime: Optional[int] = None,
         expire_at: Optional[int] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
-        generate_key: Optional[bool] = None,
         session: Optional[str] = None,
         ttl: Optional[int] = None,
         xhdr: Optional[dict] = None,
@@ -204,7 +197,6 @@ class FrostfsCliObject(CliCommand):
             address: Address of wallet account.
             bearer: File with signed JSON or binary encoded bearer token.
             cid: Container ID.
-            generate_key: Generate new private key.
             oid: Object ID.
             lifetime: Lock lifetime.
             expire_at: Lock expiration epoch.
@@ -226,14 +218,12 @@ class FrostfsCliObject(CliCommand):
     def put(
         self,
         rpc_endpoint: str,
+        wallet: str,
         cid: str,
         file: str,
-        wallet: Optional[str] = None,
         address: Optional[str] = None,
         attributes: Optional[dict] = None,
         bearer: Optional[str] = None,
-        generate_key: Optional[bool] = None,
-        copies_number: Optional[int] = None,
         disable_filename: bool = False,
         disable_timestamp: bool = False,
         expire_at: Optional[int] = None,
@@ -251,13 +241,11 @@ class FrostfsCliObject(CliCommand):
             address: Address of wallet account.
             attributes: User attributes in form of Key1=Value1,Key2=Value2.
             bearer: File with signed JSON or binary encoded bearer token.
-            copies_number: Number of copies of the object to store within the RPC call.
             cid: Container ID.
             disable_filename: Do not set well-known filename attribute.
             disable_timestamp: Do not set well-known timestamp attribute.
             expire_at: Last epoch in the life of the object.
             file: File with object payload.
-            generate_key: Generate new private key.
             no_progress: Do not show progress bar.
             notify: Object notification in the form of *epoch*:*topic*; '-'
                 topic means using default.
@@ -279,13 +267,12 @@ class FrostfsCliObject(CliCommand):
     def range(
         self,
         rpc_endpoint: str,
+        wallet: str,
         cid: str,
         oid: str,
         range: str,
-        wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
-        generate_key: Optional[bool] = None,
         file: Optional[str] = None,
         json_mode: bool = False,
         raw: bool = False,
@@ -302,7 +289,6 @@ class FrostfsCliObject(CliCommand):
             bearer: File with signed JSON or binary encoded bearer token.
             cid: Container ID.
             file: File to write object payload to. Default: stdout.
-            generate_key: Generate new private key.
             json_mode: Marshal output in JSON.
             oid: Object ID.
             range: Range to take data from in the form offset:length.
@@ -325,11 +311,10 @@ class FrostfsCliObject(CliCommand):
     def search(
         self,
         rpc_endpoint: str,
+        wallet: str,
         cid: str,
-        wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
-        generate_key: Optional[bool] = None,
         filters: Optional[list] = None,
         oid: Optional[str] = None,
         phy: bool = False,
@@ -347,7 +332,6 @@ class FrostfsCliObject(CliCommand):
             bearer: File with signed JSON or binary encoded bearer token.
             cid: Container ID.
             filters: Repeated filter expressions or files with protobuf JSON.
-            generate_key: Generate new private key.
             oid: Object ID.
             phy: Search physically stored objects.
             root: Search for user objects.
@@ -365,46 +349,3 @@ class FrostfsCliObject(CliCommand):
             "object search",
             **{param: value for param, value in locals().items() if param not in ["self"]},
         )
-
-    def nodes(
-        self,
-        rpc_endpoint: str,
-        cid: str,
-        oid: Optional[str] = None,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-        bearer: Optional[str] = None,
-        generate_key: Optional[bool] = None,
-        trace: bool = False,
-        root: bool = False,
-        verify_presence_all: bool = False,
-        json: bool = False,
-        ttl: Optional[int] = None,
-        xhdr: Optional[dict] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """
-        Search object nodes.
-
-        Args:
-            address: Address of wallet account.
-            bearer: File with signed JSON or binary encoded bearer token.
-            cid: Container ID.
-            generate_key: Generate new private key.
-            oid: Object ID.
-            trace: Generate trace ID and print it.
-            root: Search for user objects.
-            rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
-            verify_presence_all: Verify the actual presence of the object on all netmap nodes.
-            ttl: TTL value in request meta header (default 2).
-            wallet: WIF (NEP-2) string or path to the wallet or binary key.
-            xhdr: Dict with request X-Headers.
-            timeout: Timeout for the operation (default 15s).
-
-        Returns:
-            Command's result.
-        """
-        return self._execute(
-            "object nodes",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
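
The removed nodes() helper wrapped `frostfs-cli object nodes`; before this change a placement/presence check looked roughly like the sketch below (container and object IDs are placeholders, and the FrostfsCli wiring is the same assumption as in the earlier sketch):

from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.shell import LocalShell

cli = FrostfsCli(LocalShell(), "frostfs-cli", config_file="/tmp/cli-config.yml")
result = cli.object.nodes(
    rpc_endpoint="s01.frostfs.devenv:8080",
    cid="CID_PLACEHOLDER",
    oid="OID_PLACEHOLDER",
    verify_presence_all=True,  # check the object on all netmap nodes
    json=True,
)
print(result.stdout)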

View file

@@ -9,6 +9,7 @@ class FrostfsCliSession(CliCommand):
         self,
         rpc_endpoint: str,
         wallet: str,
+        wallet_password: str,
         out: str,
         lifetime: Optional[int] = None,
         address: Optional[str] = None,
@@ -29,7 +30,12 @@ class FrostfsCliSession(CliCommand):
         Returns:
             Command's result.
         """
-        return self._execute(
+        return self._execute_with_password(
             "session create",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
+            wallet_password,
+            **{
+                param: value
+                for param, value in locals().items()
+                if param not in ["self", "wallet_password"]
+            },
         )
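
The switch to _execute_with_password means the wallet password is fed to the CLI's interactive prompt rather than rendered as a flag (it is excluded from the locals-derived kwargs). A usage sketch with placeholder paths:

from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.shell import LocalShell

cli = FrostfsCli(LocalShell(), "frostfs-cli")
cli.session.create(
    rpc_endpoint="s01.frostfs.devenv:8080",
    wallet="/path/to/wallet.json",
    wallet_password="",        # consumed by _execute_with_password, never passed as --wallet_password
    out="/tmp/session.token",
    lifetime=10,
)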

View file

@@ -39,10 +39,10 @@ class FrostfsCliShards(CliCommand):
     def set_mode(
         self,
         endpoint: str,
+        wallet: str,
+        wallet_password: str,
         mode: str,
-        id: Optional[list[str]] = None,
-        wallet: Optional[str] = None,
-        wallet_password: Optional[str] = None,
+        id: Optional[list[str]],
         address: Optional[str] = None,
         all: bool = False,
         clear_errors: bool = False,
@@ -65,15 +65,14 @@ class FrostfsCliShards(CliCommand):
         Returns:
             Command's result.
         """
-        if not wallet_password:
-            return self._execute(
-                "control shards set-mode",
-                **{param: value for param, value in locals().items() if param not in ["self"]},
-            )
         return self._execute_with_password(
             "control shards set-mode",
             wallet_password,
-            **{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]},
+            **{
+                param: value
+                for param, value in locals().items()
+                if param not in ["self", "wallet_password"]
+            },
         )
 
     def dump(
@@ -106,14 +105,18 @@ class FrostfsCliShards(CliCommand):
         return self._execute_with_password(
             "control shards dump",
             wallet_password,
-            **{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]},
+            **{
+                param: value
+                for param, value in locals().items()
+                if param not in ["self", "wallet_password"]
+            },
         )
 
     def list(
         self,
         endpoint: str,
-        wallet: Optional[str] = None,
-        wallet_password: Optional[str] = None,
+        wallet: str,
+        wallet_password: str,
         address: Optional[str] = None,
         json_mode: bool = False,
         timeout: Optional[str] = None,
@@ -132,130 +135,12 @@ class FrostfsCliShards(CliCommand):
         Returns:
             Command's result.
         """
-        if not wallet_password:
-            return self._execute(
-                "control shards list",
-                **{param: value for param, value in locals().items() if param not in ["self"]},
-            )
         return self._execute_with_password(
             "control shards list",
             wallet_password,
-            **{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]},
+            **{
+                param: value
+                for param, value in locals().items()
+                if param not in ["self", "wallet_password"]
+            },
-        )
-
-    def evacuation_start(
-        self,
-        endpoint: str,
-        id: Optional[str] = None,
-        scope: Optional[str] = None,
-        all: bool = False,
-        no_errors: bool = True,
-        await_mode: bool = False,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-        no_progress: bool = False,
-    ) -> CommandResult:
-        """
-        Start evacuation of objects from a shard to other shards.
-
-        Args:
-            address: Address of wallet account
-            all: Process all shards
-            await_mode: Block execution until evacuation is completed
-            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
-            id: List of shard IDs in base58 encoding
-            no_errors: Skip invalid/unreadable objects (default true)
-            no_progress: Do not print progress when await_mode is provided
-            scope: Evacuation scope; possible values: trees, objects, all (default "all")
-            timeout: Timeout for an operation (default 15s)
-
-        Returns:
-            Command's result.
-        """
-        return self._execute(
-            "control shards evacuation start",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def evacuation_reset(
-        self,
-        endpoint: str,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """
-        Reset the status of shard evacuation.
-
-        Args:
-            address: Address of wallet account
-            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
-            timeout: Timeout for an operation (default 15s)
-
-        Returns:
-            Command's result.
-        """
-        return self._execute(
-            "control shards evacuation reset",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def evacuation_stop(
-        self,
-        endpoint: str,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """
-        Stop a running shard evacuation.
-
-        Args:
-            address: Address of wallet account
-            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
-            timeout: Timeout for an operation (default 15s)
-
-        Returns:
-            Command's result.
-        """
-        return self._execute(
-            "control shards evacuation stop",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def evacuation_status(
-        self,
-        endpoint: str,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """
-        Get the status of shard evacuation.
-
-        Args:
-            address: Address of wallet account
-            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
-            timeout: Timeout for an operation (default 15s)
-
-        Returns:
-            Command's result.
-        """
-        return self._execute(
-            "control shards evacuation status",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def detach(self, endpoint: str, address: Optional[str] = None, id: Optional[str] = None, timeout: Optional[str] = None):
-        """
-        Detach and close the shards.
-
-        Args:
-            address: Address of wallet account
-            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
-            id: List of shard IDs in base58 encoding
-            timeout: Timeout for an operation (default 15s)
-
-        Returns:
-            Command's result.
-        """
-        return self._execute(
-            "control shards detach",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
         )
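
For context on the removed evacuation helpers, the flow they wrapped looked roughly like this (control endpoint is a placeholder, and access to the shards group via the FrostfsCli facade is an assumption):

from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.shell import LocalShell

cli = FrostfsCli(LocalShell(), "frostfs-cli")
control_endpoint = "s01.frostfs.devenv:8081"

cli.shards.evacuation_start(endpoint=control_endpoint, scope="objects", all=True, await_mode=True)
status = cli.shards.evacuation_status(endpoint=control_endpoint)
print(status.stdout)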

View file

@ -1,53 +0,0 @@
from typing import Optional
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class FrostfsCliTree(CliCommand):
def healthcheck(
self,
wallet: Optional[str] = None,
rpc_endpoint: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Get internal balance of FrostFS account
Args:
address: Address of wallet account.
owner: Owner of balance account (omit to use owner from private key).
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
wallet: WIF (NEP-2) string or path to the wallet or binary key.
Returns:
Command's result.
"""
return self._execute(
"tree healthcheck",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def list(
self,
cid: str,
rpc_endpoint: Optional[str] = None,
wallet: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Get Tree List
Args:
cid: Container ID.
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
wallet: WIF (NEP-2) string or path to the wallet or binary key.
timeout: Timeout for the operation (default 15s).
Returns:
Command's result.
"""
return self._execute(
"tree list",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
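
This wrapper is exactly what BasicHealthcheck (further down in this compare) drives for its tree check; a direct call looks like this (endpoint and config path mirror that usage):

from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.shell import LocalShell

cli = FrostfsCli(LocalShell(), "frostfs-cli", config_file="/tmp/wallet-config.yaml")
result = cli.tree.healthcheck(rpc_endpoint="127.0.0.1:8080")
assert result.return_code == 0, result.stderr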

View file

@@ -7,9 +7,9 @@ from frostfs_testlib.shell import CommandResult
 class FrostfsCliUtil(CliCommand):
     def sign_bearer_token(
         self,
+        wallet: str,
         from_file: str,
         to_file: str,
-        wallet: Optional[str] = None,
         address: Optional[str] = None,
         json: Optional[bool] = False,
     ) -> CommandResult:
@@ -33,9 +33,9 @@ class FrostfsCliUtil(CliCommand):
     def sign_session_token(
         self,
+        wallet: str,
         from_file: str,
         to_file: str,
-        wallet: Optional[str] = None,
         address: Optional[str] = None,
     ) -> CommandResult:
         """
@@ -54,11 +54,3 @@ class FrostfsCliUtil(CliCommand):
             "util sign session-token",
             **{param: value for param, value in locals().items() if param not in ["self"]},
         )
-
-    def convert_eacl(self, from_file: str, to_file: str, json: Optional[bool] = False, ape: Optional[bool] = False):
-        """Convert representation of extended ACL table."""
-        return self._execute(
-            "util convert eacl",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
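
A usage sketch for the signing helpers and the removed convert_eacl wrapper (all file paths are placeholders):

from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.shell import LocalShell

cli = FrostfsCli(LocalShell(), "frostfs-cli")
cli.util.sign_bearer_token(
    wallet="/path/to/wallet.json",
    from_file="/tmp/bearer.json",
    to_file="/tmp/bearer.signed",
    json=True,
)
# Removed on one side of this compare: eACL table conversion.
cli.util.convert_eacl(from_file="/tmp/eacl.json", to_file="/tmp/chain.out", ape=True)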

View file

@ -1,30 +0,0 @@
from typing import Optional
from frostfs_testlib.hosting.interfaces import Host
from frostfs_testlib.shell.interfaces import CommandOptions, Shell
class GenericCli(object):
def __init__(self, cli_name: str, host: Host) -> None:
self.host = host
self.cli_name = cli_name
def __call__(
self,
args: Optional[str] = "",
pipes: Optional[str] = "",
shell: Optional[Shell] = None,
options: Optional[CommandOptions] = None,
):
if not shell:
shell = self.host.get_shell()
cli_config = self.host.get_cli_config(self.cli_name, True)
extra_args = ""
exec_path = self.cli_name
if cli_config:
extra_args = " ".join(cli_config.extra_args)
exec_path = cli_config.exec_path
cmd = f"{exec_path} {args} {extra_args} {pipes}"
return shell.exec(cmd, options)
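
GenericCli lets tests run an arbitrary binary through a host's shell, picking up exec path and extra args from the host's CLI config when one is registered. A sketch (the host object and binary are placeholders):

from frostfs_testlib.shell.interfaces import CommandOptions

# `host` is any configured frostfs_testlib.hosting.interfaces.Host instance.
curl = GenericCli("curl", host)
result = curl("-I https://s3.frostfs.devenv:8080", options=CommandOptions(check=False))
print(result.return_code, result.stdout)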

View file

@ -1,91 +0,0 @@
import re
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo, NodeStatus
class NetmapParser:
@staticmethod
def netinfo(output: str) -> NodeNetInfo:
regexes = {
"epoch": r"Epoch: (?P<epoch>\d+)",
"network_magic": r"Network magic: (?P<network_magic>.*$)",
"time_per_block": r"Time per block: (?P<time_per_block>\d+\w+)",
"container_fee": r"Container fee: (?P<container_fee>\d+)",
"epoch_duration": r"Epoch duration: (?P<epoch_duration>\d+)",
"inner_ring_candidate_fee": r"Inner Ring candidate fee: (?P<inner_ring_candidate_fee>\d+)",
"maximum_object_size": r"Maximum object size: (?P<maximum_object_size>\d+)",
"maximum_count_of_data_shards": r"Maximum count of data shards: (?P<maximum_count_of_data_shards>\d+)",
"maximum_count_of_parity_shards": r"Maximum count of parity shards: (?P<maximum_count_of_parity_shards>\d+)",
"withdrawal_fee": r"Withdrawal fee: (?P<withdrawal_fee>\d+)",
"homomorphic_hashing_disabled": r"Homomorphic hashing disabled: (?P<homomorphic_hashing_disabled>true|false)",
"maintenance_mode_allowed": r"Maintenance mode allowed: (?P<maintenance_mode_allowed>true|false)",
"eigen_trust_alpha": r"EigenTrustAlpha: (?P<eigen_trust_alpha>\d+\w+$)",
"eigen_trust_iterations": r"EigenTrustIterations: (?P<eigen_trust_iterations>\d+)",
}
parse_result = {}
for key, regex in regexes.items():
search_result = re.search(regex, output, flags=re.MULTILINE)
if search_result is None:
parse_result[key] = None
continue
parse_result[key] = search_result[key].strip()
node_netinfo = NodeNetInfo(**parse_result)
return node_netinfo
@staticmethod
def snapshot_all_nodes(output: str) -> list[NodeNetmapInfo]:
"""The code will parse each line and return each node as dataclass."""
netmap_nodes = output.split("Node ")[1:]
dataclasses_netmap = []
result_netmap = {}
regexes = {
"node_id": r"\d+: (?P<node_id>\w+)",
"node_data_ips": r"(?P<node_data_ips>/ip4/.+?)$",
"node_status": r"(?P<node_status>ONLINE|MAINTENANCE|OFFLINE)",
"cluster_name": r"ClusterName: (?P<cluster_name>\w+)",
"continent": r"Continent: (?P<continent>\w+)",
"country": r"Country: (?P<country>\w+)",
"country_code": r"CountryCode: (?P<country_code>\w+)",
"external_address": r"ExternalAddr: (?P<external_address>/ip[4].+?)$",
"location": r"Location: (?P<location>\w+.*)",
"node": r"Node: (?P<node>\d+\.\d+\.\d+\.\d+)",
"price": r"Price: (?P<price>\d+)",
"sub_div": r"SubDiv: (?P<sub_div>.*)",
"sub_div_code": r"SubDivCode: (?P<sub_div_code>\w+)",
"un_locode": r"UN-LOCODE: (?P<un_locode>\w+.*)",
"role": r"role: (?P<role>\w+)",
}
for node in netmap_nodes:
for key, regex in regexes.items():
search_result = re.search(regex, node, flags=re.MULTILINE)
if search_result is None:
result_netmap[key] = None
continue
if key == "node_data_ips":
result_netmap[key] = search_result[key].strip().split(" ")
continue
if key == "external_address":
result_netmap[key] = search_result[key].strip().split(",")
continue
if key == "node_status":
result_netmap[key] = NodeStatus(search_result[key].strip().lower())
continue
result_netmap[key] = search_result[key].strip()
dataclasses_netmap.append(NodeNetmapInfo(**result_netmap))
return dataclasses_netmap
@staticmethod
def snapshot_one_node(output: str, cluster_node: ClusterNode) -> NodeNetmapInfo | None:
snapshot_nodes = NetmapParser.snapshot_all_nodes(output=output)
snapshot_node = [node for node in snapshot_nodes if node.node == cluster_node.host_ip]
if not snapshot_node:
return None
return snapshot_node[0]
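
A minimal sketch of feeding `frostfs-cli netmap netinfo` output through the parser. The sample output is illustrative and abbreviated; note that parsed values stay strings, and keys absent from the output come back as None:

sample = """Epoch: 42
Network magic: 56753087
Maximum object size: 67108864
Homomorphic hashing disabled: false
Maintenance mode allowed: true"""

info = NetmapParser.netinfo(sample)
assert info.epoch == "42"
assert info.maximum_object_size == "67108864"
assert info.container_fee is None  # not present in the sample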

View file

@ -1,47 +0,0 @@
import re
from typing import Optional
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsAuthmate
from frostfs_testlib.credentials.interfaces import S3Credentials, S3CredentialsProvider, User
from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC
from frostfs_testlib.shell import LocalShell
from frostfs_testlib.steps.cli.container import list_containers
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
from frostfs_testlib.utils import string_utils
class AuthmateS3CredentialsProvider(S3CredentialsProvider):
@reporter.step("Init S3 Credentials using Authmate CLI")
def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None) -> S3Credentials:
cluster_nodes: list[ClusterNode] = self.cluster.cluster_nodes
shell = LocalShell()
wallet = user.wallet
endpoint = cluster_node.storage_node.get_rpc_endpoint()
gate_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes]
# unique short bucket name
bucket = string_utils.unique_name("bucket-")
frostfs_authmate: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC)
issue_secret_output = frostfs_authmate.secret.issue(
wallet=wallet.path,
peer=endpoint,
gate_public_key=gate_public_keys,
wallet_password=wallet.password,
container_policy=location_constraints,
container_friendly_name=bucket,
).stdout
aws_access_key_id = str(re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group("aws_access_key_id"))
aws_secret_access_key = str(
re.search(r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output).group("aws_secret_access_key")
)
cid = str(re.search(r"container_id.*:\s.(?P<container_id>\w*)", issue_secret_output).group("container_id"))
containers_list = list_containers(wallet, shell, endpoint)
assert cid in containers_list, f"Expected cid {cid} in {containers_list}"
user.s3_credentials = S3Credentials(aws_access_key_id, aws_secret_access_key)
return user.s3_credentials
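
Call-site sketch (the cluster wiring is a placeholder; note that provide() expects user.wallet to be provisioned already, e.g. by the gRPC credentials provider shown below):

from frostfs_testlib.credentials.interfaces import User

user = User("s3-user")
provider = AuthmateS3CredentialsProvider(cluster)          # `cluster` is a configured Cluster
creds = provider.provide(user, cluster.cluster_nodes[0])
print(creds.access_key, creds.secret_key)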

View file

@ -1,51 +0,0 @@
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Any, Optional
from frostfs_testlib.plugins import load_plugin
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
@dataclass
class S3Credentials:
access_key: str
secret_key: str
@dataclass
class User:
name: str
attributes: dict[str, Any] = field(default_factory=dict)
wallet: WalletInfo | None = None
s3_credentials: S3Credentials | None = None
class S3CredentialsProvider(ABC):
def __init__(self, cluster: Cluster) -> None:
self.cluster = cluster
@abstractmethod
def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None, **kwargs) -> S3Credentials:
raise NotImplementedError("Directly called abstract class?")
class GrpcCredentialsProvider(ABC):
def __init__(self, cluster: Cluster) -> None:
self.cluster = cluster
@abstractmethod
def provide(self, user: User, cluster_node: ClusterNode, **kwargs) -> WalletInfo:
raise NotImplementedError("Directly called abstract class?")
class CredentialsProvider(object):
S3: S3CredentialsProvider
GRPC: GrpcCredentialsProvider
def __init__(self, cluster: Cluster) -> None:
config = cluster.cluster_nodes[0].host.config
s3_cls = load_plugin("frostfs.testlib.credentials_providers", config.s3_creds_plugin_name)
self.S3 = s3_cls(cluster)
grpc_cls = load_plugin("frostfs.testlib.credentials_providers", config.grpc_creds_plugin_name)
self.GRPC = grpc_cls(cluster)
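
The concrete provider classes are resolved through the frostfs.testlib.credentials_providers entry-point group, keyed by the host's s3_creds_plugin_name/grpc_creds_plugin_name options (defaults "authmate" and "wallet_factory" per the HostConfig diff above). Typical wiring from a test, sketched with a placeholder cluster:

user = User("alice")
provider = CredentialsProvider(cluster)                    # `cluster` is a configured Cluster
user.wallet = provider.GRPC.provide(user, cluster.cluster_nodes[0])
s3_creds = provider.S3.provide(user, cluster.cluster_nodes[0])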

View file

@ -1,14 +0,0 @@
from frostfs_testlib import reporter
from frostfs_testlib.credentials.interfaces import GrpcCredentialsProvider, User
from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_PASS
from frostfs_testlib.shell.local_shell import LocalShell
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.dataclasses.wallet import WalletFactory, WalletInfo
class WalletFactoryProvider(GrpcCredentialsProvider):
@reporter.step("Init gRPC Credentials using wallet generation")
def provide(self, user: User, cluster_node: ClusterNode) -> WalletInfo:
wallet_factory = WalletFactory(ASSETS_DIR, LocalShell())
user.wallet = wallet_factory.create_wallet(file_name=user.name, password=DEFAULT_WALLET_PASS)
return user.wallet

View file

@@ -1,5 +1,5 @@
 class Options:
-    DEFAULT_SHELL_TIMEOUT = 120
+    DEFAULT_SHELL_TIMEOUT = 90
 
     @staticmethod
     def get_default_shell_timeout():

View file

@ -1,45 +0,0 @@
import logging
import os
from importlib.metadata import entry_points
import pytest
import yaml
from frostfs_testlib import reporter
from frostfs_testlib.hosting.hosting import Hosting
from frostfs_testlib.resources.common import ASSETS_DIR, HOSTING_CONFIG_FILE
from frostfs_testlib.storage import get_service_registry
@pytest.fixture(scope="session")
def configure_testlib():
reporter.get_reporter().register_handler(reporter.AllureHandler())
reporter.get_reporter().register_handler(reporter.StepsLogger())
logging.getLogger("paramiko").setLevel(logging.INFO)
# Register Services for cluster
registry = get_service_registry()
services = entry_points(group="frostfs.testlib.services")
for svc in services:
registry.register_service(svc.name, svc.load())
@pytest.fixture(scope="session")
def temp_directory(configure_testlib):
with reporter.step("Prepare tmp directory"):
full_path = ASSETS_DIR
if not os.path.exists(full_path):
os.mkdir(full_path)
return full_path
@pytest.fixture(scope="session")
def hosting(configure_testlib) -> Hosting:
with open(HOSTING_CONFIG_FILE, "r") as file:
hosting_config = yaml.full_load(file)
hosting_instance = Hosting()
hosting_instance.configure(hosting_config)
return hosting_instance
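
Consumers pull these fixtures in via pytest's plugin mechanism; a downstream conftest/test could look like this sketch (the plugin module path and the Hosting attribute name are assumptions):

# conftest.py
pytest_plugins = ["frostfs_testlib.fixtures"]  # module path assumed

# test_smoke.py
def test_hosting_is_configured(hosting):
    # `hosting` is the session-scoped fixture above, built from HOSTING_CONFIG_FILE.
    assert hosting.hosts, "expected at least one host in the hosting config"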

View file

@ -1,109 +0,0 @@
from typing import Callable
from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.healthcheck.interfaces import Healthcheck
from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
from frostfs_testlib.shell import CommandOptions
from frostfs_testlib.steps.node_management import storage_node_healthcheck
from frostfs_testlib.storage.cluster import ClusterNode, ServiceClass
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils.failover_utils import check_services_status
class BasicHealthcheck(Healthcheck):
def _perform(self, cluster_node: ClusterNode, checks: dict[Callable, dict]):
issues: list[str] = []
for check, kwargs in checks.items():
issue = check(cluster_node, **kwargs)
if issue:
issues.append(issue)
assert not issues, "Issues found:\n" + "\n".join(issues)
@wait_for_success(900, 30, title="Wait for full healthcheck for {cluster_node}")
def full_healthcheck(self, cluster_node: ClusterNode):
checks = {
self.storage_healthcheck: {},
self._tree_healthcheck: {},
}
self._perform(cluster_node, checks)
@wait_for_success(900, 30, title="Wait for startup healthcheck on {cluster_node}")
def startup_healthcheck(self, cluster_node: ClusterNode):
checks = {
self.storage_healthcheck: {},
self._tree_healthcheck: {},
}
self._perform(cluster_node, checks)
@wait_for_success(900, 30, title="Wait for storage healthcheck on {cluster_node}")
def storage_healthcheck(self, cluster_node: ClusterNode) -> str | None:
checks = {
self._storage_healthcheck: {},
}
self._perform(cluster_node, checks)
@wait_for_success(900, 30, title="Wait for tree healthcheck on {cluster_node}")
def tree_healthcheck(self, cluster_node: ClusterNode) -> str | None:
checks = {
self._tree_healthcheck: {},
}
self._perform(cluster_node, checks)
@wait_for_success(120, 5, title="Wait for service healthcheck on {cluster_node}")
def services_healthcheck(self, cluster_node: ClusterNode):
svcs_to_check = cluster_node.services
checks = {
check_services_status: {
"service_list": svcs_to_check,
"expected_status": "active",
},
self._check_services: {"services": svcs_to_check},
}
self._perform(cluster_node, checks)
def _check_services(self, cluster_node: ClusterNode, services: list[ServiceClass]):
for svc in services:
result = svc.service_healthcheck()
if not result:
return f"Service {svc.get_service_systemctl_name()} healthcheck failed on node {cluster_node}."
@reporter.step("Storage healthcheck on {cluster_node}")
def _storage_healthcheck(self, cluster_node: ClusterNode) -> str | None:
result = storage_node_healthcheck(cluster_node.storage_node)
self._gather_socket_info(cluster_node)
if result.health_status != "READY" or result.network_status != "ONLINE":
return f"Node {cluster_node} is not healthy. Health={result.health_status}. Network={result.network_status}"
@reporter.step("Tree healthcheck on {cluster_node}")
def _tree_healthcheck(self, cluster_node: ClusterNode) -> str | None:
host = cluster_node.host
service_config = host.get_service_config(cluster_node.storage_node.name)
wallet_path = service_config.attributes["wallet_path"]
wallet_password = service_config.attributes["wallet_password"]
shell = host.get_shell()
wallet_config_path = f"/tmp/{cluster_node.storage_node.name}-config.yaml"
wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
remote_cli = FrostfsCli(
shell,
host.get_cli_config(FROSTFS_CLI_EXEC).exec_path,
config_file=wallet_config_path,
)
result = remote_cli.tree.healthcheck(rpc_endpoint="127.0.0.1:8080")
if result.return_code != 0:
return (
f"Error during tree healthcheck (rc={result.return_code}): {result.stdout}. \n Stderr: {result.stderr}"
)
@reporter.step("Gather socket info for {cluster_node}")
def _gather_socket_info(self, cluster_node: ClusterNode):
cluster_node.host.get_shell().exec("ss -tuln | grep 8080", CommandOptions(check=False))
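
The checks dict maps a check callable to its kwargs; _perform collects non-None return values as issues. Extending it with a custom check is therefore a small subclass, sketched here (the port and command are illustrative, not part of this diff):

from frostfs_testlib.shell import CommandOptions
from frostfs_testlib.storage.cluster import ClusterNode

class S3AwareHealthcheck(BasicHealthcheck):
    def _s3_listener_check(self, cluster_node: ClusterNode, port: int = 8080) -> str | None:
        # A check returns an issue string on failure, or None when healthy.
        result = cluster_node.host.get_shell().exec(f"ss -tuln | grep {port}", CommandOptions(check=False))
        if result.return_code != 0:
            return f"No listener on port {port} at {cluster_node}"

    def full_healthcheck(self, cluster_node: ClusterNode):
        checks = {
            self.storage_healthcheck: {},
            self._tree_healthcheck: {},
            self._s3_listener_check: {"port": 8080},
        }
        self._perform(cluster_node, checks)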

View file

@ -1,25 +0,0 @@
from abc import ABC, abstractmethod
from frostfs_testlib.storage.cluster import ClusterNode
class Healthcheck(ABC):
@abstractmethod
def full_healthcheck(self, cluster_node: ClusterNode):
"""Perform full healthcheck on the target cluster node"""
@abstractmethod
def startup_healthcheck(self, cluster_node: ClusterNode):
"""Perform healthcheck required on startup of target cluster node"""
@abstractmethod
def storage_healthcheck(self, cluster_node: ClusterNode):
"""Perform storage service healthcheck on target cluster node"""
@abstractmethod
def services_healthcheck(self, cluster_node: ClusterNode):
"""Perform service status check on target cluster node"""
@abstractmethod
def tree_healthcheck(self, cluster_node: ClusterNode):
"""Perform tree healthcheck on target cluster node"""

View file

@ -1,13 +0,0 @@
import pytest
@pytest.hookimpl
def pytest_collection_modifyitems(items: list[pytest.Item]):
# All tests whose nodeid (the full path of the test) contains "frostfs" are granted
# the frostfs marker, excluding:
# 1. plugins
# 2. testlib itself
for item in items:
location = item.location[0]
if "frostfs" in location and "plugin" not in location and "testlib" not in location:
item.add_marker("frostfs")
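
The predicate applied to a few illustrative file paths (all hypothetical), mirroring what the hook checks against item.location[0]:

for path in [
    "frostfs_testcases/test_container.py",   # marked
    "frostfs_testlib/tests/test_shell.py",   # skipped: contains "testlib"
    "plugins/frostfs_plugin/test_x.py",      # skipped: contains "plugin"
]:
    print(path, "frostfs" in path and "plugin" not in path and "testlib" not in path)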

View file

@@ -10,7 +10,9 @@ class ParsedAttributes:
     def parse(cls, attributes: dict[str, Any]):
         # Pick attributes supported by the class
         field_names = set(field.name for field in fields(cls))
-        supported_attributes = {key: value for key, value in attributes.items() if key in field_names}
+        supported_attributes = {
+            key: value for key, value in attributes.items() if key in field_names
+        }
         return cls(**supported_attributes)
@@ -27,7 +29,6 @@ class CLIConfig:
     name: str
     exec_path: str
     attributes: dict[str, str] = field(default_factory=dict)
-    extra_args: list[str] = field(default_factory=list)
 
 
 @dataclass
@@ -51,7 +52,6 @@ class HostConfig:
     Attributes:
         plugin_name: Name of plugin that should be used to manage the host.
-        healthcheck_plugin_name: Name of the plugin for healthcheck operations.
         address: Address of the machine (IP or DNS name).
         services: List of services hosted on the machine.
         clis: List of CLI tools available on the machine.
@@ -60,17 +60,10 @@ class HostConfig:
     """
 
     plugin_name: str
-    hostname: str
-    healthcheck_plugin_name: str
     address: str
-    s3_creds_plugin_name: str = field(default="authmate")
-    grpc_creds_plugin_name: str = field(default="wallet_factory")
-    product: str = field(default="frostfs")
     services: list[ServiceConfig] = field(default_factory=list)
     clis: list[CLIConfig] = field(default_factory=list)
     attributes: dict[str, str] = field(default_factory=dict)
-    interfaces: dict[str, str] = field(default_factory=dict)
-    environment: dict[str, str] = field(default_factory=dict)
 
     def __post_init__(self) -> None:
         self.services = [ServiceConfig(**service) for service in self.services or []]
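
ParsedAttributes.parse() silently drops keys that are not dataclass fields, which is what lets host/service attribute dicts in the hosting YAML carry extra entries. A tiny sketch (the demo subclass is hypothetical):

from dataclasses import dataclass

@dataclass
class DemoAttributes(ParsedAttributes):
    container_name: str = ""
    start_timeout: int = 90

attrs = DemoAttributes.parse({"container_name": "s01", "not_a_field": True})
assert attrs.container_name == "s01"  # the unknown key was filtered out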

View file

@@ -11,7 +11,7 @@ import docker
 from requests import HTTPError
 
 from frostfs_testlib.hosting.config import ParsedAttributes
-from frostfs_testlib.hosting.interfaces import DiskInfo, Host, HostStatus
+from frostfs_testlib.hosting.interfaces import DiskInfo, Host
 from frostfs_testlib.shell import LocalShell, Shell, SSHShell
 from frostfs_testlib.shell.command_inspectors import SudoInspector
@@ -61,10 +61,10 @@ class ServiceAttributes(ParsedAttributes):
 class DockerHost(Host):
     """Manages services hosted in Docker containers running on a local or remote machine."""
 
-    def get_shell(self, sudo: bool = False) -> Shell:
+    def get_shell(self) -> Shell:
         host_attributes = HostAttributes.parse(self._config.attributes)
         command_inspectors = []
-        if sudo:
+        if host_attributes.sudo_shell:
             command_inspectors.append(SudoInspector())
 
         if not host_attributes.ssh_login:
@@ -87,15 +87,6 @@ class DockerHost(Host):
         for service_config in self._config.services:
             self.start_service(service_config.name)
-    def get_host_status(self) -> HostStatus:
-        # We emulate host status by checking all services.
-        for service_config in self._config.services:
-            state = self._get_container_state(service_config.name)
-            if state != "running":
-                return HostStatus.OFFLINE
-
-        return HostStatus.ONLINE
-
     def stop_host(self) -> None:
         # We emulate stopping machine by stopping all services
         # As an alternative we can probably try to stop docker service...
@@ -126,14 +117,6 @@ class DockerHost(Host):
             timeout=service_attributes.stop_timeout,
         )
-    def mask_service(self, service_name: str) -> None:
-        # Not required for Docker
-        return
-
-    def unmask_service(self, service_name: str) -> None:
-        # Not required for Docker
-        return
-
     def wait_success_suspend_process(self, service_name: str):
         raise NotImplementedError("Not supported for docker")
@@ -152,20 +135,9 @@ class DockerHost(Host):
             timeout=service_attributes.start_timeout,
         )
-    def wait_for_service_to_be_in_state(self, systemd_service_name: str, expected_state: str, timeout: int) -> None:
-        raise NotImplementedError("Not implemented for docker")
-
     def get_data_directory(self, service_name: str) -> str:
         service_attributes = self._get_service_attributes(service_name)
-
-        client = self._get_docker_client()
-        volume_info = client.inspect_volume(service_attributes.volume_name)
-        volume_path = volume_info["Mountpoint"]
-        return volume_path
+        return service_attributes.data_directory_path
 
-    def send_signal_to_service(self, service_name: str, signal: str) -> None:
-        raise NotImplementedError("Not implemented for docker")
-
     def delete_metabase(self, service_name: str) -> None:
         raise NotImplementedError("Not implemented for docker")
@@ -182,20 +154,12 @@ class DockerHost(Host):
     def delete_pilorama(self, service_name: str) -> None:
         raise NotImplementedError("Not implemented for docker")
 
-    def delete_file(self, file_path: str) -> None:
-        raise NotImplementedError("Not implemented for docker")
-
-    def is_file_exist(self, file_path: str) -> None:
-        raise NotImplementedError("Not implemented for docker")
-
-    def wipefs_storage_node_data(self, service_name: str) -> None:
-        raise NotImplementedError("Not implemented for docker")
-
-    def finish_wipefs(self, service_name: str) -> None:
-        raise NotImplementedError("Not implemented for docker")
-
     def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None:
-        volume_path = self.get_data_directory(service_name)
+        service_attributes = self._get_service_attributes(service_name)
+
+        client = self._get_docker_client()
+        volume_info = client.inspect_volume(service_attributes.volume_name)
+        volume_path = volume_info["Mountpoint"]
 
         shell = self.get_shell()
         meta_clean_cmd = f"rm -rf {volume_path}/meta*/*"
@@ -242,41 +206,11 @@ class DockerHost(Host):
         with open(file_path, "wb") as file:
             file.write(logs)
 
-    def get_filtered_logs(
-        self,
-        filter_regex: str,
-        since: Optional[datetime] = None,
-        until: Optional[datetime] = None,
-        unit: Optional[str] = None,
-        exclude_filter: Optional[str] = None,
-        priority: Optional[str] = None,
-    ) -> str:
-        client = self._get_docker_client()
-        filtered_logs = ""
-        for service_config in self._config.services:
-            container_name = self._get_service_attributes(service_config.name).container_name
-            try:
-                filtered_logs = client.logs(container_name, since=since, until=until)
-            except HTTPError as exc:
-                logger.info(f"Got exception while dumping logs of '{container_name}': {exc}")
-                continue
-
-            if exclude_filter:
-                filtered_logs = filtered_logs.replace(exclude_filter, "")
-            matches = re.findall(filter_regex, filtered_logs, re.IGNORECASE + re.MULTILINE)
-            found = list(matches)
-            if found:
-                filtered_logs += f"{container_name}:\n{os.linesep.join(found)}"
-
-        return filtered_logs
-
     def is_message_in_logs(
         self,
         message_regex: str,
         since: Optional[datetime] = None,
         until: Optional[datetime] = None,
-        unit: Optional[str] = None,
     ) -> bool:
         client = self._get_docker_client()
         for service_config in self._config.services:
@@ -319,23 +253,20 @@ class DockerHost(Host):
             return container
         return None
 
-    def _wait_for_container_to_be_in_state(self, container_name: str, expected_state: str, timeout: int) -> None:
+    def _wait_for_container_to_be_in_state(
+        self, container_name: str, expected_state: str, timeout: int
+    ) -> None:
         iterations = 10
         iteration_wait_time = timeout / iterations
 
         # To speed things up, we break timeout in smaller iterations and check container state
         # several times. This way waiting stops as soon as container reaches the expected state
         for _ in range(iterations):
-            state = self._get_container_state(container_name)
+            container = self._get_container_by_name(container_name)
+            logger.debug(f"Current container state\n:{json.dumps(container, indent=2)}")
 
-            if state == expected_state:
+            if container and container["State"] == expected_state:
                 return
             time.sleep(iteration_wait_time)
 
         raise RuntimeError(f"Container {container_name} is not in {expected_state} state.")
-
-    def _get_container_state(self, container_name: str) -> str:
-        container = self._get_container_by_name(container_name)
-        logger.debug(f"Current container state\n:{json.dumps(container, indent=2)}")
-
-        return container.get("State", None)
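
A sketch of the behavioural difference in get_shell() on the right-hand side of this compare: sudo is driven by the host's sudo_shell attribute (as in the hosting YAML) rather than a per-call argument. All values below are placeholders:

from frostfs_testlib.hosting.config import HostConfig

config = HostConfig(
    plugin_name="docker",
    address="localhost",
    attributes={"sudo_shell": False},  # picked up by HostAttributes.parse inside get_shell
)
host = DockerHost(config)
shell = host.get_shell()  # no sudo parameter on this side of the diff
shell.exec("docker ps")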

View file

@@ -4,14 +4,6 @@ from typing import Optional
 from frostfs_testlib.hosting.config import CLIConfig, HostConfig, ServiceConfig
 from frostfs_testlib.shell.interfaces import Shell
-from frostfs_testlib.testing.readable import HumanReadableEnum
-from frostfs_testlib.testing.test_control import retry
-
-
-class HostStatus(HumanReadableEnum):
-    ONLINE = "Online"
-    OFFLINE = "Offline"
-    UNKNOWN = "Unknown"
 
 
 class DiskInfo(dict):
@@ -26,7 +18,9 @@ class Host(ABC):
     def __init__(self, config: HostConfig) -> None:
         self._config = config
-        self._service_config_by_name = {service_config.name: service_config for service_config in config.services}
+        self._service_config_by_name = {
+            service_config.name: service_config for service_config in config.services
+        }
         self._cli_config_by_name = {cli_config.name: cli_config for cli_config in config.clis}
 
     @property
@@ -54,7 +48,7 @@ class Host(ABC):
             raise ValueError(f"Unknown service name: '{service_name}'")
         return service_config
 
-    def get_cli_config(self, cli_name: str, allow_empty: bool = False) -> CLIConfig:
+    def get_cli_config(self, cli_name: str) -> CLIConfig:
         """Returns config of CLI tool with specified name.
 
         The CLI must be located on this host.
@@ -66,17 +60,14 @@ class Host(ABC):
             Config of the CLI tool.
         """
         cli_config = self._cli_config_by_name.get(cli_name)
-        if cli_config is None and not allow_empty:
+        if cli_config is None:
             raise ValueError(f"Unknown CLI name: '{cli_name}'")
         return cli_config
 
     @abstractmethod
-    def get_shell(self, sudo: bool = True) -> Shell:
+    def get_shell(self) -> Shell:
         """Returns shell to this host.
 
-        Args:
-            sudo: if True, run all commands in shell with elevated rights
-
         Returns:
             Shell that executes commands on this host.
         """
@ -85,10 +76,6 @@ class Host(ABC):
def start_host(self) -> None: def start_host(self) -> None:
"""Starts the host machine.""" """Starts the host machine."""
@abstractmethod
def get_host_status(self) -> HostStatus:
"""Check host status."""
@abstractmethod @abstractmethod
def stop_host(self, mode: str) -> None: def stop_host(self, mode: str) -> None:
"""Stops the host machine. """Stops the host machine.
@ -117,37 +104,6 @@ class Host(ABC):
service_name: Name of the service to stop. service_name: Name of the service to stop.
""" """
@abstractmethod
def send_signal_to_service(self, service_name: str, signal: str) -> None:
"""Send signal to service with specified name using kill -<signal>
The service must be hosted on this host.
Args:
service_name: Name of the service to stop.
signal: signal name. See kill -l to all names
"""
@abstractmethod
def mask_service(self, service_name: str) -> None:
"""Prevent the service from start by any activity by masking it.
The service must be hosted on this host.
Args:
service_name: Name of the service to mask.
"""
@abstractmethod
def unmask_service(self, service_name: str) -> None:
"""Allow the service to start by any activity by unmasking it.
The service must be hosted on this host.
Args:
service_name: Name of the service to unmask.
"""
@abstractmethod @abstractmethod
def restart_service(self, service_name: str) -> None: def restart_service(self, service_name: str) -> None:
"""Restarts the service with specified name and waits until it starts. """Restarts the service with specified name and waits until it starts.
@ -156,6 +112,7 @@ class Host(ABC):
service_name: Name of the service to restart. service_name: Name of the service to restart.
""" """
@abstractmethod @abstractmethod
def get_data_directory(self, service_name: str) -> str: def get_data_directory(self, service_name: str) -> str:
""" """
@ -166,6 +123,7 @@ class Host(ABC):
service_name: Name of storage node service. service_name: Name of storage node service.
""" """
@abstractmethod @abstractmethod
def wait_success_suspend_process(self, process_name: str) -> None: def wait_success_suspend_process(self, process_name: str) -> None:
"""Search for a service ID by its name and stop the process """Search for a service ID by its name and stop the process
@ -189,21 +147,6 @@ class Host(ABC):
cache_only: To delete cache only. cache_only: To delete cache only.
""" """
@abstractmethod
def wipefs_storage_node_data(self, service_name: str) -> None:
"""Erases all data of the storage node with specified name.
Args:
service_name: Name of storage node service.
"""
def finish_wipefs(self, service_name: str) -> None:
"""Erases all data of the storage node with specified name.
Args:
service_name: Name of storage node service.
"""
@abstractmethod @abstractmethod
def delete_fstree(self, service_name: str) -> None: def delete_fstree(self, service_name: str) -> None:
""" """
@ -245,22 +188,12 @@ class Host(ABC):
""" """
@abstractmethod @abstractmethod
def delete_file(self, file_path: str) -> None: def delete_pilorama(self, service_name: str) -> None:
""" """
Deletes file with provided file path Deletes all pilorama.db files in the node.
Args: Args:
file_path: full path to the file to delete service_name: Name of storage node service.
"""
@abstractmethod
def is_file_exist(self, file_path: str) -> bool:
"""
Checks if file exist
Args:
file_path: full path to the file to check
""" """
@ -315,38 +248,12 @@ class Host(ABC):
filter_regex: regex to filter output filter_regex: regex to filter output
""" """
@abstractmethod
def get_filtered_logs(
self,
filter_regex: str,
since: Optional[datetime] = None,
until: Optional[datetime] = None,
unit: Optional[str] = None,
exclude_filter: Optional[str] = None,
priority: Optional[str] = None,
) -> str:
"""Get logs from host filtered by regex.
Args:
filter_regex: regex filter for logs.
since: If set, limits the time from which logs should be collected. Must be in UTC.
until: If set, limits the time until which logs should be collected. Must be in UTC.
unit: required unit.
priority: logs level, 0 - emergency, 7 - debug. All messages with that code and higher.
For example, if we specify the -p 2 option, journalctl will show all messages with levels 2, 1 and 0.
Returns:
Found entries as str if any found.
Empty string otherwise.
"""
@abstractmethod @abstractmethod
def is_message_in_logs( def is_message_in_logs(
self, self,
message_regex: str, message_regex: str,
since: Optional[datetime] = None, since: Optional[datetime] = None,
until: Optional[datetime] = None, until: Optional[datetime] = None,
unit: Optional[str] = None,
) -> bool: ) -> bool:
"""Checks logs on host for specified message regex. """Checks logs on host for specified message regex.
@ -359,35 +266,3 @@ class Host(ABC):
True if message found in logs in the given time frame. True if message found in logs in the given time frame.
False otherwise. False otherwise.
""" """
@abstractmethod
def wait_for_service_to_be_in_state(self, systemd_service_name: str, expected_state: str, timeout: int) -> None:
"""
Waites for service to be in specified state.
Args:
systemd_service_name: Service to wait state of.
expected_state: State to wait for
timeout: Seconds to wait
"""
def down_interface(self, interface: str) -> None:
shell = self.get_shell()
shell.exec(f"ip link set {interface} down")
def up_interface(self, interface: str) -> None:
shell = self.get_shell()
shell.exec(f"ip link set {interface} up")
def check_state(self, interface: str) -> str:
shell = self.get_shell()
return shell.exec(f"ip link show {interface} | sed -z 's/.*state \(.*\) mode .*/\\1/'").stdout.strip()
@retry(max_attempts=5, sleep_interval=5, expected_result="UP")
def check_state_up(self, interface: str) -> str:
return self.check_state(interface=interface)
@retry(max_attempts=5, sleep_interval=5, expected_result="DOWN")
def check_state_down(self, interface: str) -> str:
return self.check_state(interface=interface)
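The removed interface helpers pair ip link set up/down with a polling retry decorator, so a flap only counts as complete once ip link show reports the target state. A rough stand-alone equivalent of that pattern, without the testlib retry decorator (function names here are illustrative):

import subprocess
import time

def check_state(interface: str) -> str:
    # Extract the "state UP/DOWN" token from `ip link show <iface>`.
    out = subprocess.run(["ip", "link", "show", interface], capture_output=True, text=True).stdout
    return out.split(" state ")[1].split()[0] if " state " in out else "UNKNOWN"

def wait_state(interface: str, expected: str, max_attempts: int = 5, sleep_interval: int = 5) -> str:
    for _ in range(max_attempts):
        state = check_state(interface)
        if state == expected:
            return state
        time.sleep(sleep_interval)
    raise RuntimeError(f"{interface} did not reach state {expected}")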

View file
@@ -1,95 +0,0 @@
-import json
-import logging
-import logging.config
-
-import httpx
-
-from frostfs_testlib import reporter
-
-timeout = httpx.Timeout(60, read=150)
-
-LOGGING_CONFIG = {
-    "disable_existing_loggers": False,
-    "version": 1,
-    "handlers": {"default": {"class": "logging.StreamHandler", "formatter": "http", "stream": "ext://sys.stderr"}},
-    "formatters": {
-        "http": {
-            "format": "%(levelname)s [%(asctime)s] %(name)s - %(message)s",
-            "datefmt": "%Y-%m-%d %H:%M:%S",
-        }
-    },
-    "loggers": {
-        "httpx": {
-            "handlers": ["default"],
-            "level": "DEBUG",
-        },
-        "httpcore": {
-            "handlers": ["default"],
-            "level": "ERROR",
-        },
-    },
-}
-
-logging.config.dictConfig(LOGGING_CONFIG)
-logger = logging.getLogger("NeoLogger")
-
-
-class HttpClient:
-    @reporter.step("Send {method} request to {url}")
-    def send(self, method: str, url: str, expected_status_code: int = None, **kwargs: dict) -> httpx.Response:
-        transport = httpx.HTTPTransport(verify=False, retries=5)
-        client = httpx.Client(timeout=timeout, transport=transport)
-        response = client.request(method, url, **kwargs)
-
-        self._attach_response(response)
-        logger.info(f"Response: {response.status_code} => {response.text}")
-
-        if expected_status_code:
-            assert response.status_code == expected_status_code, (
-                f"Got {response.status_code} response code" f" while {expected_status_code} expected"
-            )
-
-        return response
-
-    def _attach_response(self, response: httpx.Response):
-        request = response.request
-
-        try:
-            request_headers = json.dumps(dict(request.headers), indent=4)
-        except json.JSONDecodeError:
-            request_headers = str(request.headers)
-
-        try:
-            request_body = request.read()
-            try:
-                request_body = request_body.decode("utf-8")
-            except UnicodeDecodeError as e:
-                request_body = f"Unable to decode binary data to text using UTF-8 encoding: {str(e)}"
-        except Exception as e:
-            request_body = f"Error reading request body: {str(e)}"
-
-        request_body = "" if request_body is None else request_body
-
-        try:
-            response_headers = json.dumps(dict(response.headers), indent=4)
-        except json.JSONDecodeError:
-            response_headers = str(response.headers)
-
-        report = (
-            f"Method: {request.method}\n\n"
-            f"URL: {request.url}\n\n"
-            f"Request Headers: {request_headers}\n\n"
-            f"Request Body: {request_body}\n\n"
-            f"Response Status Code: {response.status_code}\n\n"
-            f"Response Headers: {response_headers}\n\n"
-            f"Response Body: {response.text}\n\n"
-        )
-        curl_request = self._create_curl_request(request.url, request.method, request.headers, request_body)
-
-        reporter.attach(report, "Requests Info")
-        reporter.attach(curl_request, "CURL")
-
-    def _create_curl_request(self, url: str, method: str, headers: httpx.Headers, data: str) -> str:
-        headers = " ".join(f'-H "{name.title()}: {value}"' for name, value in headers.items())
-        data = f" -d '{data}'" if data else ""
-        # Option -k disables SSL certificate verification
-        return f"curl {url} -X {method} {headers}{data} -k"

View file
@@ -1,15 +0,0 @@
-from frostfs_testlib.load.interfaces.loader import Loader
-from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner
-from frostfs_testlib.load.load_config import (
-    EndpointSelectionStrategy,
-    K6ProcessAllocationStrategy,
-    LoadParams,
-    LoadScenario,
-    LoadType,
-    NodesSelectionStrategy,
-    Preset,
-    ReadFrom,
-)
-from frostfs_testlib.load.load_report import LoadReport
-from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader
-from frostfs_testlib.load.runners import DefaultRunner, LocalRunner, S3LocalRunner

View file
@@ -1,14 +0,0 @@
-from abc import ABC, abstractmethod
-
-from frostfs_testlib.shell.interfaces import Shell
-
-
-class Loader(ABC):
-    @abstractmethod
-    def get_shell(self) -> Shell:
-        """Get shell for the loader"""
-
-    @property
-    @abstractmethod
-    def ip(self):
-        """Get address of the loader"""

View file
@@ -1,55 +0,0 @@
-from abc import ABC, abstractmethod
-
-from frostfs_testlib.load.interfaces.loader import Loader
-from frostfs_testlib.load.k6 import K6
-from frostfs_testlib.load.load_config import LoadParams
-from frostfs_testlib.storage.cluster import ClusterNode
-
-
-class ScenarioRunner(ABC):
-    @abstractmethod
-    def prepare(
-        self,
-        load_params: LoadParams,
-        cluster_nodes: list[ClusterNode],
-        nodes_under_load: list[ClusterNode],
-        k6_dir: str,
-    ):
-        """Preparation steps before running the load"""
-
-    @abstractmethod
-    def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
-        """Init K6 instances"""
-
-    @abstractmethod
-    def get_k6_instances(self) -> list[K6]:
-        """Get K6 instances"""
-
-    @abstractmethod
-    def start(self):
-        """Start K6 instances"""
-
-    @abstractmethod
-    def stop(self):
-        """Stop K6 instances"""
-
-    @abstractmethod
-    def preset(self):
-        """Run preset for load"""
-
-    @property
-    @abstractmethod
-    def is_running(self) -> bool:
-        """Returns True if load is running at the moment"""
-
-    @abstractmethod
-    def wait_until_finish(self, soft_timeout: int = 0):
-        """Wait until load is finished"""
-
-    @abstractmethod
-    def get_results(self) -> dict:
-        """Get results from K6 run"""
-
-    @abstractmethod
-    def get_loaders(self) -> list[Loader]:
-        """Return loaders"""

View file
@@ -1,96 +0,0 @@
-from dataclasses import dataclass, field
-
-from frostfs_testlib.load.load_config import LoadParams, LoadScenario
-from frostfs_testlib.load.load_metrics import get_metrics_object
-
-
-@dataclass
-class SummarizedErorrs:
-    total: int = field(default_factory=int)
-    percent: float = field(default_factory=float)
-    threshold: float = field(default_factory=float)
-    by_node: dict[str, int] = field(default_factory=dict)
-
-    def calc_stats(self, operations):
-        self.total += sum(self.by_node.values())
-
-        if not operations:
-            return
-
-        self.percent = self.total / operations * 100
-
-
-@dataclass
-class SummarizedLatencies:
-    avg: float = field(default_factory=float)
-    min: float = field(default_factory=float)
-    max: float = field(default_factory=float)
-    by_node: dict[str, dict[str, int]] = field(default_factory=dict)
-
-    def calc_stats(self):
-        if not self.by_node:
-            return
-
-        avgs = [lt["avg"] for lt in self.by_node.values()]
-        self.avg = sum(avgs) / len(avgs)
-
-        minimal = [lt["min"] for lt in self.by_node.values()]
-        self.min = min(minimal)
-
-        maximum = [lt["max"] for lt in self.by_node.values()]
-        self.max = max(maximum)
-
-
-@dataclass
-class SummarizedStats:
-    threads: int = field(default_factory=int)
-    requested_rate: int = field(default_factory=int)
-    operations: int = field(default_factory=int)
-    rate: float = field(default_factory=float)
-    throughput: float = field(default_factory=float)
-    latencies: SummarizedLatencies = field(default_factory=SummarizedLatencies)
-    errors: SummarizedErorrs = field(default_factory=SummarizedErorrs)
-    total_bytes: int = field(default_factory=int)
-    passed: bool = True
-
-    def calc_stats(self):
-        self.errors.calc_stats(self.operations)
-        self.latencies.calc_stats()
-        self.passed = self.errors.percent <= self.errors.threshold
-
-    @staticmethod
-    def collect(load_params: LoadParams, load_summaries: dict) -> dict[str, "SummarizedStats"]:
-        if load_params.scenario in [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR]:
-            delete_vus = max(load_params.preallocated_deleters or 0, load_params.max_deleters or 0)
-            write_vus = max(load_params.preallocated_writers or 0, load_params.max_writers or 0)
-            read_vus = max(load_params.preallocated_readers or 0, load_params.max_readers or 0)
-        else:
-            write_vus = load_params.writers
-            read_vus = load_params.readers
-            delete_vus = load_params.deleters
-
-        summarized = {
-            "Write": SummarizedStats(threads=write_vus, requested_rate=load_params.write_rate),
-            "Read": SummarizedStats(threads=read_vus, requested_rate=load_params.read_rate),
-            "Delete": SummarizedStats(threads=delete_vus, requested_rate=load_params.delete_rate),
-        }
-
-        for node_key, load_summary in load_summaries.items():
-            metrics = get_metrics_object(load_params.scenario, load_summary)
-            for operation in metrics.operations:
-                target = summarized[operation._NAME]
-                if not operation.total_iterations:
-                    continue
-                target.operations += operation.total_iterations
-                target.rate += operation.rate
-                target.latencies.by_node[node_key] = operation.latency
-                target.throughput += operation.throughput
-                target.errors.threshold = load_params.error_threshold
-                target.total_bytes += operation.total_bytes
-                if operation.failed_iterations:
-                    target.errors.by_node[node_key] = operation.failed_iterations
-
-        for operation in summarized.values():
-            operation.calc_stats()
-
-        return summarized
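The pass/fail rule in calc_stats is plain percentage arithmetic: total failed iterations across nodes divided by total operations, compared against the threshold. A worked check of just that rule (the values are made up):

by_node = {"node1": 3, "node2": 2}   # failed iterations per node
operations = 1000                    # total iterations across nodes
total = sum(by_node.values())        # 5
percent = total / operations * 100   # 0.5 (%)
threshold = 1.0                      # taken from LoadParams.error_threshold
assert (percent <= threshold) is True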

View file
@@ -1,27 +1,27 @@
 import json
 import logging
-import math
 import os
-from dataclasses import dataclass
-from datetime import datetime
-from threading import Event
+from dataclasses import dataclass, fields
 from time import sleep
 from typing import Any
-from urllib.parse import urlparse
-
-from frostfs_testlib import reporter
-from frostfs_testlib.credentials.interfaces import User
-from frostfs_testlib.load.interfaces.loader import Loader
-from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario, LoadType
+
+from frostfs_testlib.load.load_config import (
+    K6ProcessAllocationStrategy,
+    LoadParams,
+    LoadScenario,
+    LoadType,
+)
 from frostfs_testlib.processes.remote_process import RemoteProcess
-from frostfs_testlib.resources.common import STORAGE_USER_NAME
-from frostfs_testlib.resources.load_params import K6_STOP_SIGNAL_TIMEOUT, K6_TEARDOWN_PERIOD
+from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib.resources.load_params import K6_STOP_SIGNAL_TIMEOUT, LOAD_NODE_SSH_USER
 from frostfs_testlib.shell import Shell
+from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.testing.test_control import wait_for_success

 EXIT_RESULT_CODE = 0
 logger = logging.getLogger("NeoLogger")
+reporter = get_reporter()


 @dataclass
@@ -42,52 +42,31 @@ class K6:
         endpoints: list[str],
         k6_dir: str,
         shell: Shell,
-        loader: Loader,
-        user: User,
+        load_node: str,
+        wallet: WalletInfo,
     ):
         if load_params.scenario is None:
             raise RuntimeError("Scenario should not be none")

-        self.load_params = load_params
+        self.load_params: LoadParams = load_params
         self.endpoints = endpoints
-        self.loader = loader
-        self.shell = shell
-        self.user = user
-        self.preset_output: str = ""
+        self.load_node: str = load_node
+        self.shell: Shell = shell
+        self.wallet = wallet
+        self.scenario: LoadScenario = load_params.scenario
         self.summary_json: str = os.path.join(
             self.load_params.working_dir,
-            f"{self.load_params.load_id}_{self.load_params.scenario.value}_summary.json",
+            f"{self.load_params.load_id}_{self.scenario.value}_summary.json",
         )

         self._k6_dir: str = k6_dir

-        command = (
-            f"{self._generate_env_variables()}{self._k6_dir}/k6 run {self._generate_k6_variables()} "
-            f"{self._k6_dir}/scenarios/{self.load_params.scenario.value}.js"
-        )
-        remote_user = STORAGE_USER_NAME if self.load_params.scenario == LoadScenario.LOCAL else None
-        process_id = self.load_params.load_id if self.load_params.scenario != LoadScenario.VERIFY else f"{self.load_params.load_id}_verify"
-        self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir, remote_user, process_id)
-
-    def _get_fill_percents(self):
-        fill_percents = self.shell.exec("df -H --output=source,pcent,target | grep frostfs | grep data").stdout.split("\n")
-        return [line.split() for line in fill_percents][:-1]
-
-    def check_fill_percent(self):
-        fill_percents = self._get_fill_percents()
-        percent_mean = 0
-        for line in fill_percents:
-            percent_mean += float(line[1].split("%")[0])
-        percent_mean = percent_mean / len(fill_percents)
-        logger.info(f"{self.loader.ip} mean fill percent is {percent_mean}")
-        return percent_mean >= self.load_params.fill_percent
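check_fill_percent above averages the pcent column of df across all frostfs data mounts. A small worked example of that parsing, with a canned df line instead of a live shell (values are made up):

stdout = "/dev/sda1 42% /srv/frostfs/data\n/dev/sdb1 58% /srv/frostfs/data2\n"
rows = [line.split() for line in stdout.split("\n")][:-1]   # drop the trailing empty line
percent_mean = sum(float(row[1].rstrip("%")) for row in rows) / len(rows)
assert percent_mean == 50.0   # the load stops once this reaches fill_percent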
     @property
     def process_dir(self) -> str:
         return self._k6_process.process_dir

+    @reporter.step_deco("Preset containers and objects")
     def preset(self) -> str:
-        with reporter.step(f"Run preset on loader {self.loader.ip} for endpoints {self.endpoints}"):
         preset_grpc = f"{self._k6_dir}/scenarios/preset/preset_grpc.py"
         preset_s3 = f"{self._k6_dir}/scenarios/preset/preset_s3.py"
         preset_map = {
@@ -99,136 +78,105 @@ class K6:
         base_args = {
             preset_grpc: [
                 preset_grpc,
-                f"--endpoint {','.join(self.endpoints)}",
-                f"--wallet {self.user.wallet.path} ",
-                f"--config {self.user.wallet.config_path} ",
+                f"--endpoint {self.endpoints[0]}",
+                f"--wallet {self.wallet.path} ",
+                f"--config {self.wallet.config_path} ",
             ],
             preset_s3: [
                 preset_s3,
-                f"--endpoint {','.join(self.endpoints)}",
+                f"--endpoint {self.endpoints[0]}",
             ],
         }

         preset_scenario = preset_map[self.load_params.load_type]
         command_args = base_args[preset_scenario].copy()

-        command_args += self.load_params.get_preset_arguments()
+        command_args += [
+            f"--{field.metadata['preset_argument']} '{getattr(self.load_params, field.name)}'"
+            for field in fields(self.load_params)
+            if field.metadata
+            and self.scenario in field.metadata["applicable_scenarios"]
+            and field.metadata["preset_argument"]
+            and getattr(self.load_params, field.name) is not None
+        ]
+
+        if self.load_params.preset:
+            command_args += [
+                f"--{field.metadata['preset_argument']} '{getattr(self.load_params.preset, field.name)}'"
+                for field in fields(self.load_params.preset)
+                if field.metadata
+                and self.scenario in field.metadata["applicable_scenarios"]
+                and field.metadata["preset_argument"]
+                and getattr(self.load_params.preset, field.name) is not None
+            ]

         command = " ".join(command_args)
         result = self.shell.exec(command)

-        assert result.return_code == EXIT_RESULT_CODE, f"Return code of preset is not zero: {result.stdout}"
-
-        self.preset_output = result.stdout.strip("\n")
-        return self.preset_output
+        assert (
+            result.return_code == EXIT_RESULT_CODE
+        ), f"Return code of preset is not zero: {result.stdout}"
+        return result.stdout.strip("\n")

-    @reporter.step("Generate K6 variables")
-    def _generate_k6_variables(self) -> str:
-        env_vars = self.load_params.get_k6_vars()
+    @reporter.step_deco("Generate K6 command")
+    def _generate_env_variables(self) -> str:
+        env_vars = {
+            field.metadata["env_variable"]: getattr(self.load_params, field.name)
+            for field in fields(self.load_params)
+            if field.metadata
+            and self.scenario in field.metadata["applicable_scenarios"]
+            and field.metadata["env_variable"]
+            and getattr(self.load_params, field.name) is not None
+        }
+
+        if self.load_params.preset:
+            env_vars.update(
+                {
+                    field.metadata["env_variable"]: getattr(self.load_params.preset, field.name)
+                    for field in fields(self.load_params.preset)
+                    if field.metadata
+                    and self.scenario in field.metadata["applicable_scenarios"]
+                    and field.metadata["env_variable"]
+                    and getattr(self.load_params.preset, field.name) is not None
+                }
+            )

         env_vars[f"{self.load_params.load_type.value.upper()}_ENDPOINTS"] = ",".join(self.endpoints)
         env_vars["SUMMARY_JSON"] = self.summary_json

-        reporter.attach("\n".join(f"{param}: {value}" for param, value in env_vars.items()), "K6 ENV variables")
-        return " ".join([f"-e {param}='{value}'" for param, value in env_vars.items() if value is not None])
-
-    @reporter.step("Generate env variables")
-    def _generate_env_variables(self) -> str:
-        env_vars = self.load_params.get_env_vars()
-        if not env_vars:
-            return ""
-        reporter.attach("\n".join(f"{param}: {value}" for param, value in env_vars.items()), "ENV variables")
-        return " ".join([f"{param}='{value}'" for param, value in env_vars.items() if value is not None]) + " "
-
-    def get_start_time(self) -> datetime:
-        return datetime.fromtimestamp(self._k6_process.start_time())
-
-    def get_end_time(self) -> datetime:
-        return datetime.fromtimestamp(self._k6_process.end_time())
-
-    def start(self) -> None:
-        with reporter.step(f"Start load from loader {self.loader.ip} on endpoints {self.endpoints}"):
-            self._k6_process.start()
-
-    def wait_until_finished(self, event: Event, soft_timeout: int = 0) -> None:
-        with reporter.step(f"Wait until load is finished from loader {self.loader.ip} on endpoints {self.endpoints}"):
-            if self.load_params.scenario == LoadScenario.VERIFY:
-                timeout = self.load_params.verify_time or 0
-            else:
-                timeout = self.load_params.load_time or 0
-
-            start_time = int(self.get_start_time().timestamp())
-            current_time = int(datetime.utcnow().timestamp())
-            working_time = current_time - start_time
-            remaining_time = timeout - working_time
-
-            setup_teardown_time = (
-                int(K6_TEARDOWN_PERIOD) + self.load_params.get_init_time() + int(self.load_params.setup_timeout.replace("s", "").strip())
-            )
-            remaining_time_including_setup_and_teardown = remaining_time + setup_teardown_time
-            timeout = remaining_time_including_setup_and_teardown
-
-            if soft_timeout:
-                timeout = min(timeout, soft_timeout)
+        reporter.attach(
+            "\n".join(f"{param}: {value}" for param, value in env_vars.items()), "K6 ENV variables"
+        )
+        return " ".join(
+            [f"-e {param}='{value}'" for param, value in env_vars.items() if value is not None]
+        )

-            original_timeout = timeout
+    @reporter.step_deco("Start K6 on initiator")
+    def start(self) -> None:
+        command = (
+            f"{self._k6_dir}/k6 run {self._generate_env_variables()} "
+            f"{self._k6_dir}/scenarios/{self.scenario.value}.js"
+        )
+        self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir)

-            timeouts = {
-                "K6 start time": start_time,
-                "Current time": current_time,
-                "K6 working time": working_time,
-                "Remaining time for load": remaining_time,
-                "Setup and teardown": setup_teardown_time,
-                "Remaining time including setup/teardown": remaining_time_including_setup_and_teardown,
-                "Soft timeout": soft_timeout,
-                "Selected timeout": original_timeout,
-            }
-            reporter.attach("\n".join([f"{k}: {v}" for k, v in timeouts.items()]), "timeouts.txt")
-
-            min_wait_interval = 10
-            wait_interval = min_wait_interval
+    @reporter.step_deco("Wait until K6 is finished")
+    def wait_until_finished(self, timeout: int = 0, k6_should_be_running: bool = False) -> None:
+        wait_interval = 10
         if self._k6_process is None:
             assert "No k6 instances were executed"
+        if k6_should_be_running:
+            assert self._k6_process.running(), "k6 should be running."
         while timeout > 0:
-            if not self.load_params.fill_percent is None:
-                with reporter.step(f"Check the percentage of filling of all data disks on the node"):
-                    if self.check_fill_percent():
-                        logger.info(f"Stopping load because disks are filled more than {self.load_params.fill_percent}%")
-                        event.set()
-                        self.stop()
-                        return
-
-            if event.is_set():
-                self.stop()
-                return
-
             if not self._k6_process.running():
                 return
-
-            remaining_time_hours = f"{timeout//3600}h" if timeout // 3600 != 0 else ""
-            remaining_time_minutes = f"{timeout//60%60}m" if timeout // 60 % 60 != 0 else ""
-            logger.info(
-                f"K6 is running. Remaining time {remaining_time_hours}{remaining_time_minutes}{timeout%60}s. Next check after {wait_interval} seconds..."
-            )
+            logger.info(f"K6 is running. Waiting {wait_interval} seconds...")
             sleep(wait_interval)
-            timeout -= min(timeout, wait_interval)
-            wait_interval = max(
-                min(timeout, int(math.log2(timeout + 1)) * 15) - min_wait_interval,
-                min_wait_interval,
-            )
-
-        if not self._k6_process.running():
-            return
+            timeout -= wait_interval

         self.stop()
-        if not soft_timeout:
-            raise TimeoutError(f"Expected K6 to finish after {original_timeout} sec.")
+        raise TimeoutError(f"Expected K6 finished in {timeout} sec.")

     def get_results(self) -> Any:
-        with reporter.step(f"Get load results from loader {self.loader.ip} on endpoints {self.endpoints}"):
+        with reporter.step(f"K6 results from {self.load_node}"):
             self.__log_output()

             if not self.summary_json:
@@ -236,30 +184,33 @@ class K6:
             summary_text = self.shell.exec(f"cat {self.summary_json}").stdout
             summary_json = json.loads(summary_text)
-            endpoint = urlparse(self.endpoints[0]).netloc or self.endpoints[0]
             allure_filenames = {
-                K6ProcessAllocationStrategy.PER_LOAD_NODE: f"{self.loader.ip}_{self.load_params.scenario.value}_summary.json",
-                K6ProcessAllocationStrategy.PER_ENDPOINT: f"{self.loader.ip}_{self.load_params.scenario.value}_{endpoint}_summary.json",
+                K6ProcessAllocationStrategy.PER_LOAD_NODE: f"{self.load_node}_{self.scenario.value}_summary.json",
+                K6ProcessAllocationStrategy.PER_ENDPOINT: f"{self.load_node}_{self.scenario.value}_{self.endpoints[0]}_summary.json",
             }
             allure_filename = allure_filenames[self.load_params.k6_process_allocation_strategy]

             reporter.attach(summary_text, allure_filename)
             return summary_json

+    @reporter.step_deco("Stop K6")
     def stop(self) -> None:
-        with reporter.step(f"Stop load from loader {self.loader.ip} on endpoints {self.endpoints}"):
-            if self.is_running():
-                self._k6_process.stop()
+        if self.is_running:
+            self._k6_process.stop()

-            self._wait_until_process_end()
+        self._wait_until_process_end()

+    @property
     def is_running(self) -> bool:
         if self._k6_process:
             return self._k6_process.running()
         return False

-    @reporter.step("Wait until K6 process end")
-    @wait_for_success(K6_STOP_SIGNAL_TIMEOUT, 15, False, False, "Can not stop K6 process within timeout")
+    @reporter.step_deco("Wait until process end")
+    @wait_for_success(
+        K6_STOP_SIGNAL_TIMEOUT, 15, False, False, "Can not stop K6 process within timeout"
+    )
     def _wait_until_process_end(self):
         return self._k6_process.running()
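Taken together, the class is driven as preset, start, wait, collect, stop. A hedged end-to-end sketch against the new signature (the shell, wallet, params and host names are stand-ins, not values from this repository):

k6 = K6(load_params, endpoints=["s01.frostfs.devenv:8080"], k6_dir="/opt/k6",
        shell=shell, load_node="loader-01", wallet=wallet)
k6.preset()                            # pre-create containers/objects
k6.start()                             # launch the remote k6 process
try:
    k6.wait_until_finished(timeout=load_params.load_time or 0)
    summary = k6.get_results()         # parsed *_summary.json
finally:
    k6.stop()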

View file
@@ -1,38 +1,7 @@
-import math
 import os
-from dataclasses import dataclass, field, fields, is_dataclass
+from dataclasses import dataclass, field
 from enum import Enum
-from types import MappingProxyType
-from typing import Any, Callable, Optional, get_args
-
-from frostfs_testlib.utils.converting_utils import calc_unit
+from typing import Optional
-
-
-def convert_time_to_seconds(time: int | str | None) -> int:
-    if time is None:
-        return None
-    if str(time).isdigit():
-        seconds = int(time)
-    else:
-        days, hours, minutes = 0, 0, 0
-        if "d" in time:
-            days, time = time.split("d")
-        if "h" in time:
-            hours, time = time.split("h")
-        if "min" in time:
-            minutes = time.replace("min", "")
-        seconds = int(days) * 86400 + int(hours) * 3600 + int(minutes) * 60
-    return seconds
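The removed helper parses duration strings of the form "NdNhNmin" into seconds; its behavior on a few inputs, following the code above:

assert convert_time_to_seconds(90) == 90              # plain digits pass through
assert convert_time_to_seconds("30min") == 1800
assert convert_time_to_seconds("2h") == 7200
assert convert_time_to_seconds("1d2h30min") == 95400  # 86400 + 7200 + 1800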
-
-
-def force_list(input: str | list[str]):
-    if input is None:
-        return None
-
-    if isinstance(input, list):
-        return list(map(str.strip, input))
-
-    return [input.strip()]


 class LoadType(Enum):
@@ -46,17 +15,8 @@ class LoadScenario(Enum):
     gRPC_CAR = "grpc_car"
     S3 = "s3"
     S3_CAR = "s3_car"
-    S3_MULTIPART = "s3_multipart"
-    S3_LOCAL = "s3local"
     HTTP = "http"
     VERIFY = "verify"
-    LOCAL = "local"
-
-
-class ReadFrom(Enum):
-    REGISTRY = "registry"
-    PRESET = "preset"
-    MANUAL = "manual"


 all_load_scenarios = [
@@ -65,57 +25,29 @@ all_load_scenarios = [
     LoadScenario.HTTP,
     LoadScenario.S3_CAR,
     LoadScenario.gRPC_CAR,
-    LoadScenario.LOCAL,
-    LoadScenario.S3_MULTIPART,
-    LoadScenario.S3_LOCAL,
 ]
 all_scenarios = all_load_scenarios.copy() + [LoadScenario.VERIFY]

-constant_vus_scenarios = [
-    LoadScenario.gRPC,
-    LoadScenario.S3,
-    LoadScenario.HTTP,
-    LoadScenario.LOCAL,
-    LoadScenario.S3_MULTIPART,
-    LoadScenario.S3_LOCAL,
-]
+constant_vus_scenarios = [LoadScenario.gRPC, LoadScenario.S3, LoadScenario.HTTP]
 constant_arrival_rate_scenarios = [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR]

-grpc_preset_scenarios = [
-    LoadScenario.gRPC,
-    LoadScenario.HTTP,
-    LoadScenario.gRPC_CAR,
-    LoadScenario.LOCAL,
-]
-s3_preset_scenarios = [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.S3_MULTIPART, LoadScenario.S3_LOCAL]
-
-
-@dataclass
-class MetaField:
-    name: str
-    metadata: MappingProxyType
-    value: Any
+grpc_preset_scenarios = [LoadScenario.gRPC, LoadScenario.HTTP, LoadScenario.gRPC_CAR]
+s3_preset_scenarios = [LoadScenario.S3, LoadScenario.S3_CAR]


 def metadata_field(
     applicable_scenarios: list[LoadScenario],
     preset_param: Optional[str] = None,
     scenario_variable: Optional[str] = None,
-    string_repr: Optional[bool] = True,
     distributed: Optional[bool] = False,
-    formatter: Optional[Callable] = None,
-    env_variable: Optional[str] = None,
 ):
     return field(
         default=None,
         metadata={
             "applicable_scenarios": applicable_scenarios,
             "preset_argument": preset_param,
-            "scenario_variable": scenario_variable,
-            "string_repr": string_repr,
+            "env_variable": scenario_variable,
             "distributed": distributed,
-            "formatter": formatter,
-            "env_variable": env_variable,
         },
     )
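metadata_field attaches routing information to each dataclass field: which scenarios it applies to, the CLI flag for the preset script, and the env variable for k6. A minimal illustration of how such metadata is read back with dataclasses.fields (the Demo class and its values are made up):

from dataclasses import dataclass, field, fields
from typing import Optional

@dataclass
class Demo:
    workers: Optional[int] = field(default=None, metadata={"preset_argument": "workers", "env_variable": None})

d = Demo(workers=8)
for f in fields(d):
    if f.metadata.get("preset_argument") and getattr(d, f.name) is not None:
        print(f"--{f.metadata['preset_argument']} '{getattr(d, f.name)}'")   # prints: --workers '8'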
@@ -129,8 +61,6 @@ class NodesSelectionStrategy(Enum):
     ALL_EXCEPT_UNDER_TEST = "ALL_EXCEPT_UNDER_TEST"
     # Select ONE random node except under test (useful for failover).
     RANDOM_SINGLE_EXCEPT_UNDER_TEST = "RANDOM_SINGLE_EXCEPT_UNDER_TEST"
-    # Select node under test
-    NODE_UNDER_TEST = "NODE_UNDER_TEST"


 class EndpointSelectionStrategy(Enum):
@@ -152,75 +82,33 @@ class K6ProcessAllocationStrategy(Enum):
     PER_ENDPOINT = "PER_ENDPOINT"


-class MetaConfig:
-    def _get_field_formatter(self, field_name: str) -> Callable | None:
-        data_fields = fields(self)
-        formatters = [
-            field.metadata["formatter"]
-            for field in data_fields
-            if field.name == field_name and "formatter" in field.metadata and field.metadata["formatter"] != None
-        ]
-        if formatters:
-            return formatters[0]
-
-        return None
-
-    def __setattr__(self, field_name, value):
-        formatter = self._get_field_formatter(field_name)
-        if formatter:
-            value = formatter(value)
-
-        super().__setattr__(field_name, value)
-
-
 @dataclass
-class Preset(MetaConfig):
+class Preset:
     # ------ COMMON ------
     # Amount of objects which should be created
-    objects_count: Optional[int] = metadata_field(all_load_scenarios, "preload_obj", None, False)
+    objects_count: Optional[int] = metadata_field(all_load_scenarios, "preload_obj", None)
     # Preset json. Filled automatically.
-    pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON", False)
+    pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON")
     # Workers count for preset
-    workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None, False)
-    # ACL for containers/buckets
-    acl: Optional[str] = metadata_field(all_load_scenarios, "acl", None, False)
+    workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None)

     # ------ GRPC ------
     # Amount of containers which should be created
-    containers_count: Optional[int] = metadata_field(grpc_preset_scenarios, "containers", None, False)
+    containers_count: Optional[int] = metadata_field(grpc_preset_scenarios, "containers", None)
     # Container placement policy for containers for gRPC
-    container_placement_policy: Optional[list[str]] = metadata_field(grpc_preset_scenarios, "policy", None, False, formatter=force_list)
-    # Number of retries for creation of container
-    container_creation_retry: Optional[int] = metadata_field(grpc_preset_scenarios, "retry", None, False)
+    container_placement_policy: Optional[str] = metadata_field(
+        grpc_preset_scenarios, "policy", None
+    )

     # ------ S3 ------
     # Amount of buckets which should be created
-    buckets_count: Optional[int] = metadata_field(s3_preset_scenarios, "buckets", None, False)
+    buckets_count: Optional[int] = metadata_field(s3_preset_scenarios, "buckets", None)
     # S3 region (AKA placement policy for S3 buckets)
-    s3_location: Optional[list[str]] = metadata_field(s3_preset_scenarios, "location", None, False, formatter=force_list)
-
-    # Delay between containers creation and object upload for preset
-    object_upload_delay: Optional[int] = metadata_field(all_load_scenarios, "sleep", None, False)
-
-    # Flag to control preset errors
-    ignore_errors: Optional[bool] = metadata_field(all_load_scenarios, "ignore-errors", None, False)
-
-    # Flag to ensure created containers store data on local endpoints
-    local: Optional[bool] = metadata_field(grpc_preset_scenarios, "local", None, False)
+    s3_location: Optional[str] = metadata_field(s3_preset_scenarios, "location", None)


 @dataclass
-class PrometheusParams(MetaConfig):
-    # Prometheus server URL
-    server_url: Optional[str] = metadata_field(all_load_scenarios, env_variable="K6_PROMETHEUS_RW_SERVER_URL", string_repr=False)
-    # Prometheus trend stats
-    trend_stats: Optional[str] = metadata_field(all_load_scenarios, env_variable="K6_PROMETHEUS_RW_TREND_STATS", string_repr=False)
-    # Additional tags
-    metrics_tags: Optional[str] = metadata_field(all_load_scenarios, None, "METRIC_TAGS", False)
-
-
-@dataclass
-class LoadParams(MetaConfig):
+class LoadParams:
     # ------- CONTROL PARAMS -------
     # Load type can be gRPC, HTTP, S3.
     load_type: LoadType
@@ -237,253 +125,90 @@ class LoadParams(MetaConfig):
     verify: Optional[bool] = None
     # Just an id for the load, to distinguish it between runs. Filled automatically.
     load_id: Optional[str] = None
-    # Acceptable number of load errors in %
-    # 100 means 100% errors allowed
-    # 1.5 means 1.5% errors allowed
-    # 0 means no errors allowed
-    error_threshold: Optional[float] = None
     # Working directory
     working_dir: Optional[str] = None
     # Preset for the k6 run
     preset: Optional[Preset] = None
-    # K6 download url
-    k6_url: Optional[str] = None
-    # Requests module url
-    requests_module_url: Optional[str] = None
-    # aws cli download url
-    awscli_url: Optional[str] = None
-    # No ssl verification flag
-    no_verify_ssl: Optional[bool] = metadata_field(
-        [
-            LoadScenario.S3,
-            LoadScenario.S3_CAR,
-            LoadScenario.S3_MULTIPART,
-            LoadScenario.S3_LOCAL,
-            LoadScenario.VERIFY,
-            LoadScenario.HTTP,
-        ],
-        "no-verify-ssl",
-        "NO_VERIFY_SSL",
-        False,
-    )
-    # Percentage of filling of all data disks on all nodes
-    fill_percent: Optional[float] = None
-    # If specified, max payload size in GB of the storage engine. If the storage engine is already full, no new objects will be saved.
-    max_total_size_gb: Optional[float] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "MAX_TOTAL_SIZE_GB")
-    # If set, the payload is generated on the fly and is not read into memory fully.
-    streaming: Optional[int] = metadata_field(all_load_scenarios, None, "STREAMING", False)
-    # Output format
-    output: Optional[str] = metadata_field(all_load_scenarios, None, "K6_OUT", False)
-    # Prometheus params
-    prometheus: Optional[PrometheusParams] = None

     # ------- COMMON SCENARIO PARAMS -------
     # Load time is the maximum duration for k6 to give load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value.
-    load_time: Optional[int] = metadata_field(all_load_scenarios, None, "DURATION", False, formatter=convert_time_to_seconds)
+    load_time: Optional[int] = metadata_field(all_load_scenarios, None, "DURATION")
     # Object size in KB for load and preset.
-    object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE", False)
-    # For read operations, controls from which set to get objects to read
-    read_from: Optional[ReadFrom] = None
-    # For read operations done from REGISTRY, controls the delay an object should live before it is used for a read operation
-    read_age: Optional[int] = metadata_field(all_load_scenarios, None, "READ_AGE", False)
+    object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE")
     # Output registry K6 file. Filled automatically.
-    registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE", False)
-    # In case we want to use a custom registry file left from another load run
-    custom_registry: Optional[str] = None
-    # In case we want to force a fresh registry file
-    force_fresh_registry: Optional[bool] = None
+    registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE")
     # Specifies the minimum duration of every single execution (i.e. iteration).
     # Any iterations that are shorter than this value will cause that VU to
     # sleep for the remainder of the time until the specified minimum duration is reached.
-    min_iteration_duration: Optional[str] = metadata_field(all_load_scenarios, None, "K6_MIN_ITERATION_DURATION", False)
-    # Prepare/cut objects locally on client before sending
-    prepare_locally: Optional[bool] = metadata_field([LoadScenario.gRPC, LoadScenario.gRPC_CAR], None, "PREPARE_LOCALLY", False)
+    min_iteration_duration: Optional[str] = metadata_field(
+        all_load_scenarios, None, "K6_MIN_ITERATION_DURATION"
+    )
     # Specifies K6 setupTimeout time. Currently hardcoded in xk6 as 5 seconds for all scenarios
     # https://k6.io/docs/using-k6/k6-options/reference/#setup-timeout
-    setup_timeout: Optional[str] = metadata_field(all_scenarios, None, "K6_SETUP_TIMEOUT", False)
-    # Delay for read operations in case we read from registry
-    read_age: Optional[int] = metadata_field(all_load_scenarios, None, "READ_AGE", None, False)
-    # Initialization time for each VU for k6 load
-    vu_init_time: Optional[float] = None
+    setup_timeout: Optional[str] = metadata_field(all_scenarios, None, "K6_SETUP_TIMEOUT")

     # ------- CONSTANT VUS SCENARIO PARAMS -------
     # Amount of Writers VU.
-    writers: Optional[int] = metadata_field(constant_vus_scenarios, None, "WRITERS", True, True)
+    writers: Optional[int] = metadata_field(constant_vus_scenarios, None, "WRITERS", True)
     # Amount of Readers VU.
-    readers: Optional[int] = metadata_field(constant_vus_scenarios, None, "READERS", True, True)
+    readers: Optional[int] = metadata_field(constant_vus_scenarios, None, "READERS", True)
     # Amount of Deleters VU.
-    deleters: Optional[int] = metadata_field(constant_vus_scenarios, None, "DELETERS", True, True)
+    deleters: Optional[int] = metadata_field(constant_vus_scenarios, None, "DELETERS", True)

     # ------- CONSTANT ARRIVAL RATE SCENARIO PARAMS -------
     # Number of iterations to start during each timeUnit period for write.
-    write_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "WRITE_RATE", True, True)
+    write_rate: Optional[int] = metadata_field(
+        constant_arrival_rate_scenarios, None, "WRITE_RATE", True
+    )
     # Number of iterations to start during each timeUnit period for read.
-    read_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "READ_RATE", True, True)
+    read_rate: Optional[int] = metadata_field(
+        constant_arrival_rate_scenarios, None, "READ_RATE", True
+    )
     # Number of iterations to start during each timeUnit period for delete.
-    delete_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "DELETE_RATE", True, True)
+    delete_rate: Optional[int] = metadata_field(
+        constant_arrival_rate_scenarios, None, "DELETE_RATE", True
+    )
     # Amount of preAllocatedVUs for write operations.
-    preallocated_writers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_WRITERS", True, True)
+    preallocated_writers: Optional[int] = metadata_field(
+        constant_arrival_rate_scenarios, None, "PRE_ALLOC_WRITERS", True
+    )
     # Amount of maxVUs for write operations.
-    max_writers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_WRITERS", False, True)
+    max_writers: Optional[int] = metadata_field(
+        constant_arrival_rate_scenarios, None, "MAX_WRITERS", True
+    )
     # Amount of preAllocatedVUs for read operations.
-    preallocated_readers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_READERS", True, True)
+    preallocated_readers: Optional[int] = metadata_field(
+        constant_arrival_rate_scenarios, None, "PRE_ALLOC_READERS", True
+    )
     # Amount of maxVUs for read operations.
-    max_readers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_READERS", False, True)
+    max_readers: Optional[int] = metadata_field(
+        constant_arrival_rate_scenarios, None, "MAX_READERS", True
+    )
     # Amount of preAllocatedVUs for delete operations.
-    preallocated_deleters: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_DELETERS", True, True)
+    preallocated_deleters: Optional[int] = metadata_field(
+        constant_arrival_rate_scenarios, None, "PRE_ALLOC_DELETERS", True
+    )
     # Amount of maxVUs for delete operations.
-    max_deleters: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_DELETERS", False, True)
-
-    # Multipart
-    # Number of parts to upload in parallel
-    writers_multipart: Optional[int] = metadata_field([LoadScenario.S3_MULTIPART], None, "WRITERS_MULTIPART", False, True)
-    # Part size must be greater than 5 MB
-    write_object_part_size: Optional[int] = metadata_field([LoadScenario.S3_MULTIPART], None, "WRITE_OBJ_PART_SIZE", False)
+    max_deleters: Optional[int] = metadata_field(
+        constant_arrival_rate_scenarios, None, "MAX_DELETERS", True
+    )

     # Period of time to apply the rate value.
-    time_unit: Optional[str] = metadata_field(constant_arrival_rate_scenarios, None, "TIME_UNIT", False)
+    time_unit: Optional[str] = metadata_field(constant_arrival_rate_scenarios, None, "TIME_UNIT")

     # ------- VERIFY SCENARIO PARAMS -------
     # Maximum verification time for k6 to verify objects. Default is BACKGROUND_LOAD_MAX_VERIFY_TIME (3600).
-    verify_time: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "TIME_LIMIT", False)
+    verify_time: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "TIME_LIMIT")
     # Amount of Verification VU.
-    verify_clients: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "CLIENTS", True, False)
-
-    # ------- LOCAL SCENARIO PARAMS -------
-    # Config file location (filled automatically)
-    config_file: Optional[str] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "CONFIG_FILE", False)
-    # Config directory location (filled automatically)
-    config_dir: Optional[str] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "CONFIG_DIR", False)
+    verify_clients: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "CLIENTS", True)

     def set_id(self, load_id):
         self.load_id = load_id
-        if self.read_from == ReadFrom.REGISTRY:
-            self.registry_file = os.path.join(self.working_dir, f"{load_id}_registry.bolt")
-        # For now it's okay to have it this way
-        if self.custom_registry is not None:
-            self.registry_file = self.custom_registry
-        if self.read_from == ReadFrom.PRESET:
-            self.registry_file = None
+        self.registry_file = os.path.join(self.working_dir, f"{load_id}_registry.bolt")
         if self.preset:
             self.preset.pregen_json = os.path.join(self.working_dir, f"{load_id}_prepare.json")
-
-    def get_k6_vars(self):
-        env_vars = {
-            meta_field.metadata["scenario_variable"]: meta_field.value
-            for meta_field in self._get_meta_fields(self)
-            if self.scenario in meta_field.metadata["applicable_scenarios"]
-            and meta_field.metadata["scenario_variable"]
-            and meta_field.value is not None
-        }
-        return env_vars
-
-    def get_env_vars(self):
-        env_vars = {
-            meta_field.metadata["env_variable"]: meta_field.value
-            for meta_field in self._get_meta_fields(self)
-            if self.scenario in meta_field.metadata["applicable_scenarios"]
-            and meta_field.metadata["env_variable"]
-            and meta_field.value is not None
-        }
-        return env_vars
-
-    def __post_init__(self):
-        default_scenario_map = {
-            LoadType.gRPC: LoadScenario.gRPC,
-            LoadType.HTTP: LoadScenario.HTTP,
-            LoadType.S3: LoadScenario.S3,
-        }
-        if self.scenario is None:
-            self.scenario = default_scenario_map[self.load_type]
-
-    def get_preset_arguments(self):
-        command_args = [
-            self._get_preset_argument(meta_field)
-            for meta_field in self._get_meta_fields(self)
-            if self.scenario in meta_field.metadata["applicable_scenarios"]
-            and meta_field.metadata["preset_argument"]
-            and meta_field.value is not None
-            and self._get_preset_argument(meta_field)
-        ]
-        return command_args
-
-    def get_init_time(self) -> int:
-        return math.ceil(self._get_total_vus() * self.vu_init_time)
-
-    def _get_total_vus(self) -> int:
-        vu_fields = ["writers", "preallocated_writers", "readers", "preallocated_readers"]
-        data_fields = [getattr(self, field.name) or 0 for field in fields(self) if field.name in vu_fields]
-        return sum(data_fields)
-
-    def _get_applicable_fields(self):
-        applicable_fields = [
-            meta_field
-            for meta_field in self._get_meta_fields(self)
-            if self.scenario in meta_field.metadata["applicable_scenarios"] and meta_field.value
-        ]
-        return applicable_fields
-
-    @staticmethod
-    def _get_preset_argument(meta_field: MetaField) -> str:
-        if isinstance(meta_field.value, bool):
-            # For preset calls, bool values are passed with just --<argument_name> if the value is True
-            return f"--{meta_field.metadata['preset_argument']}" if meta_field.value else ""
-        if isinstance(meta_field.value, list):
-            return (
-                " ".join(f"--{meta_field.metadata['preset_argument']} '{value}'" for value in meta_field.value) if meta_field.value else ""
-            )
-        return f"--{meta_field.metadata['preset_argument']} '{meta_field.value}'"
-
-    @staticmethod
-    def _get_meta_fields(instance) -> list[MetaField]:
-        data_fields = fields(instance)
-        fields_with_data = [
-            MetaField(field.name, field.metadata, getattr(instance, field.name))
-            for field in data_fields
-            if field.metadata and getattr(instance, field.name) is not None
-        ]
-        for field in data_fields:
-            actual_field_type = get_args(field.type)[0] if len(get_args(field.type)) else get_args(field.type)
-            if is_dataclass(actual_field_type) and getattr(instance, field.name):
-                fields_with_data += LoadParams._get_meta_fields(getattr(instance, field.name))
-        return fields_with_data or []
-
-    def __str__(self) -> str:
-        load_type_str = self.scenario.value if self.scenario else self.load_type.value
-        # TODO: migrate load_params defaults to testlib
-        if self.object_size is not None:
-            size, unit = calc_unit(self.object_size, 1)
-            static_params = [f"{load_type_str} {size:.4g} {unit}"]
-        else:
-            static_params = [f"{load_type_str}"]
-
-        dynamic_params = [
-            f"{meta_field.name}={meta_field.value}" for meta_field in self._get_applicable_fields() if meta_field.metadata["string_repr"]
-        ]
-        params = ", ".join(static_params + dynamic_params)
-
-        return params
-
-    def __repr__(self) -> str:
-        return self.__str__()

View file
@@ -1,54 +1,83 @@
 from abc import ABC
-from typing import Any, Optional
+from typing import Any

 from frostfs_testlib.load.load_config import LoadScenario


-class OperationMetric(ABC):
-    _NAME = ""
-    _SUCCESS = ""
-    _ERRORS = ""
-    _THROUGHPUT = ""
-    _LATENCY = ""
+class MetricsBase(ABC):
+    _WRITE_SUCCESS = ""
+    _WRITE_ERRORS = ""
+    _WRITE_THROUGHPUT = "data_sent"
+
+    _READ_SUCCESS = ""
+    _READ_ERRORS = ""
+    _READ_THROUGHPUT = "data_received"
+
+    _DELETE_SUCCESS = ""
+    _DELETE_ERRORS = ""

     def __init__(self, summary) -> None:
         self.summary = summary
         self.metrics = summary["metrics"]

     @property
-    def total_iterations(self) -> int:
-        return self._get_metric(self._SUCCESS) + self._get_metric(self._ERRORS)
+    def write_total_iterations(self) -> int:
+        return self._get_metric(self._WRITE_SUCCESS) + self._get_metric(self._WRITE_ERRORS)

     @property
-    def success_iterations(self) -> int:
-        return self._get_metric(self._SUCCESS)
+    def write_success_iterations(self) -> int:
+        return self._get_metric(self._WRITE_SUCCESS)

     @property
-    def latency(self) -> dict:
-        return self._get_metric(self._LATENCY)
+    def write_rate(self) -> float:
+        return self._get_metric_rate(self._WRITE_SUCCESS)

     @property
-    def rate(self) -> float:
-        return self._get_metric_rate(self._SUCCESS)
+    def write_failed_iterations(self) -> int:
+        return self._get_metric(self._WRITE_ERRORS)

     @property
-    def failed_iterations(self) -> int:
-        return self._get_metric(self._ERRORS)
+    def write_throughput(self) -> float:
+        return self._get_metric_rate(self._WRITE_THROUGHPUT)

     @property
-    def throughput(self) -> float:
-        return self._get_metric_rate(self._THROUGHPUT)
+    def read_total_iterations(self) -> int:
+        return self._get_metric(self._READ_SUCCESS) + self._get_metric(self._READ_ERRORS)

     @property
-    def total_bytes(self) -> float:
-        return self._get_metric(self._THROUGHPUT)
+    def read_success_iterations(self) -> int:
+        return self._get_metric(self._READ_SUCCESS)
+
+    @property
+    def read_rate(self) -> int:
+        return self._get_metric_rate(self._READ_SUCCESS)
+
+    @property
+    def read_failed_iterations(self) -> int:
+        return self._get_metric(self._READ_ERRORS)
+
+    @property
+    def read_throughput(self) -> float:
+        return self._get_metric_rate(self._READ_THROUGHPUT)
+
+    @property
+    def delete_total_iterations(self) -> int:
+        return self._get_metric(self._DELETE_SUCCESS) + self._get_metric(self._DELETE_ERRORS)
+
+    @property
+    def delete_success_iterations(self) -> int:
+        return self._get_metric(self._DELETE_SUCCESS)
+
+    @property
+    def delete_failed_iterations(self) -> int:
+        return self._get_metric(self._DELETE_ERRORS)
+
+    @property
+    def delete_rate(self) -> int:
+        return self._get_metric_rate(self._DELETE_SUCCESS)

     def _get_metric(self, metric: str) -> int:
-        metrics_method_map = {
-            "counter": self._get_counter_metric,
-            "gauge": self._get_gauge_metric,
-            "trend": self._get_trend_metrics,
-        }
+        metrics_method_map = {"counter": self._get_counter_metric, "gauge": self._get_gauge_metric}

         if metric not in self.metrics:
             return 0
@@ -56,7 +85,9 @@ class OperationMetric(ABC):
         metric = self.metrics[metric]
         metric_type = metric["type"]
         if metric_type not in metrics_method_map:
-            raise Exception(f"Unsupported metric type: {metric_type}, supported: {metrics_method_map.keys()}")
+            raise Exception(
+                f"Unsupported metric type: {metric_type}, supported: {metrics_method_map.keys()}"
+            )

         return metrics_method_map[metric_type](metric)
@@ -69,7 +100,9 @@ class OperationMetric(ABC):
         metric = self.metrics[metric]
         metric_type = metric["type"]
         if metric_type not in metrics_method_map:
-            raise Exception(f"Unsupported rate metric type: {metric_type}, supported: {metrics_method_map.keys()}")
+            raise Exception(
+                f"Unsupported rate metric type: {metric_type}, supported: {metrics_method_map.keys()}"
+            )

         return metrics_method_map[metric_type](metric)
@@ -82,149 +115,38 @@ class OperationMetric(ABC):
     def _get_gauge_metric(self, metric: str) -> int:
         return metric["values"]["value"]

-    def _get_trend_metrics(self, metric: str) -> int:
-        return metric["values"]
-
-
-class WriteOperationMetric(OperationMetric):
-    _NAME = "Write"
-    _SUCCESS = ""
-    _ERRORS = ""
-    _THROUGHPUT = "data_sent"
-    _LATENCY = ""
-
-
-class ReadOperationMetric(OperationMetric):
-    _NAME = "Read"
-    _SUCCESS = ""
-    _ERRORS = ""
-    _THROUGHPUT = "data_received"
-    _LATENCY = ""
-
-
-class DeleteOperationMetric(OperationMetric):
-    _NAME = "Delete"
-    _SUCCESS = ""
-    _ERRORS = ""
-    _THROUGHPUT = ""
-    _LATENCY = ""
-
-
-class GrpcWriteOperationMetric(WriteOperationMetric):
-    _SUCCESS = "frostfs_obj_put_success"
-    _ERRORS = "frostfs_obj_put_fails"
-    _LATENCY = "frostfs_obj_put_duration"
-
-
-class GrpcReadOperationMetric(ReadOperationMetric):
-    _SUCCESS = "frostfs_obj_get_success"
-    _ERRORS = "frostfs_obj_get_fails"
-    _LATENCY = "frostfs_obj_get_duration"
-
-
-class GrpcDeleteOperationMetric(DeleteOperationMetric):
-    _SUCCESS = "frostfs_obj_delete_success"
-    _ERRORS = "frostfs_obj_delete_fails"
-    _LATENCY = "frostfs_obj_delete_duration"
-
-
-class S3WriteOperationMetric(WriteOperationMetric):
-    _SUCCESS = "aws_obj_put_success"
-    _ERRORS = "aws_obj_put_fails"
-    _LATENCY = "aws_obj_put_duration"
-
-
-class S3ReadOperationMetric(ReadOperationMetric):
-    _SUCCESS = "aws_obj_get_success"
-    _ERRORS = "aws_obj_get_fails"
-    _LATENCY = "aws_obj_get_duration"
-
-
-class S3DeleteOperationMetric(DeleteOperationMetric):
-    _SUCCESS = "aws_obj_delete_success"
-    _ERRORS = "aws_obj_delete_fails"
-    _LATENCY = "aws_obj_delete_duration"
-
-
-class S3LocalWriteOperationMetric(WriteOperationMetric):
-    _SUCCESS = "s3local_obj_put_success"
-    _ERRORS = "s3local_obj_put_fails"
-    _LATENCY = "s3local_obj_put_duration"
-
-
-class S3LocalReadOperationMetric(ReadOperationMetric):
-    _SUCCESS = "s3local_obj_get_success"
-    _ERRORS = "s3local_obj_get_fails"
-    _LATENCY = "s3local_obj_get_duration"
-
-
-class LocalWriteOperationMetric(WriteOperationMetric):
-    _SUCCESS = "local_obj_put_success"
-    _ERRORS = "local_obj_put_fails"
-    _LATENCY = "local_obj_put_duration"
-
-
-class LocalReadOperationMetric(ReadOperationMetric):
-    _SUCCESS = "local_obj_get_success"
-    _ERRORS = "local_obj_get_fails"
-
-
-class LocalDeleteOperationMetric(DeleteOperationMetric):
-    _SUCCESS = "local_obj_delete_success"
-    _ERRORS = "local_obj_delete_fails"
-
-
-class VerifyReadOperationMetric(ReadOperationMetric):
-    _SUCCESS = "verified_obj"
-    _ERRORS = "invalid_obj"
-
-
-class MetricsBase(ABC):
-    def __init__(self) -> None:
-        self.write: Optional[WriteOperationMetric] = None
-        self.read: Optional[ReadOperationMetric] = None
-        self.delete: Optional[DeleteOperationMetric] = None
-
-    @property
-    def operations(self) -> list[OperationMetric]:
-        return [metric for metric in [self.write, self.read, self.delete] if metric is not None]
 class GrpcMetrics(MetricsBase):
-    def __init__(self, summary) -> None:
-        super().__init__()
-        self.write = GrpcWriteOperationMetric(summary)
-        self.read = GrpcReadOperationMetric(summary)
-        self.delete = GrpcDeleteOperationMetric(summary)
+    _WRITE_SUCCESS = "frostfs_obj_put_total"
+    _WRITE_ERRORS = "frostfs_obj_put_fails"
+
+    _READ_SUCCESS = "frostfs_obj_get_total"
+    _READ_ERRORS = "frostfs_obj_get_fails"
+
+    _DELETE_SUCCESS = "frostfs_obj_delete_total"
+    _DELETE_ERRORS = "frostfs_obj_delete_fails"


 class S3Metrics(MetricsBase):
-    def __init__(self, summary) -> None:
-        super().__init__()
-        self.write = S3WriteOperationMetric(summary)
-        self.read = S3ReadOperationMetric(summary)
-        self.delete = S3DeleteOperationMetric(summary)
+    _WRITE_SUCCESS = "aws_obj_put_total"
+    _WRITE_ERRORS = "aws_obj_put_fails"
+
+    _READ_SUCCESS = "aws_obj_get_total"
+    _READ_ERRORS = "aws_obj_get_fails"
+
+    _DELETE_SUCCESS = "aws_obj_delete_total"
+    _DELETE_ERRORS = "aws_obj_delete_fails"


-class S3LocalMetrics(MetricsBase):
-    def __init__(self, summary) -> None:
-        super().__init__()
-        self.write = S3LocalWriteOperationMetric(summary)
-        self.read = S3LocalReadOperationMetric(summary)
-
-
-class LocalMetrics(MetricsBase):
-    def __init__(self, summary) -> None:
-        super().__init__()
-        self.write = LocalWriteOperationMetric(summary)
-        self.read = LocalReadOperationMetric(summary)
-        self.delete = LocalDeleteOperationMetric(summary)


 class VerifyMetrics(MetricsBase):
-    def __init__(self, summary) -> None:
-        super().__init__()
-        self.read = VerifyReadOperationMetric(summary)
+    _WRITE_SUCCESS = "N/A"
+    _WRITE_ERRORS = "N/A"
+
+    _READ_SUCCESS = "verified_obj"
+    _READ_ERRORS = "invalid_obj"
+
+    _DELETE_SUCCESS = "N/A"
+    _DELETE_ERRORS = "N/A"


 def get_metrics_object(load_type: LoadScenario, summary: dict[str, Any]) -> MetricsBase:

@@ -234,10 +156,7 @@ def get_metrics_object(load_type: LoadScenario, summary: dict[str, Any]) -> MetricsBase:
         LoadScenario.HTTP: GrpcMetrics,
         LoadScenario.S3: S3Metrics,
         LoadScenario.S3_CAR: S3Metrics,
-        LoadScenario.S3_MULTIPART: S3Metrics,
-        LoadScenario.S3_LOCAL: S3LocalMetrics,
         LoadScenario.VERIFY: VerifyMetrics,
-        LoadScenario.LOCAL: LocalMetrics,
     }

     return class_map[load_type](summary)
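A usage sketch for the factory above, using the accessor names restored on the right-hand side of this hunk (the summary variable is hypothetical):

metrics = get_metrics_object(LoadScenario.S3, summary)
total_writes = metrics.write_success_iterations + metrics.write_failed_iterations
print(f"writes: {total_writes}, read throughput: {metrics.read_throughput:.2f} B/s")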

View file

@@ -1,11 +1,10 @@
 from datetime import datetime
-from typing import Optional
+from typing import Optional, Tuple

 import yaml

-from frostfs_testlib.load.interfaces.summarized import SummarizedStats
 from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario
-from frostfs_testlib.utils.converting_utils import calc_unit
+from frostfs_testlib.load.load_metrics import get_metrics_object


 class LoadReport:

@@ -17,15 +16,11 @@ class LoadReport:
         self.start_time: Optional[datetime] = None
         self.end_time: Optional[datetime] = None

-    def set_start_time(self, time: datetime = None):
-        if time is None:
-            time = datetime.utcnow()
-        self.start_time = time
+    def set_start_time(self):
+        self.start_time = datetime.utcnow()

-    def set_end_time(self, time: datetime = None):
-        if time is None:
-            time = datetime.utcnow()
-        self.end_time = time
+    def set_end_time(self):
+        self.end_time = datetime.utcnow()

     def add_summaries(self, load_summaries: dict):
         self.load_summaries_list.append(load_summaries)

@@ -35,7 +30,6 @@ class LoadReport:
     def get_report_html(self):
         report_sections = [
-            [self.load_params, self._get_load_id_section_html],
             [self.load_test, self._get_load_params_section_html],
             [self.load_summaries_list, self._get_totals_section_html],
             [self.end_time, self._get_test_time_html],

@@ -49,8 +43,8 @@ class LoadReport:
         return html

     def _get_load_params_section_html(self) -> str:
-        params: str = yaml.safe_dump([self.load_test], sort_keys=False, indent=2, explicit_start=True)
-        params = params.replace("\n", "<br>").replace(" ", "&nbsp;")
+        params: str = yaml.safe_dump(self.load_test, sort_keys=False)
+        params = params.replace("\n", "<br>")
         section_html = f"""<h3>Scenario params</h3>

         <pre>{params}</pre>

@@ -58,23 +52,25 @@ class LoadReport:
         return section_html

-    def _get_load_id_section_html(self) -> str:
-        section_html = f"""<h3>Load ID: {self.load_params.load_id}</h3>
-        <hr>"""
-
-        return section_html

     def _get_test_time_html(self) -> str:
-        if not self.start_time or not self.end_time:
-            return ""
-
-        html = f"""<h3>Scenario duration</h3>
+        html = f"""<h3>Scenario duration in UTC time (from agent)</h3>
         {self.start_time} - {self.end_time}<br>
         <hr>
         """

         return html
def _calc_unit(self, value: float, skip_units: int = 0) -> Tuple[float, str]:
units = ["B", "KiB", "MiB", "GiB", "TiB"]
for unit in units[skip_units:]:
if value < 1024:
return value, unit
value = value / 1024.0
return value, unit
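A few worked values for the helper above, derived from the loop itself rather than from any test run:

# self._calc_unit(512)   -> (512, "B")
# self._calc_unit(2048)  -> (2.0, "KiB")
# self._calc_unit(8, 1)  -> (8, "KiB")  # skip_units=1 treats the input as already in KiB,
#                                       # which is how object_size is passed below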
     def _seconds_to_formatted_duration(self, seconds: int) -> str:
         """Converts N number of seconds to formatted output ignoring zeroes.

         Examples:
@@ -104,62 +100,57 @@ class LoadReport:
         model_map = {
             LoadScenario.gRPC: "closed model",
             LoadScenario.S3: "closed model",
-            LoadScenario.S3_MULTIPART: "closed model",
             LoadScenario.HTTP: "closed model",
             LoadScenario.gRPC_CAR: "open model",
             LoadScenario.S3_CAR: "open model",
-            LoadScenario.LOCAL: "local fill",
-            LoadScenario.S3_LOCAL: "local fill",
         }

         return model_map[self.load_params.scenario]

-    def _get_operations_sub_section_html(self, operation_type: str, stats: SummarizedStats):
+    def _get_oprations_sub_section_html(
+        self,
+        operation_type: str,
+        total_operations: int,
+        requested_rate_str: str,
+        vus_str: str,
+        total_rate: float,
+        throughput: float,
+        errors: dict[str, int],
+    ):
         throughput_html = ""
-        if stats.throughput > 0:
-            throughput, unit = calc_unit(stats.throughput)
+        if throughput > 0:
+            throughput, unit = self._calc_unit(throughput)
             throughput_html = self._row("Throughput", f"{throughput:.2f} {unit}/sec")

-        bytes_html = ""
-        if stats.total_bytes > 0:
-            total_bytes, total_bytes_unit = calc_unit(stats.total_bytes)
-            bytes_html = self._row("Total transferred", f"{total_bytes:.2f} {total_bytes_unit}")

         per_node_errors_html = ""
-        for node_key, errors in stats.errors.by_node.items():
-            if self.load_params.k6_process_allocation_strategy == K6ProcessAllocationStrategy.PER_ENDPOINT:
+        total_errors = 0
+        if errors:
+            total_errors: int = 0
+            for node_key, errors in errors.items():
+                total_errors += errors
+                if (
+                    self.load_params.k6_process_allocation_strategy
+                    == K6ProcessAllocationStrategy.PER_ENDPOINT
+                ):
                     per_node_errors_html += self._row(f"At {node_key}", errors)

-        latency_html = ""
-        for node_key, latencies in stats.latencies.by_node.items():
-            latency_values = "N/A"
-            if latencies:
-                latency_values = ""
-                for param_name, param_val in latencies.items():
-                    latency_values += f"{param_name}={param_val:.2f}ms "
-
-            latency_html += self._row(f"{operation_type} latency {node_key.split(':')[0]}", latency_values)
-
-        object_size, object_size_unit = calc_unit(self.load_params.object_size, 1)
+        object_size, object_size_unit = self._calc_unit(self.load_params.object_size, 1)
         duration = self._seconds_to_formatted_duration(self.load_params.load_time)
         model = self._get_model_string()
-        requested_rate_str = f"{stats.requested_rate}op/sec" if stats.requested_rate else ""
         # write 8KB 15h49m 50op/sec 50th open model/closed model/min_iteration duration=1s - 1.636MB/s 199.57451/s
-        short_summary = f"{operation_type} {object_size}{object_size_unit} {duration} {requested_rate_str} {stats.threads}th {model} - {throughput:.2f}{unit}/s {stats.rate:.2f}/s"
+        short_summary = f"{operation_type} {object_size}{object_size_unit} {duration} {requested_rate_str} {vus_str} {model} - {throughput:.2f}{unit} {total_rate:.2f}/s"

         html = f"""
         <table border="1" cellpadding="5px"><tbody>
         <tr><th colspan="2" bgcolor="gainsboro">{short_summary}</th></tr>
         <tr><th colspan="2" bgcolor="gainsboro">Metrics</th></tr>
-        {self._row("Total operations", stats.operations)}
-        {self._row("OP/sec", f"{stats.rate:.2f}")}
-        {bytes_html}
+        {self._row("Total operations", total_operations)}
+        {self._row("OP/sec", f"{total_rate:.2f}")}
         {throughput_html}
-        {latency_html}
         <tr><th colspan="2" bgcolor="gainsboro">Errors</th></tr>
         {per_node_errors_html}
-        {self._row("Total", f"{stats.errors.total} ({stats.errors.percent:.2f}%)")}
-        {self._row("Threshold", f"{stats.errors.threshold:.2f}%")}
+        {self._row("Total", f"{total_errors} ({total_errors/total_operations*100.0:.2f}%)")}
         </tbody></table><br><hr>
         """
@@ -167,12 +158,112 @@ class LoadReport:
     def _get_totals_section_html(self):
         html = ""
-        for i in range(len(self.load_summaries_list)):
-            html += f"<h3>Load Results for load #{i+1}</h3>"
-
-            summarized = SummarizedStats.collect(self.load_params, self.load_summaries_list[i])
-            for operation_type, stats in summarized.items():
-                if stats.operations:
-                    html += self._get_operations_sub_section_html(operation_type, stats)
+        for i, load_summaries in enumerate(self.load_summaries_list, 1):
+            html += f"<h3>Load Results for load #{i}</h3>"
+
+            write_operations = 0
+            write_op_sec = 0
+            write_throughput = 0
+            write_errors = {}
+            requested_write_rate = self.load_params.write_rate
+            requested_write_rate_str = (
+                f"{requested_write_rate}op/sec" if requested_write_rate else ""
+            )
+
+            read_operations = 0
+            read_op_sec = 0
+            read_throughput = 0
+            read_errors = {}
+            requested_read_rate = self.load_params.read_rate
+            requested_read_rate_str = f"{requested_read_rate}op/sec" if requested_read_rate else ""
+
+            delete_operations = 0
+            delete_op_sec = 0
+            delete_errors = {}
+            requested_delete_rate = self.load_params.delete_rate
+            requested_delete_rate_str = (
+                f"{requested_delete_rate}op/sec" if requested_delete_rate else ""
+            )
+
+            if self.load_params.scenario in [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR]:
+                delete_vus = max(
+                    self.load_params.preallocated_deleters or 0, self.load_params.max_deleters or 0
+                )
+                write_vus = max(
+                    self.load_params.preallocated_writers or 0, self.load_params.max_writers or 0
+                )
+                read_vus = max(
+                    self.load_params.preallocated_readers or 0, self.load_params.max_readers or 0
+                )
+            else:
+                write_vus = self.load_params.writers
+                read_vus = self.load_params.readers
+                delete_vus = self.load_params.deleters
+
+            write_vus_str = f"{write_vus}th"
+            read_vus_str = f"{read_vus}th"
+            delete_vus_str = f"{delete_vus}th"
+
+            write_section_required = False
+            read_section_required = False
+            delete_section_required = False
+
+            for node_key, load_summary in load_summaries.items():
+                metrics = get_metrics_object(self.load_params.scenario, load_summary)
+                write_operations += metrics.write_total_iterations
+                if write_operations:
+                    write_section_required = True
+                    write_op_sec += metrics.write_rate
+                    write_throughput += metrics.write_throughput
+                    if metrics.write_failed_iterations:
+                        write_errors[node_key] = metrics.write_failed_iterations
+
+                read_operations += metrics.read_total_iterations
+                if read_operations:
+                    read_section_required = True
+                    read_op_sec += metrics.read_rate
+                    read_throughput += metrics.read_throughput
+                    if metrics.read_failed_iterations:
+                        read_errors[node_key] = metrics.read_failed_iterations
+
+                delete_operations += metrics.delete_total_iterations
+                if delete_operations:
+                    delete_section_required = True
+                    delete_op_sec += metrics.delete_rate
+                    if metrics.delete_failed_iterations:
+                        delete_errors[node_key] = metrics.delete_failed_iterations
+
+            if write_section_required:
+                html += self._get_oprations_sub_section_html(
+                    "Write",
+                    write_operations,
+                    requested_write_rate_str,
+                    write_vus_str,
+                    write_op_sec,
+                    write_throughput,
+                    write_errors,
+                )
+
+            if read_section_required:
+                html += self._get_oprations_sub_section_html(
+                    "Read",
+                    read_operations,
+                    requested_read_rate_str,
+                    read_vus_str,
+                    read_op_sec,
+                    read_throughput,
+                    read_errors,
+                )
+
+            if delete_section_required:
+                html += self._get_oprations_sub_section_html(
+                    "Delete",
+                    delete_operations,
+                    requested_delete_rate_str,
+                    delete_vus_str,
+                    delete_op_sec,
+                    0,
+                    delete_errors,
+                )

         return html

View file

@@ -0,0 +1,191 @@
import copy
import itertools
import math
import re
from dataclasses import fields
from frostfs_testlib.cli import FrostfsAuthmate
from frostfs_testlib.load.k6 import K6
from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC
from frostfs_testlib.resources.load_params import (
BACKGROUND_LOAD_VUS_COUNT_DIVISOR,
LOAD_NODE_SSH_USER,
)
from frostfs_testlib.shell import CommandOptions, SSHShell
from frostfs_testlib.shell.interfaces import InteractiveInput, SshCredentials
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
reporter = get_reporter()
STOPPED_HOSTS = []
@reporter.step_deco("Init s3 client on load nodes")
def init_s3_client(
load_nodes: list[str],
load_params: LoadParams,
k6_directory: str,
ssh_credentials: SshCredentials,
nodes_under_load: list[ClusterNode],
wallet: WalletInfo,
):
storage_node = nodes_under_load[0].service(StorageNode)
s3_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in nodes_under_load]
grpc_peer = storage_node.get_rpc_endpoint()
for load_node in load_nodes:
ssh_client = _get_shell(ssh_credentials, load_node)
frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(ssh_client, FROSTFS_AUTHMATE_EXEC)
issue_secret_output = frostfs_authmate_exec.secret.issue(
wallet=wallet.path,
peer=grpc_peer,
bearer_rules=f"{k6_directory}/scenarios/files/rules.json",
gate_public_key=s3_public_keys,
container_placement_policy=load_params.preset.container_placement_policy,
container_policy=f"{k6_directory}/scenarios/files/policy.json",
wallet_password=wallet.password,
).stdout
aws_access_key_id = str(
re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group(
"aws_access_key_id"
)
)
aws_secret_access_key = str(
re.search(
r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output
).group("aws_secret_access_key")
)
# prompt_pattern doesn't work at the moment
configure_input = [
InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id),
InteractiveInput(
prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key
),
InteractiveInput(prompt_pattern=r".*", input=""),
InteractiveInput(prompt_pattern=r".*", input=""),
]
ssh_client.exec("aws configure", CommandOptions(interactive_inputs=configure_input))
@reporter.step_deco("Prepare K6 instances and objects")
def prepare_k6_instances(
load_nodes: list[str],
ssh_credentials: SshCredentials,
k6_dir: str,
load_params: LoadParams,
endpoints: list[str],
loaders_wallet: WalletInfo,
) -> list[K6]:
k6_load_objects: list[K6] = []
nodes = itertools.cycle(load_nodes)
k6_distribution_count = {
K6ProcessAllocationStrategy.PER_LOAD_NODE: len(load_nodes),
K6ProcessAllocationStrategy.PER_ENDPOINT: len(endpoints),
}
endpoints_generators = {
K6ProcessAllocationStrategy.PER_LOAD_NODE: itertools.cycle([endpoints]),
K6ProcessAllocationStrategy.PER_ENDPOINT: itertools.cycle(
[[endpoint] for endpoint in endpoints]
),
}
k6_processes_count = k6_distribution_count[load_params.k6_process_allocation_strategy]
endpoints_gen = endpoints_generators[load_params.k6_process_allocation_strategy]
distributed_load_params_list = _get_distributed_load_params_list(
load_params, k6_processes_count
)
for distributed_load_params in distributed_load_params_list:
load_node = next(nodes)
shell = _get_shell(ssh_credentials, load_node)
# Make working_dir directory
shell.exec(f"sudo mkdir -p {distributed_load_params.working_dir}")
shell.exec(f"sudo chown {LOAD_NODE_SSH_USER} {distributed_load_params.working_dir}")
k6_load_object = K6(
distributed_load_params,
next(endpoints_gen),
k6_dir,
shell,
load_node,
loaders_wallet,
)
k6_load_objects.append(k6_load_object)
if load_params.preset:
k6_load_object.preset()
return k6_load_objects
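To illustrate the two allocation strategies handled above (node and endpoint names below are hypothetical):

# With load_nodes = ["ln-1", "ln-2"] and endpoints = ["e1", "e2", "e3", "e4"]:
#   PER_LOAD_NODE -> 2 k6 processes, one per node, each given the full endpoint list.
#   PER_ENDPOINT  -> 4 k6 processes, one per endpoint, cycled over the nodes:
#                    ln-1: ["e1"], ln-2: ["e2"], ln-1: ["e3"], ln-2: ["e4"]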
def _get_shell(ssh_credentials: SshCredentials, load_node: str) -> SSHShell:
ssh_client = SSHShell(
host=load_node,
login=ssh_credentials.ssh_login,
password=ssh_credentials.ssh_password,
private_key_path=ssh_credentials.ssh_key_path,
private_key_passphrase=ssh_credentials.ssh_key_passphrase,
)
return ssh_client
def _get_distributed_load_params_list(
original_load_params: LoadParams, workers_count: int
) -> list[LoadParams]:
divisor = int(BACKGROUND_LOAD_VUS_COUNT_DIVISOR)
distributed_load_params: list[LoadParams] = []
for i in range(workers_count):
load_params = copy.deepcopy(original_load_params)
# Append #i here in case multiple k6 processes go to the same load node
load_params.set_id(f"{load_params.load_id}_{i}")
distributed_load_params.append(load_params)
load_fields = fields(original_load_params)
for field in load_fields:
if (
field.metadata
and original_load_params.scenario in field.metadata["applicable_scenarios"]
and field.metadata["distributed"]
and getattr(original_load_params, field.name) is not None
):
original_value = getattr(original_load_params, field.name)
distribution = _get_distribution(math.ceil(original_value / divisor), workers_count)
for i in range(workers_count):
setattr(distributed_load_params[i], field.name, distribution[i])
return distributed_load_params
def _get_distribution(clients_count: int, workers_count: int) -> list[int]:
"""
This function will distribute evenly as possible X clients to Y workers.
For example if we have 150 readers (clients) and we want to spread it over 4 load nodes (workers)
this will return [38, 38, 37, 37].
Args:
clients_count: amount of things needs to be distributed.
workers_count: amount of workers.
Returns:
list of distribution.
"""
if workers_count < 1:
raise Exception("Workers cannot be less then 1")
# Amount of guaranteed payload on one worker
clients_per_worker = clients_count // workers_count
# Remainder of clients left to be distributed
remainder = clients_count - clients_per_worker * workers_count
distribution = [
clients_per_worker + 1 if i < remainder else clients_per_worker
for i in range(workers_count)
]
return distribution
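A quick sanity check of the helper, mirroring the docstring example:

assert _get_distribution(150, 4) == [38, 38, 37, 37]
assert _get_distribution(3, 5) == [1, 1, 1, 0, 0]
assert sum(_get_distribution(150, 4)) == 150  # nothing is lost or double-counted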

View file

@@ -1,68 +1,63 @@
-from frostfs_testlib import reporter
-from frostfs_testlib.load.interfaces.summarized import SummarizedStats
+import logging
+
 from frostfs_testlib.load.load_config import LoadParams, LoadScenario
 from frostfs_testlib.load.load_metrics import get_metrics_object

+logger = logging.getLogger("NeoLogger")
+

 class LoadVerifier:
     def __init__(self, load_params: LoadParams) -> None:
         self.load_params = load_params

-    def collect_load_issues(self, load_summaries: dict[str, dict]) -> list[str]:
-        summarized = SummarizedStats.collect(self.load_params, load_summaries)
-        issues = []
-
-        for operation_type, stats in summarized.items():
-            if stats.threads and not stats.operations:
-                issues.append(f"No any {operation_type.lower()} operation was performed")
-            if stats.errors.percent > stats.errors.threshold:
-                rate_str = self._get_rate_str(stats.errors.percent)
-                issues.append(f"{operation_type} errors exceeded threshold: {rate_str} > {stats.errors.threshold}%")
-
-        return issues
-
-    def collect_verify_issues(self, load_summaries, verification_summaries) -> list[str]:
-        verify_issues: list[str] = []
-        for k6_process_label in load_summaries:
-            with reporter.step(f"Check verify scenario results for {k6_process_label}"):
-                verify_issues.extend(
-                    self._collect_verify_issues_on_process(
-                        k6_process_label,
-                        load_summaries[k6_process_label],
-                        verification_summaries[k6_process_label],
-                    )
-                )
-        return verify_issues
-
-    def _get_rate_str(self, rate: float, minimal: float = 0.01) -> str:
-        return f"{rate:.2f}%" if rate >= minimal else f"~{minimal}%"
-
-    def _collect_verify_issues_on_process(self, label, load_summary, verification_summary) -> list[str]:
-        issues = []
+    def verify_summaries(self, load_summary, verification_summary) -> None:
+        exceptions = []
+
+        if not verification_summary or not load_summary:
+            logger.info("Can't check load results due to missing summary")

         load_metrics = get_metrics_object(self.load_params.scenario, load_summary)

         writers = self.load_params.writers or self.load_params.preallocated_writers or 0
+        readers = self.load_params.readers or self.load_params.preallocated_readers or 0
         deleters = self.load_params.deleters or self.load_params.preallocated_deleters or 0

-        delete_success = 0
+        objects_count = load_metrics.write_success_iterations
+        fails_count = load_metrics.write_failed_iterations
+
+        if writers > 0:
+            if objects_count < 1:
+                exceptions.append("Total put objects should be greater than 0")
+            if fails_count > 0:
+                exceptions.append(f"There were {fails_count} failed write operations")
+
+        if readers > 0:
+            read_count = load_metrics.read_success_iterations
+            read_fails_count = load_metrics.read_failed_iterations
+            if read_count < 1:
+                exceptions.append("Total read operations should be greater than 0")
+            if read_fails_count > 0:
+                exceptions.append(f"There were {read_fails_count} failed read operations")

         if deleters > 0:
-            delete_success = load_metrics.delete.success_iterations
+            delete_count = load_metrics.delete_success_iterations
+            delete_fails_count = load_metrics.delete_failed_iterations
+            if delete_count < 1:
+                exceptions.append("Total delete operations should be greater than 0")
+            if delete_fails_count > 0:
+                exceptions.append(f"There were {delete_fails_count} failed delete operations")

         if verification_summary:
             verify_metrics = get_metrics_object(LoadScenario.VERIFY, verification_summary)
-            verified_objects = verify_metrics.read.success_iterations
-            invalid_objects = verify_metrics.read.failed_iterations
-            total_left_objects = load_metrics.write.success_iterations - delete_success
+            verified_objects = verify_metrics.read_success_iterations
+            invalid_objects = verify_metrics.read_failed_iterations

             if invalid_objects > 0:
-                issues.append(f"There were {invalid_objects} verification fails (hash mismatch).")
+                exceptions.append(f"There were {invalid_objects} verification fails")
             # Due to interruptions we may see total verified objects to be less than written on writers count
-            if abs(total_left_objects - verified_objects) > writers:
-                issues.append(
-                    f"Verified objects mismatch for {label}. Total: {total_left_objects}, Verified: {verified_objects}. Writers: {writers}."
+            if abs(objects_count - verified_objects) > writers:
+                exceptions.append(
+                    f"Verified objects mismatch. Total: {objects_count}, Verified: {verified_objects}. Writers: {writers}."
                 )

-        return issues
+        assert not exceptions, "\n".join(exceptions)

View file

@@ -1,60 +0,0 @@
from frostfs_testlib.load.interfaces.loader import Loader
from frostfs_testlib.resources.load_params import (
LOAD_NODE_SSH_PASSWORD,
LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE,
LOAD_NODE_SSH_PRIVATE_KEY_PATH,
LOAD_NODE_SSH_USER,
)
from frostfs_testlib.shell.interfaces import Shell, SshCredentials
from frostfs_testlib.shell.ssh_shell import SSHShell
from frostfs_testlib.storage.cluster import ClusterNode
class RemoteLoader(Loader):
def __init__(self, ssh_credentials: SshCredentials, ip: str) -> None:
self.ssh_credentials = ssh_credentials
self._ip = ip
@property
def ip(self):
return self._ip
def get_shell(self) -> Shell:
ssh_client = SSHShell(
host=self.ip,
login=self.ssh_credentials.ssh_login,
password=self.ssh_credentials.ssh_password,
private_key_path=self.ssh_credentials.ssh_key_path,
private_key_passphrase=self.ssh_credentials.ssh_key_passphrase,
)
return ssh_client
@classmethod
def from_ip_list(cls, ip_list: list[str]) -> list[Loader]:
loaders: list[Loader] = []
ssh_credentials = SshCredentials(
LOAD_NODE_SSH_USER,
LOAD_NODE_SSH_PASSWORD,
LOAD_NODE_SSH_PRIVATE_KEY_PATH,
LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE,
)
for ip in ip_list:
loaders.append(RemoteLoader(ssh_credentials, ip))
return loaders
class NodeLoader(Loader):
"""When ClusterNode is the loader for itself (for Local scenario only)."""
def __init__(self, cluster_node: ClusterNode) -> None:
self.cluster_node = cluster_node
def get_shell(self) -> Shell:
return self.cluster_node.host.get_shell()
@property
def ip(self):
return self.cluster_node.host_ip

View file

@@ -1,466 +0,0 @@
import copy
import itertools
import math
import time
from dataclasses import fields
from threading import Event
from typing import Optional
from urllib.parse import urlparse
from frostfs_testlib import reporter
from frostfs_testlib.credentials.interfaces import S3Credentials, User
from frostfs_testlib.load.interfaces.loader import Loader
from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner
from frostfs_testlib.load.k6 import K6
from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadType
from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader
from frostfs_testlib.resources import optionals
from frostfs_testlib.resources.common import STORAGE_USER_NAME
from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_VUS_COUNT_DIVISOR, LOAD_NODE_SSH_USER, LOAD_NODES
from frostfs_testlib.shell.command_inspectors import SuInspector
from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode
from frostfs_testlib.testing import parallel, run_optionally
from frostfs_testlib.testing.test_control import retry
from frostfs_testlib.utils import datetime_utils
from frostfs_testlib.utils.file_keeper import FileKeeper
class RunnerBase(ScenarioRunner):
k6_instances: list[K6]
loaders: list[Loader]
@reporter.step("Run preset on loaders")
def preset(self):
parallel([k6.preset for k6 in self.k6_instances])
@reporter.step("Wait until load finish")
def wait_until_finish(self, soft_timeout: int = 0):
event = Event()
parallel([k6.wait_until_finished for k6 in self.k6_instances], event=event, soft_timeout=soft_timeout)
@property
def is_running(self):
futures = parallel([k6.is_running for k6 in self.k6_instances])
return any([future.result() for future in futures])
def get_k6_instances(self):
return self.k6_instances
def get_loaders(self) -> list[Loader]:
return self.loaders
class DefaultRunner(RunnerBase):
user: User
def __init__(
self,
user: User,
load_ip_list: Optional[list[str]] = None,
) -> None:
if load_ip_list is None:
load_ip_list = LOAD_NODES
self.loaders = RemoteLoader.from_ip_list(load_ip_list)
self.user = user
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@reporter.step("Preparation steps")
def prepare(
self,
load_params: LoadParams,
cluster_nodes: list[ClusterNode],
nodes_under_load: list[ClusterNode],
k6_dir: str,
):
if load_params.force_fresh_registry and load_params.custom_registry:
with reporter.step("Forcing fresh registry files"):
parallel(self._force_fresh_registry, self.loaders, load_params)
if load_params.load_type != LoadType.S3:
return
with reporter.step("Init s3 client on loaders"):
s3_credentials = self.user.s3_credentials
parallel(self._aws_configure_on_loader, self.loaders, s3_credentials)
def _force_fresh_registry(self, loader: Loader, load_params: LoadParams):
with reporter.step(f"Forcing fresh registry on {loader.ip}"):
shell = loader.get_shell()
shell.exec(f"rm -f {load_params.registry_file}")
def _aws_configure_on_loader(
self,
loader: Loader,
s3_credentials: S3Credentials,
):
with reporter.step(f"Aws configure on {loader.ip}"):
configure_input = [
InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=s3_credentials.access_key),
InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=s3_credentials.secret_key),
InteractiveInput(prompt_pattern=r".*", input=""),
InteractiveInput(prompt_pattern=r".*", input=""),
]
loader.get_shell().exec("aws configure", CommandOptions(interactive_inputs=configure_input))
@reporter.step("Init k6 instances")
def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
self.k6_instances = []
cycled_loaders = itertools.cycle(self.loaders)
k6_distribution_count = {
K6ProcessAllocationStrategy.PER_LOAD_NODE: len(self.loaders),
K6ProcessAllocationStrategy.PER_ENDPOINT: len(endpoints),
}
endpoints_generators = {
K6ProcessAllocationStrategy.PER_LOAD_NODE: itertools.cycle([endpoints]),
K6ProcessAllocationStrategy.PER_ENDPOINT: itertools.cycle([[endpoint] for endpoint in endpoints]),
}
k6_processes_count = k6_distribution_count[load_params.k6_process_allocation_strategy]
endpoints_gen = endpoints_generators[load_params.k6_process_allocation_strategy]
distributed_load_params_list = self._get_distributed_load_params_list(load_params, k6_processes_count)
futures = parallel(
self._init_k6_instance,
distributed_load_params_list,
loader=cycled_loaders,
endpoints=endpoints_gen,
k6_dir=k6_dir,
)
self.k6_instances = [future.result() for future in futures]
def _init_k6_instance(self, load_params_for_loader: LoadParams, loader: Loader, endpoints: list[str], k6_dir: str):
shell = loader.get_shell()
with reporter.step(f"Init K6 instance on {loader.ip} for endpoints {endpoints}"):
with reporter.step(f"Make working directory"):
shell.exec(f"sudo mkdir -p {load_params_for_loader.working_dir}")
shell.exec(f"sudo chown {LOAD_NODE_SSH_USER} {load_params_for_loader.working_dir}")
return K6(
load_params_for_loader,
endpoints,
k6_dir,
shell,
loader,
self.user,
)
def _get_distributed_load_params_list(self, original_load_params: LoadParams, workers_count: int) -> list[LoadParams]:
divisor = int(BACKGROUND_LOAD_VUS_COUNT_DIVISOR)
distributed_load_params: list[LoadParams] = []
for i in range(workers_count):
load_params = copy.deepcopy(original_load_params)
# Append #i here in case multiple k6 processes go to the same load node
load_params.set_id(f"{load_params.load_id}_{i}")
distributed_load_params.append(load_params)
load_fields = fields(original_load_params)
for field in load_fields:
if (
field.metadata
and original_load_params.scenario in field.metadata["applicable_scenarios"]
and field.metadata["distributed"]
and getattr(original_load_params, field.name) is not None
):
original_value = getattr(original_load_params, field.name)
distribution = self._get_distribution(math.ceil(original_value / divisor), workers_count)
for i in range(workers_count):
setattr(distributed_load_params[i], field.name, distribution[i])
return distributed_load_params
def _get_distribution(self, clients_count: int, workers_count: int) -> list[int]:
"""
This function will distribute evenly as possible X clients to Y workers.
For example if we have 150 readers (clients) and we want to spread it over 4 load nodes (workers)
this will return [38, 38, 37, 37].
Args:
clients_count: amount of things needs to be distributed.
workers_count: amount of workers.
Returns:
list of distribution.
"""
if workers_count < 1:
raise Exception("Workers cannot be less then 1")
# Amount of guaranteed payload on one worker
clients_per_worker = clients_count // workers_count
# Remainder of clients left to be distributed
remainder = clients_count - clients_per_worker * workers_count
distribution = [clients_per_worker + 1 if i < remainder else clients_per_worker for i in range(workers_count)]
return distribution
def start(self):
load_params = self.k6_instances[0].load_params
parallel([k6.start for k6 in self.k6_instances])
wait_after_start_time = datetime_utils.parse_time(load_params.setup_timeout) + 5
with reporter.step(f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on"):
time.sleep(wait_after_start_time)
def stop(self):
for k6_instance in self.k6_instances:
k6_instance.stop()
def get_results(self) -> dict:
results = {}
for k6_instance in self.k6_instances:
if k6_instance.load_params.k6_process_allocation_strategy is None:
raise RuntimeError("k6_process_allocation_strategy should not be none")
result = k6_instance.get_results()
endpoint = urlparse(k6_instance.endpoints[0]).netloc or k6_instance.endpoints[0]
keys_map = {
K6ProcessAllocationStrategy.PER_LOAD_NODE: k6_instance.loader.ip,
K6ProcessAllocationStrategy.PER_ENDPOINT: endpoint,
}
key = keys_map[k6_instance.load_params.k6_process_allocation_strategy]
results[key] = result
return results
class LocalRunner(RunnerBase):
cluster_state_controller: ClusterStateController
file_keeper: FileKeeper
user: User
def __init__(
self,
cluster_state_controller: ClusterStateController,
file_keeper: FileKeeper,
nodes_under_load: list[ClusterNode],
user: User,
) -> None:
self.cluster_state_controller = cluster_state_controller
self.file_keeper = file_keeper
self.loaders = [NodeLoader(node) for node in nodes_under_load]
self.nodes_under_load = nodes_under_load
self.user = user
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@reporter.step("Preparation steps")
def prepare(
self,
load_params: LoadParams,
cluster_nodes: list[ClusterNode],
nodes_under_load: list[ClusterNode],
k6_dir: str,
):
parallel(self.prepare_node, nodes_under_load, k6_dir, load_params)
@retry(3, 5, expected_result=True)
def allow_user_to_login_in_system(self, cluster_node: ClusterNode):
shell = cluster_node.host.get_shell()
result = None
try:
shell.exec(f"sudo chsh -s /bin/bash {STORAGE_USER_NAME}")
self.lock_passwd_on_node(cluster_node)
options = CommandOptions(check=False, extra_inspectors=[SuInspector(STORAGE_USER_NAME)])
result = shell.exec("whoami", options)
finally:
if not result or result.return_code:
self.restore_passwd_on_node(cluster_node)
return False
return True
@reporter.step("Prepare node {cluster_node}")
def prepare_node(self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams):
shell = cluster_node.host.get_shell()
with reporter.step("Allow storage user to login into system"):
self.allow_user_to_login_in_system(cluster_node)
with reporter.step("Update limits.conf"):
limits_path = "/etc/security/limits.conf"
self.file_keeper.add(cluster_node.storage_node, limits_path)
content = f"{STORAGE_USER_NAME} hard nofile 65536\n{STORAGE_USER_NAME} soft nofile 65536\n"
shell.exec(f"echo '{content}' | sudo tee {limits_path}")
with reporter.step("Download K6"):
shell.exec(f"sudo rm -rf {k6_dir};sudo mkdir {k6_dir}")
shell.exec(f"sudo curl -so {k6_dir}/k6.tar.gz {load_params.k6_url}")
shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz --strip-components 2 -C {k6_dir}")
shell.exec(f"sudo chmod -R 777 {k6_dir}")
with reporter.step("chmod 777 wallet related files on loader"):
shell.exec(f"sudo chmod -R 777 {self.user.wallet.config_path}")
shell.exec(f"sudo chmod -R 777 {self.user.wallet.path}")
@reporter.step("Init k6 instances")
def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
self.k6_instances = []
futures = parallel(
self._init_k6_instance,
self.loaders,
load_params,
k6_dir,
)
self.k6_instances = [future.result() for future in futures]
def _init_k6_instance(self, loader: Loader, load_params: LoadParams, k6_dir: str):
shell = loader.get_shell()
with reporter.step(f"Init K6 instance on {loader.ip}"):
with reporter.step(f"Make working directory"):
shell.exec(f"sudo mkdir -p {load_params.working_dir}")
# If we chmod /home/<user_name> folder we can no longer ssh to the node
# !! IMPORTANT !!
if (
load_params.working_dir
and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}"
and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}/"
):
shell.exec(f"sudo chmod -R 777 {load_params.working_dir}")
return K6(
load_params,
["localhost:8080"],
k6_dir,
shell,
loader,
self.user,
)
def start(self):
load_params = self.k6_instances[0].load_params
self.cluster_state_controller.stop_services_of_type(S3Gate)
self.cluster_state_controller.stop_services_of_type(StorageNode)
parallel([k6.start for k6 in self.k6_instances])
wait_after_start_time = datetime_utils.parse_time(load_params.setup_timeout) + 5
with reporter.step(f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on"):
time.sleep(wait_after_start_time)
@reporter.step("Restore passwd on {cluster_node}")
def restore_passwd_on_node(self, cluster_node: ClusterNode):
shell = cluster_node.host.get_shell()
shell.exec("sudo chattr -i /etc/passwd")
@reporter.step("Lock passwd on {cluster_node}")
def lock_passwd_on_node(self, cluster_node: ClusterNode):
shell = cluster_node.host.get_shell()
shell.exec("sudo chattr +i /etc/passwd")
def stop(self):
for k6_instance in self.k6_instances:
k6_instance.stop()
self.cluster_state_controller.start_all_stopped_services()
def get_results(self) -> dict:
results = {}
for k6_instance in self.k6_instances:
result = k6_instance.get_results()
results[k6_instance.loader.ip] = result
parallel(self.restore_passwd_on_node, self.nodes_under_load)
return results
class S3LocalRunner(LocalRunner):
endpoints: list[str]
k6_dir: str
@reporter.step("Run preset on loaders")
def preset(self):
LocalRunner.preset(self)
with reporter.step(f"Resolve containers in preset"):
parallel(self._resolve_containers_in_preset, self.k6_instances)
@reporter.step("Resolve containers in preset")
def _resolve_containers_in_preset(self, k6_instance: K6):
k6_instance.shell.exec(
f"sudo {self.k6_dir}/scenarios/preset/resolve_containers_in_preset.py --endpoint {k6_instance.endpoints[0]} --preset_file {k6_instance.load_params.preset.pregen_json}"
)
@reporter.step("Init k6 instances")
def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
self.k6_instances = []
futures = parallel(
self._init_k6_instance_,
self.loaders,
load_params,
endpoints,
k6_dir,
)
self.k6_instances = [future.result() for future in futures]
def _init_k6_instance_(self, loader: Loader, load_params: LoadParams, endpoints: list[str], k6_dir: str):
shell = loader.get_shell()
with reporter.step(f"Init K6 instance on {loader.ip} for endpoints {endpoints}"):
with reporter.step(f"Make working directory"):
shell.exec(f"sudo mkdir -p {load_params.working_dir}")
# If we chmod /home/<user_name> folder we can no longer ssh to the node
# !! IMPORTANT !!
if (
load_params.working_dir
and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}"
and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}/"
):
shell.exec(f"sudo chmod -R 777 {load_params.working_dir}")
return K6(
load_params,
self.endpoints,
k6_dir,
shell,
loader,
self.user,
)
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@reporter.step("Preparation steps")
def prepare(
self,
load_params: LoadParams,
cluster_nodes: list[ClusterNode],
nodes_under_load: list[ClusterNode],
k6_dir: str,
):
self.k6_dir = k6_dir
parallel(self.prepare_node, nodes_under_load, k6_dir, load_params, cluster_nodes)
@reporter.step("Prepare node {cluster_node}")
def prepare_node(self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams, cluster_nodes: list[ClusterNode]):
LocalRunner.prepare_node(self, cluster_node, k6_dir, load_params)
self.endpoints = cluster_node.s3_gate.get_all_endpoints()
shell = cluster_node.host.get_shell()
with reporter.step("Uninstall previous installation of aws cli"):
shell.exec(f"sudo rm -rf /usr/local/aws-cli")
shell.exec(f"sudo rm -rf /usr/local/bin/aws")
shell.exec(f"sudo rm -rf /usr/local/bin/aws_completer")
with reporter.step("Install aws cli"):
shell.exec(f"sudo curl {load_params.awscli_url} -o {k6_dir}/awscliv2.zip")
shell.exec(f"sudo unzip -q {k6_dir}/awscliv2.zip -d {k6_dir}")
shell.exec(f"sudo {k6_dir}/aws/install")
with reporter.step("Install requests python module"):
shell.exec(f"sudo apt-get -y install python3-pip")
shell.exec(f"sudo curl -so {k6_dir}/requests.tar.gz {load_params.requests_module_url}")
shell.exec(f"sudo python3 -m pip install -I {k6_dir}/requests.tar.gz")
with reporter.step(f"Init s3 client on {cluster_node.host_ip}"):
configure_input = [
InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=self.user.s3_credentials.access_key),
InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=self.user.s3_credentials.secret_key),
InteractiveInput(prompt_pattern=r".*", input=""),
InteractiveInput(prompt_pattern=r".*", input=""),
]
shell.exec("aws configure", CommandOptions(interactive_inputs=configure_input))

View file

@@ -1,6 +1,12 @@
-from importlib.metadata import entry_points
+import sys
 from typing import Any

+if sys.version_info < (3, 10):
+    # On Python prior 3.10 we need to use backport of entry points
+    from importlib_metadata import entry_points
+else:
+    from importlib.metadata import entry_points
+

 def load_plugin(plugin_group: str, name: str) -> Any:
     """Loads plugin using entry point specification.

@@ -17,16 +23,3 @@ def load_plugin(plugin_group: str, name: str) -> Any:
         return None
     plugin = plugins[name]
     return plugin.load()

-
-def load_all(group: str) -> Any:
-    """Loads all plugins using entry point specification.
-
-    Args:
-        group: Name of plugin group.
-
-    Returns:
-        Classes from specified group.
-    """
-    plugins = entry_points(group=group)
-    return [plugin.load() for plugin in plugins]
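A minimal usage sketch for `load_plugin`; the plugin group and entry point names below are hypothetical, not taken from this repository:

# A plugin package would declare an entry point, e.g. in its pyproject.toml:
#   [project.entry-points."frostfs.testlib.reporter"]
#   allure = "frostfs_testlib.reporter.allure_handler:AllureHandler"
handler_class = load_plugin("frostfs.testlib.reporter", "allure")
if handler_class is not None:  # load_plugin returns None for unknown names
    handler = handler_class()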

View file

@ -8,40 +8,28 @@ from tenacity import retry
from tenacity.stop import stop_after_attempt from tenacity.stop import stop_after_attempt
from tenacity.wait import wait_fixed from tenacity.wait import wait_fixed
from frostfs_testlib import reporter from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.shell.command_inspectors import SuInspector from frostfs_testlib.shell.interfaces import CommandOptions
from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions
reporter = get_reporter()
class RemoteProcess: class RemoteProcess:
def __init__( def __init__(self, cmd: str, process_dir: str, shell: Shell):
self, cmd: str, process_dir: str, shell: Shell, cmd_inspector: Optional[CommandInspector], proc_id: str
):
self.process_dir = process_dir self.process_dir = process_dir
self.cmd = cmd self.cmd = cmd
self.stdout_last_line_number = 0 self.stdout_last_line_number = 0
self.stderr_last_line_number = 0 self.stderr_last_line_number = 0
self.pid: Optional[str] = None self.pid: Optional[str] = None
self.proc_rc: Optional[int] = None self.proc_rc: Optional[int] = None
self.proc_start_time: Optional[int] = None
self.proc_end_time: Optional[int] = None
self.saved_stdout: Optional[str] = None self.saved_stdout: Optional[str] = None
self.saved_stderr: Optional[str] = None self.saved_stderr: Optional[str] = None
self.shell = shell self.shell = shell
self.proc_id: str = proc_id
self.cmd_inspectors: list[CommandInspector] = [cmd_inspector] if cmd_inspector else []
@classmethod @classmethod
@reporter.step("Create remote process") @reporter.step_deco("Create remote process")
def create( def create(cls, command: str, shell: Shell, working_dir: str = "/tmp") -> RemoteProcess:
cls,
command: str,
shell: Shell,
working_dir: str = "/tmp",
user: Optional[str] = None,
proc_id: Optional[str] = None,
) -> RemoteProcess:
""" """
Create a process on a remote host. Create a process on a remote host.
@ -51,8 +39,6 @@ class RemoteProcess:
rc: contains script return code rc: contains script return code
stderr: contains script errors stderr: contains script errors
stdout: contains script output stdout: contains script output
user: user on behalf whom command will be executed
proc_id: process string identificator
Args: Args:
shell: Shell instance shell: Shell instance
@ -62,32 +48,16 @@ class RemoteProcess:
Returns: Returns:
RemoteProcess instance for further examination RemoteProcess instance for further examination
""" """
if proc_id is None:
proc_id = f"{uuid.uuid4()}"
cmd_inspector = SuInspector(user) if user else None
remote_process = cls( remote_process = cls(
cmd=command, cmd=command, process_dir=os.path.join(working_dir, f"proc_{uuid.uuid4()}"), shell=shell
process_dir=os.path.join(working_dir, f"proc_{proc_id}"),
shell=shell,
cmd_inspector=cmd_inspector,
proc_id=proc_id,
) )
remote_process._create_process_dir()
remote_process._generate_command_script(command)
remote_process._start_process()
remote_process.pid = remote_process._get_pid()
return remote_process return remote_process
@reporter.step("Start remote process") @reporter.step_deco("Get process stdout")
def start(self):
"""
Starts a process on a remote host.
"""
self._create_process_dir()
self._generate_command_script()
self._start_process()
self.pid = self._get_pid()
@reporter.step("Get process stdout")
def stdout(self, full: bool = False) -> str: def stdout(self, full: bool = False) -> str:
""" """
Method to get process stdout, either fresh info or full. Method to get process stdout, either fresh info or full.
@ -103,8 +73,7 @@ class RemoteProcess:
cur_stdout = self.saved_stdout cur_stdout = self.saved_stdout
else: else:
terminal = self.shell.exec( terminal = self.shell.exec(
f"cat {self.process_dir}/stdout", f"cat {self.process_dir}/stdout", options=CommandOptions(no_log=True)
options=CommandOptions(no_log=True, extra_inspectors=self.cmd_inspectors),
) )
if self.proc_rc is not None: if self.proc_rc is not None:
self.saved_stdout = terminal.stdout self.saved_stdout = terminal.stdout
@ -119,7 +88,7 @@ class RemoteProcess:
return resulted_stdout return resulted_stdout
return "" return ""
@reporter.step("Get process stderr") @reporter.step_deco("Get process stderr")
def stderr(self, full: bool = False) -> str: def stderr(self, full: bool = False) -> str:
""" """
Method to get process stderr, either fresh info or full. Method to get process stderr, either fresh info or full.
@ -135,8 +104,7 @@ class RemoteProcess:
cur_stderr = self.saved_stderr cur_stderr = self.saved_stderr
else: else:
terminal = self.shell.exec( terminal = self.shell.exec(
f"cat {self.process_dir}/stderr", f"cat {self.process_dir}/stderr", options=CommandOptions(no_log=True)
options=CommandOptions(no_log=True, extra_inspectors=self.cmd_inspectors),
) )
if self.proc_rc is not None: if self.proc_rc is not None:
self.saved_stderr = terminal.stdout self.saved_stderr = terminal.stdout
@ -150,131 +118,84 @@ class RemoteProcess:
return resulted_stderr return resulted_stderr
return "" return ""
@reporter.step("Get process rc") @reporter.step_deco("Get process rc")
def rc(self) -> Optional[int]: def rc(self) -> Optional[int]:
if self.proc_rc is not None: if self.proc_rc is not None:
return self.proc_rc return self.proc_rc
result = self._cat_proc_file("rc") terminal = self.shell.exec(f"cat {self.process_dir}/rc", CommandOptions(check=False))
if not result:
return None
self.proc_rc = int(result)
return self.proc_rc
@reporter.step("Get process start time")
def start_time(self) -> Optional[int]:
if self.proc_start_time is not None:
return self.proc_start_time
result = self._cat_proc_file("start_time")
if not result:
return None
self.proc_start_time = int(result)
return self.proc_start_time
@reporter.step("Get process end time")
def end_time(self) -> Optional[int]:
if self.proc_end_time is not None:
return self.proc_end_time
result = self._cat_proc_file("end_time")
if not result:
return None
self.proc_end_time = int(result)
return self.proc_end_time
def _cat_proc_file(self, file: str) -> Optional[str]:
terminal = self.shell.exec(
f"cat {self.process_dir}/{file}",
CommandOptions(check=False, extra_inspectors=self.cmd_inspectors, no_log=True),
)
if "No such file or directory" in terminal.stderr: if "No such file or directory" in terminal.stderr:
return None return None
elif terminal.stderr or terminal.return_code != 0: elif terminal.stderr or terminal.return_code != 0:
raise AssertionError(f"cat process {file} was not successful: {terminal.stderr}") raise AssertionError(f"cat process rc was not successful: {terminal.stderr}")
return terminal.stdout self.proc_rc = int(terminal.stdout)
return self.proc_rc
@reporter.step("Check if process is running") @reporter.step_deco("Check if process is running")
def running(self) -> bool: def running(self) -> bool:
return self.rc() is None return self.rc() is None
@reporter.step("Send signal to process") @reporter.step_deco("Send signal to process")
def send_signal(self, signal: int) -> None: def send_signal(self, signal: int) -> None:
kill_res = self.shell.exec( kill_res = self.shell.exec(f"kill -{signal} {self.pid}", CommandOptions(check=False))
f"kill -{signal} {self.pid}",
CommandOptions(check=False, extra_inspectors=self.cmd_inspectors),
)
if "No such process" in kill_res.stderr: if "No such process" in kill_res.stderr:
return return
if kill_res.return_code: if kill_res.return_code:
raise AssertionError(f"Signal {signal} not sent. Return code of kill: {kill_res.return_code}") raise AssertionError(
f"Signal {signal} not sent. Return code of kill: {kill_res.return_code}"
)
@reporter.step("Stop process") @reporter.step_deco("Stop process")
def stop(self) -> None: def stop(self) -> None:
self.send_signal(15) self.send_signal(15)
@reporter.step("Kill process") @reporter.step_deco("Kill process")
def kill(self) -> None: def kill(self) -> None:
self.send_signal(9) self.send_signal(9)
@reporter.step("Clear process directory") @reporter.step_deco("Clear process directory")
def clear(self) -> None: def clear(self) -> None:
if self.process_dir == "/": if self.process_dir == "/":
raise AssertionError(f"Invalid path to delete: {self.process_dir}") raise AssertionError(f"Invalid path to delete: {self.process_dir}")
self.shell.exec(f"rm -rf {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)) self.shell.exec(f"rm -rf {self.process_dir}")
@reporter.step("Start remote process") @reporter.step_deco("Start remote process")
def _start_process(self) -> None: def _start_process(self) -> None:
self.shell.exec( self.shell.exec(
f"nohup {self.process_dir}/command.sh </dev/null " f"nohup {self.process_dir}/command.sh </dev/null "
f">{self.process_dir}/stdout " f">{self.process_dir}/stdout "
f"2>{self.process_dir}/stderr &", f"2>{self.process_dir}/stderr &"
CommandOptions(extra_inspectors=self.cmd_inspectors),
) )
@reporter.step("Create process directory") @reporter.step_deco("Create process directory")
def _create_process_dir(self) -> None: def _create_process_dir(self) -> None:
self.shell.exec(f"mkdir -p {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)) self.shell.exec(f"mkdir {self.process_dir}")
self.shell.exec(f"chmod 777 {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)) self.shell.exec(f"chmod 777 {self.process_dir}")
terminal = self.shell.exec(f"realpath {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)) terminal = self.shell.exec(f"realpath {self.process_dir}")
self.process_dir = terminal.stdout.strip() self.process_dir = terminal.stdout.strip()
@reporter.step("Get pid") @reporter.step_deco("Get pid")
@retry(wait=wait_fixed(10), stop=stop_after_attempt(5), reraise=True) @retry(wait=wait_fixed(10), stop=stop_after_attempt(5), reraise=True)
def _get_pid(self) -> str: def _get_pid(self) -> str:
terminal = self.shell.exec(f"cat {self.process_dir}/pid", CommandOptions(extra_inspectors=self.cmd_inspectors)) terminal = self.shell.exec(f"cat {self.process_dir}/pid")
assert terminal.stdout, f"invalid pid: {terminal.stdout}" assert terminal.stdout, f"invalid pid: {terminal.stdout}"
return terminal.stdout.strip() return terminal.stdout.strip()
@reporter.step("Generate command script") @reporter.step_deco("Generate command script")
def _generate_command_script(self) -> None: def _generate_command_script(self, command: str) -> None:
command = self.cmd.replace('"', '\\"').replace("\\", "\\\\") command = command.replace('"', '\\"').replace("\\", "\\\\")
script = ( script = (
f"#!/bin/bash\n" f"#!/bin/bash\n"
f"cd {self.process_dir}\n" f"cd {self.process_dir}\n"
f"date +%s > {self.process_dir}/start_time\n"
f"{command} &\n" f"{command} &\n"
f"pid=\$!\n" f"pid=\$!\n"
f"cd {self.process_dir}\n" f"cd {self.process_dir}\n"
f"echo \$pid > {self.process_dir}/pid\n" f"echo \$pid > {self.process_dir}/pid\n"
f"wait \$pid\n" f"wait \$pid\n"
f"echo $? > {self.process_dir}/rc\n" f"echo $? > {self.process_dir}/rc"
f"date +%s > {self.process_dir}/end_time\n"
) )
self.shell.exec( self.shell.exec(f'echo "{script}" > {self.process_dir}/command.sh')
f'echo "{script}" > {self.process_dir}/command.sh', self.shell.exec(f"cat {self.process_dir}/command.sh")
CommandOptions(extra_inspectors=self.cmd_inspectors), self.shell.exec(f"chmod +x {self.process_dir}/command.sh")
)
self.shell.exec(
f"cat {self.process_dir}/command.sh",
CommandOptions(extra_inspectors=self.cmd_inspectors),
)
self.shell.exec(
f"chmod +x {self.process_dir}/command.sh",
CommandOptions(extra_inspectors=self.cmd_inspectors),
)
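Side note on the wrapper script: the `-` side records start_time, pid, rc and end_time as plain files inside process_dir, so process state can be read back later through the same Shell abstraction. A minimal sketch (helper name hypothetical; the file names come straight from the generated command.sh above):

    # Hypothetical helper, not part of the library.
    def read_process_stats(shell, process_dir: str) -> dict[str, str]:
        stats = {}
        for name in ("pid", "rc", "start_time", "end_time"):
            result = shell.exec(f"cat {process_dir}/{name}")
            stats[name] = result.stdout.strip()
        return stats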

View file

@@ -1,9 +1,6 @@
-from typing import Any
-
 from frostfs_testlib.reporter.allure_handler import AllureHandler
 from frostfs_testlib.reporter.interfaces import ReporterHandler
 from frostfs_testlib.reporter.reporter import Reporter
-from frostfs_testlib.reporter.steps_logger import StepsLogger

 __reporter = Reporter()

@@ -18,11 +15,3 @@ def get_reporter() -> Reporter:
         Singleton reporter instance.
     """
     return __reporter
-
-
-def step(title: str):
-    return __reporter.step(title)
-
-
-def attach(content: Any, file_name: str):
-    return __reporter.attach(content, file_name)
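The two reporter APIs visible in this hunk, side by side; both forms are taken directly from the code in this diff:

    # `-` side: module-level facade
    from frostfs_testlib import reporter

    @reporter.step("Upload object")  # also usable as a context manager
    def upload_object():
        ...

    # `+` side: explicit singleton plus a separate decorator
    from frostfs_testlib.reporter import get_reporter

    reporter = get_reporter()

    @reporter.step_deco("Upload object")
    def upload_object_old():
        ...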

View file

@@ -1,5 +1,5 @@
 import os
-from contextlib import AbstractContextManager, ContextDecorator
+from contextlib import AbstractContextManager
 from textwrap import shorten
 from typing import Any, Callable

@@ -12,8 +12,8 @@ from frostfs_testlib.reporter.interfaces import ReporterHandler
 class AllureHandler(ReporterHandler):
     """Handler that stores test artifacts in Allure report."""

-    def step(self, name: str) -> AbstractContextManager | ContextDecorator:
-        name = shorten(name, width=140, placeholder="...")
+    def step(self, name: str) -> AbstractContextManager:
+        name = shorten(name, width=70, placeholder="...")
         return allure.step(name)

     def step_decorator(self, name: str) -> Callable:

@@ -21,13 +21,8 @@ class AllureHandler(ReporterHandler):
     def attach(self, body: Any, file_name: str) -> None:
         attachment_name, extension = os.path.splitext(file_name)
-        if extension.startswith("."):
-            extension = extension[1:]
         attachment_type = self._resolve_attachment_type(extension)

-        if os.path.exists(body):
-            allure.attach.file(body, file_name, attachment_type, extension)
-        else:
-            allure.attach(body, attachment_name, attachment_type, extension)
+        allure.attach(body, attachment_name, attachment_type, extension)

     def _resolve_attachment_type(self, extension: str) -> attachment_type:
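The removed branch in attach() auto-detects whether body is a path on disk. Both call shapes below are standard allure-pytest API; only the file path is invented:

    import allure

    # Inline body (the only form left on the `+` side):
    allure.attach("response payload", "response", allure.attachment_type.TEXT)
    # File on disk (the `-`-side os.path.exists(body) branch):
    allure.attach.file("/tmp/node.log", "node.log", allure.attachment_type.TEXT)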

View file

@@ -1,5 +1,5 @@
 from abc import ABC, abstractmethod
-from contextlib import AbstractContextManager, ContextDecorator
+from contextlib import AbstractContextManager
 from typing import Any, Callable

@@ -7,7 +7,7 @@ class ReporterHandler(ABC):
     """Interface of handler that stores test artifacts in some reporting tool."""

     @abstractmethod
-    def step(self, name: str) -> AbstractContextManager | ContextDecorator:
+    def step(self, name: str) -> AbstractContextManager:
         """Register a new step in test execution.

         Args:

View file

@@ -5,7 +5,6 @@ from typing import Any, Callable, Optional

 from frostfs_testlib.plugins import load_plugin
 from frostfs_testlib.reporter.interfaces import ReporterHandler
-from frostfs_testlib.utils.func_utils import format_by_args


 @contextmanager

@@ -64,8 +63,7 @@ class Reporter:
         def wrapper(*a, **kw):
             resulting_func = func
             for handler in self.handlers:
-                parsed_name = format_by_args(func, name, *a, **kw)
-                decorator = handler.step_decorator(parsed_name)
+                decorator = handler.step_decorator(name)
                 resulting_func = decorator(resulting_func)

             return resulting_func(*a, **kw)

@@ -83,11 +81,11 @@ class Reporter:
         Returns:
             Step context.
         """
-        if not self.handlers:
-            return _empty_step()
-
         step_contexts = [handler.step(name) for handler in self.handlers]
-        if not step_contexts:
-            step_contexts = [_empty_step()]
-        decorated_wrapper = self.step_deco(name)
-        return AggregateContextManager(step_contexts, decorated_wrapper)
+        return AggregateContextManager(step_contexts)

     def attach(self, content: Any, file_name: str) -> None:
         """Attach specified content with given file name to the test report.

@@ -106,10 +104,9 @@ class AggregateContextManager(AbstractContextManager):
     contexts: list[AbstractContextManager]

-    def __init__(self, contexts: list[AbstractContextManager], decorated_wrapper: Callable) -> None:
+    def __init__(self, contexts: list[AbstractContextManager]) -> None:
         super().__init__()
         self.contexts = contexts
-        self.wrapper = decorated_wrapper

     def __enter__(self):
         for context in self.contexts:

@@ -130,6 +127,3 @@ class AggregateContextManager(AbstractContextManager):
         # If all context agreed to suppress exception, then suppress it;
         # otherwise return None to reraise
         return True if all(suppress_decisions) else None
-
-    def __call__(self, *args: Any, **kwds: Any) -> Any:
-        return self.wrapper(*args, **kwds)
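Practical effect of the removed decorated_wrapper/__call__ pair: on the `-` side one reporter.step(name) object serves as both a context manager and a decorator. A sketch, assuming the module-level facade from reporter/__init__.py above:

    from frostfs_testlib import reporter

    # Context-manager form (works on both sides of this diff):
    with reporter.step("Check replication"):
        ...

    # Decorator form (only the `-` side, via AggregateContextManager.__call__):
    @reporter.step("Check replication")
    def check_replication():
        ...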

View file

@@ -1,56 +0,0 @@
-import logging
-import threading
-from contextlib import AbstractContextManager, ContextDecorator
-from functools import wraps
-from types import TracebackType
-from typing import Any, Callable
-
-from frostfs_testlib.reporter.interfaces import ReporterHandler
-
-
-class StepsLogger(ReporterHandler):
-    """Handler that prints steps to log."""
-
-    def step(self, name: str) -> AbstractContextManager | ContextDecorator:
-        return StepLoggerContext(name)
-
-    def step_decorator(self, name: str) -> Callable:
-        return StepLoggerContext(name)
-
-    def attach(self, body: Any, file_name: str) -> None:
-        pass
-
-
-class StepLoggerContext(AbstractContextManager):
-    INDENT = {}
-
-    def __init__(self, title: str):
-        self.title = title
-        self.logger = logging.getLogger("NeoLogger")
-        self.thread = threading.get_ident()
-        if self.thread not in StepLoggerContext.INDENT:
-            StepLoggerContext.INDENT[self.thread] = 1
-
-    def __enter__(self) -> Any:
-        indent = ">" * StepLoggerContext.INDENT[self.thread]
-        self.logger.info(f"[{self.thread}] {indent} {self.title}")
-        StepLoggerContext.INDENT[self.thread] += 1
-
-    def __exit__(
-        self,
-        __exc_type: type[BaseException] | None,
-        __exc_value: BaseException | None,
-        __traceback: TracebackType | None,
-    ) -> bool | None:
-        StepLoggerContext.INDENT[self.thread] -= 1
-        indent = "<" * StepLoggerContext.INDENT[self.thread]
-        self.logger.info(f"[{self.thread}] {indent} {self.title}")
-
-    def __call__(self, func):
-        @wraps(func)
-        def impl(*a, **kw):
-            with self:
-                return func(*a, **kw)
-
-        return impl
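For reference, the deleted handler produces thread-tagged, depth-indented log lines. The wiring below is hypothetical (it assumes the Reporter exposes a register_handler method, as the plugin setup elsewhere in the library suggests):

    from frostfs_testlib.reporter import get_reporter
    from frostfs_testlib.reporter.steps_logger import StepsLogger

    get_reporter().register_handler(StepsLogger())
    # Nested steps then log roughly as:
    #   [140223] > outer step
    #   [140223] >> inner step
    #   [140223] << inner step
    #   [140223] < outer step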

View file

@@ -9,4 +9,4 @@ FROSTFS_ADM_EXEC = os.getenv("FROSTFS_ADM_EXEC", "frostfs-adm")

 # Config for frostfs-adm utility. Optional if tests are running against devenv
 FROSTFS_ADM_CONFIG_PATH = os.getenv("FROSTFS_ADM_CONFIG_PATH")
-CLI_DEFAULT_TIMEOUT = os.getenv("CLI_DEFAULT_TIMEOUT", "100s")
+CLI_DEFAULT_TIMEOUT = os.getenv("CLI_DEFAULT_TIMEOUT", None)

View file

@@ -10,8 +10,6 @@ COMPLEX_OBJECT_TAIL_SIZE = os.getenv("COMPLEX_OBJECT_TAIL_SIZE", "1000")
 SERVICE_MAX_STARTUP_TIME = os.getenv("SERVICE_MAX_STARTUP_TIME", "5m")

-STORAGE_USER_NAME = "frostfs-storage"
-
 MORPH_TIMEOUT = os.getenv("MORPH_BLOCK_TIME", "8s")
 MORPH_BLOCK_TIME = os.getenv("MORPH_BLOCK_TIME", "8s")
 FROSTFS_CONTRACT_CACHE_TIMEOUT = os.getenv("FROSTFS_CONTRACT_CACHE_TIMEOUT", "30s")

@@ -43,14 +41,6 @@ with open(DEFAULT_WALLET_CONFIG, "w") as file:
 # Number of attempts that S3 clients will attempt per each request (1 means single attempt
 # without any retries)
-MAX_REQUEST_ATTEMPTS = 5
+MAX_REQUEST_ATTEMPTS = 1
 RETRY_MODE = "standard"
 CREDENTIALS_CREATE_TIMEOUT = "1m"
-
-HOSTING_CONFIG_FILE = os.getenv(
-    "HOSTING_CONFIG_FILE", os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "..", ".devenv.hosting.yaml"))
-)
-
-MORE_LOG = os.getenv("MORE_LOG", "1")
-
-EXPIRATION_EPOCH_ATTRIBUTE = "__SYSTEM__EXPIRATION_EPOCH"

View file

@@ -23,10 +23,6 @@ INVALID_RANGE_OVERFLOW = "invalid '{range}' range: uint64 overflow"
 INVALID_OFFSET_SPECIFIER = "invalid '{range}' range offset specifier"
 INVALID_LENGTH_SPECIFIER = "invalid '{range}' range length specifier"

-S3_BUCKET_DOES_NOT_ALLOW_ACL = "The bucket does not allow ACLs"
-S3_MALFORMED_XML_REQUEST = "The XML you provided was not well-formed or did not validate against our published schema."
-
-RULE_ACCESS_DENIED_CONTAINER = "access to container operation {operation} is denied by access policy engine: Access denied"
-RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied"
-NO_RULE_FOUND_CONTAINER = "access to container operation {operation} is denied by access policy engine: NoRuleFound"
-NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request: method {operation}: NoRuleFound"
+S3_MALFORMED_XML_REQUEST = (
+    "The XML you provided was not well-formed or did not validate against our published schema."
+)

View file

@@ -11,9 +11,8 @@ BACKGROUND_WRITERS_COUNT = os.getenv("BACKGROUND_WRITERS_COUNT", 0)
 BACKGROUND_READERS_COUNT = os.getenv("BACKGROUND_READERS_COUNT", 0)
 BACKGROUND_DELETERS_COUNT = os.getenv("BACKGROUND_DELETERS_COUNT", 0)
 BACKGROUND_VERIFIERS_COUNT = os.getenv("BACKGROUND_VERIFIERS_COUNT", 0)
-BACKGROUND_LOAD_DEFAULT_TIME = os.getenv("BACKGROUND_LOAD_DEFAULT_TIME", 1800)
+BACKGROUND_LOAD_DEFAULT_TIME = os.getenv("BACKGROUND_LOAD_DEFAULT_TIME", 600)
 BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE = os.getenv("BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE", 32)
-BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME = float(os.getenv("BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME", 0.8))
 BACKGROUND_LOAD_SETUP_TIMEOUT = os.getenv("BACKGROUND_LOAD_SETUP_TIMEOUT", "5s")

 # This will decrease load params for some weak environments

@@ -26,9 +25,8 @@ BACKGROUND_LOAD_CONTAINER_PLACEMENT_POLICY = os.getenv(
 )
 BACKGROUND_LOAD_S3_LOCATION = os.getenv("BACKGROUND_LOAD_S3_LOCATION", "node-off")
 PRESET_CONTAINERS_COUNT = os.getenv("CONTAINERS_COUNT", "40")
-PRESET_CONTAINER_CREATION_RETRY_COUNT = os.getenv("CONTAINER_CREATION_RETRY_COUNT", "20")
 # TODO: At lease one object is required due to bug in xk6 (buckets with no objects produce millions exceptions in read)
-PRESET_OBJECTS_COUNT = os.getenv("OBJ_COUNT", "1")
+PRESET_OBJECTS_COUNT = os.getenv("OBJ_COUNT", "10")
 K6_DIRECTORY = os.getenv("K6_DIRECTORY", "/etc/k6")
 K6_TEARDOWN_PERIOD = os.getenv("K6_TEARDOWN_PERIOD", "30")
 K6_STOP_SIGNAL_TIMEOUT = int(os.getenv("K6_STOP_SIGNAL_TIMEOUT", 300))

View file

@@ -16,10 +16,11 @@ OPTIONAL_NODE_UNDER_LOAD = os.getenv("OPTIONAL_NODE_UNDER_LOAD")
 OPTIONAL_FAILOVER_ENABLED = str_to_bool(os.getenv("OPTIONAL_FAILOVER_ENABLED", "true"))

 # Set this to True to disable background load. I.E. node which supposed to be stopped will not be actually stopped.
-OPTIONAL_BACKGROUND_LOAD_ENABLED = str_to_bool(os.getenv("OPTIONAL_BACKGROUND_LOAD_ENABLED", "true"))
+OPTIONAL_BACKGROUND_LOAD_ENABLED = str_to_bool(
+    os.getenv("OPTIONAL_BACKGROUND_LOAD_ENABLED", "true")
+)

 # Set this to False for disable autouse fixture like node healthcheck during developing time.
-OPTIONAL_AUTOUSE_FIXTURES_ENABLED = str_to_bool(os.getenv("OPTIONAL_AUTOUSE_FIXTURES_ENABLED", "true"))
-
-# Use cache for fixtures with @cachec_fixture decorator
-OPTIONAL_CACHE_FIXTURES = str_to_bool(os.getenv("OPTIONAL_CACHE_FIXTURES", "false"))
+OPTIONAL_AUTOUSE_FIXTURES_ENABLED = str_to_bool(
+    os.getenv("OPTIONAL_AUTOUSE_FIXTURES_ENABLED", "true")
+)

View file

@@ -1,9 +0,0 @@
-ALL_USERS_GROUP_URI = "http://acs.amazonaws.com/groups/global/AllUsers"
-ALL_USERS_GROUP_WRITE_GRANT = {"Grantee": {"Type": "Group", "URI": ALL_USERS_GROUP_URI}, "Permission": "WRITE"}
-ALL_USERS_GROUP_READ_GRANT = {"Grantee": {"Type": "Group", "URI": ALL_USERS_GROUP_URI}, "Permission": "READ"}
-CANONICAL_USER_FULL_CONTROL_GRANT = {"Grantee": {"Type": "CanonicalUser"}, "Permission": "FULL_CONTROL"}
-
-# https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl
-PRIVATE_GRANTS = []
-PUBLIC_READ_GRANTS = [ALL_USERS_GROUP_READ_GRANT]
-PUBLIC_READ_WRITE_GRANTS = [ALL_USERS_GROUP_WRITE_GRANT, ALL_USERS_GROUP_READ_GRANT]
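These deleted grant constants back canned-ACL assertions. A self-contained sketch of the kind of check they supported (the helper is hypothetical; real S3 ACL responses also include an owner grant, omitted here):

    ALL_USERS_GROUP_URI = "http://acs.amazonaws.com/groups/global/AllUsers"

    def is_public_read(grants: list[dict]) -> bool:
        # public-read: AllUsers holds READ and the group is not granted WRITE
        has_read = any(
            g["Permission"] == "READ" and g["Grantee"].get("URI") == ALL_USERS_GROUP_URI
            for g in grants
        )
        has_write = any(
            g["Permission"] == "WRITE" and g["Grantee"].get("URI") == ALL_USERS_GROUP_URI
            for g in grants
        )
        return has_read and not has_write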

File diff suppressed because it is too large.

File diff suppressed because it is too large.

View file

@@ -1,16 +0,0 @@
-import re
-
-from frostfs_testlib.cli.generic_cli import GenericCli
-from frostfs_testlib.s3.interfaces import BucketContainerResolver
-from frostfs_testlib.storage.cluster import ClusterNode
-
-
-class CurlBucketContainerResolver(BucketContainerResolver):
-    def resolve(self, node: ClusterNode, bucket_name: str, **kwargs: dict) -> str:
-        curl = GenericCli("curl", node.host)
-        output = curl(f"-I http://127.0.0.1:8084/{bucket_name}")
-
-        pattern = r"X-Container-Id: (\S+)"
-        cid = re.findall(pattern, output.stdout)
-        if cid:
-            return cid[0]
-        return None
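Usage of the deleted resolver, for context (the ClusterNode value is a hypothetical fixture; the 8084 HTTP gate port comes from the code above):

    resolver = CurlBucketContainerResolver()
    cid = resolver.resolve(node, "my-bucket")  # HEAD http://127.0.0.1:8084/my-bucket
    assert cid, "gateway returned no X-Container-Id header"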

View file

@@ -1,11 +1,8 @@
 from abc import ABC, abstractmethod
 from datetime import datetime
+from enum import Enum
 from typing import Literal, Optional, Union

-from frostfs_testlib.storage.cluster import ClusterNode
-from frostfs_testlib.testing.readable import HumanReadableABC, HumanReadableEnum
-from frostfs_testlib.utils.file_utils import TestFile


 def _make_objs_dict(key_names):
     objs_list = []

@@ -16,8 +13,7 @@ def _make_objs_dict(key_names):
     return objs_dict


-class VersioningStatus(HumanReadableEnum):
-    UNDEFINED = None
+class VersioningStatus(Enum):
     ENABLED = "Enabled"
     SUSPENDED = "Suspended"

@@ -33,35 +29,15 @@ ACL_COPY = [
 ]


-class BucketContainerResolver(ABC):
+class S3ClientWrapper(ABC):
     @abstractmethod
-    def resolve(self, node: ClusterNode, bucket_name: str, **kwargs: dict) -> str:
-        """
-        Resolve Container ID from bucket name
-
-        Args:
-            node: node from where we want to resolve
-            bucket_name: name of the bucket
-            **kwargs: any other required params
-
-        Returns: Container ID
-        """
-        raise NotImplementedError("Call from abstract class")
-
-
-class S3ClientWrapper(HumanReadableABC):
-    @abstractmethod
-    def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str, region: str) -> None:
+    def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None:
         pass

     @abstractmethod
     def set_endpoint(self, s3gate_endpoint: str):
         """Set endpoint"""

-    @abstractmethod
-    def set_iam_endpoint(self, iam_endpoint: str):
-        """Set iam endpoint"""
-
     @abstractmethod
     def create_bucket(
         self,

@@ -157,10 +133,6 @@ class S3ClientWrapper(HumanReadableABC):
     def get_bucket_policy(self, bucket: str) -> str:
         """Returns the policy of a specified bucket."""

-    @abstractmethod
-    def delete_bucket_policy(self, bucket: str) -> str:
-        """Deletes the policy of a specified bucket."""
-
     @abstractmethod
     def put_bucket_policy(self, bucket: str, policy: dict) -> None:
         """Applies S3 bucket policy to an S3 bucket."""

@@ -294,7 +266,7 @@ class S3ClientWrapper(HumanReadableABC):
         version_id: Optional[str] = None,
         object_range: Optional[tuple[int, int]] = None,
         full_output: bool = False,
-    ) -> dict | TestFile:
+    ) -> Union[dict, str]:
         """Retrieves objects from S3."""

     @abstractmethod

@@ -322,11 +294,15 @@ class S3ClientWrapper(HumanReadableABC):
     abort a given multipart upload multiple times in order to completely free all storage consumed by all parts."""

     @abstractmethod
-    def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str:
+    def upload_part(
+        self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str
+    ) -> str:
         """Uploads a part in a multipart upload."""

     @abstractmethod
-    def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str:
+    def upload_part_copy(
+        self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str
+    ) -> str:
         """Uploads a part by copying data from an existing object as data source."""

     @abstractmethod

@@ -370,18 +346,6 @@ class S3ClientWrapper(HumanReadableABC):
     def delete_object_tagging(self, bucket: str, key: str) -> None:
         """Removes the entire tag set from the specified object."""

-    @abstractmethod
-    def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict:
-        """Adds or updates bucket lifecycle configuration"""
-
-    @abstractmethod
-    def get_bucket_lifecycle_configuration(self, bucket: str) -> dict:
-        """Gets bucket lifecycle configuration"""
-
-    @abstractmethod
-    def delete_bucket_lifecycle(self, bucket: str) -> dict:
-        """Deletes bucket lifecycle"""
-
     @abstractmethod
     def get_object_attributes(
         self,

@@ -416,165 +380,3 @@ class S3ClientWrapper(HumanReadableABC):
         """cp directory TODO: Add proper description"""

     # END OF OBJECT METHODS #
-
-    # IAM METHODS #
-
-    @abstractmethod
-    def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict:
-        """Adds the specified user to the specified group"""
-
-    @abstractmethod
-    def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict:
-        """Attaches the specified managed policy to the specified IAM group"""
-
-    @abstractmethod
-    def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict:
-        """Attaches the specified managed policy to the specified user"""
-
-    @abstractmethod
-    def iam_create_access_key(self, user_name: str) -> dict:
-        """Creates a new AWS secret access key and access key ID for the specified user"""
-
-    @abstractmethod
-    def iam_create_group(self, group_name: str) -> dict:
-        """Creates a new group"""
-
-    @abstractmethod
-    def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict:
-        """Creates a new managed policy for your AWS account"""
-
-    @abstractmethod
-    def iam_create_user(self, user_name: str) -> dict:
-        """Creates a new IAM user for your AWS account"""
-
-    @abstractmethod
-    def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict:
-        """Deletes the access key pair associated with the specified IAM user"""
-
-    @abstractmethod
-    def iam_delete_group(self, group_name: str) -> dict:
-        """Deletes the specified IAM group"""
-
-    @abstractmethod
-    def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict:
-        """Deletes the specified inline policy that is embedded in the specified IAM group"""
-
-    @abstractmethod
-    def iam_delete_policy(self, policy_arn: str) -> dict:
-        """Deletes the specified managed policy"""
-
-    @abstractmethod
-    def iam_delete_user(self, user_name: str) -> dict:
-        """Deletes the specified IAM user"""
-
-    @abstractmethod
-    def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict:
-        """Deletes the specified inline policy that is embedded in the specified IAM user"""
-
-    @abstractmethod
-    def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict:
-        """Removes the specified managed policy from the specified IAM group"""
-
-    @abstractmethod
-    def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict:
-        """Removes the specified managed policy from the specified user"""
-
-    @abstractmethod
-    def iam_get_group(self, group_name: str) -> dict:
-        """Returns a list of IAM users that are in the specified IAM group"""
-
-    @abstractmethod
-    def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict:
-        """Retrieves the specified inline policy document that is embedded in the specified IAM group"""
-
-    @abstractmethod
-    def iam_get_policy(self, policy_arn: str) -> dict:
-        """Retrieves information about the specified managed policy"""
-
-    @abstractmethod
-    def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict:
-        """Retrieves information about the specified version of the specified managed policy"""
-
-    @abstractmethod
-    def iam_get_user(self, user_name: str) -> dict:
-        """Retrieves information about the specified IAM user"""
-
-    @abstractmethod
-    def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict:
-        """Retrieves the specified inline policy document that is embedded in the specified IAM user"""
-
-    @abstractmethod
-    def iam_list_access_keys(self, user_name: str) -> dict:
-        """Returns information about the access key IDs associated with the specified IAM user"""
-
-    @abstractmethod
-    def iam_list_attached_group_policies(self, group_name: str) -> dict:
-        """Lists all managed policies that are attached to the specified IAM group"""
-
-    @abstractmethod
-    def iam_list_attached_user_policies(self, user_name: str) -> dict:
-        """Lists all managed policies that are attached to the specified IAM user"""
-
-    @abstractmethod
-    def iam_list_entities_for_policy(self, policy_arn: str) -> dict:
-        """Lists all IAM users, groups, and roles that the specified managed policy is attached to"""
-
-    @abstractmethod
-    def iam_list_group_policies(self, group_name: str) -> dict:
-        """Lists the names of the inline policies that are embedded in the specified IAM group"""
-
-    @abstractmethod
-    def iam_list_groups(self) -> dict:
-        """Lists the IAM groups"""
-
-    @abstractmethod
-    def iam_list_groups_for_user(self, user_name: str) -> dict:
-        """Lists the IAM groups that the specified IAM user belongs to"""
-
-    @abstractmethod
-    def iam_list_policies(self) -> dict:
-        """Lists all the managed policies that are available in your AWS account"""
-
-    @abstractmethod
-    def iam_list_policy_versions(self, policy_arn: str) -> dict:
-        """Lists information about the versions of the specified managed policy"""
-
-    @abstractmethod
-    def iam_list_user_policies(self, user_name: str) -> dict:
-        """Lists the names of the inline policies embedded in the specified IAM user"""
-
-    @abstractmethod
-    def iam_list_users(self) -> dict:
-        """Lists the IAM users"""
-
-    @abstractmethod
-    def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict:
-        """Adds or updates an inline policy document that is embedded in the specified IAM group"""
-
-    @abstractmethod
-    def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict:
-        """Adds or updates an inline policy document that is embedded in the specified IAM user"""
-
-    @abstractmethod
-    def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict:
-        """Removes the specified user from the specified group"""
-
-    @abstractmethod
-    def iam_update_group(self, group_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict:
-        """Updates the name and/or the path of the specified IAM group"""
-
-    @abstractmethod
-    def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict:
-        """Updates the name and/or the path of the specified IAM user"""
-
-    @abstractmethod
-    def iam_tag_user(self, user_name: str, tags: list) -> dict:
-        """Adds one or more tags to an IAM user"""
-
-    @abstractmethod
-    def iam_list_user_tags(self, user_name: str) -> dict:
-        """List tags of IAM user"""
-
-    @abstractmethod
-    def iam_untag_user(self, user_name: str, tag_keys: list) -> dict:
-        """Removes the specified tags from the user"""

View file

@@ -1,3 +1,3 @@
 from frostfs_testlib.shell.interfaces import CommandOptions, CommandResult, InteractiveInput, Shell
 from frostfs_testlib.shell.local_shell import LocalShell
-from frostfs_testlib.shell.ssh_shell import SshConnectionProvider, SSHShell
+from frostfs_testlib.shell.ssh_shell import SSHShell

View file

@@ -7,23 +7,7 @@ class SudoInspector(CommandInspector):
     If command is already prepended with sudo, then has no effect.
     """

-    def inspect(self, original_command: str, command: str) -> str:
+    def inspect(self, command: str) -> str:
         if not command.startswith("sudo"):
             return f"sudo {command}"
         return command
-
-
-class SuInspector(CommandInspector):
-    """Allows to run command as another user via sudo su call
-
-    If command is already prepended with sudo su, then has no effect.
-    """
-
-    def __init__(self, user: str) -> None:
-        self.user = user
-
-    def inspect(self, original_command: str, command: str) -> str:
-        if not original_command.startswith("sudo su"):
-            cmd = original_command.replace('"', '\\"').replace("\$", "\\\\\\$")
-            return f'sudo su - {self.user} -c "{cmd}"'
-        return original_command
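How the deleted SuInspector composes with the extra_inspectors option from shell/interfaces.py below (the wiring is hypothetical; both class names come from this diff, and extra_inspectors exists only on the `-` side):

    from frostfs_testlib.shell import CommandOptions, LocalShell
    from frostfs_testlib.shell.command_inspectors import SuInspector

    shell = LocalShell()
    options = CommandOptions(extra_inspectors=[SuInspector("frostfs-storage")])
    # Effectively runs: sudo su - frostfs-storage -c "whoami"
    shell.exec("whoami", options)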

View file

@@ -22,12 +22,11 @@ class CommandInspector(ABC):
     """Interface of inspector that processes command text before execution."""

     @abstractmethod
-    def inspect(self, original_command: str, command: str) -> str:
+    def inspect(self, command: str) -> str:
         """Transforms command text and returns modified command.

         Args:
             command: Command to transform with this inspector.
-            original_command: Untransformed command to transform with this inspector. Depending on type of the inspector it might be required to modify original command

         Returns:
             Transformed command text.

@@ -48,7 +47,6 @@ class CommandOptions:
         check: Controls whether to check return code of the command. Set to False to
             ignore non-zero return codes.
         no_log: Do not print output to logger if True.
-        extra_inspectors: Exctra command inspectors to process command
     """

     interactive_inputs: Optional[list[InteractiveInput]] = None

@@ -56,7 +54,6 @@ class CommandOptions:
     timeout: Optional[int] = None
     check: bool = True
     no_log: bool = False
-    extra_inspectors: Optional[list[CommandInspector]] = None

     def __post_init__(self):
         if self.timeout is None:

View file

@@ -1,18 +1,16 @@
 import logging
 import subprocess
 import tempfile
-from contextlib import nullcontext
 from datetime import datetime
 from typing import IO, Optional

 import pexpect

-from frostfs_testlib import reporter
-from frostfs_testlib.resources.common import MORE_LOG
+from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell

 logger = logging.getLogger("frostfs.testlib.shell")
-step_context = reporter.step if MORE_LOG == "1" else nullcontext
+reporter = get_reporter()


 class LocalShell(Shell):

@@ -26,12 +24,10 @@ class LocalShell(Shell):
         # If no options were provided, use default options
         options = options or CommandOptions()

-        original_command = command
-        extra_inspectors = options.extra_inspectors if options.extra_inspectors else []
-        for inspector in [*self.command_inspectors, *extra_inspectors]:
-            command = inspector.inspect(original_command, command)
+        for inspector in self.command_inspectors:
+            command = inspector.inspect(command)

-        with step_context(f"Executing command: {command}"):
+        logger.info(f"Executing command: {command}")
         if options.interactive_inputs:
             return self._exec_interactive(command, options)
         return self._exec_non_interactive(command, options)

@@ -41,7 +37,7 @@ class LocalShell(Shell):
         log_file = tempfile.TemporaryFile()  # File is reliable cross-platform way to capture output
         try:
-            command_process = pexpect.spawn(command, timeout=options.timeout, use_poll=True)
+            command_process = pexpect.spawn(command, timeout=options.timeout)
         except (pexpect.ExceptionPexpect, OSError) as exc:
             raise RuntimeError(f"Command: {command}") from exc

@@ -63,7 +59,8 @@ class LocalShell(Shell):
         if options.check and result.return_code != 0:
             raise RuntimeError(
-                f"Command: {command}\nreturn code: {result.return_code}\n" f"Output: {result.stdout}\n" f"Stderr: {result.stderr}\n"
+                f"Command: {command}\nreturn code: {result.return_code}\n"
+                f"Output: {result.stdout}"
             )
         return result

@@ -94,7 +91,11 @@ class LocalShell(Shell):
                 stderr="",
                 return_code=exc.returncode,
             )
-            raise RuntimeError(f"Command: {command}\nError with retcode: {exc.returncode}\n Output: {exc.output}") from exc
+            raise RuntimeError(
+                f"Command: {command}\nError:\n"
+                f"return code: {exc.returncode}\n"
+                f"output: {exc.output}"
+            ) from exc
         except OSError as exc:
             raise RuntimeError(f"Command: {command}\nOutput: {exc.strerror}") from exc
         finally:

@@ -128,13 +129,15 @@ class LocalShell(Shell):
         end_time: datetime,
         result: Optional[CommandResult],
     ) -> None:
-        if not result:
-            logger.warning(f"Command: {command}\n" f"Error: result is None")
-            return
-
-        status, log_method = ("Success", logger.info) if result.return_code == 0 else ("Error", logger.warning)
-        log_method(f"Command: {command}\n" f"{status} with retcode {result.return_code}\n" f"Output: \n{result.stdout}")
+        # TODO: increase logging level if return code is non 0, should be warning at least
+        logger.info(
+            f"Command: {command}\n"
+            f"{'Success:' if result and result.return_code == 0 else 'Error:'}\n"
+            f"return code: {result.return_code if result else ''} "
+            f"\nOutput: {result.stdout if result else ''}"
+        )

+        if result:
             elapsed_time = end_time - start_time
             command_attachment = (
                 f"COMMAND: {command}\n"

@@ -143,4 +146,5 @@ class LocalShell(Shell):
                 f"STDERR:\n{result.stderr}\n"
                 f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}"
             )
-        with reporter.step(f"COMMAND: {command}"):
             reporter.attach(command_attachment, "Command execution.txt")
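The `-` side gates per-command reporter steps behind MORE_LOG instead of always calling logger.info. The same toggle pattern in isolation, self-contained (the log_step helper is invented for illustration):

    import os
    from contextlib import contextmanager, nullcontext

    @contextmanager
    def log_step(title: str):
        print(f">> {title}")
        yield
        print(f"<< {title}")

    MORE_LOG = os.getenv("MORE_LOG", "1")
    step_context = log_step if MORE_LOG == "1" else nullcontext

    with step_context("Executing command: ls"):
        pass  # nullcontext accepts the title and simply ignores it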

View file

@@ -6,111 +6,24 @@ from functools import lru_cache, wraps
 from time import sleep
 from typing import ClassVar, Optional, Tuple

-from paramiko import AutoAddPolicy, Channel, ECDSAKey, Ed25519Key, PKey, RSAKey, SSHClient, SSHException, ssh_exception
+from paramiko import (
+    AutoAddPolicy,
+    Channel,
+    ECDSAKey,
+    Ed25519Key,
+    PKey,
+    RSAKey,
+    SSHClient,
+    SSHException,
+    ssh_exception,
+)
 from paramiko.ssh_exception import AuthenticationException

-from frostfs_testlib import reporter
-from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell, SshCredentials
+from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell

 logger = logging.getLogger("frostfs.testlib.shell")
+reporter = get_reporter()

-
-class SshConnectionProvider:
-    SSH_CONNECTION_ATTEMPTS: ClassVar[int] = 4
-    SSH_ATTEMPTS_INTERVAL: ClassVar[int] = 10
-    CONNECTION_TIMEOUT = 60
-
-    instance = None
-    connections: dict[str, SSHClient] = {}
-    creds: dict[str, SshCredentials] = {}
-
-    def __new__(cls):
-        if not cls.instance:
-            cls.instance = super(SshConnectionProvider, cls).__new__(cls)
-        return cls.instance
-
-    def store_creds(self, host: str, ssh_creds: SshCredentials):
-        self.creds[host] = ssh_creds
-
-    def provide(self, host: str, port: str) -> SSHClient:
-        if host not in self.creds:
-            raise RuntimeError(f"Please add credentials for host {host}")
-        if host in self.connections:
-            client = self.connections[host]
-            if client:
-                return client
-
-        creds = self.creds[host]
-        client = self._create_connection(host, port, creds)
-        self.connections[host] = client
-        return client
-
-    def drop(self, host: str):
-        if host in self.connections:
-            client = self.connections.pop(host)
-            client.close()
-
-    def drop_all(self):
-        hosts = list(self.connections.keys())
-        for host in hosts:
-            self.drop(host)
-
-    def _create_connection(
-        self,
-        host: str,
-        port: str,
-        creds: SshCredentials,
-    ) -> SSHClient:
-        for attempt in range(self.SSH_CONNECTION_ATTEMPTS):
-            connection = SSHClient()
-            connection.set_missing_host_key_policy(AutoAddPolicy())
-            try:
-                if creds.ssh_key_path:
-                    logger.info(
-                        f"Trying to connect to host {host} as {creds.ssh_login} using SSH key "
-                        f"{creds.ssh_key_path} (attempt {attempt})"
-                    )
-                    connection.connect(
-                        hostname=host,
-                        port=port,
-                        username=creds.ssh_login,
-                        pkey=_load_private_key(creds.ssh_key_path, creds.ssh_key_passphrase),
-                        timeout=self.CONNECTION_TIMEOUT,
-                    )
-                else:
-                    logger.info(
-                        f"Trying to connect to host {host} as {creds.ssh_login} using password " f"(attempt {attempt})"
-                    )
-                    connection.connect(
-                        hostname=host,
-                        port=port,
-                        username=creds.ssh_login,
-                        password=creds.ssh_password,
-                        timeout=self.CONNECTION_TIMEOUT,
-                    )
-                return connection
-            except AuthenticationException:
-                connection.close()
-                logger.exception(f"Can't connect to host {host}")
-                raise
-            except (
-                SSHException,
-                ssh_exception.NoValidConnectionsError,
-                AttributeError,
-                socket.timeout,
-                OSError,
-            ) as exc:
-                connection.close()
-                can_retry = attempt + 1 < self.SSH_CONNECTION_ATTEMPTS
-                if can_retry:
-                    logger.warn(
-                        f"Can't connect to host {host}, will retry after {self.SSH_ATTEMPTS_INTERVAL}s. Error: {exc}"
-                    )
-                    sleep(self.SSH_ATTEMPTS_INTERVAL)
-                    continue
-                logger.exception(f"Can't connect to host {host}")
-                raise HostIsNotAvailable(host) from exc

 class HostIsNotAvailable(Exception):

@@ -123,7 +36,9 @@ class HostIsNotAvailable(Exception):
 def log_command(func):
     @wraps(func)
-    def wrapper(shell: "SSHShell", command: str, options: CommandOptions, *args, **kwargs) -> CommandResult:
+    def wrapper(
+        shell: "SSHShell", command: str, options: CommandOptions, *args, **kwargs
+    ) -> CommandResult:
         command_info = command.removeprefix("$ProgressPreference='SilentlyContinue'\n")
         with reporter.step(command_info):
             logger.info(f'Execute command "{command}" on "{shell.host}"')

@@ -176,6 +91,9 @@ class SSHShell(Shell):
     # to allow remote command to flush its output buffer
     DELAY_AFTER_EXIT = 0.2

+    SSH_CONNECTION_ATTEMPTS: ClassVar[int] = 3
+    CONNECTION_TIMEOUT = 90
+
     def __init__(
         self,
         host: str,

@@ -185,34 +103,31 @@ class SSHShell(Shell):
         private_key_passphrase: Optional[str] = None,
         port: str = "22",
         command_inspectors: Optional[list[CommandInspector]] = None,
-        custom_environment: Optional[dict] = None
     ) -> None:
         super().__init__()
-        self.connection_provider = SshConnectionProvider()
-        self.connection_provider.store_creds(
-            host, SshCredentials(login, password, private_key_path, private_key_passphrase)
-        )
         self.host = host
         self.port = port
+        self.login = login
+        self.password = password
+        self.private_key_path = private_key_path
+        self.private_key_passphrase = private_key_passphrase
         self.command_inspectors = command_inspectors or []
-        self.environment = custom_environment
+        self.__connection: Optional[SSHClient] = None

     @property
     def _connection(self):
-        return self.connection_provider.provide(self.host, self.port)
+        if not self.__connection:
+            self.__connection = self._create_connection()
+        return self.__connection

     def drop(self):
-        self.connection_provider.drop(self.host)
+        self._reset_connection()

     def exec(self, command: str, options: Optional[CommandOptions] = None) -> CommandResult:
         options = options or CommandOptions()

-        original_command = command
-        extra_inspectors = options.extra_inspectors if options.extra_inspectors else []
-        for inspector in [*self.command_inspectors, *extra_inspectors]:
-            command = inspector.inspect(original_command, command)
+        for inspector in self.command_inspectors:
+            command = inspector.inspect(command)

         if options.interactive_inputs:
             result = self._exec_interactive(command, options)

@@ -221,13 +136,15 @@ class SSHShell(Shell):
         if options.check and result.return_code != 0:
             raise RuntimeError(
-                f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}\nStderr: {result.stderr}\n"
+                f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}"
             )
         return result

     @log_command
     def _exec_interactive(self, command: str, options: CommandOptions) -> CommandResult:
-        stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout, get_pty=True, environment=self.environment)
+        stdin, stdout, stderr = self._connection.exec_command(
+            command, timeout=options.timeout, get_pty=True
+        )
         for interactive_input in options.interactive_inputs:
             input = interactive_input.input
             if not input.endswith("\n"):

@@ -254,7 +171,7 @@ class SSHShell(Shell):
     @log_command
     def _exec_non_interactive(self, command: str, options: CommandOptions) -> CommandResult:
         try:
-            stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout, environment=self.environment)
+            stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout)

             if options.close_stdin:
                 stdin.close()

@@ -276,7 +193,7 @@ class SSHShell(Shell):
                 socket.timeout,
             ) as exc:
                 logger.exception(f"Can't execute command {command} on host: {self.host}")
-                self.drop()
+                self._reset_connection()
                 raise HostIsNotAvailable(self.host) from exc

     def _read_channels(

@@ -331,3 +248,57 @@ class SSHShell(Shell):
         full_stderr = b"".join(stderr_chunks)
         return (full_stdout.decode(errors="ignore"), full_stderr.decode(errors="ignore"))

+    def _create_connection(self, attempts: int = SSH_CONNECTION_ATTEMPTS) -> SSHClient:
+        for attempt in range(attempts):
+            connection = SSHClient()
+            connection.set_missing_host_key_policy(AutoAddPolicy())
+            try:
+                if self.private_key_path:
+                    logger.info(
+                        f"Trying to connect to host {self.host} as {self.login} using SSH key "
+                        f"{self.private_key_path} (attempt {attempt})"
+                    )
+                    connection.connect(
+                        hostname=self.host,
+                        port=self.port,
+                        username=self.login,
+                        pkey=_load_private_key(self.private_key_path, self.private_key_passphrase),
+                        timeout=self.CONNECTION_TIMEOUT,
+                    )
+                else:
+                    logger.info(
+                        f"Trying to connect to host {self.host} as {self.login} using password "
+                        f"(attempt {attempt})"
+                    )
+                    connection.connect(
+                        hostname=self.host,
+                        port=self.port,
+                        username=self.login,
+                        password=self.password,
+                        timeout=self.CONNECTION_TIMEOUT,
+                    )
+                return connection
+            except AuthenticationException:
+                connection.close()
+                logger.exception(f"Can't connect to host {self.host}")
+                raise
+            except (
+                SSHException,
+                ssh_exception.NoValidConnectionsError,
+                AttributeError,
+                socket.timeout,
+                OSError,
+            ) as exc:
+                connection.close()
+                can_retry = attempt + 1 < attempts
+                if can_retry:
+                    logger.warn(f"Can't connect to host {self.host}, will retry. Error: {exc}")
+                    continue
+                logger.exception(f"Can't connect to host {self.host}")
+                raise HostIsNotAvailable(self.host) from exc
+
+    def _reset_connection(self) -> None:
+        if self.__connection:
+            self.__connection.close()
+            self.__connection = None
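The `-`-side provider caches one paramiko connection per host process-wide, so every SSHShell for the same host reuses it instead of reconnecting. Hypothetical usage (host and credentials invented; SshCredentials fields match the positional construction shown above):

    from frostfs_testlib.shell.interfaces import SshCredentials
    from frostfs_testlib.shell.ssh_shell import SshConnectionProvider

    provider = SshConnectionProvider()  # __new__ always returns the same instance
    provider.store_creds("10.78.70.1", SshCredentials("root", "secret", None, None))
    client = provider.provide("10.78.70.1", "22")  # connects once, then cached
    provider.drop("10.78.70.1")                    # close and evict, e.g. on failover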

View file

@@ -8,23 +8,29 @@ from typing import List, Optional, Union

 import base58

-from frostfs_testlib import reporter
 from frostfs_testlib.cli import FrostfsCli
+from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
-from frostfs_testlib.resources.common import ASSETS_DIR
+from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG
 from frostfs_testlib.shell import Shell
-from frostfs_testlib.storage.dataclasses.acl import EACL_LIFETIME, FROSTFS_CONTRACT_CACHE_TIMEOUT, EACLPubKey, EACLRole, EACLRule
-from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
+from frostfs_testlib.storage.dataclasses.acl import (
+    EACL_LIFETIME,
+    FROSTFS_CONTRACT_CACHE_TIMEOUT,
+    EACLPubKey,
+    EACLRole,
+    EACLRule,
+)
 from frostfs_testlib.utils import wallet_utils

+reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")


-@reporter.step("Get extended ACL")
-def get_eacl(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str) -> Optional[str]:
-    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
+@reporter.step_deco("Get extended ACL")
+def get_eacl(wallet_path: str, cid: str, shell: Shell, endpoint: str) -> Optional[str]:
+    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
     try:
-        result = cli.container.get_eacl(rpc_endpoint=endpoint, cid=cid)
+        result = cli.container.get_eacl(wallet=wallet_path, rpc_endpoint=endpoint, cid=cid)
     except RuntimeError as exc:
         logger.info("Extended ACL table is not set for this container")
         logger.info(f"Got exception while getting eacl: {exc}")

@@ -34,17 +40,18 @@ def get_eacl(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str) -> Optio
     return result.stdout


-@reporter.step("Set extended ACL")
+@reporter.step_deco("Set extended ACL")
 def set_eacl(
-    wallet: WalletInfo,
+    wallet_path: str,
     cid: str,
     eacl_table_path: str,
     shell: Shell,
     endpoint: str,
     session_token: Optional[str] = None,
 ) -> None:
-    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
+    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
     cli.container.set_eacl(
+        wallet=wallet_path,
         rpc_endpoint=endpoint,
         cid=cid,
         table=eacl_table_path,

@@ -60,7 +67,7 @@ def _encode_cid_for_eacl(cid: str) -> str:
 def create_eacl(cid: str, rules_list: List[EACLRule], shell: Shell) -> str:
     table_file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"eacl_table_{str(uuid.uuid4())}.json")
-    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC)
+    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
     cli.acl.extended_create(cid=cid, out=table_file_path, rule=rules_list)

     with open(table_file_path, "r") as file:

@@ -71,7 +78,7 @@ def create_eacl(cid: str, rules_list: List[EACLRule], shell: Shell) -> str:
 def form_bearertoken_file(
-    wallet: WalletInfo,
+    wif: str,
     cid: str,
     eacl_rule_list: List[Union[EACLRule, EACLPubKey]],
     shell: Shell,

@@ -86,7 +93,7 @@ def form_bearertoken_file(
     enc_cid = _encode_cid_for_eacl(cid) if cid else None
     file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))

-    eacl = get_eacl(wallet, cid, shell, endpoint)
+    eacl = get_eacl(wif, cid, shell, endpoint)
     json_eacl = dict()
     if eacl:
         eacl = eacl.replace("eACL: ", "").split("Signature")[0]

@@ -127,7 +134,7 @@ def form_bearertoken_file(
     if sign:
         sign_bearer(
             shell=shell,
-            wallet=wallet,
+            wallet_path=wif,
             eacl_rules_file_from=file_path,
             eacl_rules_file_to=file_path,
             json=True,

@@ -158,19 +165,27 @@ def eacl_rules(access: str, verbs: list, user: str) -> list[str]:
     return rules


-def sign_bearer(shell: Shell, wallet: WalletInfo, eacl_rules_file_from: str, eacl_rules_file_to: str, json: bool) -> None:
-    frostfscli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
-    frostfscli.util.sign_bearer_token(eacl_rules_file_from, eacl_rules_file_to, json=json)
+def sign_bearer(
+    shell: Shell, wallet_path: str, eacl_rules_file_from: str, eacl_rules_file_to: str, json: bool
+) -> None:
+    frostfscli = FrostfsCli(
+        shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG
+    )
+    frostfscli.util.sign_bearer_token(
+        wallet=wallet_path, from_file=eacl_rules_file_from, to_file=eacl_rules_file_to, json=json
+    )


-@reporter.step("Wait for eACL cache expired")
+@reporter.step_deco("Wait for eACL cache expired")
 def wait_for_cache_expired():
     sleep(FROSTFS_CONTRACT_CACHE_TIMEOUT)
     return


-@reporter.step("Return bearer token in base64 to caller")
-def bearer_token_base64_from_file(bearer_path: str) -> str:
+@reporter.step_deco("Return bearer token in base64 to caller")
+def bearer_token_base64_from_file(
+    bearer_path: str,
+) -> str:
     with open(bearer_path, "rb") as file:
         signed = file.read()
     return base64.b64encode(signed).decode("utf-8")
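Net effect of this hunk at call sites, sketched (the wallet object shape follows the WalletInfo usage above; the module path is assumed to be frostfs_testlib.steps.acl):

    from frostfs_testlib.steps.acl import get_eacl

    def fetch_eacl(wallet, cid, shell, endpoint):
        # `-` side: pass the WalletInfo; its config_path supplies the password config
        return get_eacl(wallet, cid, shell, endpoint)

    def fetch_eacl_old(wallet, cid, shell, endpoint):
        # `+` side: pass the wallet *path*; DEFAULT_WALLET_CONFIG supplies the config
        return get_eacl(wallet.path, cid, shell, endpoint)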

View file

@ -5,11 +5,10 @@ from dataclasses import dataclass
from time import sleep from time import sleep
from typing import Optional, Union from typing import Optional, Union
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.plugins import load_plugin from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
from frostfs_testlib.s3.interfaces import BucketContainerResolver from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.object import put_object, put_object_to_random_node from frostfs_testlib.steps.cli.object import put_object, put_object_to_random_node
from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.cluster import Cluster, ClusterNode
@ -18,13 +17,14 @@ from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.utils import json_utils from frostfs_testlib.utils import json_utils
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@dataclass @dataclass
class StorageContainerInfo: class StorageContainerInfo:
id: str id: str
wallet: WalletInfo wallet_file: WalletInfo
class StorageContainer: class StorageContainer:
@ -41,10 +41,13 @@ class StorageContainer:
def get_id(self) -> str: def get_id(self) -> str:
return self.storage_container_info.id return self.storage_container_info.id
def get_wallet(self) -> str: def get_wallet_path(self) -> str:
return self.storage_container_info.wallet return self.storage_container_info.wallet_file.path
@reporter.step("Generate new object and put in container") def get_wallet_config_path(self) -> str:
return self.storage_container_info.wallet_file.config_path
@reporter.step_deco("Generate new object and put in container")
def generate_object( def generate_object(
self, self,
size: int, size: int,
@ -57,34 +60,37 @@ class StorageContainer:
file_hash = get_file_hash(file_path) file_hash = get_file_hash(file_path)
container_id = self.get_id() container_id = self.get_id()
wallet = self.get_wallet() wallet_path = self.get_wallet_path()
wallet_config = self.get_wallet_config_path()
with reporter.step(f"Put object with size {size} to container {container_id}"): with reporter.step(f"Put object with size {size} to container {container_id}"):
if endpoint: if endpoint:
object_id = put_object( object_id = put_object(
wallet=wallet, wallet=wallet_path,
path=file_path, path=file_path,
cid=container_id, cid=container_id,
expire_at=expire_at, expire_at=expire_at,
shell=self.shell, shell=self.shell,
endpoint=endpoint, endpoint=endpoint,
bearer=bearer_token, bearer=bearer_token,
wallet_config=wallet_config,
) )
else: else:
object_id = put_object_to_random_node( object_id = put_object_to_random_node(
wallet=wallet, wallet=wallet_path,
path=file_path, path=file_path,
cid=container_id, cid=container_id,
expire_at=expire_at, expire_at=expire_at,
shell=self.shell, shell=self.shell,
cluster=self.cluster, cluster=self.cluster,
bearer=bearer_token, bearer=bearer_token,
wallet_config=wallet_config,
) )
storage_object = StorageObjectInfo( storage_object = StorageObjectInfo(
container_id, container_id,
object_id, object_id,
size=size, size=size,
wallet=wallet, wallet_file_path=wallet_path,
file_path=file_path, file_path=file_path,
file_hash=file_hash, file_hash=file_hash,
) )
@ -95,18 +101,18 @@ class StorageContainer:
DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X" DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X" SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X"
REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X" REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X"
DEFAULT_EC_PLACEMENT_RULE = "EC 3.1"
@reporter.step("Create Container") @reporter.step_deco("Create Container")
def create_container( def create_container(
wallet: WalletInfo, wallet: str,
shell: Shell, shell: Shell,
endpoint: str, endpoint: str,
rule: str = DEFAULT_PLACEMENT_RULE, rule: str = DEFAULT_PLACEMENT_RULE,
basic_acl: str = "", basic_acl: str = "",
attributes: Optional[dict] = None, attributes: Optional[dict] = None,
session_token: str = "", session_token: str = "",
session_wallet: str = "",
name: Optional[str] = None, name: Optional[str] = None,
options: Optional[dict] = None, options: Optional[dict] = None,
await_mode: bool = True, await_mode: bool = True,
@ -117,7 +123,7 @@ def create_container(
A wrapper for `frostfs-cli container create` call. A wrapper for `frostfs-cli container create` call.
Args: Args:
wallet (WalletInfo): a wallet on whose behalf a container is created wallet (str): a wallet on whose behalf a container is created
rule (optional, str): placement rule for container rule (optional, str): placement rule for container
basic_acl (optional, str): an ACL for container, will be basic_acl (optional, str): an ACL for container, will be
appended to `--basic-acl` key appended to `--basic-acl` key
@ -139,9 +145,10 @@ def create_container(
(str): CID of the created container (str): CID of the created container
""" """
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
result = cli.container.create( result = cli.container.create(
rpc_endpoint=endpoint, rpc_endpoint=endpoint,
wallet=session_wallet if session_wallet else wallet,
policy=rule, policy=rule,
basic_acl=basic_acl, basic_acl=basic_acl,
attributes=attributes, attributes=attributes,
@ -162,17 +169,23 @@ def create_container(
return cid return cid
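A hedged sketch of driving the updated create_container signature (endpoint, wallet path and container name are illustrative values, not from this diff):

cid = create_container(
    wallet="/path/to/wallet.json",
    shell=shell,
    endpoint="node1.example:8080",  # hypothetical RPC endpoint
    rule=DEFAULT_PLACEMENT_RULE,
    name="test-container",
)
wait_for_container_creation("/path/to/wallet.json", cid, shell, "node1.example:8080")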
def wait_for_container_creation(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str, attempts: int = 15, sleep_interval: int = 1): def wait_for_container_creation(
wallet: str, cid: str, shell: Shell, endpoint: str, attempts: int = 15, sleep_interval: int = 1
):
for _ in range(attempts): for _ in range(attempts):
containers = list_containers(wallet, shell, endpoint) containers = list_containers(wallet, shell, endpoint)
if cid in containers: if cid in containers:
return return
logger.info(f"There is no {cid} in {containers} yet; sleep {sleep_interval} and continue") logger.info(f"There is no {cid} in {containers} yet; sleep {sleep_interval} and continue")
sleep(sleep_interval) sleep(sleep_interval)
raise RuntimeError(f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting") raise RuntimeError(
f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting"
)
def wait_for_container_deletion(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str, attempts: int = 30, sleep_interval: int = 1): def wait_for_container_deletion(
wallet: str, cid: str, shell: Shell, endpoint: str, attempts: int = 30, sleep_interval: int = 1
):
for _ in range(attempts): for _ in range(attempts):
try: try:
get_container(wallet, cid, shell=shell, endpoint=endpoint) get_container(wallet, cid, shell=shell, endpoint=endpoint)
@ -185,27 +198,30 @@ def wait_for_container_deletion(wallet: WalletInfo, cid: str, shell: Shell, endp
raise AssertionError(f"Expected container deleted during {attempts * sleep_interval} sec.") raise AssertionError(f"Expected container deleted during {attempts * sleep_interval} sec.")
@reporter.step("List Containers") @reporter.step_deco("List Containers")
def list_containers(wallet: WalletInfo, shell: Shell, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT) -> list[str]: def list_containers(
wallet: str, shell: Shell, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT
) -> list[str]:
""" """
A wrapper for `frostfs-cli container list` call. It returns all the A wrapper for `frostfs-cli container list` call. It returns all the
available containers for the given wallet. available containers for the given wallet.
Args: Args:
wallet (WalletInfo): a wallet on whose behalf we list the containers wallet (str): a wallet on whose behalf we list the containers
shell: executor for cli command shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
timeout: Timeout for the operation. timeout: Timeout for the operation.
Returns: Returns:
(list): list of containers (list): list of containers
""" """
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
result = cli.container.list(rpc_endpoint=endpoint, timeout=timeout) result = cli.container.list(rpc_endpoint=endpoint, wallet=wallet, timeout=timeout)
logger.info(f"Containers: \n{result}")
return result.stdout.split() return result.stdout.split()
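Since list_containers simply splits stdout into CIDs, membership checks stay trivial; a sketch (all handles illustrative):

containers = list_containers(wallet, shell, endpoint)
assert cid in containers, f"Container {cid} is missing from {containers}"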
@reporter.step("List Objects in container") @reporter.step_deco("List Objects in container")
def list_objects( def list_objects(
wallet: WalletInfo, wallet: str,
shell: Shell, shell: Shell,
container_id: str, container_id: str,
endpoint: str, endpoint: str,
@ -215,7 +231,7 @@ def list_objects(
A wrapper for `frostfs-cli container list-objects` call. It returns all the A wrapper for `frostfs-cli container list-objects` call. It returns all the
available objects in container. available objects in container.
Args: Args:
wallet (WalletInfo): a wallet on whose behalf we list the containers objects wallet (str): a wallet on whose behalf we list the containers objects
shell: executor for cli command shell: executor for cli command
container_id: cid of container container_id: cid of container
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
@ -223,15 +239,17 @@ def list_objects(
Returns: Returns:
(list): list of objects (list): list of objects
""" """
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
result = cli.container.list_objects(rpc_endpoint=endpoint, cid=container_id, timeout=timeout) result = cli.container.list_objects(
rpc_endpoint=endpoint, wallet=wallet, cid=container_id, timeout=timeout
)
logger.info(f"Container objects: \n{result}") logger.info(f"Container objects: \n{result}")
return result.stdout.split() return result.stdout.split()
@reporter.step("Get Container") @reporter.step_deco("Get Container")
def get_container( def get_container(
wallet: WalletInfo, wallet: str,
cid: str, cid: str,
shell: Shell, shell: Shell,
endpoint: str, endpoint: str,
@ -242,7 +260,7 @@ def get_container(
A wrapper for `frostfs-cli container get` call. It extracts container's A wrapper for `frostfs-cli container get` call. It extracts container's
attributes and rearranges them into a more compact view. attributes and rearranges them into a more compact view.
Args: Args:
wallet (WalletInfo): path to a wallet on whose behalf we get the container wallet (str): path to a wallet on whose behalf we get the container
cid (str): ID of the container to get cid (str): ID of the container to get
shell: executor for cli command shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
@ -252,8 +270,10 @@ def get_container(
(dict, str): dict of container attributes (dict, str): dict of container attributes
""" """
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
result = cli.container.get(rpc_endpoint=endpoint, cid=cid, json_mode=json_mode, timeout=timeout) result = cli.container.get(
rpc_endpoint=endpoint, wallet=wallet, cid=cid, json_mode=json_mode, timeout=timeout
)
if not json_mode: if not json_mode:
return result.stdout return result.stdout
@ -267,37 +287,40 @@ def get_container(
return container_info return container_info
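With json_mode=True the result is a dict with a flattened "attributes" mapping, which is exactly what search_container_by_name further down relies on; a reading sketch (the attribute name is illustrative):

info = get_container(wallet, cid, shell, endpoint, json_mode=True)
name = info.get("attributes", {}).get("Name")  # same access pattern as in search_container_by_name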
@reporter.step("Delete Container") @reporter.step_deco("Delete Container")
# TODO: make the error message about a non-found container more user-friendly # TODO: make the error message about a non-found container more user-friendly
def delete_container( def delete_container(
wallet: WalletInfo, wallet: str,
cid: str, cid: str,
shell: Shell, shell: Shell,
endpoint: str, endpoint: str,
force: bool = False, force: bool = False,
session_token: Optional[str] = None, session_token: Optional[str] = None,
await_mode: bool = False, await_mode: bool = False,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> None: ) -> None:
""" """
A wrapper for `frostfs-cli container delete` call. A wrapper for `frostfs-cli container delete` call.
Args: Args:
await_mode: Block execution until container is removed. wallet (str): path to a wallet on whose behalf we delete the container
wallet (WalletInfo): path to a wallet on whose behalf we delete the container
cid (str): ID of the container to delete cid (str): ID of the container to delete
shell: executor for cli command shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
force (bool): do not check whether container contains locks and remove immediately force (bool): do not check whether container contains locks and remove immediately
session_token: a path to session token file session_token: a path to session token file
timeout: Timeout for the operation.
This function doesn't return anything. This function doesn't return anything.
""" """
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
cli.container.delete( cli.container.delete(
wallet=wallet,
cid=cid, cid=cid,
rpc_endpoint=endpoint, rpc_endpoint=endpoint,
force=force, force=force,
session=session_token, session=session_token,
await_mode=await_mode, await_mode=await_mode,
timeout=timeout,
) )
@ -327,17 +350,29 @@ def _parse_cid(output: str) -> str:
return splitted[1] return splitted[1]
@reporter.step("Search for nodes with a container") @reporter.step_deco("Search container by name")
def search_container_by_name(wallet: str, name: str, shell: Shell, endpoint: str):
list_cids = list_containers(wallet, shell, endpoint)
for cid in list_cids:
cont_info = get_container(wallet, cid, shell, endpoint, True)
if cont_info.get("attributes", {}).get("Name", None) == name:
return cid
return None
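A usage sketch for the new name lookup (container name illustrative):

cid = search_container_by_name(wallet, "my-container", shell, endpoint)
assert cid is not None, "no container carries attribute Name=my-container"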
@reporter.step_deco("Search for nodes with a container")
def search_nodes_with_container( def search_nodes_with_container(
wallet: WalletInfo, wallet: str,
cid: str, cid: str,
shell: Shell, shell: Shell,
endpoint: str, endpoint: str,
cluster: Cluster, cluster: Cluster,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> list[ClusterNode]: ) -> list[ClusterNode]:
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
result = cli.container.search_node(rpc_endpoint=endpoint, cid=cid, timeout=timeout) result = cli.container.search_node(
rpc_endpoint=endpoint, wallet=wallet, cid=cid, timeout=timeout
)
pattern = r"[0-9]+(?:\.[0-9]+){3}" pattern = r"[0-9]+(?:\.[0-9]+){3}"
nodes_ip = list(set(re.findall(pattern, result.stdout))) nodes_ip = list(set(re.findall(pattern, result.stdout)))

View file

@ -5,25 +5,22 @@ import re
import uuid import uuid
from typing import Any, Optional from typing import Any, Optional
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.cli.neogo import NeoGo from frostfs_testlib.cli.neogo import NeoGo
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE
from frostfs_testlib.resources.common import ASSETS_DIR from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing import wait_for_success
from frostfs_testlib.utils import json_utils from frostfs_testlib.utils import json_utils
from frostfs_testlib.utils.cli_utils import parse_netmap_output
from frostfs_testlib.utils.file_utils import TestFile
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
reporter = get_reporter()
@reporter.step("Get object from random node") @reporter.step_deco("Get object from random node")
def get_object_from_random_node( def get_object_from_random_node(
wallet: WalletInfo, wallet: str,
cid: str, cid: str,
oid: str, oid: str,
shell: Shell, shell: Shell,
@ -31,6 +28,7 @@ def get_object_from_random_node(
bearer: Optional[str] = None, bearer: Optional[str] = None,
write_object: Optional[str] = None, write_object: Optional[str] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
wallet_config: Optional[str] = None,
no_progress: bool = True, no_progress: bool = True,
session: Optional[str] = None, session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
@ -46,6 +44,7 @@ def get_object_from_random_node(
cluster: cluster object cluster: cluster object
bearer (optional, str): path to Bearer Token file, appends to `--bearer` key bearer (optional, str): path to Bearer Token file, appends to `--bearer` key
write_object (optional, str): path to downloaded file, appends to `--file` key write_object (optional, str): path to downloaded file, appends to `--file` key
wallet_config(optional, str): path to the wallet config
no_progress(optional, bool): do not show progress bar no_progress(optional, bool): do not show progress bar
xhdr (optional, dict): Request X-Headers in form of Key=Value xhdr (optional, dict): Request X-Headers in form of Key=Value
session (optional, dict): path to a JSON-encoded container session token session (optional, dict): path to a JSON-encoded container session token
@ -63,15 +62,16 @@ def get_object_from_random_node(
bearer, bearer,
write_object, write_object,
xhdr, xhdr,
wallet_config,
no_progress, no_progress,
session, session,
timeout, timeout,
) )
@reporter.step("Get object from {endpoint}") @reporter.step_deco("Get object from {endpoint}")
def get_object( def get_object(
wallet: WalletInfo, wallet: str,
cid: str, cid: str,
oid: str, oid: str,
shell: Shell, shell: Shell,
@ -79,21 +79,23 @@ def get_object(
bearer: Optional[str] = None, bearer: Optional[str] = None,
write_object: Optional[str] = None, write_object: Optional[str] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
wallet_config: Optional[str] = None,
no_progress: bool = True, no_progress: bool = True,
session: Optional[str] = None, session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> TestFile: ) -> str:
""" """
GET from FrostFS. GET from FrostFS.
Args: Args:
wallet (WalletInfo): wallet on whose behalf GET is done wallet (str): wallet on whose behalf GET is done
cid (str): ID of Container where we get the Object from cid (str): ID of Container where we get the Object from
oid (str): Object ID oid (str): Object ID
shell: executor for cli command shell: executor for cli command
bearer: path to Bearer Token file, appends to `--bearer` key bearer: path to Bearer Token file, appends to `--bearer` key
write_object: path to downloaded file, appends to `--file` key write_object: path to downloaded file, appends to `--file` key
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
wallet_config(optional, str): path to the wallet config
no_progress(optional, bool): do not show progress bar no_progress(optional, bool): do not show progress bar
xhdr (optional, dict): Request X-Headers in form of Key=Value xhdr (optional, dict): Request X-Headers in form of Key=Value
session (optional, dict): path to a JSON-encoded container session token session (optional, dict): path to a JSON-encoded container session token
@ -104,14 +106,15 @@ def get_object(
if not write_object: if not write_object:
write_object = str(uuid.uuid4()) write_object = str(uuid.uuid4())
test_file = TestFile(os.path.join(ASSETS_DIR, write_object)) file_path = os.path.join(ASSETS_DIR, write_object)
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
cli.object.get( cli.object.get(
rpc_endpoint=endpoint, rpc_endpoint=endpoint,
wallet=wallet,
cid=cid, cid=cid,
oid=oid, oid=oid,
file=test_file, file=file_path,
bearer=bearer, bearer=bearer,
no_progress=no_progress, no_progress=no_progress,
xhdr=xhdr, xhdr=xhdr,
@ -119,18 +122,19 @@ def get_object(
timeout=timeout, timeout=timeout,
) )
return test_file return file_path
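A sketch of a GET round-trip check; get_file_hash is the helper imported from frostfs_testlib.utils.file_utils elsewhere in this diff, and expected_hash is a placeholder:

from frostfs_testlib.utils.file_utils import get_file_hash

downloaded = get_object(wallet, cid, oid, shell, endpoint=endpoint)
assert get_file_hash(downloaded) == expected_hash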
@reporter.step("Get Range Hash from {endpoint}") @reporter.step_deco("Get Range Hash from {endpoint}")
def get_range_hash( def get_range_hash(
wallet: WalletInfo, wallet: str,
cid: str, cid: str,
oid: str, oid: str,
range_cut: str, range_cut: str,
shell: Shell, shell: Shell,
endpoint: str, endpoint: str,
bearer: Optional[str] = None, bearer: Optional[str] = None,
wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
session: Optional[str] = None, session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
@ -147,15 +151,17 @@ def get_range_hash(
range_cut: Range to take hash from in the form offset1:length1,..., range_cut: Range to take hash from in the form offset1:length1,...,
value to pass to the `--range` parameter value to pass to the `--range` parameter
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
wallet_config: path to the wallet config
xhdr: Request X-Headers in form of Key=Values xhdr: Request X-Headers in form of Key=Values
session: Filepath to a JSON- or binary-encoded token of the object RANGEHASH session. session: Filepath to a JSON- or binary-encoded token of the object RANGEHASH session.
timeout: Timeout for the operation. timeout: Timeout for the operation.
Returns: Returns:
(str): requested range hash (str): requested range hash
""" """
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
result = cli.object.hash( result = cli.object.hash(
rpc_endpoint=endpoint, rpc_endpoint=endpoint,
wallet=wallet,
cid=cid, cid=cid,
oid=oid, oid=oid,
range=range_cut, range=range_cut,
@ -169,17 +175,17 @@ def get_range_hash(
return result.stdout.split(":")[1].strip() return result.stdout.split(":")[1].strip()
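The helper slices the CLI output on ':' and returns the hash part; a minimal sketch (range string illustrative, format is offset:length):

range_hash = get_range_hash(wallet, cid, oid, "0:64", shell, endpoint)
assert len(range_hash) > 0  # digest of the first 64 bytes of the object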
@reporter.step("Put object to random node") @reporter.step_deco("Put object to random node")
def put_object_to_random_node( def put_object_to_random_node(
wallet: WalletInfo, wallet: str,
path: str, path: str,
cid: str, cid: str,
shell: Shell, shell: Shell,
cluster: Cluster, cluster: Cluster,
bearer: Optional[str] = None, bearer: Optional[str] = None,
copies_number: Optional[int] = None,
attributes: Optional[dict] = None, attributes: Optional[dict] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
wallet_config: Optional[str] = None,
expire_at: Optional[int] = None, expire_at: Optional[int] = None,
no_progress: bool = True, no_progress: bool = True,
session: Optional[str] = None, session: Optional[str] = None,
@ -195,9 +201,9 @@ def put_object_to_random_node(
shell: executor for cli command shell: executor for cli command
cluster: cluster under test cluster: cluster under test
bearer: path to Bearer Token file, appends to `--bearer` key bearer: path to Bearer Token file, appends to `--bearer` key
copies_number: Number of copies of the object to store within the RPC call
attributes: User attributes in form of Key1=Value1,Key2=Value2 attributes: User attributes in form of Key1=Value1,Key2=Value2
cluster: cluster under test cluster: cluster under test
wallet_config: path to the wallet config
no_progress: do not show progress bar no_progress: do not show progress bar
expire_at: Last epoch in the life of the object expire_at: Last epoch in the life of the object
xhdr: Request X-Headers in form of Key=Value xhdr: Request X-Headers in form of Key=Value
@ -215,9 +221,9 @@ def put_object_to_random_node(
shell, shell,
endpoint, endpoint,
bearer, bearer,
copies_number,
attributes, attributes,
xhdr, xhdr,
wallet_config,
expire_at, expire_at,
no_progress, no_progress,
session, session,
@ -225,17 +231,17 @@ def put_object_to_random_node(
) )
@reporter.step("Put object at {endpoint} in container {cid}") @reporter.step_deco("Put object at {endpoint} in container {cid}")
def put_object( def put_object(
wallet: WalletInfo, wallet: str,
path: str, path: str,
cid: str, cid: str,
shell: Shell, shell: Shell,
endpoint: str, endpoint: str,
bearer: Optional[str] = None, bearer: Optional[str] = None,
copies_number: Optional[int] = None,
attributes: Optional[dict] = None, attributes: Optional[dict] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
wallet_config: Optional[str] = None,
expire_at: Optional[int] = None, expire_at: Optional[int] = None,
no_progress: bool = True, no_progress: bool = True,
session: Optional[str] = None, session: Optional[str] = None,
@ -250,9 +256,9 @@ def put_object(
cid: ID of Container where we get the Object from cid: ID of Container where we get the Object from
shell: executor for cli command shell: executor for cli command
bearer: path to Bearer Token file, appends to `--bearer` key bearer: path to Bearer Token file, appends to `--bearer` key
copies_number: Number of copies of the object to store within the RPC call
attributes: User attributes in form of Key1=Value1,Key2=Value2 attributes: User attributes in form of Key1=Value1,Key2=Value2
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
wallet_config: path to the wallet config
no_progress: do not show progress bar no_progress: do not show progress bar
expire_at: Last epoch in the life of the object expire_at: Last epoch in the life of the object
xhdr: Request X-Headers in form of Key=Value xhdr: Request X-Headers in form of Key=Value
@ -262,14 +268,14 @@ def put_object(
(str): ID of uploaded Object (str): ID of uploaded Object
""" """
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
result = cli.object.put( result = cli.object.put(
rpc_endpoint=endpoint, rpc_endpoint=endpoint,
wallet=wallet,
file=path, file=path,
cid=cid, cid=cid,
attributes=attributes, attributes=attributes,
bearer=bearer, bearer=bearer,
copies_number=copies_number,
expire_at=expire_at, expire_at=expire_at,
no_progress=no_progress, no_progress=no_progress,
xhdr=xhdr, xhdr=xhdr,
@ -283,14 +289,15 @@ def put_object(
return oid.strip() return oid.strip()
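A direct-put sketch against a fixed endpoint, showing the wallet_config fallback used in the constructor call above (paths illustrative):

oid = put_object(
    wallet="/path/to/wallet.json",
    path="/tmp/payload.bin",
    cid=cid,
    shell=shell,
    endpoint=endpoint,
    wallet_config=None,  # None falls back to DEFAULT_WALLET_CONFIG above
)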
@reporter.step("Delete object {cid}/{oid} from {endpoint}") @reporter.step_deco("Delete object {cid}/{oid} from {endpoint}")
def delete_object( def delete_object(
wallet: WalletInfo, wallet: str,
cid: str, cid: str,
oid: str, oid: str,
shell: Shell, shell: Shell,
endpoint: str, endpoint: str,
bearer: str = "", bearer: str = "",
wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
session: Optional[str] = None, session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
@ -305,6 +312,7 @@ def delete_object(
shell: executor for cli command shell: executor for cli command
bearer: path to Bearer Token file, appends to `--bearer` key bearer: path to Bearer Token file, appends to `--bearer` key
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
wallet_config: path to the wallet config
xhdr: Request X-Headers in form of Key=Value xhdr: Request X-Headers in form of Key=Value
session: path to a JSON-encoded container session token session: path to a JSON-encoded container session token
timeout: Timeout for the operation. timeout: Timeout for the operation.
@ -312,9 +320,10 @@ def delete_object(
(str): Tombstone ID (str): Tombstone ID
""" """
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
result = cli.object.delete( result = cli.object.delete(
rpc_endpoint=endpoint, rpc_endpoint=endpoint,
wallet=wallet,
cid=cid, cid=cid,
oid=oid, oid=oid,
bearer=bearer, bearer=bearer,
@ -328,14 +337,15 @@ def delete_object(
return tombstone.strip() return tombstone.strip()
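delete_object hands back the tombstone ID parsed from the CLI output; a sketch (handles illustrative):

tombstone = delete_object(wallet, cid, oid, shell, endpoint)
# the tombstone is itself HEAD-able; head_object() routes objectType == "TOMBSTONE"
# through json_utils.decode_tombstone(), as shown further down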
@reporter.step("Get Range") @reporter.step_deco("Get Range")
def get_range( def get_range(
wallet: WalletInfo, wallet: str,
cid: str, cid: str,
oid: str, oid: str,
range_cut: str, range_cut: str,
shell: Shell, shell: Shell,
endpoint: str, endpoint: str,
wallet_config: Optional[str] = None,
bearer: str = "", bearer: str = "",
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
session: Optional[str] = None, session: Optional[str] = None,
@ -352,35 +362,37 @@ def get_range(
shell: executor for cli command shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
bearer: path to Bearer Token file, appends to `--bearer` key bearer: path to Bearer Token file, appends to `--bearer` key
wallet_config: path to the wallet config
xhdr: Request X-Headers in form of Key=Value xhdr: Request X-Headers in form of Key=Value
session: path to a JSON-encoded container session token session: path to a JSON-encoded container session token
timeout: Timeout for the operation. timeout: Timeout for the operation.
Returns: Returns:
(str, bytes) - path to the file with range content and content of this file as bytes (str, bytes) - path to the file with range content and content of this file as bytes
""" """
test_file = TestFile(os.path.join(ASSETS_DIR, str(uuid.uuid4()))) range_file_path = os.path.join(ASSETS_DIR, str(uuid.uuid4()))
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
cli.object.range( cli.object.range(
rpc_endpoint=endpoint, rpc_endpoint=endpoint,
wallet=wallet,
cid=cid, cid=cid,
oid=oid, oid=oid,
range=range_cut, range=range_cut,
file=test_file, file=range_file_path,
bearer=bearer, bearer=bearer,
xhdr=xhdr, xhdr=xhdr,
session=session, session=session,
timeout=timeout, timeout=timeout,
) )
with open(test_file, "rb") as file: with open(range_file_path, "rb") as file:
content = file.read() content = file.read()
return test_file, content return range_file_path, content
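A range-read sketch with a length check (range string illustrative; format is offset:length):

path, content = get_range(wallet, cid, oid, "0:16", shell, endpoint)
assert len(content) == 16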
@reporter.step("Lock Object") @reporter.step_deco("Lock Object")
def lock_object( def lock_object(
wallet: WalletInfo, wallet: str,
cid: str, cid: str,
oid: str, oid: str,
shell: Shell, shell: Shell,
@ -390,6 +402,7 @@ def lock_object(
address: Optional[str] = None, address: Optional[str] = None,
bearer: Optional[str] = None, bearer: Optional[str] = None,
session: Optional[str] = None, session: Optional[str] = None,
wallet_config: Optional[str] = None,
ttl: Optional[int] = None, ttl: Optional[int] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
@ -416,12 +429,13 @@ def lock_object(
Lock object ID Lock object ID
""" """
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
result = cli.object.lock( result = cli.object.lock(
rpc_endpoint=endpoint, rpc_endpoint=endpoint,
lifetime=lifetime, lifetime=lifetime,
expire_at=expire_at, expire_at=expire_at,
address=address, address=address,
wallet=wallet,
cid=cid, cid=cid,
oid=oid, oid=oid,
bearer=bearer, bearer=bearer,
@ -437,15 +451,16 @@ def lock_object(
return oid.strip() return oid.strip()
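A sketch of taking a short-lived lock (the lifetime value is illustrative; lifetime/expire_at are the parameters forwarded to the CLI above):

lock_id = lock_object(wallet, cid, oid, shell, endpoint, lifetime=2)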
@reporter.step("Search object") @reporter.step_deco("Search object")
def search_object( def search_object(
wallet: WalletInfo, wallet: str,
cid: str, cid: str,
shell: Shell, shell: Shell,
endpoint: str, endpoint: str,
bearer: str = "", bearer: str = "",
filters: Optional[dict] = None, filters: Optional[dict] = None,
expected_objects_list: Optional[list] = None, expected_objects_list: Optional[list] = None,
wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
session: Optional[str] = None, session: Optional[str] = None,
phy: bool = False, phy: bool = False,
@ -463,6 +478,7 @@ def search_object(
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
filters: key=value pairs to filter Objects filters: key=value pairs to filter Objects
expected_objects_list: a list of ObjectIDs to compare found Objects with expected_objects_list: a list of ObjectIDs to compare found Objects with
wallet_config: path to the wallet config
xhdr: Request X-Headers in form of Key=Value xhdr: Request X-Headers in form of Key=Value
session: path to a JSON-encoded container session token session: path to a JSON-encoded container session token
phy: Search physically stored objects. phy: Search physically stored objects.
@ -473,13 +489,16 @@ def search_object(
list of found ObjectIDs list of found ObjectIDs
""" """
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
result = cli.object.search( result = cli.object.search(
rpc_endpoint=endpoint, rpc_endpoint=endpoint,
wallet=wallet,
cid=cid, cid=cid,
bearer=bearer, bearer=bearer,
xhdr=xhdr, xhdr=xhdr,
filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()] if filters else None, filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()]
if filters
else None,
session=session, session=session,
phy=phy, phy=phy,
root=root, root=root,
@ -490,18 +509,25 @@ def search_object(
if expected_objects_list: if expected_objects_list:
if sorted(found_objects) == sorted(expected_objects_list): if sorted(found_objects) == sorted(expected_objects_list):
logger.info(f"Found objects list '{found_objects}' " f"is equal to expected list '{expected_objects_list}'") logger.info(
f"Found objects list '{found_objects}' "
f"is equal to expected list '{expected_objects_list}'"
)
else: else:
logger.warning(f"Found object list {found_objects} " f"is not equal to expected list '{expected_objects_list}'") logger.warning(
f"Found object list {found_objects} "
f"is not equal to expected list '{expected_objects_list}'"
)
return found_objects return found_objects
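Filters are rendered into `key EQ value` clauses, so attribute search reads naturally; a sketch (attribute illustrative):

found = search_object(
    wallet, cid, shell, endpoint,
    filters={"FileName": "payload.bin"},  # becomes "FileName EQ payload.bin"
    expected_objects_list=[oid],          # exercises the comparison logging above
)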
@reporter.step("Get netmap netinfo") @reporter.step_deco("Get netmap netinfo")
def get_netmap_netinfo( def get_netmap_netinfo(
wallet: WalletInfo, wallet: str,
shell: Shell, shell: Shell,
endpoint: str, endpoint: str,
wallet_config: Optional[str] = None,
address: Optional[str] = None, address: Optional[str] = None,
ttl: Optional[int] = None, ttl: Optional[int] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
@ -511,7 +537,7 @@ def get_netmap_netinfo(
Get netmap netinfo output from node Get netmap netinfo output from node
Args: Args:
wallet (WalletInfo): wallet on whose behalf request is done wallet (str): wallet on whose behalf request is done
shell: executor for cli command shell: executor for cli command
endpoint (optional, str): FrostFS endpoint to send request to, appends to `--rpc-endpoint` key endpoint (optional, str): FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
address: Address of wallet account address: Address of wallet account
@ -524,8 +550,9 @@ def get_netmap_netinfo(
(dict): dict of parsed command output (dict): dict of parsed command output
""" """
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
output = cli.netmap.netinfo( output = cli.netmap.netinfo(
wallet=wallet,
rpc_endpoint=endpoint, rpc_endpoint=endpoint,
address=address, address=address,
ttl=ttl, ttl=ttl,
@ -547,9 +574,9 @@ def get_netmap_netinfo(
return settings return settings
@reporter.step("Head object") @reporter.step_deco("Head object")
def head_object( def head_object(
wallet: WalletInfo, wallet: str,
cid: str, cid: str,
oid: str, oid: str,
shell: Shell, shell: Shell,
@ -559,6 +586,7 @@ def head_object(
json_output: bool = True, json_output: bool = True,
is_raw: bool = False, is_raw: bool = False,
is_direct: bool = False, is_direct: bool = False,
wallet_config: Optional[str] = None,
session: Optional[str] = None, session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
): ):
@ -566,7 +594,7 @@ def head_object(
HEAD an Object. HEAD an Object.
Args: Args:
wallet (WalletInfo): wallet on whose behalf HEAD is done wallet (str): wallet on whose behalf HEAD is done
cid (str): ID of Container where we get the Object from cid (str): ID of Container where we get the Object from
oid (str): ObjectID to HEAD oid (str): ObjectID to HEAD
shell: executor for cli command shell: executor for cli command
@ -578,6 +606,7 @@ def head_object(
turns into `--raw` key turns into `--raw` key
is_direct(optional, bool): send request directly to the node or not; this flag is_direct(optional, bool): send request directly to the node or not; this flag
turns into `--ttl 1` key turns into `--ttl 1` key
wallet_config(optional, str): path to the wallet config
xhdr (optional, dict): Request X-Headers in form of Key=Value xhdr (optional, dict): Request X-Headers in form of Key=Value
session (optional, dict): path to a JSON-encoded container session token session (optional, dict): path to a JSON-encoded container session token
timeout: Timeout for the operation. timeout: Timeout for the operation.
@ -588,9 +617,10 @@ def head_object(
(str): HEAD response as a plain text (str): HEAD response as a plain text
""" """
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
result = cli.object.head( result = cli.object.head(
rpc_endpoint=endpoint, rpc_endpoint=endpoint,
wallet=wallet,
cid=cid, cid=cid,
oid=oid, oid=oid,
bearer=bearer, bearer=bearer,
@ -616,32 +646,32 @@ def head_object(
fst_line_idx = result.stdout.find("\n") fst_line_idx = result.stdout.find("\n")
decoded = json.loads(result.stdout[fst_line_idx:]) decoded = json.loads(result.stdout[fst_line_idx:])
# If response is an EC object header, it has `chunks` key
if "chunks" in decoded.keys():
logger.info("decoding ec chunks")
return decoded["chunks"]
# If response is Complex Object header, it has `splitId` key # If response is Complex Object header, it has `splitId` key
if "splitId" in decoded.keys(): if "splitId" in decoded.keys():
logger.info("decoding split header")
return json_utils.decode_split_header(decoded) return json_utils.decode_split_header(decoded)
# If response is Last or Linking Object header, # If response is Last or Linking Object header,
# it has `header` dictionary and non-null `split` dictionary # it has `header` dictionary and non-null `split` dictionary
if "split" in decoded["header"].keys(): if "split" in decoded["header"].keys():
if decoded["header"]["split"]: if decoded["header"]["split"]:
logger.info("decoding linking object")
return json_utils.decode_linking_object(decoded) return json_utils.decode_linking_object(decoded)
if decoded["header"]["objectType"] == "STORAGE_GROUP": if decoded["header"]["objectType"] == "STORAGE_GROUP":
logger.info("decoding storage group")
return json_utils.decode_storage_group(decoded) return json_utils.decode_storage_group(decoded)
if decoded["header"]["objectType"] == "TOMBSTONE": if decoded["header"]["objectType"] == "TOMBSTONE":
logger.info("decoding tombstone")
return json_utils.decode_tombstone(decoded) return json_utils.decode_tombstone(decoded)
logger.info("decoding simple header")
return json_utils.decode_simple_header(decoded) return json_utils.decode_simple_header(decoded)
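Because of the decode ladder above, callers receive different shapes depending on the header type; a sketch of the common case (handles illustrative):

header = head_object(wallet, cid, oid, shell, endpoint)
# a simple object yields json_utils.decode_simple_header() output; split members
# come back via decode_split_header()/decode_linking_object() instead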
@reporter.step("Run neo-go dump-keys") @reporter.step_deco("Run neo-go dump-keys")
def neo_go_dump_keys(shell: Shell, wallet: WalletInfo) -> dict: def neo_go_dump_keys(shell: Shell, wallet: str) -> dict:
""" """
Run neo-go dump keys command Run neo-go dump keys command
@ -665,7 +695,7 @@ def neo_go_dump_keys(shell: Shell, wallet: WalletInfo) -> dict:
return {address_id: wallet_key} return {address_id: wallet_key}
@reporter.step("Run neo-go query height") @reporter.step_deco("Run neo-go query height")
def neo_go_query_height(shell: Shell, endpoint: str) -> dict: def neo_go_query_height(shell: Shell, endpoint: str) -> dict:
""" """
Run neo-go query height command Run neo-go query height command
@ -690,69 +720,8 @@ def neo_go_query_height(shell: Shell, endpoint: str) -> dict:
latest_block = first_line.split(":") latest_block = first_line.split(":")
# taking the second line of the command's output, which contains the validated state # taking the second line of the command's output, which contains the validated state
second_line = output.split("\n")[1] second_line = output.split("\n")[1]
if second_line != "":
validated_state = second_line.split(":") validated_state = second_line.split(":")
return { return {
latest_block[0].replace(":", ""): int(latest_block[1]), latest_block[0].replace(":", ""): int(latest_block[1]),
validated_state[0].replace(":", ""): int(validated_state[1]), validated_state[0].replace(":", ""): int(validated_state[1]),
} }
return {latest_block[0].replace(":", ""): int(latest_block[1])}
@wait_for_success()
@reporter.step("Search object nodes")
def get_object_nodes(
cluster: Cluster,
cid: str,
oid: str,
alive_node: ClusterNode,
bearer: str = "",
xhdr: Optional[dict] = None,
is_direct: bool = False,
verify_presence_all: bool = False,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> list[ClusterNode]:
shell = alive_node.host.get_shell()
endpoint = alive_node.storage_node.get_rpc_endpoint()
wallet = alive_node.storage_node.get_remote_wallet_path()
wallet_config = alive_node.storage_node.get_remote_wallet_config_path()
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config)
response = cli.object.nodes(
rpc_endpoint=endpoint,
cid=cid,
oid=oid,
bearer=bearer,
ttl=1 if is_direct else None,
json=True,
xhdr=xhdr,
timeout=timeout,
verify_presence_all=verify_presence_all,
)
response_json = json.loads(response.stdout)
# Currently, the command will show expected and confirmed nodes.
# And we (currently) count only nodes which are both expected and confirmed
object_nodes_id = {
required_node
for data_object in response_json["data_objects"]
for required_node in data_object["required_nodes"]
if required_node in data_object["confirmed_nodes"]
}
netmap_nodes_list = parse_netmap_output(
cli.netmap.snapshot(
rpc_endpoint=endpoint,
wallet=wallet,
).stdout
)
netmap_nodes = [
netmap_node for object_node in object_nodes_id for netmap_node in netmap_nodes_list if object_node == netmap_node.node_id
]
object_nodes = [
cluster_node for netmap_node in netmap_nodes for cluster_node in cluster.cluster_nodes if netmap_node.node == cluster_node.host_ip
]
return object_nodes
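A sketch of resolving which cluster nodes actually hold an object; cluster.cluster_nodes is the same attribute used in the list comprehension above (fixture names illustrative):

alive = cluster.cluster_nodes[0]
holders = get_object_nodes(cluster, cid, oid, alive_node=alive)
assert len(holders) >= 2  # e.g. under a REP 2 policy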
View file
@ -1,35 +0,0 @@
import logging
from typing import Optional
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.plugins import load_plugin
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
logger = logging.getLogger("NeoLogger")
@reporter.step("Get Tree List")
def get_tree_list(
wallet: WalletInfo,
cid: str,
shell: Shell,
endpoint: str,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> None:
"""
A wrapper for `frostfs-cli tree list` call.
Args:
wallet (WalletInfo): path to a wallet on whose behalf we delete the container
cid (str): ID of the container to delete
shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
timeout: Timeout for the operation.
This function doesn't return anything.
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
cli.tree.list(cid=cid, rpc_endpoint=endpoint, timeout=timeout)
View file
@ -12,14 +12,15 @@
import logging import logging
from typing import Optional, Tuple from typing import Optional, Tuple
from frostfs_testlib import reporter from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.object import head_object from frostfs_testlib.steps.cli.object import head_object
from frostfs_testlib.storage.cluster import Cluster, StorageNode from frostfs_testlib.storage.cluster import Cluster, StorageNode
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@ -44,7 +45,7 @@ def get_storage_object_chunks(
with reporter.step(f"Get complex object chunks (f{storage_object.oid})"): with reporter.step(f"Get complex object chunks (f{storage_object.oid})"):
split_object_id = get_link_object( split_object_id = get_link_object(
storage_object.wallet, storage_object.wallet_file_path,
storage_object.cid, storage_object.cid,
storage_object.oid, storage_object.oid,
shell, shell,
@ -53,7 +54,7 @@ def get_storage_object_chunks(
timeout=timeout, timeout=timeout,
) )
head = head_object( head = head_object(
storage_object.wallet, storage_object.wallet_file_path,
storage_object.cid, storage_object.cid,
split_object_id, split_object_id,
shell, shell,
@ -96,7 +97,7 @@ def get_complex_object_split_ranges(
chunks_ids = get_storage_object_chunks(storage_object, shell, cluster) chunks_ids = get_storage_object_chunks(storage_object, shell, cluster)
for chunk_id in chunks_ids: for chunk_id in chunks_ids:
head = head_object( head = head_object(
storage_object.wallet, storage_object.wallet_file_path,
storage_object.cid, storage_object.cid,
chunk_id, chunk_id,
shell, shell,
@ -112,14 +113,15 @@ def get_complex_object_split_ranges(
return ranges return ranges
@reporter.step("Get Link Object") @reporter.step_deco("Get Link Object")
def get_link_object( def get_link_object(
wallet: WalletInfo, wallet: str,
cid: str, cid: str,
oid: str, oid: str,
shell: Shell, shell: Shell,
nodes: list[StorageNode], nodes: list[StorageNode],
bearer: str = "", bearer: str = "",
wallet_config: str = DEFAULT_WALLET_CONFIG,
is_direct: bool = True, is_direct: bool = True,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
): ):
@ -153,6 +155,7 @@ def get_link_object(
is_raw=True, is_raw=True,
is_direct=is_direct, is_direct=is_direct,
bearer=bearer, bearer=bearer,
wallet_config=wallet_config,
timeout=timeout, timeout=timeout,
) )
if resp["link"]: if resp["link"]:
@ -163,9 +166,9 @@ def get_link_object(
return None return None
@reporter.step("Get Last Object") @reporter.step_deco("Get Last Object")
def get_last_object( def get_last_object(
wallet: WalletInfo, wallet: str,
cid: str, cid: str,
oid: str, oid: str,
shell: Shell, shell: Shell,
View file
@ -2,9 +2,15 @@ import logging
from time import sleep from time import sleep
from typing import Optional from typing import Optional
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsAdm, FrostfsCli, NeoGo from frostfs_testlib.cli import FrostfsAdm, FrostfsCli, NeoGo
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import (
CLI_DEFAULT_TIMEOUT,
FROSTFS_ADM_CONFIG_PATH,
FROSTFS_ADM_EXEC,
FROSTFS_CLI_EXEC,
NEOGO_EXECUTABLE,
)
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.payment_neogo import get_contract_hash from frostfs_testlib.steps.payment_neogo import get_contract_hash
@ -13,10 +19,11 @@ from frostfs_testlib.storage.dataclasses.frostfs_services import InnerRing, Morp
from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils import datetime_utils, wallet_utils from frostfs_testlib.utils import datetime_utils, wallet_utils
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@reporter.step("Get epochs from nodes") @reporter.step_deco("Get epochs from nodes")
def get_epochs_from_nodes(shell: Shell, cluster: Cluster) -> dict[str, int]: def get_epochs_from_nodes(shell: Shell, cluster: Cluster) -> dict[str, int]:
""" """
Get current epochs on each node. Get current epochs on each node.
@ -34,8 +41,10 @@ def get_epochs_from_nodes(shell: Shell, cluster: Cluster) -> dict[str, int]:
return epochs_by_node return epochs_by_node
@reporter.step("Ensure fresh epoch") @reporter.step_deco("Ensure fresh epoch")
def ensure_fresh_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None) -> int: def ensure_fresh_epoch(
shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None
) -> int:
# ensure new fresh epoch to avoid epoch switch during test session # ensure new fresh epoch to avoid epoch switch during test session
alive_node = alive_node if alive_node else cluster.services(StorageNode)[0] alive_node = alive_node if alive_node else cluster.services(StorageNode)[0]
current_epoch = get_epoch(shell, cluster, alive_node) current_epoch = get_epoch(shell, cluster, alive_node)
@ -45,17 +54,19 @@ def ensure_fresh_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[Stor
return epoch return epoch
@reporter.step("Wait up to {timeout} seconds for nodes on cluster to align epochs") @reporter.step_deco("Wait for epochs align in whole cluster")
def wait_for_epochs_align(shell: Shell, cluster: Cluster, timeout=60): @wait_for_success(60, 5)
@wait_for_success(timeout, 5, None, True) def wait_for_epochs_align(shell: Shell, cluster: Cluster) -> None:
def check_epochs(): epochs = []
epochs_by_node = get_epochs_from_nodes(shell, cluster) for node in cluster.services(StorageNode):
assert len(set(epochs_by_node.values())) == 1, f"unaligned epochs found: {epochs_by_node}" epochs.append(get_epoch(shell, cluster, node))
unique_epochs = list(set(epochs))
check_epochs() assert (
len(unique_epochs) == 1
), f"unaligned epochs found, {epochs}, count of unique epochs {len(unique_epochs)}"
@reporter.step("Get Epoch") @reporter.step_deco("Get Epoch")
def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None): def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None):
alive_node = alive_node if alive_node else cluster.services(StorageNode)[0] alive_node = alive_node if alive_node else cluster.services(StorageNode)[0]
endpoint = alive_node.get_rpc_endpoint() endpoint = alive_node.get_rpc_endpoint()
@ -68,8 +79,8 @@ def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode]
return int(epoch.stdout) return int(epoch.stdout)
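A common pattern built from these epoch helpers, per their definitions above (a sketch; assumes no concurrent tick):

epoch = ensure_fresh_epoch(shell, cluster)  # ticks if needed and returns the new epoch
wait_for_epochs_align(shell, cluster)
assert get_epoch(shell, cluster) == epoch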
@reporter.step("Tick Epoch") @reporter.step_deco("Tick Epoch")
def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None, delta: Optional[int] = None): def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None):
""" """
Tick epoch using frostfs-adm or NeoGo if frostfs-adm is not available (DevEnv) Tick epoch using frostfs-adm or NeoGo if frostfs-adm is not available (DevEnv)
Args: Args:
@ -81,24 +92,19 @@ def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode]
alive_node = alive_node if alive_node else cluster.services(StorageNode)[0] alive_node = alive_node if alive_node else cluster.services(StorageNode)[0]
remote_shell = alive_node.host.get_shell() remote_shell = alive_node.host.get_shell()
if "force_transactions" not in alive_node.host.config.attributes: if FROSTFS_ADM_EXEC and FROSTFS_ADM_CONFIG_PATH:
# If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests) # If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests)
frostfs_adm = FrostfsAdm( frostfs_adm = FrostfsAdm(
shell=remote_shell, shell=remote_shell,
frostfs_adm_exec_path=FROSTFS_ADM_EXEC, frostfs_adm_exec_path=FROSTFS_ADM_EXEC,
config_file=FROSTFS_ADM_CONFIG_PATH, config_file=FROSTFS_ADM_CONFIG_PATH,
) )
frostfs_adm.morph.force_new_epoch(delta=delta) frostfs_adm.morph.force_new_epoch()
return return
# Otherwise we tick epoch using transaction # Otherwise we tick epoch using transaction
cur_epoch = get_epoch(shell, cluster) cur_epoch = get_epoch(shell, cluster)
if delta:
next_epoch = cur_epoch + delta
else:
next_epoch = cur_epoch + 1
# Use first node by default # Use first node by default
ir_node = cluster.services(InnerRing)[0] ir_node = cluster.services(InnerRing)[0]
# If no local_wallet_path is provided, we use wallet_path # If no local_wallet_path is provided, we use wallet_path
@ -115,7 +121,7 @@ def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode]
wallet_password=ir_wallet_pass, wallet_password=ir_wallet_pass,
scripthash=get_contract_hash(morph_chain, "netmap.frostfs", shell=shell), scripthash=get_contract_hash(morph_chain, "netmap.frostfs", shell=shell),
method="newEpoch", method="newEpoch",
arguments=f"int:{next_epoch}", arguments=f"int:{cur_epoch + 1}",
multisig_hash=f"{ir_address}:Global", multisig_hash=f"{ir_address}:Global",
address=ir_address, address=ir_address,
rpc_endpoint=morph_endpoint, rpc_endpoint=morph_endpoint,
View file
@ -10,28 +10,28 @@ from urllib.parse import quote_plus
import requests import requests
from frostfs_testlib import reporter from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.cli import GenericCli from frostfs_testlib.resources.common import SIMPLE_OBJECT_SIZE
from frostfs_testlib.resources.common import ASSETS_DIR, SIMPLE_OBJECT_SIZE from frostfs_testlib.s3.aws_cli_client import LONG_TIMEOUT
from frostfs_testlib.s3.aws_cli_client import command_options
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.shell.local_shell import LocalShell
from frostfs_testlib.steps.cli.object import get_object from frostfs_testlib.steps.cli.object import get_object
from frostfs_testlib.steps.storage_policy import get_nodes_without_object from frostfs_testlib.steps.storage_policy import get_nodes_without_object
from frostfs_testlib.storage.cluster import ClusterNode, StorageNode from frostfs_testlib.storage.cluster import StorageNode
from frostfs_testlib.testing.test_control import retry from frostfs_testlib.utils.cli_utils import _cmd_run
from frostfs_testlib.utils.file_utils import TestFile, get_file_hash from frostfs_testlib.utils.file_utils import get_file_hash
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
local_shell = LocalShell() ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/")
@reporter.step("Get via HTTP Gate") @reporter.step_deco("Get via HTTP Gate")
def get_via_http_gate( def get_via_http_gate(
cid: str, cid: str,
oid: str, oid: str,
node: ClusterNode, endpoint: str,
request_path: Optional[str] = None, request_path: Optional[str] = None,
timeout: Optional[int] = 300, timeout: Optional[int] = 300,
): ):
@ -39,74 +39,72 @@ def get_via_http_gate(
This function gets the given object from the HTTP gate This function gets the given object from the HTTP gate
cid: container id to get object from cid: container id to get object from
oid: object ID oid: object ID
node: node to make request endpoint: http gate endpoint
request_path: (optional) http request, if omitted - use default [{endpoint}/get/{cid}/{oid}] request_path: (optional) http request, if omitted - use default [{endpoint}/get/{cid}/{oid}]
""" """
# if `request_path` parameter omitted, use default # if `request_path` parameter omitted, use default
if request_path is None: if request_path is None:
request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}" request = f"{endpoint}/get/{cid}/{oid}"
else: else:
request = f"{node.http_gate.get_endpoint()}{request_path}" request = f"{endpoint}{request_path}"
resp = requests.get(request, stream=True, timeout=timeout, verify=False) resp = requests.get(request, stream=True, timeout=timeout)
if not resp.ok: if not resp.ok:
raise Exception( raise Exception(
f"""Failed to get object via HTTP gate: f"""Failed to get object via HTTP gate:
request: {resp.request.path_url}, request: {resp.request.path_url},
response: {resp.text}, response: {resp.text},
headers: {resp.headers},
status code: {resp.status_code} {resp.reason}""" status code: {resp.status_code} {resp.reason}"""
) )
logger.info(f"Request: {request}") logger.info(f"Request: {request}")
_attach_allure_step(request, resp.status_code) _attach_allure_step(request, resp.status_code)
test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}")) file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}")
with open(test_file, "wb") as file: with open(file_path, "wb") as file:
shutil.copyfileobj(resp.raw, file) shutil.copyfileobj(resp.raw, file)
return test_file return file_path
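An HTTP download round-trip sketch using the right-hand (endpoint string) signature; the gateway URL is a placeholder:

file_path = get_via_http_gate(cid, oid, endpoint="http://hgw.example")
assert get_file_hash(file_path) == get_file_hash(source_file)  # source_file: the original upload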
@reporter.step("Get via Zip HTTP Gate") @reporter.step_deco("Get via Zip HTTP Gate")
def get_via_zip_http_gate(cid: str, prefix: str, node: ClusterNode, timeout: Optional[int] = 300): def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str, timeout: Optional[int] = 300):
""" """
This function downloads a zip archive of objects with the given prefix from the HTTP gate This function downloads a zip archive of objects with the given prefix from the HTTP gate
cid: container id to get object from cid: container id to get object from
prefix: common prefix prefix: common prefix
node: node to make request endpoint: http gate endpoint
""" """
request = f"{node.http_gate.get_endpoint()}/zip/{cid}/{prefix}" request = f"{endpoint}/zip/{cid}/{prefix}"
resp = requests.get(request, stream=True, timeout=timeout, verify=False) resp = requests.get(request, stream=True, timeout=timeout)
if not resp.ok: if not resp.ok:
raise Exception( raise Exception(
f"""Failed to get object via HTTP gate: f"""Failed to get object via HTTP gate:
request: {resp.request.path_url}, request: {resp.request.path_url},
response: {resp.text}, response: {resp.text},
headers: {resp.headers},
status code: {resp.status_code} {resp.reason}""" status code: {resp.status_code} {resp.reason}"""
) )
logger.info(f"Request: {request}") logger.info(f"Request: {request}")
_attach_allure_step(request, resp.status_code) _attach_allure_step(request, resp.status_code)
test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_archive.zip")) file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_archive.zip")
with open(test_file, "wb") as file: with open(file_path, "wb") as file:
shutil.copyfileobj(resp.raw, file) shutil.copyfileobj(resp.raw, file)
with zipfile.ZipFile(test_file, "r") as zip_ref: with zipfile.ZipFile(file_path, "r") as zip_ref:
zip_ref.extractall(ASSETS_DIR) zip_ref.extractall(ASSETS_DIR)
return os.path.join(os.getcwd(), ASSETS_DIR, prefix) return os.path.join(os.getcwd(), ASSETS_DIR, prefix)
@reporter.step("Get via HTTP Gate by attribute") @reporter.step_deco("Get via HTTP Gate by attribute")
def get_via_http_gate_by_attribute( def get_via_http_gate_by_attribute(
cid: str, cid: str,
attribute: dict, attribute: dict,
node: ClusterNode, endpoint: str,
request_path: Optional[str] = None, request_path: Optional[str] = None,
timeout: Optional[int] = 300, timeout: Optional[int] = 300,
): ):
@ -121,32 +119,33 @@ def get_via_http_gate_by_attribute(
attr_value = quote_plus(str(attribute.get(attr_name))) attr_value = quote_plus(str(attribute.get(attr_name)))
# if `request_path` parameter omitted, use default # if `request_path` parameter omitted, use default
if request_path is None: if request_path is None:
request = f"{node.http_gate.get_endpoint()}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}" request = f"{endpoint}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}"
else: else:
request = f"{node.http_gate.get_endpoint()}{request_path}" request = f"{endpoint}{request_path}"
resp = requests.get(request, stream=True, timeout=timeout, verify=False) resp = requests.get(request, stream=True, timeout=timeout)
if not resp.ok: if not resp.ok:
raise Exception( raise Exception(
f"""Failed to get object via HTTP gate: f"""Failed to get object via HTTP gate:
request: {resp.request.path_url}, request: {resp.request.path_url},
response: {resp.text}, response: {resp.text},
headers: {resp.headers},
status code: {resp.status_code} {resp.reason}""" status code: {resp.status_code} {resp.reason}"""
) )
logger.info(f"Request: {request}") logger.info(f"Request: {request}")
_attach_allure_step(request, resp.status_code) _attach_allure_step(request, resp.status_code)
test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{str(uuid.uuid4())}")) file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{str(uuid.uuid4())}")
with open(test_file, "wb") as file: with open(file_path, "wb") as file:
shutil.copyfileobj(resp.raw, file) shutil.copyfileobj(resp.raw, file)
return test_file return file_path
@reporter.step("Upload via HTTP Gate") @reporter.step_deco("Upload via HTTP Gate")
def upload_via_http_gate(cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300) -> str: def upload_via_http_gate(
cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300
) -> str:
""" """
This function uploads the given object through the HTTP gate This function uploads the given object through the HTTP gate
cid: CID of the container to upload the object to cid: CID of the container to upload the object to
@ -157,7 +156,7 @@ def upload_via_http_gate(cid: str, path: str, endpoint: str, headers: Optional[d
request = f"{endpoint}/upload/{cid}" request = f"{endpoint}/upload/{cid}"
files = {"upload_file": open(path, "rb")} files = {"upload_file": open(path, "rb")}
body = {"filename": path} body = {"filename": path}
resp = requests.post(request, files=files, data=body, headers=headers, timeout=timeout, verify=False) resp = requests.post(request, files=files, data=body, headers=headers, timeout=timeout)
if not resp.ok: if not resp.ok:
raise Exception( raise Exception(
@ -175,7 +174,7 @@ def upload_via_http_gate(cid: str, path: str, endpoint: str, headers: Optional[d
return resp.json().get("object_id") return resp.json().get("object_id")
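A hedged usage sketch (container ID, file path and endpoint are invented); attr_into_header(), defined later in this file, turns plain attributes into the X-Attribute-* headers the gate expects:

headers = attr_into_header({"FileName": "cat.jpg"})  # -> {"X-Attribute-FileName": "cat.jpg"}
oid = upload_via_http_gate(
    cid="CONTAINER_ID",                 # hypothetical container ID
    path="/tmp/cat.jpg",
    endpoint="http://localhost:8084",   # hypothetical HTTP gate endpoint
    headers=headers,
)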
@reporter.step("Check is the passed object large") @reporter.step_deco("Check is the passed object large")
def is_object_large(filepath: str) -> bool: def is_object_large(filepath: str) -> bool:
""" """
This function checks the passed file size and returns True if file_size > SIMPLE_OBJECT_SIZE This function checks the passed file size and returns True if file_size > SIMPLE_OBJECT_SIZE
@ -189,7 +188,7 @@ def is_object_large(filepath: str) -> bool:
return False return False
@reporter.step("Upload via HTTP Gate using Curl") @reporter.step_deco("Upload via HTTP Gate using Curl")
def upload_via_http_gate_curl( def upload_via_http_gate_curl(
cid: str, cid: str,
filepath: str, filepath: str,
@ -214,16 +213,16 @@ def upload_via_http_gate_curl(
large_object = is_object_large(filepath) large_object = is_object_large(filepath)
if large_object: if large_object:
# pre-clean # pre-clean
local_shell.exec("rm pipe -f") _cmd_run("rm pipe -f")
files = f"file=@pipe;filename={os.path.basename(filepath)}" files = f"file=@pipe;filename={os.path.basename(filepath)}"
cmd = f"mkfifo pipe;cat {filepath} > pipe & curl -k --no-buffer -F '{files}' {attributes} {request}" cmd = f"mkfifo pipe;cat {filepath} > pipe & curl --no-buffer -F '{files}' {attributes} {request}"
output = local_shell.exec(cmd, command_options) output = _cmd_run(cmd, LONG_TIMEOUT)
# clean up pipe # clean up pipe
local_shell.exec("rm pipe") _cmd_run("rm pipe")
else: else:
files = f"file=@{filepath};filename={os.path.basename(filepath)}" files = f"file=@{filepath};filename={os.path.basename(filepath)}"
cmd = f"curl -k -F '{files}' {attributes} {request}" cmd = f"curl -F '{files}' {attributes} {request}"
output = local_shell.exec(cmd) output = _cmd_run(cmd)
if error_pattern: if error_pattern:
match = error_pattern.casefold() in str(output).casefold() match = error_pattern.casefold() in str(output).casefold()
@ -236,22 +235,21 @@ def upload_via_http_gate_curl(
return oid_re.group(1) return oid_re.group(1)
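The large-object branch above relies on a named pipe: cat fills the FIFO in the background while curl streams from it, so the multipart body never has to be staged on disk a second time. A minimal stand-alone sketch of that trick (POSIX only; URL and path are placeholders):

import os
import subprocess

def stream_upload_via_fifo(filepath: str, url: str) -> str:
    fifo = "pipe"  # same name the helper above uses
    if os.path.exists(fifo):
        os.remove(fifo)  # pre-clean, as in the helper
    os.mkfifo(fifo)
    try:
        files = f"file=@{fifo};filename={os.path.basename(filepath)}"
        # cat feeds the pipe in the background; curl reads it unbuffered.
        cmd = f"cat {filepath} > {fifo} & curl --no-buffer -F '{files}' {url}"
        result = subprocess.run(cmd, shell=True, check=True, capture_output=True, text=True)
        return result.stdout
    finally:
        os.remove(fifo)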
@retry(max_attempts=3, sleep_interval=1) @reporter.step_deco("Get via HTTP Gate using Curl")
@reporter.step("Get via HTTP Gate using Curl") def get_via_http_curl(cid: str, oid: str, endpoint: str) -> str:
def get_via_http_curl(cid: str, oid: str, node: ClusterNode) -> TestFile:
""" """
This function gets the given object from the HTTP gate using the curl utility. This function gets the given object from the HTTP gate using the curl utility.
cid: CID to get object from cid: CID to get object from
oid: object OID oid: object OID
node: node for request endpoint: http gate endpoint
""" """
request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}" request = f"{endpoint}/get/{cid}/{oid}"
test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}")) file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}")
curl = GenericCli("curl", node.host) cmd = f"curl {request} > {file_path}"
curl(f"-k ", f"{request} > {test_file}", shell=local_shell) _cmd_run(cmd)
return test_file return file_path
def _attach_allure_step(request: str, status_code: int, req_type="GET"): def _attach_allure_step(request: str, status_code: int, req_type="GET"):
@ -260,31 +258,26 @@ def _attach_allure_step(request: str, status_code: int, req_type="GET"):
reporter.attach(command_attachment, f"{req_type} Request") reporter.attach(command_attachment, f"{req_type} Request")
@reporter.step("Try to get object and expect error") @reporter.step_deco("Try to get object and expect error")
def try_to_get_object_and_expect_error( def try_to_get_object_and_expect_error(
cid: str, cid: str, oid: str, error_pattern: str, endpoint: str
oid: str,
node: ClusterNode,
error_pattern: str,
) -> None: ) -> None:
try: try:
get_via_http_gate(cid=cid, oid=oid, node=node) get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint)
raise AssertionError(f"Expected error on getting object with cid: {cid}") raise AssertionError(f"Expected error on getting object with cid: {cid}")
except Exception as err: except Exception as err:
match = error_pattern.casefold() in str(err).casefold() match = error_pattern.casefold() in str(err).casefold()
assert match, f"Expected {err} to match {error_pattern}" assert match, f"Expected {err} to match {error_pattern}"
@reporter.step("Verify object can be get using HTTP header attribute") @reporter.step_deco("Verify object can be get using HTTP header attribute")
def get_object_by_attr_and_verify_hashes( def get_object_by_attr_and_verify_hashes(
oid: str, oid: str, file_name: str, cid: str, attrs: dict, endpoint: str
file_name: str,
cid: str,
attrs: dict,
node: ClusterNode,
) -> None: ) -> None:
got_file_path_http = get_via_http_gate(cid=cid, oid=oid, node=node) got_file_path_http = get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint)
got_file_path_http_attr = get_via_http_gate_by_attribute(cid=cid, attribute=attrs, node=node) got_file_path_http_attr = get_via_http_gate_by_attribute(
cid=cid, attribute=attrs, endpoint=endpoint
)
assert_hashes_are_equal(file_name, got_file_path_http, got_file_path_http_attr) assert_hashes_are_equal(file_name, got_file_path_http, got_file_path_http_attr)
@ -295,7 +288,7 @@ def verify_object_hash(
cid: str, cid: str,
shell: Shell, shell: Shell,
nodes: list[StorageNode], nodes: list[StorageNode],
request_node: ClusterNode, endpoint: str,
object_getter=None, object_getter=None,
) -> None: ) -> None:
@ -321,7 +314,7 @@ def verify_object_hash(
shell=shell, shell=shell,
endpoint=random_node.get_rpc_endpoint(), endpoint=random_node.get_rpc_endpoint(),
) )
got_file_path_http = object_getter(cid=cid, oid=oid, node=request_node) got_file_path_http = object_getter(cid=cid, oid=oid, endpoint=endpoint)
assert_hashes_are_equal(file_name, got_file_path, got_file_path_http) assert_hashes_are_equal(file_name, got_file_path, got_file_path_http)
@ -330,14 +323,18 @@ def assert_hashes_are_equal(orig_file_name: str, got_file_1: str, got_file_2: st
msg = "Expected hashes are equal for files {f1} and {f2}" msg = "Expected hashes are equal for files {f1} and {f2}"
got_file_hash_http = get_file_hash(got_file_1) got_file_hash_http = get_file_hash(got_file_1)
assert get_file_hash(got_file_2) == got_file_hash_http, msg.format(f1=got_file_2, f2=got_file_1) assert get_file_hash(got_file_2) == got_file_hash_http, msg.format(f1=got_file_2, f2=got_file_1)
assert get_file_hash(orig_file_name) == got_file_hash_http, msg.format(f1=orig_file_name, f2=got_file_1) assert get_file_hash(orig_file_name) == got_file_hash_http, msg.format(
f1=orig_file_name, f2=got_file_1
)
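get_file_hash() is imported from the testlib's file utilities; a self-contained stand-in with the same contract, assuming a SHA-256 digest (the library's choice of algorithm may differ):

import hashlib

def get_file_hash_sha256(file_path: str) -> str:
    digest = hashlib.sha256()
    with open(file_path, "rb") as file:
        # Hash in 1 MiB chunks so large test objects never sit in memory whole.
        for chunk in iter(lambda: file.read(1024 * 1024), b""):
            digest.update(chunk)
    return digest.hexdigest()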
def attr_into_header(attrs: dict) -> dict: def attr_into_header(attrs: dict) -> dict:
return {f"X-Attribute-{_key}": _value for _key, _value in attrs.items()} return {f"X-Attribute-{_key}": _value for _key, _value in attrs.items()}
@reporter.step("Convert each attribute (Key=Value) to the following format: -H 'X-Attribute-Key: Value'") @reporter.step_deco(
"Convert each attribute (Key=Value) to the following format: -H 'X-Attribute-Key: Value'"
)
def attr_into_str_header_curl(attrs: dict) -> list: def attr_into_str_header_curl(attrs: dict) -> list:
headers = [] headers = []
for k, v in attrs.items(): for k, v in attrs.items():
@ -346,29 +343,23 @@ def attr_into_str_header_curl(attrs: dict) -> list:
return headers return headers
@reporter.step("Try to get object via http (pass http_request and optional attributes) and expect error") @reporter.step_deco(
"Try to get object via http (pass http_request and optional attributes) and expect error"
)
def try_to_get_object_via_passed_request_and_expect_error( def try_to_get_object_via_passed_request_and_expect_error(
cid: str, cid: str,
oid: str, oid: str,
node: ClusterNode,
error_pattern: str, error_pattern: str,
endpoint: str,
http_request_path: str, http_request_path: str,
attrs: Optional[dict] = None, attrs: Optional[dict] = None,
) -> None: ) -> None:
try: try:
if attrs is None: if attrs is None:
get_via_http_gate( get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, request_path=http_request_path)
cid=cid,
oid=oid,
node=node,
request_path=http_request_path,
)
else: else:
get_via_http_gate_by_attribute( get_via_http_gate_by_attribute(
cid=cid, cid=cid, attribute=attrs, endpoint=endpoint, request_path=http_request_path
attribute=attrs,
node=node,
request_path=http_request_path,
) )
raise AssertionError(f"Expected error on getting object with cid: {cid}") raise AssertionError(f"Expected error on getting object with cid: {cid}")
except Exception as err: except Exception as err:


@ -1,45 +0,0 @@
import re
from frostfs_testlib import reporter
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.testing.test_control import wait_for_success
@reporter.step("Check metrics result")
@wait_for_success(interval=10)
def check_metrics_counter(
cluster_nodes: list[ClusterNode],
operator: str = "==",
counter_exp: int = 0,
parse_from_command: bool = False,
**metrics_greps: str,
):
counter_act = 0
for cluster_node in cluster_nodes:
counter_act += get_metrics_value(cluster_node, parse_from_command, **metrics_greps)
assert eval(
f"{counter_act} {operator} {counter_exp}"
), f"Expected: {counter_exp} {operator} Actual: {counter_act} in nodes: {cluster_nodes}"
@reporter.step("Get metrics value from node: {node}")
def get_metrics_value(node: ClusterNode, parse_from_command: bool = False, **metrics_greps: str):
try:
command_result = node.metrics.storage.get_metrics_search_by_greps(**metrics_greps)
if parse_from_command:
metrics_counter = calc_metrics_count_from_stdout(command_result.stdout, **metrics_greps)
else:
metrics_counter = calc_metrics_count_from_stdout(command_result.stdout)
except RuntimeError as e:
metrics_counter = 0
return metrics_counter
@reporter.step("Parse metrics count and calc sum of result")
def calc_metrics_count_from_stdout(metric_result_stdout: str, command: str = None):
if command:
result = re.findall(rf"{command}\s*([\d.e+-]+)", metric_result_stdout)
else:
result = re.findall(r"}\s*([\d.e+-]+)", metric_result_stdout)
return sum(map(lambda x: int(float(x)), result))
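To make the two regexes above concrete: Prometheus exposition lines put the numeric sample after the closing label brace, so the pattern `}\s*([\d.e+-]+)` captures every value, scientific notation included. Against an invented stdout:

import re

sample = (
    'frostfs_node_object_counter{type="phy"} 1.5e+03\n'
    'frostfs_node_object_counter{type="logic"} 42\n'
)
values = re.findall(r"}\s*([\d.e+-]+)", sample)
print(sum(int(float(value)) for value in values))  # 1542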


@ -1,19 +0,0 @@
from frostfs_testlib.shell import CommandOptions
from frostfs_testlib.storage.cluster import ClusterNode
class IpHelper:
@staticmethod
def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[str]) -> None:
shell = node.host.get_shell()
for ip in block_ip:
shell.exec(f"ip route add blackhole {ip}")
@staticmethod
def restore_input_traffic_to_node(node: ClusterNode) -> None:
shell = node.host.get_shell()
unlock_ip = shell.exec("ip route list | grep blackhole", CommandOptions(check=False))
if unlock_ip.return_code != 0:
return
for ip in unlock_ip.stdout.strip().split("\n"):
shell.exec(f"ip route del blackhole {ip.split(' ')[1]}")


@ -6,16 +6,21 @@ from dataclasses import dataclass
from time import sleep from time import sleep
from typing import Optional from typing import Optional
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsAdm, FrostfsCli from frostfs_testlib.cli import FrostfsAdm, FrostfsCli
from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import (
FROSTFS_ADM_CONFIG_PATH,
FROSTFS_ADM_EXEC,
FROSTFS_CLI_EXEC,
)
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.epoch import tick_epoch, wait_for_epochs_align from frostfs_testlib.steps.epoch import tick_epoch
from frostfs_testlib.storage.cluster import Cluster, StorageNode from frostfs_testlib.storage.cluster import Cluster, StorageNode
from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
from frostfs_testlib.utils import datetime_utils from frostfs_testlib.utils import datetime_utils
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@ -35,7 +40,45 @@ class HealthStatus:
return HealthStatus(network, health) return HealthStatus(network, health)
@reporter.step("Get Locode from random storage node") @reporter.step_deco("Stop random storage nodes")
def stop_random_storage_nodes(number: int, nodes: list[StorageNode]) -> list[StorageNode]:
"""
Shuts down the given number of randomly selected storage nodes.
Args:
number: the number of storage nodes to stop
nodes: the list of storage nodes to stop
Returns:
the list of nodes that were stopped
"""
nodes_to_stop = random.sample(nodes, number)
for node in nodes_to_stop:
node.stop_service()
return nodes_to_stop
@reporter.step_deco("Start storage node")
def start_storage_nodes(nodes: list[StorageNode]) -> None:
"""
The function starts the specified storage nodes.
Args:
nodes: the list of nodes to start
"""
for node in nodes:
node.start_service()
@reporter.step_deco("Stop storage node")
def stop_storage_nodes(nodes: list[StorageNode]) -> None:
"""
The function stops the specified storage nodes.
Args:
nodes: the list of nodes to stop
"""
for node in nodes:
node.stop_service()
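A typical failover test composes these helpers like so (sketch only; assumes a cluster fixture from the test harness):

from frostfs_testlib.storage.cluster import StorageNode

def test_single_node_outage(cluster):
    nodes = cluster.services(StorageNode)
    stopped = stop_random_storage_nodes(1, nodes)
    try:
        ...  # verify data stays available on the remaining nodes
    finally:
        start_storage_nodes(stopped)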
@reporter.step_deco("Get Locode from random storage node")
def get_locode_from_random_node(cluster: Cluster) -> str: def get_locode_from_random_node(cluster: Cluster) -> str:
node = random.choice(cluster.services(StorageNode)) node = random.choice(cluster.services(StorageNode))
locode = node.get_un_locode() locode = node.get_un_locode()
@ -43,7 +86,7 @@ def get_locode_from_random_node(cluster: Cluster) -> str:
return locode return locode
@reporter.step("Healthcheck for storage node {node}") @reporter.step_deco("Healthcheck for storage node {node}")
def storage_node_healthcheck(node: StorageNode) -> HealthStatus: def storage_node_healthcheck(node: StorageNode) -> HealthStatus:
""" """
The function returns storage node's health status. The function returns storage node's health status.
@ -52,27 +95,12 @@ def storage_node_healthcheck(node: StorageNode) -> HealthStatus:
Returns: Returns:
health status as HealthStatus object. health status as HealthStatus object.
""" """
command = "control healthcheck"
host = node.host output = _run_control_command_with_retries(node, command)
service_config = host.get_service_config(node.name) return HealthStatus.from_stdout(output)
wallet_path = service_config.attributes["wallet_path"]
wallet_password = service_config.attributes["wallet_password"]
control_endpoint = service_config.attributes["control_endpoint"]
shell = host.get_shell()
wallet_config_path = f"/tmp/{node.name}-config.yaml"
wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
cli_config = host.get_cli_config("frostfs-cli")
cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
result = cli.control.healthcheck(control_endpoint)
return HealthStatus.from_stdout(result.stdout)
@reporter.step("Set status for {node}") @reporter.step_deco("Set status for {node}")
def storage_node_set_status(node: StorageNode, status: str, retries: int = 0) -> None: def storage_node_set_status(node: StorageNode, status: str, retries: int = 0) -> None:
""" """
The function sets particular status for given node. The function sets particular status for given node.
@ -81,24 +109,11 @@ def storage_node_set_status(node: StorageNode, status: str, retries: int = 0) ->
status: online or offline. status: online or offline.
retries (optional, int): number of retry attempts if it didn't work from the first time retries (optional, int): number of retry attempts if it didn't work from the first time
""" """
host = node.host command = f"control set-status --status {status}"
service_config = host.get_service_config(node.name) _run_control_command_with_retries(node, command, retries)
wallet_path = service_config.attributes["wallet_path"]
wallet_password = service_config.attributes["wallet_password"]
control_endpoint = service_config.attributes["control_endpoint"]
shell = host.get_shell()
wallet_config_path = f"/tmp/{node.name}-config.yaml"
wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
cli_config = host.get_cli_config("frostfs-cli")
cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
cli.control.set_status(control_endpoint, status)
@reporter.step("Get netmap snapshot") @reporter.step_deco("Get netmap snapshot")
def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str: def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str:
""" """
The function returns string representation of netmap snapshot. The function returns string representation of netmap snapshot.
@ -112,11 +127,14 @@ def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str:
storage_wallet_path = node.get_wallet_path() storage_wallet_path = node.get_wallet_path()
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, config_file=storage_wallet_config) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, config_file=storage_wallet_config)
return cli.netmap.snapshot(rpc_endpoint=node.get_rpc_endpoint(), wallet=storage_wallet_path).stdout return cli.netmap.snapshot(
rpc_endpoint=node.get_rpc_endpoint(),
wallet=storage_wallet_path,
).stdout
@reporter.step("Get shard list for {node}") @reporter.step_deco("Get shard list for {node}")
def node_shard_list(node: StorageNode, json: Optional[bool] = None) -> list[str]: def node_shard_list(node: StorageNode) -> list[str]:
""" """
The function returns list of shards for specified storage node. The function returns list of shards for specified storage node.
Args: Args:
@ -124,139 +142,112 @@ def node_shard_list(node: StorageNode, json: Optional[bool] = None) -> list[str]
Returns: Returns:
list of shards. list of shards.
""" """
host = node.host command = "control shards list"
service_config = host.get_service_config(node.name) output = _run_control_command_with_retries(node, command)
wallet_path = service_config.attributes["wallet_path"] return re.findall(r"Shard (.*):", output)
wallet_password = service_config.attributes["wallet_password"]
control_endpoint = service_config.attributes["control_endpoint"]
shell = host.get_shell()
wallet_config_path = f"/tmp/{node.name}-config.yaml"
wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
cli_config = host.get_cli_config("frostfs-cli")
cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
result = cli.shards.list(endpoint=control_endpoint, json_mode=json)
return re.findall(r"Shard (.*):", result.stdout)
@reporter.step("Shard set for {node}") @reporter.step_deco("Shard set for {node}")
def node_shard_set_mode(node: StorageNode, shard: list[str], mode: str) -> None: def node_shard_set_mode(node: StorageNode, shard: str, mode: str) -> str:
""" """
The function sets mode for specified shard. The function sets mode for specified shard.
Args: Args:
node: node on which shard mode should be set. node: node on which shard mode should be set.
""" """
host = node.host command = f"control shards set-mode --id {shard} --mode {mode}"
service_config = host.get_service_config(node.name) return _run_control_command_with_retries(node, command)
wallet_path = service_config.attributes["wallet_path"]
wallet_password = service_config.attributes["wallet_password"]
control_endpoint = service_config.attributes["control_endpoint"]
shell = host.get_shell()
wallet_config_path = f"/tmp/{node.name}-config.yaml"
wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
cli_config = host.get_cli_config("frostfs-cli")
cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
cli.shards.set_mode(endpoint=control_endpoint, mode=mode, id=shard)
@reporter.step("Drop object from {node}") @reporter.step_deco("Drop object from {node}")
def drop_object(node: StorageNode, cid: str, oid: str) -> None: def drop_object(node: StorageNode, cid: str, oid: str) -> str:
""" """
The function drops object from specified node. The function drops object from specified node.
Args: Args:
node: node from which object should be dropped. node_id str: node from which object should be dropped.
""" """
host = node.host command = f"control drop-objects -o {cid}/{oid}"
service_config = host.get_service_config(node.name) return _run_control_command_with_retries(node, command)
wallet_path = service_config.attributes["wallet_path"]
wallet_password = service_config.attributes["wallet_password"]
control_endpoint = service_config.attributes["control_endpoint"]
shell = host.get_shell()
wallet_config_path = f"/tmp/{node.name}-config.yaml"
wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
cli_config = host.get_cli_config("frostfs-cli")
cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
objects = f"{cid}/{oid}"
cli.control.drop_objects(control_endpoint, objects)
@reporter.step("Delete data from host for node {node}") @reporter.step_deco("Delete data from host for node {node}")
def delete_node_data(node: StorageNode) -> None: def delete_node_data(node: StorageNode) -> None:
node.stop_service() node.stop_service()
node.host.delete_storage_node_data(node.name) node.host.delete_storage_node_data(node.name)
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME)) time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
@reporter.step("Exclude node {node_to_exclude} from network map") @reporter.step_deco("Exclude node {node_to_exclude} from network map")
def exclude_node_from_network_map(node_to_exclude: StorageNode, alive_node: StorageNode, shell: Shell, cluster: Cluster) -> None: def exclude_node_from_network_map(
node_to_exclude: StorageNode,
alive_node: StorageNode,
shell: Shell,
cluster: Cluster,
) -> None:
node_netmap_key = node_to_exclude.get_wallet_public_key() node_netmap_key = node_to_exclude.get_wallet_public_key()
storage_node_set_status(node_to_exclude, status="offline") storage_node_set_status(node_to_exclude, status="offline")
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME)) time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
tick_epoch(shell, cluster) tick_epoch(shell, cluster)
wait_for_epochs_align(shell, cluster)
snapshot = get_netmap_snapshot(node=alive_node, shell=shell) snapshot = get_netmap_snapshot(node=alive_node, shell=shell)
assert node_netmap_key not in snapshot, f"Expected node with key {node_netmap_key} to be absent in network map" assert (
node_netmap_key not in snapshot
), f"Expected node with key {node_netmap_key} to be absent in network map"
@reporter.step("Include node {node_to_include} into network map") @reporter.step_deco("Include node {node_to_include} into network map")
def include_node_to_network_map(node_to_include: StorageNode, alive_node: StorageNode, shell: Shell, cluster: Cluster) -> None: def include_node_to_network_map(
node_to_include: StorageNode,
alive_node: StorageNode,
shell: Shell,
cluster: Cluster,
) -> None:
storage_node_set_status(node_to_include, status="online") storage_node_set_status(node_to_include, status="online")
# Per suggestion of @fyrchik we need to wait for 2 blocks after we set status and after tick epoch. # Per suggestion of @fyrchik we need to wait for 2 blocks after we set status and after tick epoch.
# First sleep can be omitted after https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/60 complete. # First sleep can be omitted after https://github.com/TrueCloudLab/frostfs-node/issues/60 complete.
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2) time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2)
tick_epoch(shell, cluster) tick_epoch(shell, cluster)
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2) time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2)
await_node_in_map(node_to_include, shell, alive_node) check_node_in_map(node_to_include, shell, alive_node)
@reporter.step("Check node {node} in network map") @reporter.step_deco("Check node {node} in network map")
def check_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None: def check_node_in_map(
node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None
) -> None:
alive_node = alive_node or node alive_node = alive_node or node
node_netmap_key = node.get_wallet_public_key() node_netmap_key = node.get_wallet_public_key()
logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}") logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}")
snapshot = get_netmap_snapshot(alive_node, shell) snapshot = get_netmap_snapshot(alive_node, shell)
assert node_netmap_key in snapshot, f"Expected node with key {node_netmap_key} to be in network map" assert (
node_netmap_key in snapshot
), f"Expected node with key {node_netmap_key} to be in network map"
@wait_for_success(300, 15, title="Await node {node} in network map") @reporter.step_deco("Check node {node} NOT in network map")
def await_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None: def check_node_not_in_map(
check_node_in_map(node, shell, alive_node) node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None
) -> None:
@reporter.step("Check node {node} NOT in network map")
def check_node_not_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None:
alive_node = alive_node or node alive_node = alive_node or node
node_netmap_key = node.get_wallet_public_key() node_netmap_key = node.get_wallet_public_key()
logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}") logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}")
snapshot = get_netmap_snapshot(alive_node, shell) snapshot = get_netmap_snapshot(alive_node, shell)
assert node_netmap_key not in snapshot, f"Expected node with key {node_netmap_key} to be NOT in network map" assert (
node_netmap_key not in snapshot
), f"Expected node with key {node_netmap_key} to be NOT in network map"
@reporter.step("Wait for node {node} is ready") @reporter.step_deco("Wait for node {node} is ready")
def wait_for_node_to_be_ready(node: StorageNode) -> None: def wait_for_node_to_be_ready(node: StorageNode) -> None:
timeout, attempts = 60, 15 timeout, attempts = 30, 6
for _ in range(attempts): for _ in range(attempts):
try: try:
health_check = storage_node_healthcheck(node) health_check = storage_node_healthcheck(node)
@ -265,11 +256,18 @@ def wait_for_node_to_be_ready(node: StorageNode) -> None:
except Exception as err: except Exception as err:
logger.warning(f"Node {node} is not ready:\n{err}") logger.warning(f"Node {node} is not ready:\n{err}")
sleep(timeout) sleep(timeout)
raise AssertionError(f"Node {node} hasn't gone to the READY state after {timeout * attempts} seconds") raise AssertionError(
f"Node {node} hasn't gone to the READY state after {timeout * attempts} seconds"
)
@reporter.step("Remove nodes from network map trough cli-adm morph command") @reporter.step_deco("Remove nodes from network map trough cli-adm morph command")
def remove_nodes_from_map_morph(shell: Shell, cluster: Cluster, remove_nodes: list[StorageNode], alive_node: Optional[StorageNode] = None): def remove_nodes_from_map_morph(
shell: Shell,
cluster: Cluster,
remove_nodes: list[StorageNode],
alive_node: Optional[StorageNode] = None,
):
""" """
Move node to the Offline state in the candidates list and tick an epoch to update the netmap Move node to the Offline state in the candidates list and tick an epoch to update the netmap
using frostfs-adm using frostfs-adm
@ -288,5 +286,66 @@ def remove_nodes_from_map_morph(shell: Shell, cluster: Cluster, remove_nodes: li
if FROSTFS_ADM_EXEC and FROSTFS_ADM_CONFIG_PATH: if FROSTFS_ADM_EXEC and FROSTFS_ADM_CONFIG_PATH:
# If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests) # If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests)
frostfsadm = FrostfsAdm(shell=remote_shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH) frostfsadm = FrostfsAdm(
shell=remote_shell,
frostfs_adm_exec_path=FROSTFS_ADM_EXEC,
config_file=FROSTFS_ADM_CONFIG_PATH,
)
frostfsadm.morph.remove_nodes(node_netmap_keys) frostfsadm.morph.remove_nodes(node_netmap_keys)
def _run_control_command_with_retries(node: StorageNode, command: str, retries: int = 0) -> str:
for attempt in range(1 + retries): # original attempt + specified retries
try:
return _run_control_command(node, command)
except AssertionError as err:
if attempt < retries:
logger.warning(f"Command {command} failed with error {err} and will be retried")
continue
raise AssertionError(f"Command {command} failed with error {err}") from err
def _run_control_command(node: StorageNode, command: str) -> None:
host = node.host
service_config = host.get_service_config(node.name)
wallet_path = service_config.attributes["wallet_path"]
wallet_password = service_config.attributes["wallet_password"]
control_endpoint = service_config.attributes["control_endpoint"]
shell = host.get_shell()
wallet_config_path = f"/tmp/{node.name}-config.yaml"
wallet_config = f'password: "{wallet_password}"'
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
cli_config = host.get_cli_config("frostfs-cli")
# TODO: implement cli.control
# cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
result = shell.exec(
f"{cli_config.exec_path} {command} --endpoint {control_endpoint} "
f"--wallet {wallet_path} --config {wallet_config_path}"
)
return result.stdout
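Spelled out, the fallback above assembles a command line roughly like the following; the endpoint and paths here are invented, and the temporary config file only needs to carry the wallet password:

import subprocess

# Mirrors the string built by _run_control_command (invented example values).
cmd = (
    "frostfs-cli control healthcheck "
    "--endpoint localhost:8081 "
    "--wallet /path/to/wallet01.json "
    "--config /tmp/frostfs-storage_01-config.yaml"
)
print(subprocess.run(cmd, shell=True, capture_output=True, text=True).stdout)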
@reporter.step_deco("Start services s3gate ")
def start_s3gates(cluster: Cluster) -> None:
"""
The function starts all S3 gate services in the cluster.
Args:
cluster: cluster instance under test
"""
for gate in cluster.services(S3Gate):
gate.start_service()
@reporter.step_deco("Stop services s3gate ")
def stop_s3gates(cluster: Cluster) -> None:
"""
The function stops all S3 gate services in the cluster.
Args:
cluster: cluster instance under test
"""
for gate in cluster.services(S3Gate):
gate.stop_service()


@ -8,18 +8,20 @@ from typing import Optional
from neo3.wallet import utils as neo3_utils from neo3.wallet import utils as neo3_utils
from neo3.wallet import wallet as neo3_wallet from neo3.wallet import wallet as neo3_wallet
from frostfs_testlib import reporter
from frostfs_testlib.cli import NeoGo from frostfs_testlib.cli import NeoGo
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import NEOGO_EXECUTABLE from frostfs_testlib.resources.cli import NEOGO_EXECUTABLE
from frostfs_testlib.resources.common import FROSTFS_CONTRACT, GAS_HASH, MORPH_BLOCK_TIME from frostfs_testlib.resources.common import FROSTFS_CONTRACT, GAS_HASH, MORPH_BLOCK_TIME
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.dataclasses.frostfs_services import MorphChain from frostfs_testlib.storage.dataclasses.frostfs_services import MainChain, MorphChain
from frostfs_testlib.utils import converting_utils, datetime_utils, wallet_utils from frostfs_testlib.utils import converting_utils, datetime_utils, wallet_utils
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
EMPTY_PASSWORD = "" EMPTY_PASSWORD = ""
TX_PERSIST_TIMEOUT = 15 # seconds TX_PERSIST_TIMEOUT = 15 # seconds
ASSET_POWER_MAINCHAIN = 10**8
ASSET_POWER_SIDECHAIN = 10**12 ASSET_POWER_SIDECHAIN = 10**12
@ -40,7 +42,32 @@ def get_contract_hash(morph_chain: MorphChain, resolve_name: str, shell: Shell)
return bytes.decode(base64.b64decode(stack_data[0]["value"])) return bytes.decode(base64.b64decode(stack_data[0]["value"]))
def transaction_accepted(morph_chain: MorphChain, tx_id: str): @reporter.step_deco("Withdraw Mainnet Gas")
def withdraw_mainnet_gas(shell: Shell, main_chain: MainChain, wlt: str, amount: int):
address = wallet_utils.get_last_address_from_wallet(wlt, EMPTY_PASSWORD)
scripthash = neo3_utils.address_to_script_hash(address)
neogo = NeoGo(shell=shell, neo_go_exec_path=NEOGO_EXECUTABLE)
out = neogo.contract.invokefunction(
wallet=wlt,
address=address,
rpc_endpoint=main_chain.get_endpoint(),
scripthash=FROSTFS_CONTRACT,
method="withdraw",
arguments=f"{scripthash} int:{amount}",
multisig_hash=f"{scripthash}:Global",
wallet_password="",
)
m = re.match(r"^Sent invocation transaction (\w{64})$", out.stdout)
if m is None:
raise Exception("Can not get Tx.")
tx = m.group(1)
if not transaction_accepted(main_chain, tx):
raise AssertionError(f"TX {tx} hasn't been processed")
def transaction_accepted(main_chain: MainChain, tx_id: str):
""" """
This function returns True in case of accepted TX. This function returns True in case of accepted TX.
Args: Args:
@ -52,8 +79,8 @@ def transaction_accepted(morph_chain: MorphChain, tx_id: str):
try: try:
for _ in range(0, TX_PERSIST_TIMEOUT): for _ in range(0, TX_PERSIST_TIMEOUT):
time.sleep(1) time.sleep(1)
neogo = NeoGo(shell=morph_chain.host.get_shell(), neo_go_exec_path=NEOGO_EXECUTABLE) neogo = NeoGo(shell=main_chain.host.get_shell(), neo_go_exec_path=NEOGO_EXECUTABLE)
resp = neogo.query.tx(tx_hash=tx_id, rpc_endpoint=morph_chain.get_endpoint()) resp = neogo.query.tx(tx_hash=tx_id, rpc_endpoint=main_chain.get_endpoint())
if resp is not None: if resp is not None:
logger.info(f"TX is accepted in block: {resp}") logger.info(f"TX is accepted in block: {resp}")
return True, resp return True, resp
@ -63,7 +90,7 @@ def transaction_accepted(morph_chain: MorphChain, tx_id: str):
return False return False
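One caveat on transaction_accepted(): it returns a (True, resp) tuple once the RPC node reports the transaction but a bare False on timeout, so callers should only test its truthiness, as the helpers above do. A minimal call sketch (the tx hash is a placeholder; chain is whichever MorphChain/MainChain service the surrounding version uses):

tx_id = "0" * 64  # hypothetical 64-character transaction hash
if not transaction_accepted(chain, tx_id):
    raise AssertionError(f"TX {tx_id} hasn't been processed")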
@reporter.step("Get FrostFS Balance") @reporter.step_deco("Get FrostFS Balance")
def get_balance(shell: Shell, morph_chain: MorphChain, wallet_path: str, wallet_password: str = ""): def get_balance(shell: Shell, morph_chain: MorphChain, wallet_path: str, wallet_password: str = ""):
""" """
This function returns FrostFS balance for given wallet. This function returns FrostFS balance for given wallet.
@ -84,11 +111,11 @@ def get_balance(shell: Shell, morph_chain: MorphChain, wallet_path: str, wallet_
raise out raise out
@reporter.step("Transfer Gas") @reporter.step_deco("Transfer Gas")
def transfer_gas( def transfer_gas(
shell: Shell, shell: Shell,
amount: int, amount: int,
morph_chain: MorphChain, main_chain: MainChain,
wallet_from_path: Optional[str] = None, wallet_from_path: Optional[str] = None,
wallet_from_password: Optional[str] = None, wallet_from_password: Optional[str] = None,
address_from: Optional[str] = None, address_from: Optional[str] = None,
@ -111,16 +138,22 @@ def transfer_gas(
address_to: The address of the wallet to transfer assets to. address_to: The address of the wallet to transfer assets to.
amount: Amount of gas to transfer. amount: Amount of gas to transfer.
""" """
wallet_from_path = wallet_from_path or morph_chain.get_wallet_path() wallet_from_path = wallet_from_path or main_chain.get_wallet_path()
wallet_from_password = ( wallet_from_password = (
wallet_from_password if wallet_from_password is not None else morph_chain.get_wallet_password() wallet_from_password
if wallet_from_password is not None
else main_chain.get_wallet_password()
)
address_from = address_from or wallet_utils.get_last_address_from_wallet(
wallet_from_path, wallet_from_password
)
address_to = address_to or wallet_utils.get_last_address_from_wallet(
wallet_to_path, wallet_to_password
) )
address_from = address_from or wallet_utils.get_last_address_from_wallet(wallet_from_path, wallet_from_password)
address_to = address_to or wallet_utils.get_last_address_from_wallet(wallet_to_path, wallet_to_password)
neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE) neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE)
out = neogo.nep17.transfer( out = neogo.nep17.transfer(
rpc_endpoint=morph_chain.get_endpoint(), rpc_endpoint=main_chain.get_endpoint(),
wallet=wallet_from_path, wallet=wallet_from_path,
wallet_password=wallet_from_password, wallet_password=wallet_from_password,
amount=amount, amount=amount,
@ -132,12 +165,50 @@ def transfer_gas(
txid = out.stdout.strip().split("\n")[-1] txid = out.stdout.strip().split("\n")[-1]
if len(txid) != 64: if len(txid) != 64:
raise Exception("Got no TXID after run the command") raise Exception("Got no TXID after run the command")
if not transaction_accepted(morph_chain, txid): if not transaction_accepted(main_chain, txid):
raise AssertionError(f"TX {txid} hasn't been processed") raise AssertionError(f"TX {txid} hasn't been processed")
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME)) time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
@reporter.step("Get Sidechain Balance") @reporter.step_deco("FrostFS Deposit")
def deposit_gas(
shell: Shell,
main_chain: MainChain,
amount: int,
wallet_from_path: str,
wallet_from_password: str,
):
"""
Transferring GAS from given wallet to FrostFS contract address.
"""
# get FrostFS contract address
deposit_addr = converting_utils.contract_hash_to_address(FROSTFS_CONTRACT)
logger.info(f"FrostFS contract address: {deposit_addr}")
address_from = wallet_utils.get_last_address_from_wallet(
wallet_path=wallet_from_path, wallet_password=wallet_from_password
)
transfer_gas(
shell=shell,
main_chain=main_chain,
amount=amount,
wallet_from_path=wallet_from_path,
wallet_from_password=wallet_from_password,
address_to=deposit_addr,
address_from=address_from,
)
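End to end, funding a test wallet with these helpers is a two-step flow; the sketch below uses invented paths and amounts:

# 1. Move GAS from the main-chain default wallet to the user wallet,
# 2. then deposit a slice of it to the FrostFS contract for storage payments.
transfer_gas(
    shell=shell,
    amount=1_000,
    main_chain=main_chain,
    wallet_to_path="/tmp/user-wallet.json",  # hypothetical
    wallet_to_password="",
)
deposit_gas(
    shell=shell,
    main_chain=main_chain,
    amount=100,
    wallet_from_path="/tmp/user-wallet.json",
    wallet_from_password="",
)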
@reporter.step_deco("Get Mainnet Balance")
def get_mainnet_balance(main_chain: MainChain, address: str):
resp = main_chain.rpc_client.get_nep17_balances(address=address)
logger.info(f"Got getnep17balances response: {resp}")
for balance in resp["balance"]:
if balance["assethash"] == GAS_HASH:
return float(balance["amount"]) / ASSET_POWER_MAINCHAIN
return float(0)
@reporter.step_deco("Get Sidechain Balance")
def get_sidechain_balance(morph_chain: MorphChain, address: str): def get_sidechain_balance(morph_chain: MorphChain, address: str):
resp = morph_chain.rpc_client.get_nep17_balances(address=address) resp = morph_chain.rpc_client.get_nep17_balances(address=address)
logger.info(f"Got getnep17balances response: {resp}") logger.info(f"Got getnep17balances response: {resp}")


@ -1,22 +1,34 @@
import json
import logging import logging
import os import os
import re
import uuid
from datetime import datetime, timedelta from datetime import datetime, timedelta
from typing import Optional from typing import Optional
from dateutil.parser import parse from dateutil.parser import parse
from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsAuthmate
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC
from frostfs_testlib.resources.common import CREDENTIALS_CREATE_TIMEOUT
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
from frostfs_testlib.s3.interfaces import BucketContainerResolver from frostfs_testlib.shell import CommandOptions, InteractiveInput, Shell
from frostfs_testlib.shell import Shell from frostfs_testlib.shell.interfaces import SshCredentials
from frostfs_testlib.steps.cli.container import search_nodes_with_container from frostfs_testlib.steps.cli.container import (
search_container_by_name,
search_nodes_with_container,
)
from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.utils.cli_utils import _run_with_passwd
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@reporter.step("Expected all objects are presented in the bucket") @reporter.step_deco("Expected all objects are presented in the bucket")
def check_objects_in_bucket( def check_objects_in_bucket(
s3_client: S3ClientWrapper, s3_client: S3ClientWrapper,
bucket: str, bucket: str,
@ -25,29 +37,37 @@ def check_objects_in_bucket(
) -> None: ) -> None:
unexpected_objects = unexpected_objects or [] unexpected_objects = unexpected_objects or []
bucket_objects = s3_client.list_objects(bucket) bucket_objects = s3_client.list_objects(bucket)
assert len(bucket_objects) == len(expected_objects), f"Expected {len(expected_objects)} objects in the bucket" assert len(bucket_objects) == len(
expected_objects
), f"Expected {len(expected_objects)} objects in the bucket"
for bucket_object in expected_objects: for bucket_object in expected_objects:
assert bucket_object in bucket_objects, f"Expected object {bucket_object} in objects list {bucket_objects}" assert (
bucket_object in bucket_objects
), f"Expected object {bucket_object} in objects list {bucket_objects}"
for bucket_object in unexpected_objects: for bucket_object in unexpected_objects:
assert bucket_object not in bucket_objects, f"Expected object {bucket_object} not in objects list {bucket_objects}" assert (
bucket_object not in bucket_objects
), f"Expected object {bucket_object} not in objects list {bucket_objects}"
@reporter.step("Try to get object and got error") @reporter.step_deco("Try to get object and got error")
def try_to_get_objects_and_expect_error(s3_client: S3ClientWrapper, bucket: str, object_keys: list) -> None: def try_to_get_objects_and_expect_error(
s3_client: S3ClientWrapper, bucket: str, object_keys: list
) -> None:
for obj in object_keys: for obj in object_keys:
try: try:
s3_client.get_object(bucket, obj) s3_client.get_object(bucket, obj)
raise AssertionError(f"Object {obj} found in bucket {bucket}") raise AssertionError(f"Object {obj} found in bucket {bucket}")
except Exception as err: except Exception as err:
assert "The specified key does not exist" in str(err), f"Expected error in exception {err}" assert "The specified key does not exist" in str(
err
), f"Expected error in exception {err}"
@reporter.step("Set versioning status to '{status}' for bucket '{bucket}'") @reporter.step_deco("Set versioning status to '{status}' for bucket '{bucket}'")
def set_bucket_versioning(s3_client: S3ClientWrapper, bucket: str, status: VersioningStatus): def set_bucket_versioning(s3_client: S3ClientWrapper, bucket: str, status: VersioningStatus):
if status == VersioningStatus.UNDEFINED: s3_client.get_bucket_versioning_status(bucket)
return
s3_client.put_bucket_versioning(bucket, status=status) s3_client.put_bucket_versioning(bucket, status=status)
bucket_status = s3_client.get_bucket_versioning_status(bucket) bucket_status = s3_client.get_bucket_versioning_status(bucket)
assert bucket_status == status.value, f"Expected {bucket_status} status. Got {status.value}" assert bucket_status == status.value, f"Expected {bucket_status} status. Got {status.value}"
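The helper is a one-liner to use, and because it reads the status back, a silently ignored put_bucket_versioning surfaces as an assertion failure (bucket name invented):

set_bucket_versioning(s3_client, "test-bucket", VersioningStatus.ENABLED)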
@ -57,9 +77,15 @@ def object_key_from_file_path(full_path: str) -> str:
return os.path.basename(full_path) return os.path.basename(full_path)
def assert_tags(actual_tags: list, expected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None) -> None: def assert_tags(
expected_tags = [{"Key": key, "Value": value} for key, value in expected_tags] if expected_tags else [] actual_tags: list, expected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None
unexpected_tags = [{"Key": key, "Value": value} for key, value in unexpected_tags] if unexpected_tags else [] ) -> None:
expected_tags = (
[{"Key": key, "Value": value} for key, value in expected_tags] if expected_tags else []
)
unexpected_tags = (
[{"Key": key, "Value": value} for key, value in unexpected_tags] if unexpected_tags else []
)
if expected_tags == []: if expected_tags == []:
assert not actual_tags, f"Expected there is no tags, got {actual_tags}" assert not actual_tags, f"Expected there is no tags, got {actual_tags}"
assert len(expected_tags) == len(actual_tags) assert len(expected_tags) == len(actual_tags)
@ -69,7 +95,7 @@ def assert_tags(actual_tags: list, expected_tags: Optional[list] = None, unexpec
assert tag not in actual_tags, f"Tag {tag} should not be in {actual_tags}" assert tag not in actual_tags, f"Tag {tag} should not be in {actual_tags}"
@reporter.step("Expected all tags are presented in object") @reporter.step_deco("Expected all tags are presented in object")
def check_tags_by_object( def check_tags_by_object(
s3_client: S3ClientWrapper, s3_client: S3ClientWrapper,
bucket: str, bucket: str,
@ -78,10 +104,12 @@ def check_tags_by_object(
unexpected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None,
) -> None: ) -> None:
actual_tags = s3_client.get_object_tagging(bucket, key) actual_tags = s3_client.get_object_tagging(bucket, key)
assert_tags(expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags) assert_tags(
expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags
)
@reporter.step("Expected all tags are presented in bucket") @reporter.step_deco("Expected all tags are presented in bucket")
def check_tags_by_bucket( def check_tags_by_bucket(
s3_client: S3ClientWrapper, s3_client: S3ClientWrapper,
bucket: str, bucket: str,
@ -89,7 +117,9 @@ def check_tags_by_bucket(
unexpected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None,
) -> None: ) -> None:
actual_tags = s3_client.get_bucket_tagging(bucket) actual_tags = s3_client.get_bucket_tagging(bucket)
assert_tags(expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags) assert_tags(
expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags
)
def assert_object_lock_mode( def assert_object_lock_mode(
@ -102,49 +132,102 @@ def assert_object_lock_mode(
retain_period: Optional[int] = None, retain_period: Optional[int] = None,
): ):
object_dict = s3_client.get_object(bucket, file_name, full_output=True) object_dict = s3_client.get_object(bucket, file_name, full_output=True)
assert object_dict.get("ObjectLockMode") == object_lock_mode, f"Expected Object Lock Mode is {object_lock_mode}" assert (
object_dict.get("ObjectLockMode") == object_lock_mode
), f"Expected Object Lock Mode is {object_lock_mode}"
assert ( assert (
object_dict.get("ObjectLockLegalHoldStatus") == legal_hold_status object_dict.get("ObjectLockLegalHoldStatus") == legal_hold_status
), f"Expected Object Lock Legal Hold Status is {legal_hold_status}" ), f"Expected Object Lock Legal Hold Status is {legal_hold_status}"
object_retain_date = object_dict.get("ObjectLockRetainUntilDate") object_retain_date = object_dict.get("ObjectLockRetainUntilDate")
retain_date = parse(object_retain_date) if isinstance(object_retain_date, str) else object_retain_date retain_date = (
parse(object_retain_date) if isinstance(object_retain_date, str) else object_retain_date
)
if retain_until_date: if retain_until_date:
assert retain_date.strftime("%Y-%m-%dT%H:%M:%S") == retain_until_date.strftime( assert retain_date.strftime("%Y-%m-%dT%H:%M:%S") == retain_until_date.strftime(
"%Y-%m-%dT%H:%M:%S" "%Y-%m-%dT%H:%M:%S"
), f'Expected Object Lock Retain Until Date is {str(retain_until_date.strftime("%Y-%m-%dT%H:%M:%S"))}' ), f'Expected Object Lock Retain Until Date is {str(retain_until_date.strftime("%Y-%m-%dT%H:%M:%S"))}'
elif retain_period: elif retain_period:
last_modify_date = object_dict.get("LastModified") last_modify_date = object_dict.get("LastModified")
last_modify = parse(last_modify_date) if isinstance(last_modify_date, str) else last_modify_date last_modify = (
parse(last_modify_date) if isinstance(last_modify_date, str) else last_modify_date
)
assert ( assert (
retain_date - last_modify + timedelta(seconds=1) retain_date - last_modify + timedelta(seconds=1)
).days == retain_period, f"Expected retention period is {retain_period} days" ).days == retain_period, f"Expected retention period is {retain_period} days"
def _format_grants_as_strings(grants: list[dict]) -> list: def assert_s3_acl(acl_grants: list, permitted_users: str):
grantee_format = "{g_type}::{uri}:{permission}" if permitted_users == "AllUsers":
return set( grantees = {"AllUsers": 0, "CanonicalUser": 0}
[ for acl_grant in acl_grants:
grantee_format.format( if acl_grant.get("Grantee", {}).get("Type") == "Group":
g_type=grant.get("Grantee", {}).get("Type", ""), uri = acl_grant.get("Grantee", {}).get("URI")
uri=grant.get("Grantee", {}).get("URI", ""), permission = acl_grant.get("Permission")
permission=grant.get("Permission", ""), assert (uri, permission) == (
"http://acs.amazonaws.com/groups/global/AllUsers",
"FULL_CONTROL",
), "All Groups should have FULL_CONTROL"
grantees["AllUsers"] += 1
if acl_grant.get("Grantee", {}).get("Type") == "CanonicalUser":
permission = acl_grant.get("Permission")
assert permission == "FULL_CONTROL", "Canonical User should have FULL_CONTROL"
grantees["CanonicalUser"] += 1
assert grantees["AllUsers"] >= 1, "All Users should have FULL_CONTROL"
assert grantees["CanonicalUser"] >= 1, "Canonical User should have FULL_CONTROL"
if permitted_users == "CanonicalUser":
for acl_grant in acl_grants:
if acl_grant.get("Grantee", {}).get("Type") == "CanonicalUser":
permission = acl_grant.get("Permission")
assert permission == "FULL_CONTROL", "Only CanonicalUser should have FULL_CONTROL"
else:
logger.error("FULL_CONTROL is given to All Users")
@reporter.step_deco("Init S3 Credentials")
def init_s3_credentials(
wallet: WalletInfo,
shell: Shell,
cluster: Cluster,
s3_bearer_rules_file: str,
policy: Optional[dict] = None,
s3gates: Optional[list[S3Gate]] = None,
):
gate_public_keys = []
bucket = str(uuid.uuid4())
if not s3gates:
s3gates = [cluster.s3_gates[0]]
for s3gate in s3gates:
gate_public_keys.append(s3gate.get_wallet_public_key())
frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC)
issue_secret_output = frostfs_authmate_exec.secret.issue(
wallet=wallet.path,
peer=cluster.default_rpc_endpoint,
bearer_rules=s3_bearer_rules_file,
gate_public_key=gate_public_keys,
wallet_password=wallet.password,
container_policy=policy,
container_friendly_name=bucket,
).stdout
aws_access_key_id = str(
re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group(
"aws_access_key_id"
) )
for grant in grants
]
) )
aws_secret_access_key = str(
re.search(
r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output
).group("aws_secret_access_key")
)
cid = str(
re.search(r"container_id.*:\s.(?P<container_id>\w*)", issue_secret_output).group(
"container_id"
)
)
return cid, aws_access_key_id, aws_secret_access_key
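A sketch of how a fixture might consume init_s3_credentials(); the wallet, shell and cluster objects come from the test harness, and the rules-file path is a placeholder:

cid, access_key_id, secret_access_key = init_s3_credentials(
    wallet=default_wallet,
    shell=client_shell,
    cluster=cluster,
    s3_bearer_rules_file="/path/to/s3_bearer_rules.json",  # hypothetical
)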
@reporter.step("Verify ACL permissions") @reporter.step_deco("Delete bucket with all objects")
def verify_acl_permissions(actual_acl_grants: list[dict], expected_acl_grants: list[dict], strict: bool = True):
actual_grants = _format_grants_as_strings(actual_acl_grants)
expected_grants = _format_grants_as_strings(expected_acl_grants)
assert expected_grants <= actual_grants, "Permissions mismatch"
if strict:
assert expected_grants == actual_grants, "Extra permissions found, must not be there"
@reporter.step("Delete bucket with all objects")
def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str): def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str):
versioning_status = s3_client.get_bucket_versioning_status(bucket) versioning_status = s3_client.get_bucket_versioning_status(bucket)
if versioning_status == VersioningStatus.ENABLED.value: if versioning_status == VersioningStatus.ENABLED.value:
@ -169,19 +252,16 @@ def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str):
s3_client.delete_bucket(bucket) s3_client.delete_bucket(bucket)
@reporter.step("Search nodes bucket") @reporter.step_deco("Search nodes bucket")
def search_nodes_with_bucket( def search_nodes_with_bucket(
cluster: Cluster, cluster: Cluster,
bucket_name: str, bucket_name: str,
wallet: WalletInfo, wallet: str,
shell: Shell, shell: Shell,
endpoint: str, endpoint: str,
bucket_container_resolver: BucketContainerResolver,
) -> list[ClusterNode]: ) -> list[ClusterNode]:
cid = None cid = search_container_by_name(wallet=wallet, name=bucket_name, shell=shell, endpoint=endpoint)
for cluster_node in cluster.cluster_nodes: nodes_list = search_nodes_with_container(
cid = bucket_container_resolver.resolve(cluster_node, bucket_name) wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster
if cid: )
break
nodes_list = search_nodes_with_container(wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster)
return nodes_list return nodes_list


@@ -4,18 +4,19 @@ import logging
 import os
 import uuid
 from dataclasses import dataclass
+from enum import Enum
 from typing import Any, Optional
 
-from frostfs_testlib import reporter
 from frostfs_testlib.cli import FrostfsCli
+from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
-from frostfs_testlib.resources.common import ASSETS_DIR
+from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
-from frostfs_testlib.testing.readable import HumanReadableEnum
 from frostfs_testlib.utils import json_utils, wallet_utils
 
+reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")
 
 UNRELATED_KEY = "unrelated key in the session"
@@ -25,7 +26,7 @@ WRONG_VERB = "wrong verb of the session"
 INVALID_SIGNATURE = "invalid signature of the session data"
 
 
-class ObjectVerb(HumanReadableEnum):
+class ObjectVerb(Enum):
     PUT = "PUT"
     DELETE = "DELETE"
     GET = "GET"
@@ -35,7 +36,7 @@ class ObjectVerb(HumanReadableEnum):
     SEARCH = "SEARCH"
 
 
-class ContainerVerb(HumanReadableEnum):
+class ContainerVerb(Enum):
     CREATE = "PUT"
     DELETE = "DELETE"
     SETEACL = "SETEACL"
@@ -48,7 +49,7 @@ class Lifetime:
     iat: int = 0
 
 
-@reporter.step("Generate Session Token")
+@reporter.step_deco("Generate Session Token")
 def generate_session_token(
     owner_wallet: WalletInfo,
     session_wallet: WalletInfo,
@@ -70,7 +71,9 @@ def generate_session_token(
     file_path = os.path.join(tokens_dir, str(uuid.uuid4()))
 
-    pub_key_64 = wallet_utils.get_wallet_public_key(session_wallet.path, session_wallet.password, "base64")
+    pub_key_64 = wallet_utils.get_wallet_public_key(
+        session_wallet.path, session_wallet.password, "base64"
+    )
 
     lifetime = lifetime or Lifetime()
@@ -95,7 +98,7 @@
     return file_path
 
 
-@reporter.step("Generate Session Token For Container")
+@reporter.step_deco("Generate Session Token For Container")
 def generate_container_session_token(
     owner_wallet: WalletInfo,
     session_wallet: WalletInfo,
@@ -122,7 +125,11 @@ def generate_container_session_token(
         "container": {
             "verb": verb.value,
             "wildcard": cid is None,
-            **({"containerID": {"value": f"{json_utils.encode_for_json(cid)}"}} if cid is not None else {}),
+            **(
+                {"containerID": {"value": f"{json_utils.encode_for_json(cid)}"}}
+                if cid is not None
+                else {}
+            ),
         },
     }
@@ -135,7 +142,7 @@
     )
 
 
-@reporter.step("Generate Session Token For Object")
+@reporter.step_deco("Generate Session Token For Object")
 def generate_object_session_token(
     owner_wallet: WalletInfo,
     session_wallet: WalletInfo,
@@ -177,7 +184,7 @@
     )
 
 
-@reporter.step("Get signed token for container session")
+@reporter.step_deco("Get signed token for container session")
 def get_container_signed_token(
     owner_wallet: WalletInfo,
     user_wallet: WalletInfo,
@@ -199,7 +206,7 @@
     return sign_session_token(shell, session_token_file, owner_wallet)
 
 
-@reporter.step("Get signed token for object session")
+@reporter.step_deco("Get signed token for object session")
 def get_object_signed_token(
     owner_wallet: WalletInfo,
     user_wallet: WalletInfo,
@@ -226,11 +233,12 @@
     return sign_session_token(shell, session_token_file, owner_wallet)
 
 
-@reporter.step("Create Session Token")
+@reporter.step_deco("Create Session Token")
 def create_session_token(
     shell: Shell,
     owner: str,
-    wallet: WalletInfo,
+    wallet_path: str,
+    wallet_password: str,
     rpc_endpoint: str,
 ) -> str:
     """
@@ -245,18 +253,19 @@ def create_session_token(
         The path to the generated session token file.
     """
     session_token = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
-    frostfscli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
+    frostfscli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC)
     frostfscli.session.create(
         rpc_endpoint=rpc_endpoint,
         address=owner,
+        wallet=wallet_path,
+        wallet_password=wallet_password,
         out=session_token,
-        wallet=wallet.path,
     )
     return session_token
 
 
-@reporter.step("Sign Session Token")
-def sign_session_token(shell: Shell, session_token_file: str, wallet: WalletInfo) -> str:
+@reporter.step_deco("Sign Session Token")
+def sign_session_token(shell: Shell, session_token_file: str, wlt: WalletInfo) -> str:
     """
     This function signs the session token by the given wallet.
@@ -269,6 +278,10 @@ def sign_session_token(shell: Shell, session_token_file: str, wallet: WalletInfo
         The path to the signed token.
     """
     signed_token_file = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
-    frostfscli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
-    frostfscli.util.sign_session_token(session_token_file, signed_token_file)
+    frostfscli = FrostfsCli(
+        shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG
+    )
+    frostfscli.util.sign_session_token(
+        wallet=wlt.path, from_file=session_token_file, to_file=signed_token_file
+    )
     return signed_token_file
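
For context, a minimal sketch of the changed session-token flow, assuming the right-hand (new) signatures; the owner address, wallet object and endpoint value are illustrative assumptions:

    # Hypothetical wiring; owner_wallet is a WalletInfo with .path/.password,
    # and the endpoint value is a placeholder.
    token_file = create_session_token(
        shell=shell,
        owner=owner_address,
        wallet_path=owner_wallet.path,
        wallet_password=owner_wallet.password,
        rpc_endpoint="s01.frostfs.devenv:8080",
    )
    signed_token = sign_session_token(shell, token_file, owner_wallet)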

View file

@@ -3,7 +3,7 @@ from time import sleep
 
 import pytest
-from frostfs_testlib import reporter
+from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.resources.error_patterns import OBJECT_ALREADY_REMOVED
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.steps.cli.object import delete_object, get_object
@@ -12,13 +12,16 @@ from frostfs_testlib.steps.tombstone import verify_head_tombstone
 from frostfs_testlib.storage.cluster import Cluster
 from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
 
+reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")
 
 CLEANUP_TIMEOUT = 10
 
 
-@reporter.step("Delete Objects")
-def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell, cluster: Cluster) -> None:
+@reporter.step_deco("Delete Objects")
+def delete_objects(
+    storage_objects: list[StorageObjectInfo], shell: Shell, cluster: Cluster
+) -> None:
     """
     Deletes given storage objects.
@@ -30,14 +33,14 @@ def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell, clust
     with reporter.step("Delete objects"):
         for storage_object in storage_objects:
             storage_object.tombstone = delete_object(
-                storage_object.wallet,
+                storage_object.wallet_file_path,
                 storage_object.cid,
                 storage_object.oid,
                 shell=shell,
                 endpoint=cluster.default_rpc_endpoint,
             )
             verify_head_tombstone(
-                wallet=storage_object.wallet,
+                wallet_path=storage_object.wallet_file_path,
                 cid=storage_object.cid,
                 oid_ts=storage_object.tombstone,
                 oid=storage_object.oid,
@@ -52,7 +55,7 @@ def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell, clust
         for storage_object in storage_objects:
             with pytest.raises(Exception, match=OBJECT_ALREADY_REMOVED):
                 get_object(
-                    storage_object.wallet,
+                    storage_object.wallet_file_path,
                     storage_object.cid,
                    storage_object.oid,
                     shell=shell,
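
For context, a sketch of how delete_objects is typically invoked from a test; the fixture names storage_objects, client_shell and cluster are illustrative assumptions:

    # Hypothetical test body; fixtures come from the calling test session.
    delete_objects(storage_objects, client_shell, cluster)
    # Afterwards each StorageObjectInfo carries its tombstone OID, and a repeated
    # get_object is expected to fail with OBJECT_ALREADY_REMOVED.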

View file

@@ -6,21 +6,22 @@
 """
 import logging
 
-from frostfs_testlib import reporter
+from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.steps.cli.object import head_object
 from frostfs_testlib.steps.complex_object_actions import get_last_object
 from frostfs_testlib.storage.cluster import StorageNode
-from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.utils import string_utils
 
+reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")
 
 
-# TODO: Unused, remove or make use of
-@reporter.step("Get Object Copies")
-def get_object_copies(complexity: str, wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int:
+@reporter.step_deco("Get Object Copies")
+def get_object_copies(
+    complexity: str, wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
+) -> int:
     """
     The function performs requests to all nodes of the container and
     finds out if they store a copy of the object. The procedure is
@@ -44,8 +45,10 @@
     )
 
 
-@reporter.step("Get Simple Object Copies")
-def get_simple_object_copies(wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int:
+@reporter.step_deco("Get Simple Object Copies")
+def get_simple_object_copies(
+    wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
+) -> int:
     """
     To figure out the number of a simple object copies, only direct
     HEAD requests should be made to the every node of the container.
@@ -63,7 +66,9 @@ def get_simple_object_copies(wallet: WalletInfo, cid: str, oid: str, shell: Shel
     copies = 0
     for node in nodes:
         try:
-            response = head_object(wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True)
+            response = head_object(
+                wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True
+            )
             if response:
                 logger.info(f"Found object {oid} on node {node}")
                 copies += 1
@@ -73,8 +78,10 @@ def get_simple_object_copies(wallet: WalletInfo, cid: str, oid: str, shell: Shel
     return copies
 
 
-@reporter.step("Get Complex Object Copies")
-def get_complex_object_copies(wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int:
+@reporter.step_deco("Get Complex Object Copies")
+def get_complex_object_copies(
+    wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
+) -> int:
     """
     To figure out the number of a complex object copies, we firstly
     need to retrieve its Last object. We consider that the number of
@@ -95,8 +102,10 @@ def get_complex_object_copies(wallet: WalletInfo, cid: str, oid: str, shell: She
     return get_simple_object_copies(wallet, cid, last_oid, shell, nodes)
 
 
-@reporter.step("Get Nodes With Object")
-def get_nodes_with_object(cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> list[StorageNode]:
+@reporter.step_deco("Get Nodes With Object")
+def get_nodes_with_object(
+    cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
+) -> list[StorageNode]:
     """
     The function returns list of nodes which store
     the given object.
@@ -111,7 +120,8 @@ def get_nodes_with_object(cid: str, oid: str, shell: Shell, nodes: list[StorageN
     nodes_list = []
     for node in nodes:
-        wallet = WalletInfo.from_node(node)
+        wallet = node.get_wallet_path()
+        wallet_config = node.get_wallet_config_path()
         try:
             res = head_object(
                 wallet,
@@ -120,6 +130,7 @@ def get_nodes_with_object(cid: str, oid: str, shell: Shell, nodes: list[StorageN
                 shell=shell,
                 endpoint=node.get_rpc_endpoint(),
                 is_direct=True,
+                wallet_config=wallet_config,
             )
             if res is not None:
                 logger.info(f"Found object {oid} on node {node}")
@@ -130,8 +141,10 @@ def get_nodes_with_object(cid: str, oid: str, shell: Shell, nodes: list[StorageN
     return nodes_list
 
 
-@reporter.step("Get Nodes Without Object")
-def get_nodes_without_object(wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> list[StorageNode]:
+@reporter.step_deco("Get Nodes Without Object")
+def get_nodes_without_object(
+    wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
+) -> list[StorageNode]:
     """
     The function returns list of nodes which do not store
     the given object.
@@ -147,7 +160,9 @@ def get_nodes_without_object(wallet: WalletInfo, cid: str, oid: str, shell: Shel
     nodes_list = []
     for node in nodes:
         try:
-            res = head_object(wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True)
+            res = head_object(
+                wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True
+            )
             if res is None:
                 nodes_list.append(node)
         except Exception as err:
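
For context, a sketch combining these helpers into a replication check; the expected copy count depends on the container placement policy and is an assumption here:

    # Hypothetical check; wallet (a path string under the new signatures), cid,
    # oid, shell and cluster are assumed fixtures.
    nodes = cluster.services(StorageNode)
    copies = get_simple_object_copies(wallet, cid, oid, shell=shell, nodes=nodes)
    assert copies == 2, f"Expected 2 copies, got {copies}"  # policy-dependent
    holders = get_nodes_with_object(cid, oid, shell=shell, nodes=nodes)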

View file

@@ -1,24 +1,41 @@
+import json
 import logging
 
-from frostfs_testlib import reporter
+from neo3.wallet import wallet
+
+from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.steps.cli.object import head_object
-from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 
+reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")
 
 
-@reporter.step("Verify Head Tombstone")
-def verify_head_tombstone(wallet: WalletInfo, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str):
-    header = head_object(wallet, cid, oid_ts, shell=shell, endpoint=endpoint)["header"]
+@reporter.step_deco("Verify Head Tombstone")
+def verify_head_tombstone(
+    wallet_path: str, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str
+):
+    header = head_object(wallet_path, cid, oid_ts, shell=shell, endpoint=endpoint)["header"]
 
     s_oid = header["sessionToken"]["body"]["object"]["target"]["objects"]
     logger.info(f"Header Session OIDs is {s_oid}")
     logger.info(f"OID is {oid}")
 
     assert header["containerID"] == cid, "Tombstone Header CID is wrong"
-    assert header["ownerID"] == wallet.get_address_from_json(0), "Tombstone Owner ID is wrong"
+
+    with open(wallet_path, "r") as file:
+        wlt_data = json.loads(file.read())
+    wlt = wallet.Wallet.from_json(wlt_data, password="")
+    addr = wlt.accounts[0].address
+
+    assert header["ownerID"] == addr, "Tombstone Owner ID is wrong"
     assert header["objectType"] == "TOMBSTONE", "Header Type isn't Tombstone"
-    assert header["sessionToken"]["body"]["object"]["verb"] == "DELETE", "Header Session Type isn't DELETE"
-    assert header["sessionToken"]["body"]["object"]["target"]["container"] == cid, "Header Session ID is wrong"
-    assert oid in header["sessionToken"]["body"]["object"]["target"]["objects"], "Header Session OID is wrong"
+    assert (
+        header["sessionToken"]["body"]["object"]["verb"] == "DELETE"
+    ), "Header Session Type isn't DELETE"
+    assert (
+        header["sessionToken"]["body"]["object"]["target"]["container"] == cid
+    ), "Header Session ID is wrong"
+    assert (
+        oid in header["sessionToken"]["body"]["object"]["target"]["objects"]
+    ), "Header Session OID is wrong"

View file

@@ -1,7 +1,25 @@
+from frostfs_testlib.storage.constants import _FrostfsServicesNames
+from frostfs_testlib.storage.dataclasses.frostfs_services import (
+    HTTPGate,
+    InnerRing,
+    MainChain,
+    MorphChain,
+    S3Gate,
+    StorageNode,
+)
 from frostfs_testlib.storage.service_registry import ServiceRegistry
 
 __class_registry = ServiceRegistry()
 
+# Register default public services
+__class_registry.register_service(_FrostfsServicesNames.STORAGE, StorageNode)
+__class_registry.register_service(_FrostfsServicesNames.INNER_RING, InnerRing)
+__class_registry.register_service(_FrostfsServicesNames.MORPH_CHAIN, MorphChain)
+__class_registry.register_service(_FrostfsServicesNames.S3_GATE, S3Gate)
+__class_registry.register_service(_FrostfsServicesNames.HTTP_GATE, HTTPGate)
+# # TODO: Remove this since we are no longer have main chain
+__class_registry.register_service(_FrostfsServicesNames.MAIN_CHAIN, MainChain)
+
 
 def get_service_registry() -> ServiceRegistry:
     """Returns registry with registered classes related to cluster and cluster nodes.

View file

@@ -4,18 +4,23 @@ import re
 import yaml
 from yarl import URL
 
-from frostfs_testlib import reporter
 from frostfs_testlib.hosting import Host, Hosting
 from frostfs_testlib.hosting.config import ServiceConfig
+from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.storage import get_service_registry
-from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml
 from frostfs_testlib.storage.constants import ConfigAttributes
-from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode
-from frostfs_testlib.storage.dataclasses.metrics import Metrics
+from frostfs_testlib.storage.dataclasses.frostfs_services import (
+    HTTPGate,
+    InnerRing,
+    MorphChain,
+    S3Gate,
+    StorageNode,
+)
 from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass
-from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces
 from frostfs_testlib.storage.service_registry import ServiceRegistry
 
+reporter = get_reporter()
+
 
 class ClusterNode:
     """
@@ -25,13 +30,11 @@ class ClusterNode:
     class_registry: ServiceRegistry
     id: int
     host: Host
-    metrics: Metrics
 
     def __init__(self, host: Host, id: int) -> None:
         self.host = host
         self.id = id
         self.class_registry = get_service_registry()
-        self.metrics = Metrics(host=self.host, metrics_endpoint=self.storage_node.get_metrics_endpoint())
 
     @property
     def host_ip(self):
@@ -74,7 +77,6 @@
     def s3_gate(self) -> S3Gate:
         return self.service(S3Gate)
 
-    # TODO: Deprecated. Use config with ServiceConfigurationYml interface
     def get_config(self, config_file_path: str) -> dict:
         shell = self.host.get_shell()
 
@@ -84,17 +86,13 @@
         config = yaml.safe_load(config_text)
         return config
 
-    # TODO: Deprecated. Use config with ServiceConfigurationYml interface
     def save_config(self, new_config: dict, config_file_path: str) -> None:
         shell = self.host.get_shell()
 
         config_str = yaml.dump(new_config)
         shell.exec(f"echo '{config_str}' | sudo tee {config_file_path}")
 
-    def config(self, service_type: ServiceClass) -> ServiceConfigurationYml:
-        return self.service(service_type).config
-
-    def service(self, service_type: ServiceClass) -> ServiceClass:
+    def service(self, service_type: type[ServiceClass]) -> ServiceClass:
         """
         Get a service cluster node of specified type.
 
@@ -109,7 +107,7 @@
         service_entry = self.class_registry.get_entry(service_type)
         service_name = service_entry["hosting_service_name"]
-        pattern = f"{service_name}_{self.id:02}"
+        pattern = f"{service_name}{self.id:02}"
 
         config = self.host.get_service_config(pattern)
         return service_type(
@@ -118,42 +116,10 @@
             self.host,
         )
 
-    @property
-    def services(self) -> list[NodeBase]:
-        svcs: list[NodeBase] = []
-        svcs_names_on_node = [svc.name for svc in self.host.config.services]
-        for entry in self.class_registry._class_mapping.values():
-            hosting_svc_name = entry["hosting_service_name"]
-            pattern = f"{hosting_svc_name}_{self.id:02}"
-            if pattern in svcs_names_on_node:
-                config = self.host.get_service_config(pattern)
-                svcs.append(
-                    entry["cls"](
-                        self.id,
-                        config.name,
-                        self.host,
-                    )
-                )
-
-        return svcs
-
-    def get_all_interfaces(self) -> dict[str, str]:
-        return self.host.config.interfaces
-
-    def get_interface(self, interface: Interfaces) -> str:
-        return self.host.config.interfaces[interface.value]
-
-    def get_data_interfaces(self) -> list[str]:
-        return [ip_address for name_interface, ip_address in self.host.config.interfaces.items() if "data" in name_interface]
-
-    def get_data_interface(self, search_interface: str) -> list[str]:
-        return [self.host.config.interfaces[interface] for interface in self.host.config.interfaces.keys() if search_interface == interface]
-
-    def get_internal_interfaces(self) -> list[str]:
-        return [ip_address for name_interface, ip_address in self.host.config.interfaces.items() if "internal" in name_interface]
-
-    def get_internal_interface(self, search_internal: str) -> list[str]:
-        return [self.host.config.interfaces[interface] for interface in self.host.config.interfaces.keys() if search_internal == interface]
+    def get_list_of_services(self) -> list[str]:
+        return [
+            config.attributes[ConfigAttributes.SERVICE_NAME] for config in self.host.config.services
+        ]
 
 
 class Cluster:
@@ -203,40 +169,6 @@
     def morph_chain(self) -> list[MorphChain]:
         return self.services(MorphChain)
 
-    def nodes(self, services: list[ServiceClass]) -> list[ClusterNode]:
-        """
-        Resolve which cluster nodes hosting the specified services.
-
-        Args:
-            services: list of services to resolve hosting cluster nodes.
-
-        Returns:
-            list of cluster nodes which host specified services.
-        """
-        cluster_nodes = set()
-        for service in services:
-            cluster_nodes.update([node for node in self.cluster_nodes if node.service(type(service)) == service])
-
-        return list(cluster_nodes)
-
-    def node(self, service: ServiceClass) -> ClusterNode:
-        """
-        Resolve single cluster node hosting the specified service.
-
-        Args:
-            services: list of services to resolve hosting cluster nodes.
-
-        Returns:
-            list of cluster nodes which host specified services.
-        """
-        nodes = [node for node in self.cluster_nodes if node.service(type(service)) == service]
-        if not len(nodes):
-            raise RuntimeError(f"Cannot find service {service} on any node")
-
-        return nodes[0]
-
     def services(self, service_type: type[ServiceClass]) -> list[ServiceClass]:
         """
         Get all services in a cluster of specified type.
@@ -253,13 +185,13 @@
         service_name = service["hosting_service_name"]
         cls: type[NodeBase] = service["cls"]
 
-        pattern = f"{service_name}_\d*$"
+        pattern = f"{service_name}\d*$"
         configs = self.hosting.find_service_configs(pattern)
 
         found_nodes = []
         for config in configs:
             # config.name is something like s3-gate01. Cut last digits to know service type
-            service_type = re.findall("(.*)_\d+", config.name)[0]
+            service_type = re.findall(".*\D", config.name)[0]
 
             # exclude unsupported services
             if service_type != service_name:
                 continue
@@ -324,6 +256,8 @@
         return [node.get_endpoint() for node in nodes]
 
     def get_nodes_by_ip(self, ips: list[str]) -> list[ClusterNode]:
-        cluster_nodes = [node for node in self.cluster_nodes if URL(node.morph_chain.get_endpoint()).host in ips]
+        cluster_nodes = [
+            node for node in self.cluster_nodes if URL(node.morph_chain.get_endpoint()).host in ips
+        ]
         with reporter.step(f"Return cluster nodes - {cluster_nodes}"):
             return cluster_nodes
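
For context, a sketch of the node/service lookup flow after this change; the Cluster constructor argument and the fixture names are assumptions:

    # Hypothetical usage; `hosting` setup happens outside this diff.
    cluster = Cluster(hosting)
    node = cluster.cluster_nodes[0]
    storage = node.service(StorageNode)   # resolved via the f"{service_name}{id:02}" pattern
    names = node.get_list_of_services()   # SERVICE_NAME attributes from the host config
    matched = cluster.get_nodes_by_ip(["127.0.0.1"])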

View file

@@ -1,65 +0,0 @@
-from abc import ABC, abstractmethod
-from typing import Any
-
-
-class ServiceConfigurationYml(ABC):
-    """
-    Class to manipulate yml configuration for service
-    """
-
-    def _find_option(self, key: str, data: dict):
-        tree = key.split(":")
-        current = data
-        for node in tree:
-            if isinstance(current, list) and len(current) - 1 >= int(node):
-                current = current[int(node)]
-                continue
-            if node not in current:
-                return None
-            current = current[node]
-
-        return current
-
-    def _set_option(self, key: str, value: Any, data: dict):
-        tree = key.split(":")
-        current = data
-        for node in tree[:-1]:
-            if isinstance(current, list) and len(current) - 1 >= int(node):
-                current = current[int(node)]
-                continue
-            if node not in current:
-                current[node] = {}
-            current = current[node]
-
-        current[tree[-1]] = value
-
-    @abstractmethod
-    def get(self, key: str) -> str:
-        """
-        Get parameter value from current configuration
-
-        Args:
-            key: key of the parameter in yaml format like 'storage:shard:default:resync_metabase'
-
-        Returns:
-            value of the parameter
-        """
-
-    @abstractmethod
-    def set(self, values: dict[str, Any]):
-        """
-        Sets parameters to configuration
-
-        Args:
-            values: dict where key is the key of the parameter in yaml format like 'storage:shard:default:resync_metabase' and value is the value of the option to set
-        """
-
-    @abstractmethod
-    def revert(self):
-        """
-        Revert changes
-        """

Some files were not shown because too many files have changed in this diff.