forked from TrueCloudLab/frostfs-testlib

Compare commits: 58 commits, 14c85e0a9e ... f8562da7e0

Commit SHAs:
f8562da7e0, c8227e80af, 1f50166e78, 03c45d7592, e970fe2788, 8ee2985c89, 137fd21561, f3c160f313,
3af4dfd977, 8a360683ae, f4111a1374, b1a3d740e9, 0c3bb20af5, e1f3444e92, cff5db5a67, 1c3bbe26f7,
dd347dd8fb, 98f9c78f09, 2c2af7f8ed, d039bcc221, e919064bb9, 98ccd4c382, 9feb8135e3, 64f004d5a5,
eb37573df8, 602de43bff, fc1f373477, b039ee9940, be9b3f5855, f7ef8cb881, ecf8f0841a, 19b8b96898,
f2d34dbf2e, e14896400f, 449c18bb1a, aa277fdd6a, 7059596506, 7112bf9c88, b1c21e0e5b, 02c079eda3,
d28f3cdc28, e4878f4d1e, 807235af95, 716a780a13, d6e08c477b, 612e088763, b856e82008, 2240be09d2,
38742badf2, 9c792c091e, 49ccd47e81, 675183cd9a, 8dcfae5cb2, 15862e5901, 4896abcec3, 62216293f8,
59b4157991, 917dc6f6d8
60 changed files with 2698 additions and 869 deletions
.gitignore (vendored): 2 changes

@@ -1,6 +1,7 @@
 # ignore IDE files
 .vscode
 .idea
+venv.*
 
 # ignore temp files under any path
 .DS_Store
@@ -10,3 +11,4 @@
 /dist
 /build
 *.egg-info
+wallet_config.yml
@@ -63,9 +63,9 @@ $ git checkout -b feature/123-something_awesome
 ```
 
 ### Test your changes
-Before submitting any changes to the library, please, make sure that all unit tests are passing. To run the tests, please, use the following command:
+Before submitting any changes to the library, please, make sure that linter and all unit tests are passing. To run the tests, please, use the following command:
 ```shell
-$ python -m unittest discover --start-directory tests
+$ make validation
 ```
 
 To enable tests that interact with SSH server, please, setup SSH server and set the following environment variables before running the tests:
Makefile: 41 changes

@@ -1,8 +1,11 @@
 SHELL := /bin/bash
 PYTHON_VERSION := 3.10
-VENV_DIR := venv.frostfs-testlib
+VENV_NAME := frostfs-testlib
+VENV_DIR := venv.${VENV_NAME}
 
 current_dir := $(shell pwd)
+DIRECTORIES := $(sort $(dir $(wildcard ../frostfs-testlib-plugin-*/ ../*-testcases/)))
+FROM_VENV := . ${VENV_DIR}/bin/activate &&
 
 venv: create requirements paths precommit
 	@echo Ready
@@ -13,15 +16,35 @@ precommit:
 
 paths:
 	@echo Append paths for project
-	@echo Virtual environment: ${VENV_DIR}
-	@sudo rm -rf ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
-	@sudo touch ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
-	@echo ${current_dir}/src/frostfs_testlib_frostfs_testlib | sudo tee ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
+	@echo Virtual environment: ${current_dir}/${VENV_DIR}
+	@rm -rf ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
+	@touch ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
+	@echo ${current_dir}/src | tee ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
 
-create:
-	@echo Create virtual environment for
-	virtualenv --python=python${PYTHON_VERSION} --prompt=frostfs-testlib ${VENV_DIR}
+create: ${VENV_DIR}
+
+${VENV_DIR}:
+	@echo Create virtual environment ${current_dir}/${VENV_DIR}
+	virtualenv --python=python${PYTHON_VERSION} --prompt=${VENV_NAME} ${VENV_DIR}
 
 requirements:
 	@echo Isntalling pip requirements
 	. ${VENV_DIR}/bin/activate && pip install -Ur requirements.txt
+
+#### VALIDATION SECTION ####
+lint: create requirements
+	${FROM_VENV} pylint --disable R,C,W ./src
+
+unit_test:
+	@echo Starting unit tests
+	${FROM_VENV} python -m pytest tests
+
+.PHONY: lint_dependent $(DIRECTORIES)
+lint_dependent: $(DIRECTORIES)
+
+$(DIRECTORIES):
+	@echo checking dependent repo $@
+	$(MAKE) validation -C $@
+
+validation: lint unit_test lint_dependent
@@ -18,11 +18,11 @@ keywords = ["frostfs", "test"]
 dependencies = [
     "allure-python-commons>=2.13.2",
     "docker>=4.4.0",
-    "importlib_metadata>=5.0; python_version < '3.10'",
+    "pyyaml==6.0.1",
     "neo-mamba==1.0.0",
     "paramiko>=2.10.3",
     "pexpect>=4.8.0",
-    "requests>=2.28.0",
+    "requests==2.28.1",
     "docstring_parser>=0.15",
     "testrail-api>=1.12.0",
     "pytest==7.1.2",
@@ -44,13 +44,16 @@ allure = "frostfs_testlib.reporter.allure_handler:AllureHandler"
 [project.entry-points."frostfs.testlib.hosting"]
 docker = "frostfs_testlib.hosting.docker_host:DockerHost"
 
+[project.entry-points."frostfs.testlib.healthcheck"]
+basic = "frostfs_testlib.healthcheck.basic_healthcheck:BasicHealthcheck"
+
 [tool.isort]
 profile = "black"
 src_paths = ["src", "tests"]
-line_length = 100
+line_length = 120
 
 [tool.black]
-line-length = 100
+line-length = 120
 target-version = ["py310"]
 
 [tool.bumpver]
@@ -64,3 +67,9 @@ push = false
 [tool.bumpver.file_patterns]
 "pyproject.toml" = ['current_version = "{version}"', 'version = "{version}"']
 "src/frostfs_testlib/__init__.py" = ["{version}"]
+
+[tool.pytest.ini_options]
+filterwarnings = [
+    "ignore:Blowfish has been deprecated:cryptography.utils.CryptographyDeprecationWarning",
+]
+testpaths = ["tests"]
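The new `frostfs.testlib.healthcheck` entry-point group mirrors the existing `frostfs.testlib.hosting` group. Below is a minimal sketch (not part of the diff; the helper name is illustrative) of resolving the registered `basic` implementation with the standard `importlib.metadata` mechanism; in practice the library would resolve this through its own plugin loader, the same way hosting plugins are resolved.

```python
from importlib.metadata import entry_points


def load_healthcheck_class(name: str = "basic"):
    # Scan the "frostfs.testlib.healthcheck" group declared in pyproject.toml
    # and return the class object the selected entry point refers to.
    for entry_point in entry_points(group="frostfs.testlib.healthcheck"):
        if entry_point.name == name:
            return entry_point.load()
    raise ValueError(f"No healthcheck plugin named {name!r} is installed")
```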
@@ -1,6 +1,5 @@
 allure-python-commons==2.13.2
 docker==4.4.0
-importlib_metadata==5.0.0
 neo-mamba==1.0.0
 paramiko==2.10.3
 pexpect==4.8.0
@@ -17,6 +16,7 @@ black==22.8.0
 bumpver==2022.1118
 isort==5.12.0
 pre-commit==2.20.0
+pylint==2.17.4
 
 # Packaging dependencies
 build==0.8.0
@@ -6,6 +6,7 @@ from docstring_parser.google import DEFAULT_SECTIONS, Section, SectionType
 
 DEFAULT_SECTIONS.append(Section("Steps", "steps", SectionType.MULTIPLE))
 
+
 class TestCase:
     """
     Test case object implementation for use in collector and exporters
@@ -106,7 +107,9 @@ class TestCaseCollector:
         # Read test_case suite and section name from test class if possible and get test function from class
         if test.cls:
             suite_name = test.cls.__dict__.get("__test_case_suite_name__", suite_name)
-            suite_section_name = test.cls.__dict__.get("__test_case_suite_section__", suite_section_name)
+            suite_section_name = test.cls.__dict__.get(
+                "__test_case_suite_section__", suite_section_name
+            )
             test_function = test.cls.__dict__[test.originalname]
         else:
             # If no test class, read test function from module
@@ -117,7 +120,9 @@ class TestCaseCollector:
         test_case_title = test_function.__dict__.get("__test_case_title__", None)
         test_case_priority = test_function.__dict__.get("__test_case_priority__", None)
         suite_name = test_function.__dict__.get("__test_case_suite_name__", suite_name)
-        suite_section_name = test_function.__dict__.get("__test_case_suite_section__", suite_section_name)
+        suite_section_name = test_function.__dict__.get(
+            "__test_case_suite_section__", suite_section_name
+        )
 
         # Parce test_steps if they define in __doc__
         doc_string = parse(test_function.__doc__, style=DocstringStyle.GOOGLE)
@@ -125,7 +130,9 @@ class TestCaseCollector:
         if doc_string.short_description:
             test_case_description = doc_string.short_description
             if doc_string.long_description:
-                test_case_description = f"{doc_string.short_description}\r\n{doc_string.long_description}"
+                test_case_description = (
+                    f"{doc_string.short_description}\r\n{doc_string.long_description}"
+                )
 
         if doc_string.meta:
             for meta in doc_string.meta:
@@ -140,25 +147,27 @@ class TestCaseCollector:
             test_case_params = test_case_call_spec.id
         # Format title with params
        if test_case_title:
-            test_case_title = self.__format_string_with_params__(test_case_title,test_case_call_spec.params)
+            test_case_title = self.__format_string_with_params__(
+                test_case_title, test_case_call_spec.params
+            )
        # Format steps with params
        if test_case_steps:
            for key, value in test_case_steps.items():
-                value = self.__format_string_with_params__(value,test_case_call_spec.params)
+                value = self.__format_string_with_params__(value, test_case_call_spec.params)
                 test_case_steps[key] = value
 
        # If there is set basic test case attributes create TestCase and return
        if test_case_id and test_case_title and suite_name and suite_name:
            test_case = TestCase(
-                id=test_case_id,
+                uuid_id=test_case_id,
                 title=test_case_title,
                 description=test_case_description,
                 priority=test_case_priority,
                 steps=test_case_steps,
                 params=test_case_params,
                 suite_name=suite_name,
                 suite_section_name=suite_section_name,
            )
            return test_case
        # Return None if there is no enough information for return test case
        return None
@@ -187,4 +196,4 @@ class TestCaseCollector:
             test_case = self.__get_test_case_from_pytest_test__(test)
             if test_case:
                 test_cases.append(test_case)
         return test_cases
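For context, a sketch of the Google-style docstring convention that this collector parses. The sample test below is illustrative and assumes docstring_parser surfaces the custom "Steps" section through `doc.meta`, which is what the collector code above relies on.

```python
from docstring_parser import DocstringStyle, parse
from docstring_parser.google import DEFAULT_SECTIONS, Section, SectionType

# Register the custom "Steps" section exactly as the module above does.
DEFAULT_SECTIONS.append(Section("Steps", "steps", SectionType.MULTIPLE))


def sample_test():
    """Upload object to container.

    Steps:
        1: Create container
        2: Upload object and verify it is stored
    """


doc = parse(sample_test.__doc__, style=DocstringStyle.GOOGLE)
# Each numbered step surfaces as a meta entry whose args start with "steps".
steps = {meta.args[1]: meta.description for meta in doc.meta if meta.args[0] == "steps"}
print(steps)  # expected: {'1': 'Create container', '2': 'Upload object and verify it is stored'}
```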
@@ -67,6 +67,6 @@ class TestExporter(ABC):
             steps = [{"content": value, "expected": " "} for key, value in test_case.steps.items()]
 
             if test_case_in_tms:
-                self.update_test_case(test_case, test_case_in_tms)
+                self.update_test_case(test_case, test_case_in_tms, test_suite, test_section)
             else:
-                self.create_test_case(test_case)
+                self.create_test_case(test_case, test_suite, test_section)
@@ -8,6 +8,7 @@ from frostfs_testlib.cli.frostfs_cli.object import FrostfsCliObject
 from frostfs_testlib.cli.frostfs_cli.session import FrostfsCliSession
 from frostfs_testlib.cli.frostfs_cli.shards import FrostfsCliShards
 from frostfs_testlib.cli.frostfs_cli.storagegroup import FrostfsCliStorageGroup
+from frostfs_testlib.cli.frostfs_cli.tree import FrostfsCliTree
 from frostfs_testlib.cli.frostfs_cli.util import FrostfsCliUtil
 from frostfs_testlib.cli.frostfs_cli.version import FrostfsCliVersion
 from frostfs_testlib.shell import Shell
@@ -36,3 +37,4 @@ class FrostfsCli:
         self.storagegroup = FrostfsCliStorageGroup(shell, frostfs_cli_exec_path, config=config_file)
         self.util = FrostfsCliUtil(shell, frostfs_cli_exec_path, config=config_file)
         self.version = FrostfsCliVersion(shell, frostfs_cli_exec_path, config=config_file)
+        self.tree = FrostfsCliTree(shell, frostfs_cli_exec_path, config=config_file)
@@ -224,6 +224,7 @@ class FrostfsCliObject(CliCommand):
         address: Optional[str] = None,
         attributes: Optional[dict] = None,
         bearer: Optional[str] = None,
+        copies_number: Optional[int] = None,
         disable_filename: bool = False,
         disable_timestamp: bool = False,
         expire_at: Optional[int] = None,
@@ -241,6 +242,7 @@ class FrostfsCliObject(CliCommand):
             address: Address of wallet account.
             attributes: User attributes in form of Key1=Value1,Key2=Value2.
             bearer: File with signed JSON or binary encoded bearer token.
+            copies_number: Number of copies of the object to store within the RPC call.
             cid: Container ID.
             disable_filename: Do not set well-known filename attribute.
             disable_timestamp: Do not set well-known timestamp attribute.
@@ -349,3 +351,45 @@ class FrostfsCliObject(CliCommand):
             "object search",
             **{param: value for param, value in locals().items() if param not in ["self"]},
         )
+
+    def nodes(
+        self,
+        rpc_endpoint: str,
+        wallet: str,
+        cid: str,
+        address: Optional[str] = None,
+        bearer: Optional[str] = None,
+        generate_key: Optional = None,
+        oid: Optional[str] = None,
+        trace: bool = False,
+        root: bool = False,
+        verify_presence_all: bool = False,
+        ttl: Optional[int] = None,
+        xhdr: Optional[dict] = None,
+        timeout: Optional[str] = None,
+    ) -> CommandResult:
+        """
+        Search object nodes.
+
+        Args:
+            address: Address of wallet account.
+            bearer: File with signed JSON or binary encoded bearer token.
+            cid: Container ID.
+            generate_key: Generate new private key.
+            oid: Object ID.
+            trace: Generate trace ID and print it.
+            root: Search for user objects.
+            rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
+            verify_presence_all: Verify the actual presence of the object on all netmap nodes.
+            ttl: TTL value in request meta header (default 2).
+            wallet: WIF (NEP-2) string or path to the wallet or binary key.
+            xhdr: Dict with request X-Headers.
+            timeout: Timeout for the operation (default 15s).
+
+        Returns:
+            Command's result.
+        """
+        return self._execute(
+            "object nodes",
+            **{param: value for param, value in locals().items() if param not in ["self"]},
+        )
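A hedged usage sketch of the new `object nodes` wrapper; the shell, binary path, wallet and identifiers below are placeholders, not values from this diff.

```python
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.shell import LocalShell

# Placeholder paths and IDs for illustration only.
cli = FrostfsCli(LocalShell(), "frostfs-cli", config_file="/path/to/cli-config.yaml")
result = cli.object.nodes(
    rpc_endpoint="127.0.0.1:8080",
    wallet="/path/to/wallet.json",
    cid="<container-id>",
    oid="<object-id>",
    verify_presence_all=True,
)
print(result.stdout)
```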
src/frostfs_testlib/cli/frostfs_cli/tree.py (new file): 29 lines

@@ -0,0 +1,29 @@
+from typing import Optional
+
+from frostfs_testlib.cli.cli_command import CliCommand
+from frostfs_testlib.shell import CommandResult
+
+
+class FrostfsCliTree(CliCommand):
+    def healthcheck(
+        self,
+        wallet: Optional[str] = None,
+        rpc_endpoint: Optional[str] = None,
+        timeout: Optional[str] = None,
+    ) -> CommandResult:
+        """Get internal balance of FrostFS account
+
+        Args:
+            address: Address of wallet account.
+            owner: Owner of balance account (omit to use owner from private key).
+            rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
+            wallet: WIF (NEP-2) string or path to the wallet or binary key.
+
+        Returns:
+            Command's result.
+
+        """
+        return self._execute(
+            "tree healthcheck",
+            **{param: value for param, value in locals().items() if param not in ["self"]},
+        )
src/frostfs_testlib/healthcheck/basic_healthcheck.py (new file): 43 lines

@@ -0,0 +1,43 @@
+from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
+from frostfs_testlib.healthcheck.interfaces import Healthcheck
+from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
+from frostfs_testlib.steps.node_management import storage_node_healthcheck
+from frostfs_testlib.storage.cluster import ClusterNode
+
+reporter = get_reporter()
+
+
+class BasicHealthcheck(Healthcheck):
+    @reporter.step_deco("Perform healthcheck for {cluster_node}")
+    def perform(self, cluster_node: ClusterNode):
+        result = self.storage_healthcheck(cluster_node)
+        if result:
+            raise AssertionError(result)
+
+    @reporter.step_deco("Tree healthcheck on {cluster_node}")
+    def tree_healthcheck(self, cluster_node: ClusterNode) -> str | None:
+        host = cluster_node.host
+        service_config = host.get_service_config(cluster_node.storage_node.name)
+        wallet_path = service_config.attributes["wallet_path"]
+        wallet_password = service_config.attributes["wallet_password"]
+
+        shell = host.get_shell()
+        wallet_config_path = f"/tmp/{cluster_node.storage_node.name}-config.yaml"
+        wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
+        shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
+
+        remote_cli = FrostfsCli(
+            shell,
+            host.get_cli_config(FROSTFS_CLI_EXEC).exec_path,
+            config_file=wallet_config_path,
+        )
+        result = remote_cli.tree.healthcheck(rpc_endpoint="127.0.0.1:8080")
+        if result.return_code != 0:
+            return f"Error during tree healthcheck (rc={result.return_code}): {result.stdout}. \n Stderr: {result.stderr}"
+
+    @reporter.step_deco("Storage healthcheck on {cluster_node}")
+    def storage_healthcheck(self, cluster_node: ClusterNode) -> str | None:
+        result = storage_node_healthcheck(cluster_node.storage_node)
+        if result.health_status != "READY" or result.network_status != "ONLINE":
+            return f"Node {cluster_node} is not healthy. Health={result.health_status}. Network={result.network_status}"
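A short, illustrative way to invoke the new healthcheck from test code; the `cluster_node` object is assumed to be provided by the consuming test suite, not by this diff.

```python
from frostfs_testlib.healthcheck.basic_healthcheck import BasicHealthcheck


def assert_node_healthy(cluster_node) -> None:
    healthcheck = BasicHealthcheck()
    # perform() raises AssertionError if storage_healthcheck reports a problem;
    # tree_healthcheck can be called separately and returns an error string or None.
    healthcheck.perform(cluster_node)
    tree_error = healthcheck.tree_healthcheck(cluster_node)
    assert tree_error is None, tree_error
```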
src/frostfs_testlib/healthcheck/interfaces.py (new file): 17 lines

@@ -0,0 +1,17 @@
+from abc import ABC, abstractmethod
+
+from frostfs_testlib.storage.cluster import ClusterNode
+
+
+class Healthcheck(ABC):
+    @abstractmethod
+    def perform(self, cluster_node: ClusterNode):
+        """Perform healthcheck on the target cluster node"""
+
+    @abstractmethod
+    def tree_healthcheck(self, cluster_node: ClusterNode):
+        """Check tree sync status on target cluster node"""
+
+    @abstractmethod
+    def storage_healthcheck(self, cluster_node: ClusterNode):
+        """Perform storage node healthcheck on target cluster node"""
@@ -52,6 +52,7 @@ class HostConfig:
 
     Attributes:
         plugin_name: Name of plugin that should be used to manage the host.
+        healthcheck_plugin_name: Name of the plugin for healthcheck operations.
         address: Address of the machine (IP or DNS name).
         services: List of services hosted on the machine.
         clis: List of CLI tools available on the machine.
@@ -60,10 +61,12 @@ class HostConfig:
     """
 
     plugin_name: str
+    healthcheck_plugin_name: str
    address: str
    services: list[ServiceConfig] = field(default_factory=list)
    clis: list[CLIConfig] = field(default_factory=list)
    attributes: dict[str, str] = field(default_factory=dict)
+    interfaces: dict[str, str] = field(default_factory=dict)
 
    def __post_init__(self) -> None:
        self.services = [ServiceConfig(**service) for service in self.services or []]
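A minimal sketch of constructing the extended HostConfig directly; every value below is a placeholder.

```python
from frostfs_testlib.hosting.config import HostConfig

host_config = HostConfig(
    plugin_name="docker",
    healthcheck_plugin_name="basic",
    address="10.78.0.10",
    attributes={"sudo_shell": "false"},
    interfaces={"internal0": "192.168.1.10"},
)
```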
@@ -11,7 +11,7 @@ import docker
 from requests import HTTPError
 
 from frostfs_testlib.hosting.config import ParsedAttributes
-from frostfs_testlib.hosting.interfaces import DiskInfo, Host
+from frostfs_testlib.hosting.interfaces import DiskInfo, Host, HostStatus
 from frostfs_testlib.shell import LocalShell, Shell, SSHShell
 from frostfs_testlib.shell.command_inspectors import SudoInspector
 
@@ -61,10 +61,10 @@ class ServiceAttributes(ParsedAttributes):
 class DockerHost(Host):
     """Manages services hosted in Docker containers running on a local or remote machine."""
 
-    def get_shell(self) -> Shell:
+    def get_shell(self, sudo: bool = False) -> Shell:
         host_attributes = HostAttributes.parse(self._config.attributes)
         command_inspectors = []
-        if host_attributes.sudo_shell:
+        if sudo:
             command_inspectors.append(SudoInspector())
 
         if not host_attributes.ssh_login:
@@ -87,6 +87,15 @@ class DockerHost(Host):
         for service_config in self._config.services:
             self.start_service(service_config.name)
 
+    def get_host_status(self) -> HostStatus:
+        # We emulate host status by checking all services.
+        for service_config in self._config.services:
+            state = self._get_container_state(service_config.name)
+            if state != "running":
+                return HostStatus.OFFLINE
+
+        return HostStatus.ONLINE
+
     def stop_host(self) -> None:
         # We emulate stopping machine by stopping all services
         # As an alternative we can probably try to stop docker service...
@@ -117,6 +126,14 @@ class DockerHost(Host):
             timeout=service_attributes.stop_timeout,
         )
 
+    def mask_service(self, service_name: str) -> None:
+        # Not required for Docker
+        return
+
+    def unmask_service(self, service_name: str) -> None:
+        # Not required for Docker
+        return
+
     def wait_success_suspend_process(self, service_name: str):
         raise NotImplementedError("Not supported for docker")
 
@@ -135,9 +152,19 @@ class DockerHost(Host):
             timeout=service_attributes.start_timeout,
         )
 
+    def wait_for_service_to_be_in_state(
+        self, systemd_service_name: str, expected_state: str, timeout: int
+    ) -> None:
+        raise NotImplementedError("Not implemented for docker")
+
     def get_data_directory(self, service_name: str) -> str:
         service_attributes = self._get_service_attributes(service_name)
-        return service_attributes.data_directory_path
+
+        client = self._get_docker_client()
+        volume_info = client.inspect_volume(service_attributes.volume_name)
+        volume_path = volume_info["Mountpoint"]
+
+        return volume_path
 
     def delete_metabase(self, service_name: str) -> None:
         raise NotImplementedError("Not implemented for docker")
@@ -155,11 +182,7 @@ class DockerHost(Host):
         raise NotImplementedError("Not implemented for docker")
 
     def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None:
-        service_attributes = self._get_service_attributes(service_name)
-
-        client = self._get_docker_client()
-        volume_info = client.inspect_volume(service_attributes.volume_name)
-        volume_path = volume_info["Mountpoint"]
+        volume_path = self.get_data_directory(service_name)
 
         shell = self.get_shell()
         meta_clean_cmd = f"rm -rf {volume_path}/meta*/*"
@@ -206,11 +229,36 @@ class DockerHost(Host):
         with open(file_path, "wb") as file:
             file.write(logs)
 
+    def get_filtered_logs(
+        self,
+        filter_regex: str,
+        since: Optional[datetime] = None,
+        until: Optional[datetime] = None,
+        unit: Optional[str] = None,
+    ) -> str:
+        client = self._get_docker_client()
+        filtered_logs = ""
+        for service_config in self._config.services:
+            container_name = self._get_service_attributes(service_config.name).container_name
+            try:
+                filtered_logs = client.logs(container_name, since=since, until=until)
+            except HTTPError as exc:
+                logger.info(f"Got exception while dumping logs of '{container_name}': {exc}")
+                continue
+
+            matches = re.findall(filter_regex, filtered_logs, re.IGNORECASE + re.MULTILINE)
+            found = list(matches)
+            if found:
+                filtered_logs += f"{container_name}:\n{os.linesep.join(found)}"
+
+        return filtered_logs
+
     def is_message_in_logs(
         self,
         message_regex: str,
         since: Optional[datetime] = None,
         until: Optional[datetime] = None,
+        unit: Optional[str] = None,
     ) -> bool:
         client = self._get_docker_client()
         for service_config in self._config.services:
@@ -262,11 +310,16 @@ class DockerHost(Host):
         # To speed things up, we break timeout in smaller iterations and check container state
         # several times. This way waiting stops as soon as container reaches the expected state
         for _ in range(iterations):
-            container = self._get_container_by_name(container_name)
-            logger.debug(f"Current container state\n:{json.dumps(container, indent=2)}")
+            state = self._get_container_state(container_name)
 
-            if container and container["State"] == expected_state:
+            if state == expected_state:
                 return
             time.sleep(iteration_wait_time)
 
         raise RuntimeError(f"Container {container_name} is not in {expected_state} state.")
+
+    def _get_container_state(self, container_name: str) -> str:
+        container = self._get_container_by_name(container_name)
+        logger.debug(f"Current container state\n:{json.dumps(container, indent=2)}")
+
+        return container.get("State", None)
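An illustrative use of the new status and log-filtering APIs; the host object is assumed to come from the hosting layer, and the regex is a placeholder.

```python
from datetime import datetime, timedelta

from frostfs_testlib.hosting.interfaces import Host, HostStatus


def dump_recent_errors(host: Host) -> str:
    # Skip hosts whose services are not all running.
    if host.get_host_status() != HostStatus.ONLINE:
        return ""
    since = datetime.utcnow() - timedelta(minutes=10)
    # Returns matching entries as a single string, or an empty string.
    return host.get_filtered_logs(filter_regex=r"level=error", since=since)
```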
@@ -4,6 +4,13 @@ from typing import Optional
 
 from frostfs_testlib.hosting.config import CLIConfig, HostConfig, ServiceConfig
 from frostfs_testlib.shell.interfaces import Shell
+from frostfs_testlib.testing.readable import HumanReadableEnum
+
+
+class HostStatus(HumanReadableEnum):
+    ONLINE = "Online"
+    OFFLINE = "Offline"
+    UNKNOWN = "Unknown"
 
 
 class DiskInfo(dict):
@@ -65,9 +72,12 @@ class Host(ABC):
         return cli_config
 
     @abstractmethod
-    def get_shell(self) -> Shell:
+    def get_shell(self, sudo: bool = True) -> Shell:
         """Returns shell to this host.
 
+        Args:
+            sudo: if True, run all commands in shell with elevated rights
+
         Returns:
             Shell that executes commands on this host.
         """
@@ -76,6 +86,10 @@ class Host(ABC):
     def start_host(self) -> None:
         """Starts the host machine."""
 
+    @abstractmethod
+    def get_host_status(self) -> HostStatus:
+        """Check host status."""
+
     @abstractmethod
     def stop_host(self, mode: str) -> None:
         """Stops the host machine.
@@ -104,6 +118,26 @@ class Host(ABC):
             service_name: Name of the service to stop.
         """
 
+    @abstractmethod
+    def mask_service(self, service_name: str) -> None:
+        """Prevent the service from start by any activity by masking it.
+
+        The service must be hosted on this host.
+
+        Args:
+            service_name: Name of the service to mask.
+        """
+
+    @abstractmethod
+    def unmask_service(self, service_name: str) -> None:
+        """Allow the service to start by any activity by unmasking it.
+
+        The service must be hosted on this host.
+
+        Args:
+            service_name: Name of the service to unmask.
+        """
+
     @abstractmethod
     def restart_service(self, service_name: str) -> None:
         """Restarts the service with specified name and waits until it starts.
@@ -112,7 +146,6 @@ class Host(ABC):
             service_name: Name of the service to restart.
         """
-
 
     @abstractmethod
     def get_data_directory(self, service_name: str) -> str:
         """
@@ -123,7 +156,6 @@ class Host(ABC):
             service_name: Name of storage node service.
         """
-
 
     @abstractmethod
     def wait_success_suspend_process(self, process_name: str) -> None:
         """Search for a service ID by its name and stop the process
@@ -248,12 +280,34 @@ class Host(ABC):
             filter_regex: regex to filter output
         """
 
+    @abstractmethod
+    def get_filtered_logs(
+        self,
+        filter_regex: str,
+        since: Optional[datetime] = None,
+        until: Optional[datetime] = None,
+        unit: Optional[str] = None,
+    ) -> str:
+        """Get logs from host filtered by regex.
+
+        Args:
+            filter_regex: regex filter for logs.
+            since: If set, limits the time from which logs should be collected. Must be in UTC.
+            until: If set, limits the time until which logs should be collected. Must be in UTC.
+            unit: required unit.
+
+        Returns:
+            Found entries as str if any found.
+            Empty string otherwise.
+        """
+
     @abstractmethod
     def is_message_in_logs(
         self,
         message_regex: str,
         since: Optional[datetime] = None,
         until: Optional[datetime] = None,
+        unit: Optional[str] = None,
     ) -> bool:
         """Checks logs on host for specified message regex.
 
@@ -266,3 +320,17 @@ class Host(ABC):
             True if message found in logs in the given time frame.
             False otherwise.
         """
+
+    @abstractmethod
+    def wait_for_service_to_be_in_state(
+        self, systemd_service_name: str, expected_state: str, timeout: int
+    ) -> None:
+        """
+        Waites for service to be in specified state.
+
+        Args:
+            systemd_service_name: Service to wait state of.
+            expected_state: State to wait for
+            timeout: Seconds to wait
+
+        """
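A brief usage sketch of the extended Host interface; the service name, systemd unit name and state string are illustrative, and concrete non-Docker hosts implement these methods elsewhere.

```python
from frostfs_testlib.hosting.interfaces import Host


def stop_masked(host: Host, service_name: str, systemd_unit: str) -> None:
    # Mask first so nothing restarts the service while it is stopped.
    host.mask_service(service_name)
    host.stop_service(service_name)
    # ... perform checks while the service is guaranteed to stay down ...
    host.unmask_service(service_name)
    host.start_service(service_name)
    host.wait_for_service_to_be_in_state(systemd_unit, "active", timeout=60)
```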
@@ -7,6 +7,7 @@ from frostfs_testlib.load.load_config import (
     LoadType,
     NodesSelectionStrategy,
     Preset,
+    ReadFrom,
 )
 from frostfs_testlib.load.load_report import LoadReport
 from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader
@@ -22,6 +22,7 @@ class ScenarioRunner(ABC):
     def prepare(
         self,
         load_params: LoadParams,
+        cluster_nodes: list[ClusterNode],
         nodes_under_load: list[ClusterNode],
         k6_dir: str,
     ):
@@ -49,7 +50,7 @@ class ScenarioRunner(ABC):
         """Returns True if load is running at the moment"""
 
     @abstractmethod
-    def wait_until_finish(self):
+    def wait_until_finish(self, soft_timeout: int = 0):
         """Wait until load is finished"""
 
     @abstractmethod
@@ -2,9 +2,11 @@ import json
 import logging
 import math
 import os
-from dataclasses import dataclass, fields
+from dataclasses import dataclass
+from datetime import datetime
 from time import sleep
 from typing import Any
+from urllib.parse import urlparse
 
 from frostfs_testlib.load.interfaces import Loader
 from frostfs_testlib.load.load_config import (
@@ -16,11 +18,7 @@ from frostfs_testlib.load.load_config import (
 from frostfs_testlib.processes.remote_process import RemoteProcess
 from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.resources.common import STORAGE_USER_NAME
-from frostfs_testlib.resources.load_params import (
-    K6_STOP_SIGNAL_TIMEOUT,
-    K6_TEARDOWN_PERIOD,
-    LOAD_NODE_SSH_USER,
-)
+from frostfs_testlib.resources.load_params import K6_STOP_SIGNAL_TIMEOUT, K6_TEARDOWN_PERIOD
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.testing.test_control import wait_for_success
@@ -42,6 +40,7 @@ class LoadResults:
 
 class K6:
     _k6_process: RemoteProcess
+    _start_time: datetime
 
     def __init__(
         self,
@@ -60,10 +59,9 @@ class K6:
         self.loader: Loader = loader
         self.shell: Shell = shell
         self.wallet = wallet
-        self.scenario: LoadScenario = load_params.scenario
         self.summary_json: str = os.path.join(
             self.load_params.working_dir,
-            f"{self.load_params.load_id}_{self.scenario.value}_summary.json",
+            f"{self.load_params.load_id}_{self.load_params.scenario.value}_summary.json",
         )
 
         self._k6_dir: str = k6_dir
@@ -98,24 +96,7 @@ class K6:
         preset_scenario = preset_map[self.load_params.load_type]
         command_args = base_args[preset_scenario].copy()
 
-        command_args += [
-            f"--{field.metadata['preset_argument']} '{getattr(self.load_params, field.name)}'"
-            for field in fields(self.load_params)
-            if field.metadata
-            and self.scenario in field.metadata["applicable_scenarios"]
-            and field.metadata["preset_argument"]
-            and getattr(self.load_params, field.name) is not None
-        ]
-
-        if self.load_params.preset:
-            command_args += [
-                f"--{field.metadata['preset_argument']} '{getattr(self.load_params.preset, field.name)}'"
-                for field in fields(self.load_params.preset)
-                if field.metadata
-                and self.scenario in field.metadata["applicable_scenarios"]
-                and field.metadata["preset_argument"]
-                and getattr(self.load_params.preset, field.name) is not None
-            ]
+        command_args += self.load_params.get_preset_arguments()
 
         command = " ".join(command_args)
         result = self.shell.exec(command)
@@ -127,26 +108,7 @@ class K6:
 
     @reporter.step_deco("Generate K6 command")
     def _generate_env_variables(self) -> str:
-        env_vars = {
-            field.metadata["env_variable"]: getattr(self.load_params, field.name)
-            for field in fields(self.load_params)
-            if field.metadata
-            and self.scenario in field.metadata["applicable_scenarios"]
-            and field.metadata["env_variable"]
-            and getattr(self.load_params, field.name) is not None
-        }
-
-        if self.load_params.preset:
-            env_vars.update(
-                {
-                    field.metadata["env_variable"]: getattr(self.load_params.preset, field.name)
-                    for field in fields(self.load_params.preset)
-                    if field.metadata
-                    and self.scenario in field.metadata["applicable_scenarios"]
-                    and field.metadata["env_variable"]
-                    and getattr(self.load_params.preset, field.name) is not None
-                }
-            )
+        env_vars = self.load_params.get_env_vars()
 
         env_vars[f"{self.load_params.load_type.value.upper()}_ENDPOINTS"] = ",".join(self.endpoints)
         env_vars["SUMMARY_JSON"] = self.summary_json
@@ -162,16 +124,17 @@ class K6:
         with reporter.step(
             f"Start load from loader {self.loader.ip} on endpoints {self.endpoints}"
         ):
+            self._start_time = int(datetime.utcnow().timestamp())
             command = (
                 f"{self._k6_dir}/k6 run {self._generate_env_variables()} "
-                f"{self._k6_dir}/scenarios/{self.scenario.value}.js"
+                f"{self._k6_dir}/scenarios/{self.load_params.scenario.value}.js"
             )
             user = STORAGE_USER_NAME if self.load_params.scenario == LoadScenario.LOCAL else None
             self._k6_process = RemoteProcess.create(
                 command, self.shell, self.load_params.working_dir, user
             )
 
-    def wait_until_finished(self) -> None:
+    def wait_until_finished(self, soft_timeout: int = 0) -> None:
         with reporter.step(
             f"Wait until load is finished from loader {self.loader.ip} on endpoints {self.endpoints}"
         ):
@@ -180,9 +143,36 @@ class K6:
             else:
                 timeout = self.load_params.load_time or 0
 
-            timeout += int(K6_TEARDOWN_PERIOD)
+            current_time = int(datetime.utcnow().timestamp())
+            working_time = current_time - self._start_time
+            remaining_time = timeout - working_time
+
+            setup_teardown_time = (
+                int(K6_TEARDOWN_PERIOD)
+                + self.load_params.get_init_time()
+                + int(self.load_params.setup_timeout.replace("s", "").strip())
+            )
+            remaining_time_including_setup_and_teardown = remaining_time + setup_teardown_time
+            timeout = remaining_time_including_setup_and_teardown
+
+            if soft_timeout:
+                timeout = min(timeout, soft_timeout)
+
             original_timeout = timeout
+
+            timeouts = {
+                "K6 start time": self._start_time,
+                "Current time": current_time,
+                "K6 working time": working_time,
+                "Remaining time for load": remaining_time,
+                "Setup and teardown": setup_teardown_time,
+                "Remaining time including setup/teardown": remaining_time_including_setup_and_teardown,
+                "Soft timeout": soft_timeout,
+                "Selected timeout": original_timeout,
+            }
+
+            reporter.attach("\n".join([f"{k}: {v}" for k, v in timeouts.items()]), "timeouts.txt")
+
             min_wait_interval = 10
             wait_interval = min_wait_interval
             if self._k6_process is None:
@@ -190,7 +180,9 @@ class K6:
             while timeout > 0:
                 if not self._k6_process.running():
                     return
-                logger.info(f"K6 is running. Waiting {wait_interval} seconds...")
+                remaining_time_hours = f"{timeout//3600}h" if timeout//3600 != 0 else ""
+                remaining_time_minutes = f"{timeout//60%60}m" if timeout//60%60 != 0 else ""
+                logger.info(f"K6 is running. Remaining time {remaining_time_hours}{remaining_time_minutes}{timeout%60}s. Next check after {wait_interval} seconds...")
                 sleep(wait_interval)
                 timeout -= min(timeout, wait_interval)
                 wait_interval = max(
@@ -202,7 +194,8 @@ class K6:
                     return
 
             self.stop()
-            raise TimeoutError(f"Expected K6 to finish after {original_timeout} sec.")
+            if not soft_timeout:
+                raise TimeoutError(f"Expected K6 to finish after {original_timeout} sec.")
 
     def get_results(self) -> Any:
         with reporter.step(
@@ -215,10 +208,10 @@ class K6:
 
             summary_text = self.shell.exec(f"cat {self.summary_json}").stdout
             summary_json = json.loads(summary_text)
+            endpoint = urlparse(self.endpoints[0]).netloc or self.endpoints[0]
             allure_filenames = {
-                K6ProcessAllocationStrategy.PER_LOAD_NODE: f"{self.loader.ip}_{self.scenario.value}_summary.json",
-                K6ProcessAllocationStrategy.PER_ENDPOINT: f"{self.loader.ip}_{self.scenario.value}_{self.endpoints[0]}_summary.json",
+                K6ProcessAllocationStrategy.PER_LOAD_NODE: f"{self.loader.ip}_{self.load_params.scenario.value}_summary.json",
+                K6ProcessAllocationStrategy.PER_ENDPOINT: f"{self.loader.ip}_{self.load_params.scenario.value}_{endpoint}_summary.json",
             }
             allure_filename = allure_filenames[self.load_params.k6_process_allocation_strategy]
 
@@ -227,7 +220,7 @@ class K6:
 
     def stop(self) -> None:
         with reporter.step(f"Stop load from loader {self.loader.ip} on endpoints {self.endpoints}"):
-            if self.is_running:
+            if self.is_running():
                 self._k6_process.stop()
 
             self._wait_until_process_end()
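A sketch of how a runner might use the new soft timeout; the K6 instance and the 300-second value are illustrative, and the `start()` call assumes the wrapper's start method shown earlier in this file.

```python
def run_with_soft_deadline(k6) -> None:
    # k6 is an instance of the K6 wrapper above.
    k6.start()
    # Wait at most 300 seconds even if load_time is larger. Because soft_timeout
    # is set, hitting the deadline stops k6 without raising TimeoutError.
    k6.wait_until_finished(soft_timeout=300)
    results = k6.get_results()
    print(results)
```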
@ -1,7 +1,11 @@
|
||||||
|
import math
|
||||||
import os
|
import os
|
||||||
from dataclasses import dataclass, field
|
from dataclasses import dataclass, field, fields, is_dataclass
|
||||||
from enum import Enum
|
from enum import Enum
|
||||||
from typing import Optional
|
from types import MappingProxyType
|
||||||
|
from typing import Any, Optional, get_args
|
||||||
|
|
||||||
|
from frostfs_testlib.utils.converting_utils import calc_unit
|
||||||
|
|
||||||
|
|
||||||
class LoadType(Enum):
|
class LoadType(Enum):
|
||||||
|
@ -15,11 +19,18 @@ class LoadScenario(Enum):
|
||||||
gRPC_CAR = "grpc_car"
|
gRPC_CAR = "grpc_car"
|
||||||
S3 = "s3"
|
S3 = "s3"
|
||||||
S3_CAR = "s3_car"
|
S3_CAR = "s3_car"
|
||||||
|
S3_MULTIPART = "s3_multipart"
|
||||||
HTTP = "http"
|
HTTP = "http"
|
||||||
VERIFY = "verify"
|
VERIFY = "verify"
|
||||||
LOCAL = "local"
|
LOCAL = "local"
|
||||||
|
|
||||||
|
|
||||||
|
class ReadFrom(Enum):
|
||||||
|
REGISTRY = "registry"
|
||||||
|
PRESET = "preset"
|
||||||
|
MANUAL = "manual"
|
||||||
|
|
||||||
|
|
||||||
all_load_scenarios = [
|
all_load_scenarios = [
|
||||||
LoadScenario.gRPC,
|
LoadScenario.gRPC,
|
||||||
LoadScenario.S3,
|
LoadScenario.S3,
|
||||||
|
@ -27,10 +38,11 @@ all_load_scenarios = [
|
||||||
LoadScenario.S3_CAR,
|
LoadScenario.S3_CAR,
|
||||||
LoadScenario.gRPC_CAR,
|
LoadScenario.gRPC_CAR,
|
||||||
LoadScenario.LOCAL,
|
LoadScenario.LOCAL,
|
||||||
|
LoadScenario.S3_MULTIPART
|
||||||
]
|
]
|
||||||
all_scenarios = all_load_scenarios.copy() + [LoadScenario.VERIFY]
|
all_scenarios = all_load_scenarios.copy() + [LoadScenario.VERIFY]
|
||||||
|
|
||||||
constant_vus_scenarios = [LoadScenario.gRPC, LoadScenario.S3, LoadScenario.HTTP, LoadScenario.LOCAL]
|
constant_vus_scenarios = [LoadScenario.gRPC, LoadScenario.S3, LoadScenario.HTTP, LoadScenario.LOCAL, LoadScenario.S3_MULTIPART]
|
||||||
constant_arrival_rate_scenarios = [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR]
|
constant_arrival_rate_scenarios = [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR]
|
||||||
|
|
||||||
grpc_preset_scenarios = [
|
grpc_preset_scenarios = [
|
||||||
|
@ -39,13 +51,21 @@ grpc_preset_scenarios = [
|
||||||
LoadScenario.gRPC_CAR,
|
LoadScenario.gRPC_CAR,
|
||||||
LoadScenario.LOCAL,
|
LoadScenario.LOCAL,
|
||||||
]
|
]
|
||||||
s3_preset_scenarios = [LoadScenario.S3, LoadScenario.S3_CAR]
|
s3_preset_scenarios = [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.S3_MULTIPART]
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class MetaField:
|
||||||
|
name: str
|
||||||
|
metadata: MappingProxyType
|
||||||
|
value: Any
|
||||||
|
|
||||||
|
|
||||||
def metadata_field(
|
def metadata_field(
|
||||||
applicable_scenarios: list[LoadScenario],
|
applicable_scenarios: list[LoadScenario],
|
||||||
preset_param: Optional[str] = None,
|
preset_param: Optional[str] = None,
|
||||||
scenario_variable: Optional[str] = None,
|
scenario_variable: Optional[str] = None,
|
||||||
|
string_repr: Optional[bool] = True,
|
||||||
distributed: Optional[bool] = False,
|
distributed: Optional[bool] = False,
|
||||||
):
|
):
|
||||||
return field(
|
return field(
|
||||||
|
@ -54,6 +74,7 @@ def metadata_field(
|
||||||
"applicable_scenarios": applicable_scenarios,
|
"applicable_scenarios": applicable_scenarios,
|
||||||
"preset_argument": preset_param,
|
"preset_argument": preset_param,
|
||||||
"env_variable": scenario_variable,
|
"env_variable": scenario_variable,
|
||||||
|
"string_repr": string_repr,
|
||||||
"distributed": distributed,
|
"distributed": distributed,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
@@ -93,25 +114,33 @@ class K6ProcessAllocationStrategy(Enum):
 class Preset:
     # ------ COMMON ------
     # Amount of objects which should be created
-    objects_count: Optional[int] = metadata_field(all_load_scenarios, "preload_obj", None)
+    objects_count: Optional[int] = metadata_field(all_load_scenarios, "preload_obj", None, False)
     # Preset json. Filled automatically.
-    pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON")
+    pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON", False)
     # Workers count for preset
-    workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None)
+    workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None, False)

     # ------ GRPC ------
     # Amount of containers which should be created
-    containers_count: Optional[int] = metadata_field(grpc_preset_scenarios, "containers", None)
+    containers_count: Optional[int] = metadata_field(
+        grpc_preset_scenarios, "containers", None, False
+    )
     # Container placement policy for containers for gRPC
     container_placement_policy: Optional[str] = metadata_field(
-        grpc_preset_scenarios, "policy", None
+        grpc_preset_scenarios, "policy", None, False
     )

     # ------ S3 ------
     # Amount of buckets which should be created
-    buckets_count: Optional[int] = metadata_field(s3_preset_scenarios, "buckets", None)
+    buckets_count: Optional[int] = metadata_field(s3_preset_scenarios, "buckets", None, False)
     # S3 region (AKA placement policy for S3 buckets)
-    s3_location: Optional[str] = metadata_field(s3_preset_scenarios, "location", None)
+    s3_location: Optional[str] = metadata_field(s3_preset_scenarios, "location", None, False)

+    # Delay between containers creation and object upload for preset
+    object_upload_delay: Optional[int] = metadata_field(all_load_scenarios, "sleep", None, False)
+
+    # Flag to control preset erorrs
+    ignore_errors: Optional[bool] = metadata_field(all_load_scenarios, "ignore-errors", None, False)


 @dataclass
@@ -132,96 +161,237 @@ class LoadParams:
     verify: Optional[bool] = None
     # Just id for load so distinct it between runs. Filled automatically.
     load_id: Optional[str] = None
+    # Acceptable number of load errors in %
+    # 100 means 100% errors allowed
+    # 1.5 means 1.5% errors allowed
+    # 0 means no errors allowed
+    error_threshold: Optional[float] = None
     # Working directory
     working_dir: Optional[str] = None
     # Preset for the k6 run
     preset: Optional[Preset] = None
     # K6 download url
     k6_url: Optional[str] = None
+    # No ssl verification flag
+    no_verify_ssl: Optional[bool] = metadata_field(
+        [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.S3_MULTIPART, LoadScenario.VERIFY, LoadScenario.HTTP],
+        "no-verify-ssl",
+        "NO_VERIFY_SSL",
+        False,
+    )

     # ------- COMMON SCENARIO PARAMS -------
     # Load time is the maximum duration for k6 to give load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value.
-    load_time: Optional[int] = metadata_field(all_load_scenarios, None, "DURATION")
+    load_time: Optional[int] = metadata_field(all_load_scenarios, None, "DURATION", False)
     # Object size in KB for load and preset.
-    object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE")
+    object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE", False)
+    # For read operations, controls from which set get objects to read
+    read_from: Optional[ReadFrom] = None
     # Output registry K6 file. Filled automatically.
-    registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE")
+    registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE", False)
     # Specifies the minimum duration of every single execution (i.e. iteration).
     # Any iterations that are shorter than this value will cause that VU to
     # sleep for the remainder of the time until the specified minimum duration is reached.
     min_iteration_duration: Optional[str] = metadata_field(
-        all_load_scenarios, None, "K6_MIN_ITERATION_DURATION"
+        all_load_scenarios, None, "K6_MIN_ITERATION_DURATION", False
+    )
+    # Prepare/cut objects locally on client before sending
+    prepare_locally: Optional[bool] = metadata_field(
+        [LoadScenario.gRPC, LoadScenario.gRPC_CAR], None, "PREPARE_LOCALLY", False
     )
     # Specifies K6 setupTimeout time. Currently hardcoded in xk6 as 5 seconds for all scenarios
     # https://k6.io/docs/using-k6/k6-options/reference/#setup-timeout
-    setup_timeout: Optional[str] = metadata_field(all_scenarios, None, "K6_SETUP_TIMEOUT")
+    setup_timeout: Optional[str] = metadata_field(all_scenarios, None, "K6_SETUP_TIMEOUT", False)

+    # Delay for read operations in case if we read from registry
+    read_age: Optional[int] = metadata_field(all_load_scenarios, None, "READ_AGE", None, False)
+
+    # Initialization time for each VU for k6 load
+    vu_init_time: Optional[float] = None
+
     # ------- CONSTANT VUS SCENARIO PARAMS -------
     # Amount of Writers VU.
-    writers: Optional[int] = metadata_field(constant_vus_scenarios, None, "WRITERS", True)
+    writers: Optional[int] = metadata_field(constant_vus_scenarios, None, "WRITERS", True, True)
     # Amount of Readers VU.
-    readers: Optional[int] = metadata_field(constant_vus_scenarios, None, "READERS", True)
+    readers: Optional[int] = metadata_field(constant_vus_scenarios, None, "READERS", True, True)
     # Amount of Deleters VU.
-    deleters: Optional[int] = metadata_field(constant_vus_scenarios, None, "DELETERS", True)
+    deleters: Optional[int] = metadata_field(constant_vus_scenarios, None, "DELETERS", True, True)

     # ------- CONSTANT ARRIVAL RATE SCENARIO PARAMS -------
     # Number of iterations to start during each timeUnit period for write.
     write_rate: Optional[int] = metadata_field(
-        constant_arrival_rate_scenarios, None, "WRITE_RATE", True
+        constant_arrival_rate_scenarios, None, "WRITE_RATE", True, True
     )

     # Number of iterations to start during each timeUnit period for read.
     read_rate: Optional[int] = metadata_field(
-        constant_arrival_rate_scenarios, None, "READ_RATE", True
+        constant_arrival_rate_scenarios, None, "READ_RATE", True, True
     )

     # Number of iterations to start during each timeUnit period for delete.
     delete_rate: Optional[int] = metadata_field(
-        constant_arrival_rate_scenarios, None, "DELETE_RATE", True
+        constant_arrival_rate_scenarios, None, "DELETE_RATE", True, True
     )

     # Amount of preAllocatedVUs for write operations.
     preallocated_writers: Optional[int] = metadata_field(
-        constant_arrival_rate_scenarios, None, "PRE_ALLOC_WRITERS", True
+        constant_arrival_rate_scenarios, None, "PRE_ALLOC_WRITERS", True, True
     )
     # Amount of maxVUs for write operations.
     max_writers: Optional[int] = metadata_field(
-        constant_arrival_rate_scenarios, None, "MAX_WRITERS", True
+        constant_arrival_rate_scenarios, None, "MAX_WRITERS", False, True
     )

     # Amount of preAllocatedVUs for read operations.
     preallocated_readers: Optional[int] = metadata_field(
-        constant_arrival_rate_scenarios, None, "PRE_ALLOC_READERS", True
+        constant_arrival_rate_scenarios, None, "PRE_ALLOC_READERS", True, True
     )
     # Amount of maxVUs for read operations.
     max_readers: Optional[int] = metadata_field(
-        constant_arrival_rate_scenarios, None, "MAX_READERS", True
+        constant_arrival_rate_scenarios, None, "MAX_READERS", False, True
     )

     # Amount of preAllocatedVUs for read operations.
     preallocated_deleters: Optional[int] = metadata_field(
-        constant_arrival_rate_scenarios, None, "PRE_ALLOC_DELETERS", True
+        constant_arrival_rate_scenarios, None, "PRE_ALLOC_DELETERS", True, True
     )
     # Amount of maxVUs for delete operations.
     max_deleters: Optional[int] = metadata_field(
-        constant_arrival_rate_scenarios, None, "MAX_DELETERS", True
+        constant_arrival_rate_scenarios, None, "MAX_DELETERS", False, True
     )

+    # Multipart
+    # Number of parts to upload in parallel
+    writers_multipart: Optional[int] = metadata_field(
+        [LoadScenario.S3_MULTIPART], None, "WRITERS_MULTIPART", False, True
+    )
+    # part size must be greater than (5 MB)
+    write_object_part_size: Optional[int] = metadata_field([LoadScenario.S3_MULTIPART], None, "WRITE_OBJ_PART_SIZE", False)
+
     # Period of time to apply the rate value.
-    time_unit: Optional[str] = metadata_field(constant_arrival_rate_scenarios, None, "TIME_UNIT")
+    time_unit: Optional[str] = metadata_field(
+        constant_arrival_rate_scenarios, None, "TIME_UNIT", False
+    )

     # ------- VERIFY SCENARIO PARAMS -------
     # Maximum verification time for k6 to verify objects. Default is BACKGROUND_LOAD_MAX_VERIFY_TIME (3600).
-    verify_time: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "TIME_LIMIT")
+    verify_time: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "TIME_LIMIT", False)
     # Amount of Verification VU.
-    verify_clients: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "CLIENTS", True)
+    verify_clients: Optional[int] = metadata_field(
+        [LoadScenario.VERIFY], None, "CLIENTS", True, False
+    )

     # ------- LOCAL SCENARIO PARAMS -------
     # Config file location (filled automatically)
-    config_file: Optional[str] = metadata_field([LoadScenario.LOCAL], None, "CONFIG_FILE")
+    config_file: Optional[str] = metadata_field([LoadScenario.LOCAL], None, "CONFIG_FILE", False)

     def set_id(self, load_id):
         self.load_id = load_id
-        self.registry_file = os.path.join(self.working_dir, f"{load_id}_registry.bolt")
+
+        if self.read_from == ReadFrom.REGISTRY:
+            self.registry_file = os.path.join(self.working_dir, f"{load_id}_registry.bolt")
+        if self.read_from == ReadFrom.PRESET:
+            self.registry_file = None
+
         if self.preset:
             self.preset.pregen_json = os.path.join(self.working_dir, f"{load_id}_prepare.json")
+    def get_env_vars(self):
+        env_vars = {
+            meta_field.metadata["env_variable"]: meta_field.value
+            for meta_field in self._get_meta_fields(self)
+            if self.scenario in meta_field.metadata["applicable_scenarios"]
+            and meta_field.metadata["env_variable"]
+            and meta_field.value is not None
+        }
+
+        return env_vars
+
+    def __post_init__(self):
+        default_scenario_map = {
+            LoadType.gRPC: LoadScenario.gRPC,
+            LoadType.HTTP: LoadScenario.HTTP,
+            LoadType.S3: LoadScenario.S3,
+        }
+
+        if self.scenario is None:
+            self.scenario = default_scenario_map[self.load_type]
+
+    def get_preset_arguments(self):
+        command_args = [
+            self._get_preset_argument(meta_field)
+            for meta_field in self._get_meta_fields(self)
+            if self.scenario in meta_field.metadata["applicable_scenarios"]
+            and meta_field.metadata["preset_argument"]
+            and meta_field.value is not None
+            and self._get_preset_argument(meta_field)
+        ]
+
+        return command_args
+
+    def get_init_time(self) -> int:
+        return math.ceil(self._get_total_vus() * self.vu_init_time)
+
+    def _get_total_vus(self) -> int:
+        vu_fields = ["writers", "preallocated_writers"]
+        data_fields = [
+            getattr(self, field.name) or 0 for field in fields(self) if field.name in vu_fields
+        ]
+        return sum(data_fields)
+
+    def _get_applicable_fields(self):
+        applicable_fields = [
+            meta_field
+            for meta_field in self._get_meta_fields(self)
+            if self.scenario in meta_field.metadata["applicable_scenarios"] and meta_field.value
+        ]
+
+        return applicable_fields
+
+    @staticmethod
+    def _get_preset_argument(meta_field: MetaField) -> str:
+        if isinstance(meta_field.value, bool):
+            # For preset calls, bool values are passed with just --<argument_name> if the value is True
+            return f"--{meta_field.metadata['preset_argument']}" if meta_field.value else ""
+
+        return f"--{meta_field.metadata['preset_argument']} '{meta_field.value}'"
+
+    @staticmethod
+    def _get_meta_fields(instance) -> list[MetaField]:
+        data_fields = fields(instance)
+
+        fields_with_data = [
+            MetaField(field.name, field.metadata, getattr(instance, field.name))
+            for field in data_fields
+            if field.metadata and getattr(instance, field.name) is not None
+        ]
+
+        for field in data_fields:
+            actual_field_type = (
+                get_args(field.type)[0] if len(get_args(field.type)) else get_args(field.type)
+            )
+            if is_dataclass(actual_field_type) and getattr(instance, field.name):
+                fields_with_data += LoadParams._get_meta_fields(getattr(instance, field.name))
+
+        return fields_with_data or []
+
+    def __str__(self) -> str:
+        load_type_str = self.scenario.value if self.scenario else self.load_type.value
+        # TODO: migrate load_params defaults to testlib
+        if self.object_size is not None:
+            size, unit = calc_unit(self.object_size, 1)
+            static_params = [f"{load_type_str} {size:.4g} {unit}"]
+        else:
+            static_params = [f"{load_type_str}"]
+
+        dynamic_params = [
+            f"{meta_field.name}={meta_field.value}"
+            for meta_field in self._get_applicable_fields()
+            if meta_field.metadata["string_repr"]
+        ]
+        params = ", ".join(static_params + dynamic_params)
+
+        return params
+
+    def __repr__(self) -> str:
+        return self.__str__()
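For reference, the boolean handling in the new `_get_preset_argument` can be exercised in isolation; this small standalone snippet (illustrative only, not part of the changeset) mirrors that logic:

    def preset_argument(name: str, value) -> str:
        # Booleans become a bare flag (emitted only when True); everything else is passed as --name 'value'.
        if isinstance(value, bool):
            return f"--{name}" if value else ""
        return f"--{name} '{value}'"

    assert preset_argument("ignore-errors", True) == "--ignore-errors"
    assert preset_argument("ignore-errors", False) == ""
    assert preset_argument("policy", "REP 2") == "--policy 'REP 2'"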
@@ -8,12 +8,15 @@ class MetricsBase(ABC):
     _WRITE_SUCCESS = ""
     _WRITE_ERRORS = ""
     _WRITE_THROUGHPUT = "data_sent"
+    _WRITE_LATENCY = ""

     _READ_SUCCESS = ""
     _READ_ERRORS = ""
+    _READ_LATENCY = ""
     _READ_THROUGHPUT = "data_received"

     _DELETE_SUCCESS = ""
+    _DELETE_LATENCY = ""
     _DELETE_ERRORS = ""

     def __init__(self, summary) -> None:
@@ -28,6 +31,10 @@ class MetricsBase(ABC):
     def write_success_iterations(self) -> int:
         return self._get_metric(self._WRITE_SUCCESS)

+    @property
+    def write_latency(self) -> dict:
+        return self._get_metric(self._WRITE_LATENCY)
+
     @property
     def write_rate(self) -> float:
         return self._get_metric_rate(self._WRITE_SUCCESS)
@@ -48,6 +55,10 @@ class MetricsBase(ABC):
     def read_success_iterations(self) -> int:
         return self._get_metric(self._READ_SUCCESS)

+    @property
+    def read_latency(self) -> dict:
+        return self._get_metric(self._READ_LATENCY)
+
     @property
     def read_rate(self) -> int:
         return self._get_metric_rate(self._READ_SUCCESS)
@@ -68,6 +79,10 @@ class MetricsBase(ABC):
     def delete_success_iterations(self) -> int:
         return self._get_metric(self._DELETE_SUCCESS)

+    @property
+    def delete_latency(self) -> dict:
+        return self._get_metric(self._DELETE_LATENCY)
+
     @property
     def delete_failed_iterations(self) -> int:
         return self._get_metric(self._DELETE_ERRORS)
@@ -77,7 +92,11 @@ class MetricsBase(ABC):
         return self._get_metric_rate(self._DELETE_SUCCESS)

     def _get_metric(self, metric: str) -> int:
-        metrics_method_map = {"counter": self._get_counter_metric, "gauge": self._get_gauge_metric}
+        metrics_method_map = {
+            "counter": self._get_counter_metric,
+            "gauge": self._get_gauge_metric,
+            "trend": self._get_trend_metrics,
+        }

         if metric not in self.metrics:
             return 0
@@ -115,32 +134,42 @@ class MetricsBase(ABC):
     def _get_gauge_metric(self, metric: str) -> int:
         return metric["values"]["value"]

+    def _get_trend_metrics(self, metric: str) -> int:
+        return metric["values"]
+

 class GrpcMetrics(MetricsBase):
     _WRITE_SUCCESS = "frostfs_obj_put_total"
     _WRITE_ERRORS = "frostfs_obj_put_fails"
+    _WRITE_LATENCY = "frostfs_obj_put_duration"

     _READ_SUCCESS = "frostfs_obj_get_total"
     _READ_ERRORS = "frostfs_obj_get_fails"
+    _READ_LATENCY = "frostfs_obj_get_duration"

     _DELETE_SUCCESS = "frostfs_obj_delete_total"
     _DELETE_ERRORS = "frostfs_obj_delete_fails"
+    _DELETE_LATENCY = "frostfs_obj_delete_duration"


 class S3Metrics(MetricsBase):
     _WRITE_SUCCESS = "aws_obj_put_total"
     _WRITE_ERRORS = "aws_obj_put_fails"
+    _WRITE_LATENCY = "aws_obj_put_duration"

     _READ_SUCCESS = "aws_obj_get_total"
     _READ_ERRORS = "aws_obj_get_fails"
+    _READ_LATENCY = "aws_obj_get_duration"

     _DELETE_SUCCESS = "aws_obj_delete_total"
     _DELETE_ERRORS = "aws_obj_delete_fails"
+    _DELETE_LATENCY = "aws_obj_delete_duration"


 class LocalMetrics(MetricsBase):
     _WRITE_SUCCESS = "local_obj_put_total"
     _WRITE_ERRORS = "local_obj_put_fails"
+    _WRITE_LATENCY = "local_obj_put_duration"

     _READ_SUCCESS = "local_obj_get_total"
     _READ_ERRORS = "local_obj_get_fails"
@@ -167,6 +196,7 @@ def get_metrics_object(load_type: LoadScenario, summary: dict[str, Any]) -> Metr
         LoadScenario.HTTP: GrpcMetrics,
         LoadScenario.S3: S3Metrics,
         LoadScenario.S3_CAR: S3Metrics,
+        LoadScenario.S3_MULTIPART: S3Metrics,
         LoadScenario.VERIFY: VerifyMetrics,
         LoadScenario.LOCAL: LocalMetrics,
     }
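As a point of reference (not part of the changeset), k6 end-of-test summaries keep per-metric data under "values", and the dispatch added above simply picks the right accessor per metric type. A rough standalone illustration with made-up numbers; the exact keys read by the counter accessor are an assumption, only the trend and gauge shapes are visible in this diff:

    # Hypothetical, trimmed k6 summary: counters typically carry "count", gauges "value",
    # trends a dict of stats such as avg/med/p(95).
    summary_metrics = {
        "aws_obj_put_total": {"type": "counter", "values": {"count": 150, "rate": 2.5}},
        "aws_obj_put_duration": {"type": "trend", "values": {"avg": 41.2, "med": 37.0, "p(95)": 88.4}},
    }

    def get_metric(metrics: dict, name: str):
        entry = metrics.get(name)
        if entry is None:
            return 0
        if entry["type"] == "trend":
            return entry["values"]  # whole stats dict, as _get_trend_metrics returns
        key = "count" if entry["type"] == "counter" else "value"
        return entry["values"][key]

    print(get_metric(summary_metrics, "aws_obj_put_total"))     # 150
    print(get_metric(summary_metrics, "aws_obj_put_duration"))  # {'avg': 41.2, ...}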
@@ -1,10 +1,11 @@
 from datetime import datetime
-from typing import Optional, Tuple
+from typing import Optional

 import yaml

 from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario
 from frostfs_testlib.load.load_metrics import get_metrics_object
+from frostfs_testlib.utils.converting_utils import calc_unit


 class LoadReport:
@@ -62,17 +63,6 @@ class LoadReport:

         return html

-    def _calc_unit(self, value: float, skip_units: int = 0) -> Tuple[float, str]:
-        units = ["B", "KiB", "MiB", "GiB", "TiB"]
-
-        for unit in units[skip_units:]:
-            if value < 1024:
-                return value, unit
-
-            value = value / 1024.0
-
-        return value, unit
-
     def _seconds_to_formatted_duration(self, seconds: int) -> str:
         """Converts N number of seconds to formatted output ignoring zeroes.
         Examples:
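The helper removed here is now imported as calc_unit from frostfs_testlib.utils.converting_utils (see the new import above); presumably it keeps the same shape. A minimal sketch consistent with the removed body:

    from typing import Tuple

    def calc_unit(value: float, skip_units: int = 0) -> Tuple[float, str]:
        # Walk up binary units until the value drops below 1024; skip_units=1 starts at KiB,
        # which suits object sizes that are already expressed in KB.
        units = ["B", "KiB", "MiB", "GiB", "TiB"]
        for unit in units[skip_units:]:
            if value < 1024:
                return value, unit
            value = value / 1024.0
        return value, unit

    print(calc_unit(8, 1))        # (8, 'KiB')
    print(calc_unit(3_500_000))   # (~3.34, 'MiB')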
@@ -102,6 +92,7 @@ class LoadReport:
         model_map = {
             LoadScenario.gRPC: "closed model",
             LoadScenario.S3: "closed model",
+            LoadScenario.S3_MULTIPART: "closed model",
             LoadScenario.HTTP: "closed model",
             LoadScenario.gRPC_CAR: "open model",
             LoadScenario.S3_CAR: "open model",
@@ -110,7 +101,7 @@ class LoadReport:

         return model_map[self.load_params.scenario]

-    def _get_oprations_sub_section_html(
+    def _get_operations_sub_section_html(
         self,
         operation_type: str,
         total_operations: int,
@@ -119,10 +110,11 @@ class LoadReport:
         total_rate: float,
         throughput: float,
         errors: dict[str, int],
+        latency: dict[str, dict],
     ):
         throughput_html = ""
         if throughput > 0:
-            throughput, unit = self._calc_unit(throughput)
+            throughput, unit = calc_unit(throughput)
             throughput_html = self._row("Throughput", f"{throughput:.2f} {unit}/sec")

         per_node_errors_html = ""
@@ -137,11 +129,27 @@ class LoadReport:
         ):
             per_node_errors_html += self._row(f"At {node_key}", errors)

-        object_size, object_size_unit = self._calc_unit(self.load_params.object_size, 1)
+        latency_html = ""
+        if latency:
+            for node_key, latency_dict in latency.items():
+                latency_values = "N/A"
+                if latency_dict:
+                    latency_values = ""
+                    for param_name, param_val in latency_dict.items():
+                        latency_values += f"{param_name}={param_val:.2f}ms "
+
+                latency_html += self._row(
+                    f"{operation_type} latency {node_key.split(':')[0]}", latency_values
+                )
+
+        object_size, object_size_unit = calc_unit(self.load_params.object_size, 1)
         duration = self._seconds_to_formatted_duration(self.load_params.load_time)
         model = self._get_model_string()
         # write 8KB 15h49m 50op/sec 50th open model/closed model/min_iteration duration=1s - 1.636MB/s 199.57451/s
         short_summary = f"{operation_type} {object_size}{object_size_unit} {duration} {requested_rate_str} {vus_str} {model} - {throughput:.2f}{unit}/s {total_rate:.2f}/s"
+        errors_percent = 0
+        if total_operations:
+            errors_percent = total_errors / total_operations * 100.0
+
         html = f"""
         <table border="1" cellpadding="5px"><tbody>
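The precomputed errors_percent above also sidesteps the division by zero the old inline f-string expression could hit when a section ends up with no operations; roughly:

    total_errors, total_operations = 0, 0

    # Old style: total_errors / total_operations * 100.0 would raise ZeroDivisionError here.
    errors_percent = 0
    if total_operations:
        errors_percent = total_errors / total_operations * 100.0
    print(f"{total_errors} ({errors_percent:.2f}%)")  # -> "0 (0.00%)"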
@@ -150,10 +158,11 @@ class LoadReport:
         {self._row("Total operations", total_operations)}
         {self._row("OP/sec", f"{total_rate:.2f}")}
         {throughput_html}
+        {latency_html}
         <tr><th colspan="2" bgcolor="gainsboro">Errors</th></tr>
         {per_node_errors_html}
-        {self._row("Total", f"{total_errors} ({total_errors/total_operations*100.0:.2f}%)")}
+        {self._row("Total", f"{total_errors} ({errors_percent:.2f}%)")}
+        {self._row("Threshold", f"{self.load_params.error_threshold:.2f}%")}
         </tbody></table><br><hr>
         """
@@ -167,6 +176,7 @@ class LoadReport:
         write_operations = 0
         write_op_sec = 0
         write_throughput = 0
+        write_latency = {}
         write_errors = {}
         requested_write_rate = self.load_params.write_rate
         requested_write_rate_str = (
@@ -176,12 +186,14 @@ class LoadReport:
         read_operations = 0
         read_op_sec = 0
         read_throughput = 0
+        read_latency = {}
         read_errors = {}
         requested_read_rate = self.load_params.read_rate
         requested_read_rate_str = f"{requested_read_rate}op/sec" if requested_read_rate else ""

         delete_operations = 0
         delete_op_sec = 0
+        delete_latency = {}
         delete_errors = {}
         requested_delete_rate = self.load_params.delete_rate
         requested_delete_rate_str = (
@@ -217,6 +229,7 @@ class LoadReport:
             if write_operations:
                 write_section_required = True
                 write_op_sec += metrics.write_rate
+                write_latency[node_key] = metrics.write_latency
                 write_throughput += metrics.write_throughput
                 if metrics.write_failed_iterations:
                     write_errors[node_key] = metrics.write_failed_iterations
@@ -226,6 +239,7 @@ class LoadReport:
                 read_section_required = True
                 read_op_sec += metrics.read_rate
                 read_throughput += metrics.read_throughput
+                read_latency[node_key] = metrics.read_latency
                 if metrics.read_failed_iterations:
                     read_errors[node_key] = metrics.read_failed_iterations

@@ -233,11 +247,12 @@ class LoadReport:
             if delete_operations:
                 delete_section_required = True
                 delete_op_sec += metrics.delete_rate
+                delete_latency[node_key] = metrics.delete_latency
                 if metrics.delete_failed_iterations:
                     delete_errors[node_key] = metrics.delete_failed_iterations

         if write_section_required:
-            html += self._get_oprations_sub_section_html(
+            html += self._get_operations_sub_section_html(
                 "Write",
                 write_operations,
                 requested_write_rate_str,
@@ -245,10 +260,11 @@ class LoadReport:
                 write_op_sec,
                 write_throughput,
                 write_errors,
+                write_latency,
             )

         if read_section_required:
-            html += self._get_oprations_sub_section_html(
+            html += self._get_operations_sub_section_html(
                 "Read",
                 read_operations,
                 requested_read_rate_str,
@@ -256,10 +272,11 @@ class LoadReport:
                 read_op_sec,
                 read_throughput,
                 read_errors,
+                read_latency,
             )

         if delete_section_required:
-            html += self._get_oprations_sub_section_html(
+            html += self._get_operations_sub_section_html(
                 "Delete",
                 delete_operations,
                 requested_delete_rate_str,
@@ -267,6 +284,7 @@ class LoadReport:
                 delete_op_sec,
                 0,
                 delete_errors,
+                delete_latency,
             )

         return html
@@ -2,7 +2,9 @@ import logging

 from frostfs_testlib.load.load_config import LoadParams, LoadScenario
 from frostfs_testlib.load.load_metrics import get_metrics_object
+from frostfs_testlib.reporter import get_reporter

+reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")
@@ -10,54 +12,108 @@ class LoadVerifier:
     def __init__(self, load_params: LoadParams) -> None:
         self.load_params = load_params

-    def verify_summaries(self, load_summary, verification_summary) -> None:
-        exceptions = []
-
-        if not verification_summary or not load_summary:
-            logger.info("Can't check load results due to missing summary")
-
-        load_metrics = get_metrics_object(self.load_params.scenario, load_summary)
+    def collect_load_issues(self, load_summaries: dict[str, dict]) -> list[str]:
+        write_operations = 0
+        write_errors = 0
+
+        read_operations = 0
+        read_errors = 0
+
+        delete_operations = 0
+        delete_errors = 0

         writers = self.load_params.writers or self.load_params.preallocated_writers or 0
         readers = self.load_params.readers or self.load_params.preallocated_readers or 0
         deleters = self.load_params.deleters or self.load_params.preallocated_deleters or 0

-        objects_count = load_metrics.write_success_iterations
-        fails_count = load_metrics.write_failed_iterations
-
-        if writers > 0:
-            if objects_count < 1:
-                exceptions.append("Total put objects should be greater than 0")
-            if fails_count > 0:
-                exceptions.append(f"There were {fails_count} failed write operations")
-
-        if readers > 0:
-            read_count = load_metrics.read_success_iterations
-            read_fails_count = load_metrics.read_failed_iterations
-            if read_count < 1:
-                exceptions.append("Total read operations should be greater than 0")
-            if read_fails_count > 0:
-                exceptions.append(f"There were {read_fails_count} failed read operations")
+        for load_summary in load_summaries.values():
+            metrics = get_metrics_object(self.load_params.scenario, load_summary)
+
+            if writers:
+                write_operations += metrics.write_total_iterations
+                write_errors += metrics.write_failed_iterations
+
+            if readers:
+                read_operations += metrics.read_total_iterations
+                read_errors += metrics.read_failed_iterations
+
+            if deleters:
+                delete_operations += metrics.delete_total_iterations
+                delete_errors += metrics.delete_failed_iterations
+
+        issues = []
+        if writers and not write_operations:
+            issues.append(f"No any write operation was performed")
+        if readers and not read_operations:
+            issues.append(f"No any read operation was performed")
+        if deleters and not delete_operations:
+            issues.append(f"No any delete operation was performed")
+
+        if (
+            write_operations
+            and writers
+            and write_errors / write_operations * 100 > self.load_params.error_threshold
+        ):
+            issues.append(
+                f"Write error rate is greater than threshold: {write_errors / write_operations * 100} > {self.load_params.error_threshold}"
+            )
+        if (
+            read_operations
+            and readers
+            and read_errors / read_operations * 100 > self.load_params.error_threshold
+        ):
+            issues.append(
+                f"Read error rate is greater than threshold: {read_errors / read_operations * 100} > {self.load_params.error_threshold}"
+            )
+        if (
+            delete_operations
+            and deleters
+            and delete_errors / delete_operations * 100 > self.load_params.error_threshold
+        ):
+            issues.append(
+                f"Delete error rate is greater than threshold: {delete_errors / delete_operations * 100} > {self.load_params.error_threshold}"
+            )
+
+        return issues
+
+    def collect_verify_issues(self, load_summaries, verification_summaries) -> list[str]:
+        verify_issues: list[str] = []
+        for k6_process_label in load_summaries:
+            with reporter.step(f"Check verify scenario results for {k6_process_label}"):
+                verify_issues.extend(
+                    self._collect_verify_issues_on_process(
+                        k6_process_label,
+                        load_summaries[k6_process_label],
+                        verification_summaries[k6_process_label],
+                    )
+                )
+        return verify_issues
+
+    def _collect_verify_issues_on_process(
+        self, label, load_summary, verification_summary
+    ) -> list[str]:
+        issues = []
+
+        load_metrics = get_metrics_object(self.load_params.scenario, load_summary)
+
+        writers = self.load_params.writers or self.load_params.preallocated_writers or 0
+        deleters = self.load_params.deleters or self.load_params.preallocated_deleters or 0
+
+        delete_success = 0

         if deleters > 0:
-            delete_count = load_metrics.delete_success_iterations
-            delete_fails_count = load_metrics.delete_failed_iterations
-            if delete_count < 1:
-                exceptions.append("Total delete operations should be greater than 0")
-            if delete_fails_count > 0:
-                exceptions.append(f"There were {delete_fails_count} failed delete operations")
+            delete_success = load_metrics.delete_success_iterations

         if verification_summary:
             verify_metrics = get_metrics_object(LoadScenario.VERIFY, verification_summary)
             verified_objects = verify_metrics.read_success_iterations
             invalid_objects = verify_metrics.read_failed_iterations
+            total_left_objects = load_metrics.write_success_iterations - delete_success

-            if invalid_objects > 0:
-                exceptions.append(f"There were {invalid_objects} verification fails")
             # Due to interruptions we may see total verified objects to be less than written on writers count
-            if abs(objects_count - verified_objects) > writers:
-                exceptions.append(
-                    f"Verified objects mismatch. Total: {objects_count}, Verified: {verified_objects}. Writers: {writers}."
+            if abs(total_left_objects - verified_objects) > writers:
+                issues.append(
+                    f"Verified objects mismatch for {label}. Total: {total_left_objects}, Verified: {verified_objects}. Writers: {writers}."
                 )

-        assert not exceptions, "\n".join(exceptions)
+        return issues
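A quick worked check of the new threshold rule (illustrative numbers only): with error_threshold set to 1.5, three failed writes out of 150 operations is 2%, which exceeds the threshold and is reported as an issue, while one failure out of 150 (about 0.67%) is not.

    error_threshold = 1.5  # percent, illustrative value

    def exceeds_threshold(errors: int, operations: int) -> bool:
        # Mirrors the comparison used in collect_load_issues()
        return bool(operations) and errors / operations * 100 > error_threshold

    print(exceeds_threshold(3, 150))  # True  (2.0% > 1.5%)
    print(exceeds_threshold(1, 150))  # False (~0.67% <= 1.5%)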
@@ -3,9 +3,9 @@ import itertools
 import math
 import re
 import time
-from concurrent.futures import ThreadPoolExecutor
 from dataclasses import fields
 from typing import Optional
+from urllib.parse import urlparse

 import yaml

@@ -23,13 +23,16 @@ from frostfs_testlib.resources.load_params import (
     LOAD_NODE_SSH_USER,
     LOAD_NODES,
 )
+from frostfs_testlib.shell.command_inspectors import SuInspector
 from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput
 from frostfs_testlib.storage.cluster import ClusterNode
 from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
 from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.testing import parallel, run_optionally
-from frostfs_testlib.utils import FileKeeper, datetime_utils
+from frostfs_testlib.testing.test_control import retry
+from frostfs_testlib.utils import datetime_utils
+from frostfs_testlib.utils.file_keeper import FileKeeper

 reporter = get_reporter()
@@ -42,8 +45,8 @@ class RunnerBase(ScenarioRunner):
         parallel([k6.preset for k6 in self.k6_instances])

     @reporter.step_deco("Wait until load finish")
-    def wait_until_finish(self):
-        parallel([k6.wait_until_finished for k6 in self.k6_instances])
+    def wait_until_finish(self, soft_timeout: int = 0):
+        parallel([k6.wait_until_finished for k6 in self.k6_instances], soft_timeout=soft_timeout)

     @property
     def is_running(self):
@@ -71,6 +74,7 @@ class DefaultRunner(RunnerBase):
     def prepare(
         self,
         load_params: LoadParams,
+        cluster_nodes: list[ClusterNode],
         nodes_under_load: list[ClusterNode],
         k6_dir: str,
     ):
@@ -80,7 +84,7 @@ class DefaultRunner(RunnerBase):
         with reporter.step("Init s3 client on loaders"):
            storage_node = nodes_under_load[0].service(StorageNode)
            s3_public_keys = [
-                node.service(S3Gate).get_wallet_public_key() for node in nodes_under_load
+                node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes
            ]
            grpc_peer = storage_node.get_rpc_endpoint()
|
||||||
raise RuntimeError("k6_process_allocation_strategy should not be none")
|
raise RuntimeError("k6_process_allocation_strategy should not be none")
|
||||||
|
|
||||||
result = k6_instance.get_results()
|
result = k6_instance.get_results()
|
||||||
|
endpoint = urlparse(k6_instance.endpoints[0]).netloc or k6_instance.endpoints[0]
|
||||||
keys_map = {
|
keys_map = {
|
||||||
K6ProcessAllocationStrategy.PER_LOAD_NODE: k6_instance.loader.ip,
|
K6ProcessAllocationStrategy.PER_LOAD_NODE: k6_instance.loader.ip,
|
||||||
K6ProcessAllocationStrategy.PER_ENDPOINT: k6_instance.endpoints[0],
|
K6ProcessAllocationStrategy.PER_ENDPOINT: endpoint,
|
||||||
}
|
}
|
||||||
key = keys_map[k6_instance.load_params.k6_process_allocation_strategy]
|
key = keys_map[k6_instance.load_params.k6_process_allocation_strategy]
|
||||||
results[key] = result
|
results[key] = result
|
||||||
|
@@ -282,48 +287,64 @@ class LocalRunner(RunnerBase):
         self.cluster_state_controller = cluster_state_controller
         self.file_keeper = file_keeper
         self.loaders = [NodeLoader(node) for node in nodes_under_load]
+        self.nodes_under_load = nodes_under_load

     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
     @reporter.step_deco("Preparation steps")
     def prepare(
         self,
         load_params: LoadParams,
+        cluster_nodes: list[ClusterNode],
         nodes_under_load: list[ClusterNode],
         k6_dir: str,
     ):
-        @reporter.step_deco("Prepare node {cluster_node}")
-        def prepare_node(cluster_node: ClusterNode):
-            shell = cluster_node.host.get_shell()
-
-            with reporter.step("Allow storage user to login into system"):
-                shell.exec(f"sudo chsh -s /bin/bash {STORAGE_USER_NAME}")
-
-            with reporter.step("Update limits.conf"):
-                limits_path = "/etc/security/limits.conf"
-                self.file_keeper.add(cluster_node.storage_node, limits_path)
-                content = f"{STORAGE_USER_NAME} hard nofile 65536\n{STORAGE_USER_NAME} soft nofile 65536\n"
-                shell.exec(f"echo '{content}' | sudo tee {limits_path}")
-
-            with reporter.step("Download K6"):
-                shell.exec(f"sudo rm -rf {k6_dir};sudo mkdir {k6_dir}")
-                shell.exec(f"sudo curl -so {k6_dir}/k6.tar.gz {load_params.k6_url}")
-                shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz -C {k6_dir}")
-                shell.exec(f"sudo chmod -R 777 {k6_dir}")
-
-            with reporter.step("Create empty_passwd"):
-                self.wallet = WalletInfo(
-                    f"{k6_dir}/scenarios/files/wallet.json", "", "/tmp/empty_passwd.yml"
-                )
-                content = yaml.dump({"password": ""})
-                shell.exec(f'echo "{content}" | sudo tee {self.wallet.config_path}')
-                shell.exec(f"sudo chmod -R 777 {self.wallet.config_path}")
-
-        with ThreadPoolExecutor(max_workers=len(nodes_under_load)) as executor:
-            result = executor.map(prepare_node, nodes_under_load)
-
-        # Check for exceptions
-        for _ in result:
-            pass
+        parallel(self.prepare_node, nodes_under_load, k6_dir, load_params)
+
+    @retry(3, 5, expected_result=True)
+    def allow_user_to_login_in_system(self, cluster_node: ClusterNode):
+        shell = cluster_node.host.get_shell()
+
+        result = None
+        try:
+            shell.exec(f"sudo chsh -s /bin/bash {STORAGE_USER_NAME}")
+            self.lock_passwd_on_node(cluster_node)
+            options = CommandOptions(check=False, extra_inspectors=[SuInspector(STORAGE_USER_NAME)])
+            result = shell.exec("whoami", options)
+        finally:
+            if not result or result.return_code:
+                self.restore_passwd_on_node(cluster_node)
+                return False
+
+        return True
+
+    @reporter.step_deco("Prepare node {cluster_node}")
+    def prepare_node(self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams):
+        shell = cluster_node.host.get_shell()
+
+        with reporter.step("Allow storage user to login into system"):
+            self.allow_user_to_login_in_system(cluster_node)
+
+        with reporter.step("Update limits.conf"):
+            limits_path = "/etc/security/limits.conf"
+            self.file_keeper.add(cluster_node.storage_node, limits_path)
+            content = (
+                f"{STORAGE_USER_NAME} hard nofile 65536\n{STORAGE_USER_NAME} soft nofile 65536\n"
+            )
+            shell.exec(f"echo '{content}' | sudo tee {limits_path}")
+
+        with reporter.step("Download K6"):
+            shell.exec(f"sudo rm -rf {k6_dir};sudo mkdir {k6_dir}")
+            shell.exec(f"sudo curl -so {k6_dir}/k6.tar.gz {load_params.k6_url}")
+            shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz -C {k6_dir}")
+            shell.exec(f"sudo chmod -R 777 {k6_dir}")
+
+        with reporter.step("Create empty_passwd"):
+            self.wallet = WalletInfo(
+                f"{k6_dir}/scenarios/files/wallet.json", "", "/tmp/empty_passwd.yml"
+            )
+            content = yaml.dump({"password": ""})
+            shell.exec(f'echo "{content}" | sudo tee {self.wallet.config_path}')
+            shell.exec(f"sudo chmod -R 777 {self.wallet.config_path}")

     @reporter.step_deco("Init k6 instances")
     def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
@@ -373,6 +394,16 @@ class LocalRunner(RunnerBase):
         ):
             time.sleep(wait_after_start_time)

+    @reporter.step_deco("Restore passwd on {cluster_node}")
+    def restore_passwd_on_node(self, cluster_node: ClusterNode):
+        shell = cluster_node.host.get_shell()
+        shell.exec("sudo chattr -i /etc/passwd")
+
+    @reporter.step_deco("Lock passwd on {cluster_node}")
+    def lock_passwd_on_node(self, cluster_node: ClusterNode):
+        shell = cluster_node.host.get_shell()
+        shell.exec("sudo chattr +i /etc/passwd")
+
     def stop(self):
         for k6_instance in self.k6_instances:
             k6_instance.stop()
@@ -386,4 +417,6 @@ class LocalRunner(RunnerBase):
         result = k6_instance.get_results()
         results[k6_instance.loader.ip] = result

+        parallel(self.restore_passwd_on_node, self.nodes_under_load)
+
         return results
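The login check above is wrapped in @retry(3, 5, expected_result=True) from frostfs_testlib.testing.test_control; the exact semantics live in that module, but the gist is to re-invoke the function until it returns the expected value. A rough standalone sketch of that idea, with illustrative names only:

    import time
    from functools import wraps

    def retry_sketch(max_attempts: int, sleep_interval: float, expected_result=None):
        # Re-run the wrapped callable until it returns expected_result or attempts run out.
        def decorator(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                result = None
                for _ in range(max_attempts):
                    result = func(*args, **kwargs)
                    if result == expected_result:
                        return result
                    time.sleep(sleep_interval)
                return result
            return wrapper
        return decorator

    @retry_sketch(3, 5, expected_result=True)
    def flaky_login_check() -> bool:
        # e.g. chsh + "whoami" under the storage user, returning True on success
        return True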
@@ -1,12 +1,6 @@
-import sys
+from importlib.metadata import entry_points
 from typing import Any

-if sys.version_info < (3, 10):
-    # On Python prior 3.10 we need to use backport of entry points
-    from importlib_metadata import entry_points
-else:
-    from importlib.metadata import entry_points
-

 def load_plugin(plugin_group: str, name: str) -> Any:
     """Loads plugin using entry point specification.
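Since the library now targets Python 3.10, the stdlib entry_points(group=...) selector is enough on its own. A sketch of how a loader like load_plugin can resolve a plugin this way (the body and the example group/name below are illustrative, not the library's exact implementation):

    from importlib.metadata import entry_points
    from typing import Any

    def load_plugin_sketch(plugin_group: str, name: str) -> Any:
        # entry_points(group=...) is available from Python 3.10 without the importlib_metadata backport.
        plugins = entry_points(group=plugin_group)
        if name not in plugins.names:
            return None
        return plugins[name].load()

    # e.g. load_plugin_sketch("some.plugin.group", "some-plugin")  # hypothetical group and name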
@@ -43,6 +43,6 @@ with open(DEFAULT_WALLET_CONFIG, "w") as file:

 # Number of attempts that S3 clients will attempt per each request (1 means single attempt
 # without any retries)
-MAX_REQUEST_ATTEMPTS = 1
+MAX_REQUEST_ATTEMPTS = 5
 RETRY_MODE = "standard"
 CREDENTIALS_CREATE_TIMEOUT = "1m"
@@ -11,8 +11,9 @@ BACKGROUND_WRITERS_COUNT = os.getenv("BACKGROUND_WRITERS_COUNT", 0)
 BACKGROUND_READERS_COUNT = os.getenv("BACKGROUND_READERS_COUNT", 0)
 BACKGROUND_DELETERS_COUNT = os.getenv("BACKGROUND_DELETERS_COUNT", 0)
 BACKGROUND_VERIFIERS_COUNT = os.getenv("BACKGROUND_VERIFIERS_COUNT", 0)
-BACKGROUND_LOAD_DEFAULT_TIME = os.getenv("BACKGROUND_LOAD_DEFAULT_TIME", 600)
+BACKGROUND_LOAD_DEFAULT_TIME = os.getenv("BACKGROUND_LOAD_DEFAULT_TIME", 1800)
 BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE = os.getenv("BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE", 32)
+BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME = float(os.getenv("BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME", 0.8))
 BACKGROUND_LOAD_SETUP_TIMEOUT = os.getenv("BACKGROUND_LOAD_SETUP_TIMEOUT", "5s")

 # This will decrease load params for some weak environments
@@ -26,7 +27,7 @@ BACKGROUND_LOAD_CONTAINER_PLACEMENT_POLICY = os.getenv(
 BACKGROUND_LOAD_S3_LOCATION = os.getenv("BACKGROUND_LOAD_S3_LOCATION", "node-off")
 PRESET_CONTAINERS_COUNT = os.getenv("CONTAINERS_COUNT", "40")
 # TODO: At lease one object is required due to bug in xk6 (buckets with no objects produce millions exceptions in read)
-PRESET_OBJECTS_COUNT = os.getenv("OBJ_COUNT", "10")
+PRESET_OBJECTS_COUNT = os.getenv("OBJ_COUNT", "1")
 K6_DIRECTORY = os.getenv("K6_DIRECTORY", "/etc/k6")
 K6_TEARDOWN_PERIOD = os.getenv("K6_TEARDOWN_PERIOD", "30")
 K6_STOP_SIGNAL_TIMEOUT = int(os.getenv("K6_STOP_SIGNAL_TIMEOUT", 300))
@@ -7,23 +7,22 @@ from time import sleep
 from typing import Literal, Optional, Union
 
 from frostfs_testlib.reporter import get_reporter
-from frostfs_testlib.resources.common import (
-    ASSETS_DIR,
-    MAX_REQUEST_ATTEMPTS,
-    RETRY_MODE,
-    S3_SYNC_WAIT_TIME,
-)
+from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME
 from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict
+from frostfs_testlib.shell import CommandOptions
+from frostfs_testlib.shell.local_shell import LocalShell
 
 # TODO: Refactor this code to use shell instead of _cmd_run
-from frostfs_testlib.utils.cli_utils import _cmd_run, _configure_aws_cli
+from frostfs_testlib.utils.cli_utils import _configure_aws_cli
 
 reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")
-LONG_TIMEOUT = 240
+command_options = CommandOptions(timeout=240)
 
 
 class AwsCliClient(S3ClientWrapper):
+    __repr_name__: str = "AWS CLI"
 
     # Flags that we use for all S3 commands: disable SSL verification (as we use self-signed
     # certificate in devenv) and disable automatic pagination in CLI output
     common_flags = "--no-verify-ssl --no-paginate"
@@ -32,10 +31,13 @@ class AwsCliClient(S3ClientWrapper):
     @reporter.step_deco("Configure S3 client (aws cli)")
     def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None:
         self.s3gate_endpoint = s3gate_endpoint
+        self.local_shell = LocalShell()
         try:
             _configure_aws_cli("aws configure", access_key_id, secret_access_key)
-            _cmd_run(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS}")
-            _cmd_run(f"aws configure set retry_mode {RETRY_MODE}")
+            self.local_shell.exec(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS}")
+            self.local_shell.exec(
+                f"aws configure set retry_mode {RETRY_MODE}",
+            )
         except Exception as err:
             raise RuntimeError("Error while configuring AwsCliClient") from err

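The hunks that follow repeat the same mechanical change: every `_cmd_run(cmd)` becomes `self.local_shell.exec(cmd)`, calls that read output take `.stdout` from the returned result, and uses of the removed `LONG_TIMEOUT` constant pass the module-level `command_options` instead. A minimal sketch of the new call style, not taken from the diff itself (the endpoint and bucket name are placeholders):

```python
# Minimal sketch of the call style used throughout the refactored client
# (endpoint and bucket name below are placeholders).
from frostfs_testlib.shell import CommandOptions
from frostfs_testlib.shell.local_shell import LocalShell

shell = LocalShell()

# Short command: the result object exposes the captured output via .stdout
result = shell.exec("aws --no-verify-ssl --no-paginate s3api list-buckets --endpoint http://127.0.0.1:8080")
print(result.stdout)

# Long-running command: pass explicit options instead of the removed LONG_TIMEOUT constant
command_options = CommandOptions(timeout=240)
shell.exec("aws s3api delete-bucket --bucket test-bucket --endpoint http://127.0.0.1:8080", command_options)
```
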
@@ -77,7 +79,7 @@ class AwsCliClient(S3ClientWrapper):
             cmd += f" --grant-read {grant_read}"
         if location_constraint:
             cmd += f" --create-bucket-configuration LocationConstraint={location_constraint}"
-        _cmd_run(cmd)
+        self.local_shell.exec(cmd)
         sleep(S3_SYNC_WAIT_TIME)
 
         return bucket
 
@@ -85,20 +87,20 @@ class AwsCliClient(S3ClientWrapper):
     @reporter.step_deco("List buckets S3")
     def list_buckets(self) -> list[str]:
         cmd = f"aws {self.common_flags} s3api list-buckets --endpoint {self.s3gate_endpoint}"
-        output = _cmd_run(cmd)
+        output = self.local_shell.exec(cmd).stdout
         buckets_json = self._to_json(output)
         return [bucket["Name"] for bucket in buckets_json["Buckets"]]
 
     @reporter.step_deco("Delete bucket S3")
     def delete_bucket(self, bucket: str) -> None:
         cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint}"
-        _cmd_run(cmd, LONG_TIMEOUT)
+        self.local_shell.exec(cmd, command_options)
         sleep(S3_SYNC_WAIT_TIME)
 
     @reporter.step_deco("Head bucket S3")
     def head_bucket(self, bucket: str) -> None:
         cmd = f"aws {self.common_flags} s3api head-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint}"
-        _cmd_run(cmd)
+        self.local_shell.exec(cmd)
 
     @reporter.step_deco("Put bucket versioning status")
     def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None:
@@ -107,7 +109,7 @@ class AwsCliClient(S3ClientWrapper):
             f"--versioning-configuration Status={status.value} "
             f"--endpoint {self.s3gate_endpoint}"
         )
-        _cmd_run(cmd)
+        self.local_shell.exec(cmd)
 
     @reporter.step_deco("Get bucket versioning status")
     def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]:
@@ -115,58 +117,48 @@ class AwsCliClient(S3ClientWrapper):
             f"aws {self.common_flags} s3api get-bucket-versioning --bucket {bucket} "
             f"--endpoint {self.s3gate_endpoint}"
         )
-        output = _cmd_run(cmd)
+        output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
         return response.get("Status")
 
     @reporter.step_deco("Put bucket tagging")
     def put_bucket_tagging(self, bucket: str, tags: list) -> None:
-        tags_json = {
-            "TagSet": [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
-        }
+        tags_json = {"TagSet": [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]}
         cmd = (
             f"aws {self.common_flags} s3api put-bucket-tagging --bucket {bucket} "
             f"--tagging '{json.dumps(tags_json)}' --endpoint {self.s3gate_endpoint}"
         )
-        _cmd_run(cmd)
+        self.local_shell.exec(cmd)
 
     @reporter.step_deco("Get bucket tagging")
     def get_bucket_tagging(self, bucket: str) -> list:
         cmd = (
-            f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} "
-            f"--endpoint {self.s3gate_endpoint}"
+            f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}"
         )
-        output = _cmd_run(cmd)
+        output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
         return response.get("TagSet")
 
     @reporter.step_deco("Get bucket acl")
     def get_bucket_acl(self, bucket: str) -> list:
-        cmd = (
-            f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} "
-            f"--endpoint {self.s3gate_endpoint}"
-        )
-        output = _cmd_run(cmd)
+        cmd = f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}"
+        output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
         return response.get("Grants")
 
     @reporter.step_deco("Get bucket location")
     def get_bucket_location(self, bucket: str) -> dict:
         cmd = (
-            f"aws {self.common_flags} s3api get-bucket-location --bucket {bucket} "
-            f"--endpoint {self.s3gate_endpoint}"
+            f"aws {self.common_flags} s3api get-bucket-location --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}"
         )
-        output = _cmd_run(cmd)
+        output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
         return response.get("LocationConstraint")
 
     @reporter.step_deco("List objects S3")
     def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
-        cmd = (
-            f"aws {self.common_flags} s3api list-objects --bucket {bucket} "
-            f"--endpoint {self.s3gate_endpoint}"
-        )
-        output = _cmd_run(cmd)
+        cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}"
+        output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
 
         obj_list = [obj["Key"] for obj in response.get("Contents", [])]
@@ -176,11 +168,8 @@ class AwsCliClient(S3ClientWrapper):
 
     @reporter.step_deco("List objects S3 v2")
     def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
-        cmd = (
-            f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} "
-            f"--endpoint {self.s3gate_endpoint}"
-        )
-        output = _cmd_run(cmd)
+        cmd = f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}"
+        output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
 
         obj_list = [obj["Key"] for obj in response.get("Contents", [])]
@@ -194,7 +183,7 @@ class AwsCliClient(S3ClientWrapper):
             f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} "
             f"--endpoint {self.s3gate_endpoint}"
         )
-        output = _cmd_run(cmd)
+        output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
         return response if full_output else response.get("Versions", [])
 
@@ -204,7 +193,7 @@ class AwsCliClient(S3ClientWrapper):
             f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} "
             f"--endpoint {self.s3gate_endpoint}"
         )
-        output = _cmd_run(cmd)
+        output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
         return response if full_output else response.get("DeleteMarkers", [])
 
@@ -243,7 +232,7 @@ class AwsCliClient(S3ClientWrapper):
             cmd += f" --tagging-directive {tagging_directive}"
         if tagging:
             cmd += f" --tagging {tagging}"
-        _cmd_run(cmd, LONG_TIMEOUT)
+        self.local_shell.exec(cmd, command_options)
         return key
 
     @reporter.step_deco("Put object S3")
@@ -286,7 +275,7 @@ class AwsCliClient(S3ClientWrapper):
             cmd += f" --grant-full-control '{grant_full_control}'"
         if grant_read:
             cmd += f" --grant-read {grant_read}"
-        output = _cmd_run(cmd, LONG_TIMEOUT)
+        output = self.local_shell.exec(cmd, command_options).stdout
         response = self._to_json(output)
         return response.get("VersionId")
 
@@ -297,7 +286,7 @@ class AwsCliClient(S3ClientWrapper):
             f"aws {self.common_flags} s3api head-object --bucket {bucket} --key {key} "
             f"{version} --endpoint {self.s3gate_endpoint}"
         )
-        output = _cmd_run(cmd)
+        output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
         return response
 
@@ -318,7 +307,7 @@ class AwsCliClient(S3ClientWrapper):
         )
         if object_range:
             cmd += f" --range bytes={object_range[0]}-{object_range[1]}"
-        output = _cmd_run(cmd)
+        output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
         return response if full_output else file_path
 
@@ -329,7 +318,7 @@ class AwsCliClient(S3ClientWrapper):
             f"aws {self.common_flags} s3api get-object-acl --bucket {bucket} --key {key} "
             f"{version} --endpoint {self.s3gate_endpoint}"
         )
-        output = _cmd_run(cmd)
+        output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
         return response.get("Grants")
 
@@ -352,7 +341,7 @@ class AwsCliClient(S3ClientWrapper):
             cmd += f" --grant-write {grant_write}"
         if grant_read:
             cmd += f" --grant-read {grant_read}"
-        output = _cmd_run(cmd)
+        output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
         return response.get("Grants")
 
@@ -364,17 +353,14 @@ class AwsCliClient(S3ClientWrapper):
         grant_write: Optional[str] = None,
         grant_read: Optional[str] = None,
     ) -> None:
-        cmd = (
-            f"aws {self.common_flags} s3api put-bucket-acl --bucket {bucket} "
-            f" --endpoint {self.s3gate_endpoint}"
-        )
+        cmd = f"aws {self.common_flags} s3api put-bucket-acl --bucket {bucket} " f" --endpoint {self.s3gate_endpoint}"
         if acl:
             cmd += f" --acl {acl}"
         if grant_write:
             cmd += f" --grant-write {grant_write}"
         if grant_read:
             cmd += f" --grant-read {grant_read}"
-        _cmd_run(cmd)
+        self.local_shell.exec(cmd)
 
     @reporter.step_deco("Delete objects S3")
     def delete_objects(self, bucket: str, keys: list[str]) -> dict:
@@ -388,7 +374,7 @@ class AwsCliClient(S3ClientWrapper):
             f"aws {self.common_flags} s3api delete-objects --bucket {bucket} "
             f"--delete file://{file_path} --endpoint {self.s3gate_endpoint}"
         )
-        output = _cmd_run(cmd, LONG_TIMEOUT)
+        output = self.local_shell.exec(cmd, command_options).stdout
         response = self._to_json(output)
         sleep(S3_SYNC_WAIT_TIME)
         return response
@@ -400,7 +386,7 @@ class AwsCliClient(S3ClientWrapper):
             f"aws {self.common_flags} s3api delete-object --bucket {bucket} "
             f"--key {key} {version} --endpoint {self.s3gate_endpoint}"
         )
-        output = _cmd_run(cmd, LONG_TIMEOUT)
+        output = self.local_shell.exec(cmd, command_options).stdout
         sleep(S3_SYNC_WAIT_TIME)
         return self._to_json(output)
 
@@ -427,7 +413,7 @@ class AwsCliClient(S3ClientWrapper):
             f"aws {self.common_flags} s3api delete-objects --bucket {bucket} "
             f"--delete file://{file_path} --endpoint {self.s3gate_endpoint}"
         )
-        output = _cmd_run(cmd, LONG_TIMEOUT)
+        output = self.local_shell.exec(cmd, command_options).stdout
         sleep(S3_SYNC_WAIT_TIME)
         return self._to_json(output)
 
@@ -435,9 +421,7 @@ class AwsCliClient(S3ClientWrapper):
     def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None:
         # Delete objects without creating delete markers
         for object_version in object_versions:
-            self.delete_object(
-                bucket=bucket, key=object_version["Key"], version_id=object_version["VersionId"]
-            )
+            self.delete_object(bucket=bucket, key=object_version["Key"], version_id=object_version["VersionId"])
 
     @reporter.step_deco("Get object attributes")
     def get_object_attributes(
@@ -460,7 +444,7 @@ class AwsCliClient(S3ClientWrapper):
             f"--key {key} {version} {parts} {part_number_str} --object-attributes {attrs} "
             f"--endpoint {self.s3gate_endpoint}"
         )
-        output = _cmd_run(cmd)
+        output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
 
         for attr in attributes:
@@ -473,11 +457,8 @@ class AwsCliClient(S3ClientWrapper):
 
     @reporter.step_deco("Get bucket policy")
     def get_bucket_policy(self, bucket: str) -> dict:
-        cmd = (
-            f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} "
-            f"--endpoint {self.s3gate_endpoint}"
-        )
-        output = _cmd_run(cmd)
+        cmd = f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}"
+        output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
         return response.get("Policy")
 
@@ -494,15 +475,12 @@ class AwsCliClient(S3ClientWrapper):
             f"aws {self.common_flags} s3api put-bucket-policy --bucket {bucket} "
             f"--policy {dumped_policy} --endpoint {self.s3gate_endpoint}"
         )
-        _cmd_run(cmd)
+        self.local_shell.exec(cmd)
 
     @reporter.step_deco("Get bucket cors")
     def get_bucket_cors(self, bucket: str) -> dict:
-        cmd = (
-            f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} "
-            f"--endpoint {self.s3gate_endpoint}"
-        )
-        output = _cmd_run(cmd)
+        cmd = f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}"
+        output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
         return response.get("CORSRules")
 
@@ -512,15 +490,14 @@ class AwsCliClient(S3ClientWrapper):
             f"aws {self.common_flags} s3api put-bucket-cors --bucket {bucket} "
             f"--cors-configuration '{json.dumps(cors_configuration)}' --endpoint {self.s3gate_endpoint}"
         )
-        _cmd_run(cmd)
+        self.local_shell.exec(cmd)
 
     @reporter.step_deco("Delete bucket cors")
     def delete_bucket_cors(self, bucket: str) -> None:
         cmd = (
-            f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} "
-            f"--endpoint {self.s3gate_endpoint}"
+            f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}"
         )
-        _cmd_run(cmd)
+        self.local_shell.exec(cmd)
 
     @reporter.step_deco("Delete bucket tagging")
     def delete_bucket_tagging(self, bucket: str) -> None:
@@ -528,7 +505,7 @@ class AwsCliClient(S3ClientWrapper):
             f"aws {self.common_flags} s3api delete-bucket-tagging --bucket {bucket} "
             f"--endpoint {self.s3gate_endpoint}"
         )
-        _cmd_run(cmd)
+        self.local_shell.exec(cmd)
 
     @reporter.step_deco("Put object retention")
     def put_object_retention(
@@ -546,7 +523,7 @@ class AwsCliClient(S3ClientWrapper):
         )
         if bypass_governance_retention is not None:
             cmd += " --bypass-governance-retention"
-        _cmd_run(cmd)
+        self.local_shell.exec(cmd)
 
     @reporter.step_deco("Put object legal hold")
     def put_object_legal_hold(
@@ -562,7 +539,7 @@ class AwsCliClient(S3ClientWrapper):
             f"aws {self.common_flags} s3api put-object-legal-hold --bucket {bucket} --key {key} "
             f"{version} --legal-hold '{legal_hold}' --endpoint {self.s3gate_endpoint}"
         )
-        _cmd_run(cmd)
+        self.local_shell.exec(cmd)
 
     @reporter.step_deco("Put object tagging")
     def put_object_tagging(self, bucket: str, key: str, tags: list) -> None:
@@ -572,7 +549,7 @@ class AwsCliClient(S3ClientWrapper):
             f"aws {self.common_flags} s3api put-object-tagging --bucket {bucket} --key {key} "
             f"--tagging '{json.dumps(tagging)}' --endpoint {self.s3gate_endpoint}"
         )
-        _cmd_run(cmd)
+        self.local_shell.exec(cmd)
 
     @reporter.step_deco("Get object tagging")
     def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list:
@@ -581,7 +558,7 @@ class AwsCliClient(S3ClientWrapper):
             f"aws {self.common_flags} s3api get-object-tagging --bucket {bucket} --key {key} "
             f"{version} --endpoint {self.s3gate_endpoint}"
         )
-        output = _cmd_run(cmd)
+        output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
         return response.get("TagSet")
 
@@ -591,7 +568,7 @@ class AwsCliClient(S3ClientWrapper):
             f"aws {self.common_flags} s3api delete-object-tagging --bucket {bucket} "
             f"--key {key} --endpoint {self.s3gate_endpoint}"
         )
-        _cmd_run(cmd)
+        self.local_shell.exec(cmd)
 
     @reporter.step_deco("Sync directory S3")
     def sync(
@@ -601,17 +578,14 @@ class AwsCliClient(S3ClientWrapper):
         acl: Optional[str] = None,
         metadata: Optional[dict] = None,
     ) -> dict:
-        cmd = (
-            f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} "
-            f"--endpoint-url {self.s3gate_endpoint}"
-        )
+        cmd = f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " f"--endpoint-url {self.s3gate_endpoint}"
         if metadata:
             cmd += " --metadata"
             for key, value in metadata.items():
                 cmd += f" {key}={value}"
         if acl:
             cmd += f" --acl {acl}"
-        output = _cmd_run(cmd, LONG_TIMEOUT)
+        output = self.local_shell.exec(cmd, command_options).stdout
         return self._to_json(output)
 
     @reporter.step_deco("CP directory S3")
@@ -632,7 +606,7 @@ class AwsCliClient(S3ClientWrapper):
                 cmd += f" {key}={value}"
         if acl:
             cmd += f" --acl {acl}"
-        output = _cmd_run(cmd, LONG_TIMEOUT)
+        output = self.local_shell.exec(cmd, command_options).stdout
         return self._to_json(output)
 
     @reporter.step_deco("Create multipart upload S3")
@@ -641,7 +615,7 @@ class AwsCliClient(S3ClientWrapper):
             f"aws {self.common_flags} s3api create-multipart-upload --bucket {bucket} "
             f"--key {key} --endpoint-url {self.s3gate_endpoint}"
         )
-        output = _cmd_run(cmd)
+        output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
 
         assert response.get("UploadId"), f"Expected UploadId in response:\n{response}"
@@ -654,7 +628,7 @@ class AwsCliClient(S3ClientWrapper):
             f"aws {self.common_flags} s3api list-multipart-uploads --bucket {bucket} "
             f"--endpoint-url {self.s3gate_endpoint}"
         )
-        output = _cmd_run(cmd)
+        output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
         return response.get("Uploads")
 
@@ -664,36 +638,30 @@ class AwsCliClient(S3ClientWrapper):
             f"aws {self.common_flags} s3api abort-multipart-upload --bucket {bucket} "
             f"--key {key} --upload-id {upload_id} --endpoint-url {self.s3gate_endpoint}"
         )
-        _cmd_run(cmd)
+        self.local_shell.exec(cmd)
 
     @reporter.step_deco("Upload part S3")
-    def upload_part(
-        self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str
-    ) -> str:
+    def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str:
         cmd = (
             f"aws {self.common_flags} s3api upload-part --bucket {bucket} --key {key} "
             f"--upload-id {upload_id} --part-number {part_num} --body {filepath} "
             f"--endpoint-url {self.s3gate_endpoint}"
         )
-        output = _cmd_run(cmd, LONG_TIMEOUT)
+        output = self.local_shell.exec(cmd, command_options).stdout
         response = self._to_json(output)
         assert response.get("ETag"), f"Expected ETag in response:\n{response}"
         return response["ETag"]
 
     @reporter.step_deco("Upload copy part S3")
-    def upload_part_copy(
-        self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str
-    ) -> str:
+    def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str:
         cmd = (
             f"aws {self.common_flags} s3api upload-part-copy --bucket {bucket} --key {key} "
             f"--upload-id {upload_id} --part-number {part_num} --copy-source {copy_source} "
             f"--endpoint-url {self.s3gate_endpoint}"
         )
-        output = _cmd_run(cmd, LONG_TIMEOUT)
+        output = self.local_shell.exec(cmd, command_options).stdout
         response = self._to_json(output)
-        assert response.get("CopyPartResult", []).get(
-            "ETag"
-        ), f"Expected ETag in response:\n{response}"
+        assert response.get("CopyPartResult", []).get("ETag"), f"Expected ETag in response:\n{response}"
 
         return response["CopyPartResult"]["ETag"]
 
@@ -703,7 +671,7 @@ class AwsCliClient(S3ClientWrapper):
             f"aws {self.common_flags} s3api list-parts --bucket {bucket} --key {key} "
             f"--upload-id {upload_id} --endpoint-url {self.s3gate_endpoint}"
         )
-        output = _cmd_run(cmd)
+        output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
 
         assert response.get("Parts"), f"Expected Parts in response:\n{response}"
@@ -725,7 +693,7 @@ class AwsCliClient(S3ClientWrapper):
             f"--key {key} --upload-id {upload_id} --multipart-upload file://{file_path} "
             f"--endpoint-url {self.s3gate_endpoint}"
         )
-        _cmd_run(cmd)
+        self.local_shell.exec(cmd)
 
     @reporter.step_deco("Put object lock configuration")
     def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict:
@@ -733,7 +701,7 @@ class AwsCliClient(S3ClientWrapper):
             f"aws {self.common_flags} s3api put-object-lock-configuration --bucket {bucket} "
             f"--object-lock-configuration '{json.dumps(configuration)}' --endpoint-url {self.s3gate_endpoint}"
         )
-        output = _cmd_run(cmd)
+        output = self.local_shell.exec(cmd).stdout
         return self._to_json(output)
 
     @reporter.step_deco("Get object lock configuration")
@@ -742,7 +710,7 @@ class AwsCliClient(S3ClientWrapper):
             f"aws {self.common_flags} s3api get-object-lock-configuration --bucket {bucket} "
             f"--endpoint-url {self.s3gate_endpoint}"
         )
-        output = _cmd_run(cmd)
+        output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
         return response.get("ObjectLockConfiguration")

@@ -44,6 +44,8 @@ def report_error(func):
 
 
 class Boto3ClientWrapper(S3ClientWrapper):
+    __repr_name__: str = "Boto3 client"
+
     @reporter.step_deco("Configure S3 client (boto3)")
     @report_error
     def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None:

@@ -1,8 +1,9 @@
-from abc import ABC, abstractmethod
+from abc import abstractmethod
 from datetime import datetime
-from enum import Enum
 from typing import Literal, Optional, Union
 
+from frostfs_testlib.testing.readable import HumanReadableABC, HumanReadableEnum
 
 
 def _make_objs_dict(key_names):
     objs_list = []
@@ -13,7 +14,8 @@ def _make_objs_dict(key_names):
     return objs_dict
 
 
-class VersioningStatus(Enum):
+class VersioningStatus(HumanReadableEnum):
+    UNDEFINED = None
     ENABLED = "Enabled"
     SUSPENDED = "Suspended"
 
@@ -29,7 +31,7 @@ ACL_COPY = [
 ]
 
 
-class S3ClientWrapper(ABC):
+class S3ClientWrapper(HumanReadableABC):
     @abstractmethod
     def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None:
         pass

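An illustrative sketch (not taken from the diff) of what the new `UNDEFINED` member enables: callers can say "do not touch versioning" explicitly instead of passing `None` around. The helper below is hypothetical:

```python
# Hypothetical helper showing the new UNDEFINED member in use.
from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus

def configure_versioning(s3_client: S3ClientWrapper, bucket: str, status: VersioningStatus) -> None:
    # Skip the API call entirely when versioning is not meant to be configured.
    if status == VersioningStatus.UNDEFINED:
        return
    s3_client.put_bucket_versioning(bucket, status)
```
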
@@ -1,3 +1,3 @@
 from frostfs_testlib.shell.interfaces import CommandOptions, CommandResult, InteractiveInput, Shell
 from frostfs_testlib.shell.local_shell import LocalShell
-from frostfs_testlib.shell.ssh_shell import SSHShell
+from frostfs_testlib.shell.ssh_shell import SshConnectionProvider, SSHShell

@@ -39,7 +39,7 @@ class LocalShell(Shell):
         log_file = tempfile.TemporaryFile()  # File is reliable cross-platform way to capture output
 
         try:
-            command_process = pexpect.spawn(command, timeout=options.timeout)
+            command_process = pexpect.spawn(command, timeout=options.timeout, use_poll=True)
         except (pexpect.ExceptionPexpect, OSError) as exc:
             raise RuntimeError(f"Command: {command}") from exc
 

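Background for the one-line change above (an illustrative sketch, not part of the diff): `use_poll=True` makes pexpect wait on the child process with `poll()` instead of `select()`, so long test sessions are not limited by `select()`'s file-descriptor ceiling. A standalone reproduction of the call, assuming pexpect 4.3 or newer:

```python
# Minimal sketch: spawn a command with poll()-based waiting, as in the change above.
import pexpect

child = pexpect.spawn("echo hello", timeout=30, use_poll=True)
child.expect(pexpect.EOF)        # wait until the command finishes
print(child.before.decode())     # captured output
```
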
@@ -20,12 +20,117 @@ from paramiko import (
 from paramiko.ssh_exception import AuthenticationException
 
 from frostfs_testlib.reporter import get_reporter
-from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell
+from frostfs_testlib.shell.interfaces import (
+    CommandInspector,
+    CommandOptions,
+    CommandResult,
+    Shell,
+    SshCredentials,
+)
 
 logger = logging.getLogger("frostfs.testlib.shell")
 reporter = get_reporter()
 
 
+class SshConnectionProvider:
+    SSH_CONNECTION_ATTEMPTS: ClassVar[int] = 4
+    SSH_ATTEMPTS_INTERVAL: ClassVar[int] = 10
+    CONNECTION_TIMEOUT = 60
+
+    instance = None
+    connections: dict[str, SSHClient] = {}
+    creds: dict[str, SshCredentials] = {}
+
+    def __new__(cls):
+        if not cls.instance:
+            cls.instance = super(SshConnectionProvider, cls).__new__(cls)
+        return cls.instance
+
+    def store_creds(self, host: str, ssh_creds: SshCredentials):
+        self.creds[host] = ssh_creds
+
+    def provide(self, host: str, port: str) -> SSHClient:
+        if host not in self.creds:
+            raise RuntimeError(f"Please add credentials for host {host}")
+
+        if host in self.connections:
+            client = self.connections[host]
+            if client:
+                return client
+
+        creds = self.creds[host]
+        client = self._create_connection(host, port, creds)
+        self.connections[host] = client
+        return client
+
+    def drop(self, host: str):
+        if host in self.connections:
+            client = self.connections.pop(host)
+            client.close()
+
+    def drop_all(self):
+        hosts = list(self.connections.keys())
+        for host in hosts:
+            self.drop(host)
+
+    def _create_connection(
+        self,
+        host: str,
+        port: str,
+        creds: SshCredentials,
+    ) -> SSHClient:
+        for attempt in range(self.SSH_CONNECTION_ATTEMPTS):
+            connection = SSHClient()
+            connection.set_missing_host_key_policy(AutoAddPolicy())
+            try:
+                if creds.ssh_key_path:
+                    logger.info(
+                        f"Trying to connect to host {host} as {creds.ssh_login} using SSH key "
+                        f"{creds.ssh_key_path} (attempt {attempt})"
+                    )
+                    connection.connect(
+                        hostname=host,
+                        port=port,
+                        username=creds.ssh_login,
+                        pkey=_load_private_key(creds.ssh_key_path, creds.ssh_key_passphrase),
+                        timeout=self.CONNECTION_TIMEOUT,
+                    )
+                else:
+                    logger.info(
+                        f"Trying to connect to host {host} as {creds.ssh_login} using password "
+                        f"(attempt {attempt})"
+                    )
+                    connection.connect(
+                        hostname=host,
+                        port=port,
+                        username=creds.ssh_login,
+                        password=creds.ssh_password,
+                        timeout=self.CONNECTION_TIMEOUT,
+                    )
+                return connection
+            except AuthenticationException:
+                connection.close()
+                logger.exception(f"Can't connect to host {host}")
+                raise
+            except (
+                SSHException,
+                ssh_exception.NoValidConnectionsError,
+                AttributeError,
+                socket.timeout,
+                OSError,
+            ) as exc:
+                connection.close()
+                can_retry = attempt + 1 < self.SSH_CONNECTION_ATTEMPTS
+                if can_retry:
+                    logger.warn(
+                        f"Can't connect to host {host}, will retry after {self.SSH_ATTEMPTS_INTERVAL}s. Error: {exc}"
+                    )
+                    sleep(self.SSH_ATTEMPTS_INTERVAL)
+                    continue
+                logger.exception(f"Can't connect to host {host}")
+                raise HostIsNotAvailable(host) from exc
+
+
 class HostIsNotAvailable(Exception):
     """Raised when host is not reachable via SSH connection."""

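An illustrative sketch of how the new provider behaves (host, port and credentials below are placeholders): `__new__` always hands back the same instance and connections are cached per host, so every `SSHShell` pointed at the same host now reuses one paramiko client instead of reconnecting.

```python
# Minimal sketch of the singleton connection cache (placeholder host and credentials).
from frostfs_testlib.shell.interfaces import SshCredentials
from frostfs_testlib.shell.ssh_shell import SshConnectionProvider

provider = SshConnectionProvider()
provider.store_creds("10.0.0.5", SshCredentials("root", "secret", None, None))

client = provider.provide("10.0.0.5", "22")       # first call opens the SSH connection
same_client = provider.provide("10.0.0.5", "22")  # second call returns the cached client
assert client is same_client

provider.drop_all()  # close every cached connection, e.g. in a session-level teardown
```
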
@@ -91,9 +196,6 @@ class SSHShell(Shell):
     # to allow remote command to flush its output buffer
     DELAY_AFTER_EXIT = 0.2
 
-    SSH_CONNECTION_ATTEMPTS: ClassVar[int] = 3
-    CONNECTION_TIMEOUT = 90
-
     def __init__(
         self,
         host: str,
@@ -105,23 +207,21 @@ class SSHShell(Shell):
         command_inspectors: Optional[list[CommandInspector]] = None,
     ) -> None:
         super().__init__()
+        self.connection_provider = SshConnectionProvider()
+        self.connection_provider.store_creds(
+            host, SshCredentials(login, password, private_key_path, private_key_passphrase)
+        )
         self.host = host
         self.port = port
-        self.login = login
-        self.password = password
-        self.private_key_path = private_key_path
-        self.private_key_passphrase = private_key_passphrase
         self.command_inspectors = command_inspectors or []
-        self.__connection: Optional[SSHClient] = None
 
     @property
     def _connection(self):
-        if not self.__connection:
-            self.__connection = self._create_connection()
-        return self.__connection
+        return self.connection_provider.provide(self.host, self.port)
 
     def drop(self):
-        self._reset_connection()
+        self.connection_provider.drop(self.host)
 
     def exec(self, command: str, options: Optional[CommandOptions] = None) -> CommandResult:
         options = options or CommandOptions()
@@ -195,7 +295,7 @@ class SSHShell(Shell):
             socket.timeout,
         ) as exc:
             logger.exception(f"Can't execute command {command} on host: {self.host}")
-            self._reset_connection()
+            self.drop()
             raise HostIsNotAvailable(self.host) from exc
 
     def _read_channels(
@@ -250,57 +350,3 @@ class SSHShell(Shell):
         full_stderr = b"".join(stderr_chunks)
 
         return (full_stdout.decode(errors="ignore"), full_stderr.decode(errors="ignore"))
-
-    def _create_connection(self, attempts: int = SSH_CONNECTION_ATTEMPTS) -> SSHClient:
-        for attempt in range(attempts):
-            connection = SSHClient()
-            connection.set_missing_host_key_policy(AutoAddPolicy())
-            try:
-                if self.private_key_path:
-                    logger.info(
-                        f"Trying to connect to host {self.host} as {self.login} using SSH key "
-                        f"{self.private_key_path} (attempt {attempt})"
-                    )
-                    connection.connect(
-                        hostname=self.host,
-                        port=self.port,
-                        username=self.login,
-                        pkey=_load_private_key(self.private_key_path, self.private_key_passphrase),
-                        timeout=self.CONNECTION_TIMEOUT,
-                    )
-                else:
-                    logger.info(
-                        f"Trying to connect to host {self.host} as {self.login} using password "
-                        f"(attempt {attempt})"
-                    )
-                    connection.connect(
-                        hostname=self.host,
-                        port=self.port,
-                        username=self.login,
-                        password=self.password,
-                        timeout=self.CONNECTION_TIMEOUT,
-                    )
-                return connection
-            except AuthenticationException:
-                connection.close()
-                logger.exception(f"Can't connect to host {self.host}")
-                raise
-            except (
-                SSHException,
-                ssh_exception.NoValidConnectionsError,
-                AttributeError,
-                socket.timeout,
-                OSError,
-            ) as exc:
-                connection.close()
-                can_retry = attempt + 1 < attempts
-                if can_retry:
-                    logger.warn(f"Can't connect to host {self.host}, will retry. Error: {exc}")
-                    continue
-                logger.exception(f"Can't connect to host {self.host}")
-                raise HostIsNotAvailable(self.host) from exc
-
-    def _reset_connection(self) -> None:
-        if self.__connection:
-            self.__connection.close()
-            self.__connection = None

@@ -11,8 +11,9 @@ from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE
 from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG
 from frostfs_testlib.shell import Shell
-from frostfs_testlib.storage.cluster import Cluster
+from frostfs_testlib.storage.cluster import Cluster, ClusterNode
 from frostfs_testlib.utils import json_utils
+from frostfs_testlib.utils.cli_utils import parse_cmd_table, parse_netmap_output
 
 logger = logging.getLogger("NeoLogger")
 reporter = get_reporter()
@@ -183,6 +184,7 @@ def put_object_to_random_node(
     shell: Shell,
     cluster: Cluster,
     bearer: Optional[str] = None,
+    copies_number: Optional[int] = None,
     attributes: Optional[dict] = None,
     xhdr: Optional[dict] = None,
     wallet_config: Optional[str] = None,
@@ -201,6 +203,7 @@ def put_object_to_random_node(
         shell: executor for cli command
         cluster: cluster under test
         bearer: path to Bearer Token file, appends to `--bearer` key
+        copies_number: Number of copies of the object to store within the RPC call
         attributes: User attributes in form of Key1=Value1,Key2=Value2
         cluster: cluster under test
         wallet_config: path to the wallet config
@@ -221,6 +224,7 @@ def put_object_to_random_node(
         shell,
         endpoint,
         bearer,
+        copies_number,
         attributes,
         xhdr,
         wallet_config,
@@ -239,6 +243,7 @@ def put_object(
     shell: Shell,
     endpoint: str,
     bearer: Optional[str] = None,
+    copies_number: Optional[int] = None,
     attributes: Optional[dict] = None,
     xhdr: Optional[dict] = None,
     wallet_config: Optional[str] = None,
@@ -256,6 +261,7 @@ def put_object(
         cid: ID of Container where we get the Object from
         shell: executor for cli command
         bearer: path to Bearer Token file, appends to `--bearer` key
+        copies_number: Number of copies of the object to store within the RPC call
         attributes: User attributes in form of Key1=Value1,Key2=Value2
         endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
         wallet_config: path to the wallet config
@@ -276,6 +282,7 @@ def put_object(
         cid=cid,
         attributes=attributes,
         bearer=bearer,
+        copies_number=copies_number,
         expire_at=expire_at,
         no_progress=no_progress,
         xhdr=xhdr,
@@ -725,3 +732,62 @@ def neo_go_query_height(shell: Shell, endpoint: str) -> dict:
         latest_block[0].replace(":", ""): int(latest_block[1]),
         validated_state[0].replace(":", ""): int(validated_state[1]),
     }
+
+
+@reporter.step_deco("Search object nodes")
+def get_object_nodes(
+    cluster: Cluster,
+    wallet: str,
+    cid: str,
+    oid: str,
+    shell: Shell,
+    endpoint: str,
+    bearer: str = "",
+    xhdr: Optional[dict] = None,
+    is_direct: bool = False,
+    verify_presence_all: bool = False,
+    wallet_config: Optional[str] = None,
+    timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+) -> list[ClusterNode]:
+    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
+
+    result_object_nodes = cli.object.nodes(
+        rpc_endpoint=endpoint,
+        wallet=wallet,
+        cid=cid,
+        oid=oid,
+        bearer=bearer,
+        ttl=1 if is_direct else None,
+        xhdr=xhdr,
+        timeout=timeout,
+        verify_presence_all=verify_presence_all,
+    )
+
+    parsing_output = parse_cmd_table(result_object_nodes.stdout, "|")
+    list_object_nodes = [
+        node
+        for node in parsing_output
+        if node["should_contain_object"] == "true" and node["actually_contains_object"] == "true"
+    ]
+
+    netmap_nodes_list = parse_netmap_output(
+        cli.netmap.snapshot(
+            rpc_endpoint=endpoint,
+            wallet=wallet,
+        ).stdout
+    )
+    netmap_nodes = [
+        netmap_node
+        for object_node in list_object_nodes
+        for netmap_node in netmap_nodes_list
+        if object_node["node_id"] == netmap_node.node_id
+    ]
+
+    result = [
+        cluster_node
+        for netmap_node in netmap_nodes
+        for cluster_node in cluster.cluster_nodes
+        if netmap_node.node == cluster_node.host_ip
+    ]
+
+    return result

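An illustrative sketch of the two additions above in a test (module path, wallet, container ID and endpoint are placeholders, and the parameters of `put_object_to_random_node` before `shell` follow the function's usual order, which is an assumption here): `copies_number` is forwarded to the CLI `object put` call, and `get_object_nodes` maps the CLI table back to `ClusterNode` objects.

```python
# Minimal sketch (placeholder values; `shell` and `cluster` would come from test fixtures).
from frostfs_testlib.steps.cli.object import get_object_nodes, put_object_to_random_node

oid = put_object_to_random_node(
    wallet="/path/to/wallet.json",
    path="/tmp/payload.bin",
    cid="EXAMPLE_CID",
    shell=shell,
    cluster=cluster,
    copies_number=3,  # ask the node to store three copies within the RPC call
)

nodes_with_object = get_object_nodes(
    cluster=cluster,
    wallet="/path/to/wallet.json",
    cid="EXAMPLE_CID",
    oid=oid,
    shell=shell,
    endpoint="grpc://10.0.0.5:8080",  # placeholder RPC endpoint
)
print([node.host_ip for node in nodes_with_object])
```
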
@@ -12,12 +12,13 @@ import requests
 
 from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.resources.common import SIMPLE_OBJECT_SIZE
-from frostfs_testlib.s3.aws_cli_client import LONG_TIMEOUT
+from frostfs_testlib.s3.aws_cli_client import command_options
 from frostfs_testlib.shell import Shell
+from frostfs_testlib.shell.local_shell import LocalShell
 from frostfs_testlib.steps.cli.object import get_object
 from frostfs_testlib.steps.storage_policy import get_nodes_without_object
 from frostfs_testlib.storage.cluster import StorageNode
-from frostfs_testlib.utils.cli_utils import _cmd_run
+from frostfs_testlib.testing.test_control import retry
 from frostfs_testlib.utils.file_utils import get_file_hash
 
 reporter = get_reporter()
@@ -25,6 +26,7 @@ reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")
 
 ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/")
+local_shell = LocalShell()
 
 
 @reporter.step_deco("Get via HTTP Gate")
@@ -32,6 +34,7 @@ def get_via_http_gate(
     cid: str,
     oid: str,
     endpoint: str,
+    http_hostname: str,
     request_path: Optional[str] = None,
     timeout: Optional[int] = 300,
 ):
@@ -40,6 +43,7 @@ def get_via_http_gate(
     cid: container id to get object from
     oid: object ID
     endpoint: http gate endpoint
+    http_hostname: http host name on the node
     request_path: (optional) http request, if ommited - use default [{endpoint}/get/{cid}/{oid}]
     """
 
@@ -49,13 +53,16 @@ def get_via_http_gate(
     else:
         request = f"{endpoint}{request_path}"
 
-    resp = requests.get(request, stream=True, timeout=timeout, verify=False)
+    resp = requests.get(
+        request, headers={"Host": http_hostname}, stream=True, timeout=timeout, verify=False
+    )
 
     if not resp.ok:
         raise Exception(
             f"""Failed to get object via HTTP gate:
                 request: {resp.request.path_url},
                 response: {resp.text},
+                headers: {resp.headers},
                 status code: {resp.status_code} {resp.reason}"""
         )
 
@ -69,12 +76,15 @@ def get_via_http_gate(
|
||||||
|
|
||||||
|
|
||||||
@reporter.step_deco("Get via Zip HTTP Gate")
|
@reporter.step_deco("Get via Zip HTTP Gate")
|
||||||
def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str, timeout: Optional[int] = 300):
|
def get_via_zip_http_gate(
|
||||||
|
cid: str, prefix: str, endpoint: str, http_hostname: str, timeout: Optional[int] = 300
|
||||||
|
):
|
||||||
"""
|
"""
|
||||||
This function gets given object from HTTP gate
|
This function gets given object from HTTP gate
|
||||||
cid: container id to get object from
|
cid: container id to get object from
|
||||||
prefix: common prefix
|
prefix: common prefix
|
||||||
endpoint: http gate endpoint
|
endpoint: http gate endpoint
|
||||||
|
http_hostname: http host name on the node
|
||||||
"""
|
"""
|
||||||
request = f"{endpoint}/zip/{cid}/{prefix}"
|
request = f"{endpoint}/zip/{cid}/{prefix}"
|
||||||
resp = requests.get(request, stream=True, timeout=timeout, verify=False)
|
resp = requests.get(request, stream=True, timeout=timeout, verify=False)
|
||||||
|
@ -84,6 +94,7 @@ def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str, timeout: Optiona
|
||||||
f"""Failed to get object via HTTP gate:
|
f"""Failed to get object via HTTP gate:
|
||||||
request: {resp.request.path_url},
|
request: {resp.request.path_url},
|
||||||
response: {resp.text},
|
response: {resp.text},
|
||||||
|
headers: {resp.headers},
|
||||||
status code: {resp.status_code} {resp.reason}"""
|
status code: {resp.status_code} {resp.reason}"""
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -105,6 +116,7 @@ def get_via_http_gate_by_attribute(
|
||||||
cid: str,
|
cid: str,
|
||||||
attribute: dict,
|
attribute: dict,
|
||||||
endpoint: str,
|
endpoint: str,
|
||||||
|
http_hostname: str,
|
||||||
request_path: Optional[str] = None,
|
request_path: Optional[str] = None,
|
||||||
timeout: Optional[int] = 300,
|
timeout: Optional[int] = 300,
|
||||||
):
|
):
|
||||||
|
@ -113,6 +125,7 @@ def get_via_http_gate_by_attribute(
|
||||||
cid: CID to get object from
|
cid: CID to get object from
|
||||||
attribute: attribute {name: attribute} value pair
|
attribute: attribute {name: attribute} value pair
|
||||||
endpoint: http gate endpoint
|
endpoint: http gate endpoint
|
||||||
|
http_hostname: http host name on the node
|
||||||
request_path: (optional) http request path, if ommited - use default [{endpoint}/get_by_attribute/{Key}/{Value}]
|
request_path: (optional) http request path, if ommited - use default [{endpoint}/get_by_attribute/{Key}/{Value}]
|
||||||
"""
|
"""
|
||||||
attr_name = list(attribute.keys())[0]
|
attr_name = list(attribute.keys())[0]
|
||||||
|
@ -123,13 +136,16 @@ def get_via_http_gate_by_attribute(
|
||||||
else:
|
else:
|
||||||
request = f"{endpoint}{request_path}"
|
request = f"{endpoint}{request_path}"
|
||||||
|
|
||||||
resp = requests.get(request, stream=True, timeout=timeout, verify=False)
|
resp = requests.get(
|
||||||
|
request, stream=True, timeout=timeout, verify=False, headers={"Host": http_hostname}
|
||||||
|
)
|
||||||
|
|
||||||
if not resp.ok:
|
if not resp.ok:
|
||||||
raise Exception(
|
raise Exception(
|
||||||
f"""Failed to get object via HTTP gate:
|
f"""Failed to get object via HTTP gate:
|
||||||
request: {resp.request.path_url},
|
request: {resp.request.path_url},
|
||||||
response: {resp.text},
|
response: {resp.text},
|
||||||
|
headers: {resp.headers},
|
||||||
status code: {resp.status_code} {resp.reason}"""
|
status code: {resp.status_code} {resp.reason}"""
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -142,6 +158,7 @@ def get_via_http_gate_by_attribute(
|
||||||
return file_path
|
return file_path
|
||||||
|
|
||||||
|
|
||||||
|
# TODO: pass http_hostname as a header
|
||||||
@reporter.step_deco("Upload via HTTP Gate")
|
@reporter.step_deco("Upload via HTTP Gate")
|
||||||
def upload_via_http_gate(
|
def upload_via_http_gate(
|
||||||
cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300
|
cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300
|
||||||
|
@ -156,7 +173,9 @@ def upload_via_http_gate(
|
||||||
request = f"{endpoint}/upload/{cid}"
|
request = f"{endpoint}/upload/{cid}"
|
||||||
files = {"upload_file": open(path, "rb")}
|
files = {"upload_file": open(path, "rb")}
|
||||||
body = {"filename": path}
|
body = {"filename": path}
|
||||||
resp = requests.post(request, files=files, data=body, headers=headers, timeout=timeout, verify=False)
|
resp = requests.post(
|
||||||
|
request, files=files, data=body, headers=headers, timeout=timeout, verify=False
|
||||||
|
)
|
||||||
|
|
||||||
if not resp.ok:
|
if not resp.ok:
|
||||||
raise Exception(
|
raise Exception(
|
||||||
|
@ -188,6 +207,7 @@ def is_object_large(filepath: str) -> bool:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
# TODO: pass http_hostname as a header
|
||||||
@reporter.step_deco("Upload via HTTP Gate using Curl")
|
@reporter.step_deco("Upload via HTTP Gate using Curl")
|
||||||
def upload_via_http_gate_curl(
|
def upload_via_http_gate_curl(
|
||||||
cid: str,
|
cid: str,
|
||||||
|
@ -213,16 +233,16 @@ def upload_via_http_gate_curl(
|
||||||
large_object = is_object_large(filepath)
|
large_object = is_object_large(filepath)
|
||||||
if large_object:
|
if large_object:
|
||||||
# pre-clean
|
# pre-clean
|
||||||
_cmd_run("rm pipe -f")
|
local_shell.exec("rm pipe -f")
|
||||||
files = f"file=@pipe;filename={os.path.basename(filepath)}"
|
files = f"file=@pipe;filename={os.path.basename(filepath)}"
|
||||||
cmd = f"mkfifo pipe;cat {filepath} > pipe & curl -k --no-buffer -F '{files}' {attributes} {request}"
|
cmd = f"mkfifo pipe;cat {filepath} > pipe & curl -k --no-buffer -F '{files}' {attributes} {request}"
|
||||||
output = _cmd_run(cmd, LONG_TIMEOUT)
|
output = local_shell.exec(cmd, command_options)
|
||||||
# clean up pipe
|
# clean up pipe
|
||||||
_cmd_run("rm pipe")
|
local_shell.exec("rm pipe")
|
||||||
else:
|
else:
|
||||||
files = f"file=@{filepath};filename={os.path.basename(filepath)}"
|
files = f"file=@{filepath};filename={os.path.basename(filepath)}"
|
||||||
cmd = f"curl -k -F '{files}' {attributes} {request}"
|
cmd = f"curl -k -F '{files}' {attributes} {request}"
|
||||||
output = _cmd_run(cmd)
|
output = local_shell.exec(cmd)
|
||||||
|
|
||||||
if error_pattern:
|
if error_pattern:
|
||||||
match = error_pattern.casefold() in str(output).casefold()
|
match = error_pattern.casefold() in str(output).casefold()
|
||||||
|
@ -235,19 +255,21 @@ def upload_via_http_gate_curl(
|
||||||
return oid_re.group(1)
|
return oid_re.group(1)
|
||||||
|
|
||||||
|
|
||||||
|
@retry(max_attempts=3, sleep_interval=1)
|
||||||
@reporter.step_deco("Get via HTTP Gate using Curl")
|
@reporter.step_deco("Get via HTTP Gate using Curl")
|
||||||
def get_via_http_curl(cid: str, oid: str, endpoint: str) -> str:
|
def get_via_http_curl(cid: str, oid: str, endpoint: str, http_hostname: str) -> str:
|
||||||
"""
|
"""
|
||||||
This function gets given object from HTTP gate using curl utility.
|
This function gets given object from HTTP gate using curl utility.
|
||||||
cid: CID to get object from
|
cid: CID to get object from
|
||||||
oid: object OID
|
oid: object OID
|
||||||
endpoint: http gate endpoint
|
endpoint: http gate endpoint
|
||||||
|
http_hostname: http host name of the node
|
||||||
"""
|
"""
|
||||||
request = f"{endpoint}/get/{cid}/{oid}"
|
request = f"{endpoint}/get/{cid}/{oid}"
|
||||||
file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}")
|
file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}")
|
||||||
|
|
||||||
cmd = f"curl -k {request} > {file_path}"
|
cmd = f'curl -k -H "Host: {http_hostname}" {request} > {file_path}'
|
||||||
_cmd_run(cmd)
|
local_shell.exec(cmd)
|
||||||
|
|
||||||
return file_path
|
return file_path
|
||||||
|
|
||||||
|
@ -260,10 +282,14 @@ def _attach_allure_step(request: str, status_code: int, req_type="GET"):
|
||||||
|
|
||||||
@reporter.step_deco("Try to get object and expect error")
|
@reporter.step_deco("Try to get object and expect error")
|
||||||
def try_to_get_object_and_expect_error(
|
def try_to_get_object_and_expect_error(
|
||||||
cid: str, oid: str, error_pattern: str, endpoint: str
|
cid: str,
|
||||||
|
oid: str,
|
||||||
|
error_pattern: str,
|
||||||
|
endpoint: str,
|
||||||
|
http_hostname: str,
|
||||||
) -> None:
|
) -> None:
|
||||||
try:
|
try:
|
||||||
get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint)
|
get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname)
|
||||||
raise AssertionError(f"Expected error on getting object with cid: {cid}")
|
raise AssertionError(f"Expected error on getting object with cid: {cid}")
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
match = error_pattern.casefold() in str(err).casefold()
|
match = error_pattern.casefold() in str(err).casefold()
|
||||||
|
@ -272,11 +298,18 @@ def try_to_get_object_and_expect_error(
|
||||||
|
|
||||||
@reporter.step_deco("Verify object can be get using HTTP header attribute")
|
@reporter.step_deco("Verify object can be get using HTTP header attribute")
|
||||||
def get_object_by_attr_and_verify_hashes(
|
def get_object_by_attr_and_verify_hashes(
|
||||||
oid: str, file_name: str, cid: str, attrs: dict, endpoint: str
|
oid: str,
|
||||||
|
file_name: str,
|
||||||
|
cid: str,
|
||||||
|
attrs: dict,
|
||||||
|
endpoint: str,
|
||||||
|
http_hostname: str,
|
||||||
) -> None:
|
) -> None:
|
||||||
got_file_path_http = get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint)
|
got_file_path_http = get_via_http_gate(
|
||||||
|
cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname
|
||||||
|
)
|
||||||
got_file_path_http_attr = get_via_http_gate_by_attribute(
|
got_file_path_http_attr = get_via_http_gate_by_attribute(
|
||||||
cid=cid, attribute=attrs, endpoint=endpoint
|
cid=cid, attribute=attrs, endpoint=endpoint, http_hostname=http_hostname
|
||||||
)
|
)
|
||||||
assert_hashes_are_equal(file_name, got_file_path_http, got_file_path_http_attr)
|
assert_hashes_are_equal(file_name, got_file_path_http, got_file_path_http_attr)
|
||||||
|
|
||||||
|
@ -289,6 +322,7 @@ def verify_object_hash(
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
nodes: list[StorageNode],
|
nodes: list[StorageNode],
|
||||||
endpoint: str,
|
endpoint: str,
|
||||||
|
http_hostname: str,
|
||||||
object_getter=None,
|
object_getter=None,
|
||||||
) -> None:
|
) -> None:
|
||||||
|
|
||||||
|
@ -314,7 +348,9 @@ def verify_object_hash(
|
||||||
shell=shell,
|
shell=shell,
|
||||||
endpoint=random_node.get_rpc_endpoint(),
|
endpoint=random_node.get_rpc_endpoint(),
|
||||||
)
|
)
|
||||||
got_file_path_http = object_getter(cid=cid, oid=oid, endpoint=endpoint)
|
got_file_path_http = object_getter(
|
||||||
|
cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname
|
||||||
|
)
|
||||||
|
|
||||||
assert_hashes_are_equal(file_name, got_file_path, got_file_path_http)
|
assert_hashes_are_equal(file_name, got_file_path, got_file_path_http)
|
||||||
|
|
||||||
|
@ -352,14 +388,25 @@ def try_to_get_object_via_passed_request_and_expect_error(
|
||||||
error_pattern: str,
|
error_pattern: str,
|
||||||
endpoint: str,
|
endpoint: str,
|
||||||
http_request_path: str,
|
http_request_path: str,
|
||||||
|
http_hostname: str,
|
||||||
attrs: Optional[dict] = None,
|
attrs: Optional[dict] = None,
|
||||||
) -> None:
|
) -> None:
|
||||||
try:
|
try:
|
||||||
if attrs is None:
|
if attrs is None:
|
||||||
get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, request_path=http_request_path)
|
get_via_http_gate(
|
||||||
|
cid=cid,
|
||||||
|
oid=oid,
|
||||||
|
endpoint=endpoint,
|
||||||
|
request_path=http_request_path,
|
||||||
|
http_hostname=http_hostname,
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
get_via_http_gate_by_attribute(
|
get_via_http_gate_by_attribute(
|
||||||
cid=cid, attribute=attrs, endpoint=endpoint, request_path=http_request_path
|
cid=cid,
|
||||||
|
attribute=attrs,
|
||||||
|
endpoint=endpoint,
|
||||||
|
request_path=http_request_path,
|
||||||
|
http_hostname=http_hostname,
|
||||||
)
|
)
|
||||||
raise AssertionError(f"Expected error on getting object with cid: {cid}")
|
raise AssertionError(f"Expected error on getting object with cid: {cid}")
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
|
|
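
The refactored gateway helpers now take the gateway's virtual host in addition to its endpoint, because requests are sent with an explicit Host header. A minimal usage sketch, assuming a populated Cluster fixture and an already uploaded object; the import path and fixture names are illustrative, not confirmed by this diff:

    # Import path as assumed for this compare; adjust to the actual module location.
    from frostfs_testlib.steps.http.http_gate import get_via_http_gate

    downloaded = get_via_http_gate(
        cid=cid,
        oid=oid,
        endpoint=cluster.default_http_gate_endpoint,
        http_hostname=cluster.default_http_hostname,  # new required argument
    )
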
src/frostfs_testlib/steps/network.py (new file, 89 lines)
@@ -0,0 +1,89 @@
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.testing.test_control import retry

reporter = get_reporter()


class IpTablesHelper:
    @staticmethod
    def drop_input_traffic_to_port(node: ClusterNode, ports: list[str]) -> None:
        shell = node.host.get_shell()
        for port in ports:
            shell.exec(f"iptables -A INPUT -p tcp --dport {port} -j DROP")

    @staticmethod
    def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[str]) -> None:
        shell = node.host.get_shell()
        for ip in block_ip:
            shell.exec(f"iptables -A INPUT -s {ip} -j DROP")

    @staticmethod
    def restore_input_traffic_to_port(node: ClusterNode) -> None:
        shell = node.host.get_shell()
        ports = (
            shell.exec("iptables -L --numeric | grep DROP | awk '{print $7}'")
            .stdout.strip()
            .split("\n")
        )
        if ports[0] == "":
            return
        for port in ports:
            shell.exec(f"iptables -D INPUT -p tcp --dport {port.split(':')[-1]} -j DROP")

    @staticmethod
    def restore_input_traffic_to_node(node: ClusterNode) -> None:
        shell = node.host.get_shell()
        unlock_ip = (
            shell.exec("iptables -L --numeric | grep DROP | awk '{print $4}'")
            .stdout.strip()
            .split("\n")
        )
        if unlock_ip[0] == "":
            return
        for ip in unlock_ip:
            shell.exec(f"iptables -D INPUT -s {ip} -j DROP")


# TODO Move class to HOST
class IfUpDownHelper:
    @reporter.step_deco("Down {interface} to {node}")
    def down_interface(self, node: ClusterNode, interface: str) -> None:
        shell = node.host.get_shell()
        shell.exec(f"ifdown {interface}")

    @reporter.step_deco("Up {interface} to {node}")
    def up_interface(self, node: ClusterNode, interface: str) -> None:
        shell = node.host.get_shell()
        shell.exec(f"ifup {interface}")

    @reporter.step_deco("Up all interface to {node}")
    def up_all_interface(self, node: ClusterNode) -> None:
        shell = node.host.get_shell()
        interfaces = list(node.host.config.interfaces.keys())
        shell.exec("ifup -av")
        for name_interface in interfaces:
            self.check_state_up(node, name_interface)

    @reporter.step_deco("Down all interface to {node}")
    def down_all_interface(self, node: ClusterNode) -> None:
        shell = node.host.get_shell()
        interfaces = list(node.host.config.interfaces.keys())
        shell.exec("ifdown -av")
        for name_interface in interfaces:
            self.check_state_down(node, name_interface)

    @reporter.step_deco("Check {node} to {interface}")
    def check_state(self, node: ClusterNode, interface: str) -> str:
        shell = node.host.get_shell()
        return shell.exec(
            f"ip link show {interface} | sed -z 's/.*state \(.*\) mode .*/\\1/'"
        ).stdout.strip()

    @retry(max_attempts=5, sleep_interval=5, expected_result="UP")
    def check_state_up(self, node: ClusterNode, interface: str) -> str:
        return self.check_state(node=node, interface=interface)

    @retry(max_attempts=5, sleep_interval=5, expected_result="DOWN")
    def check_state_down(self, node: ClusterNode, interface: str) -> str:
        return self.check_state(node=node, interface=interface)
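
Both helpers operate through the node's host shell. A minimal sketch of blocking and then restoring inbound traffic for one node; the IP address and the cluster fixture are illustrative:

    from frostfs_testlib.steps.network import IpTablesHelper

    node = cluster.cluster_nodes[0]

    IpTablesHelper.drop_input_traffic_to_node(node, block_ip=["192.168.1.2"])
    # ... exercise the failover scenario while the node is unreachable ...
    IpTablesHelper.restore_input_traffic_to_node(node)
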
@@ -15,7 +15,7 @@ from frostfs_testlib.resources.cli import (
 )
 from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
 from frostfs_testlib.shell import Shell
-from frostfs_testlib.steps.epoch import tick_epoch
+from frostfs_testlib.steps.epoch import tick_epoch, wait_for_epochs_align
 from frostfs_testlib.storage.cluster import Cluster, StorageNode
 from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
 from frostfs_testlib.utils import datetime_utils
@@ -40,44 +40,6 @@ class HealthStatus:
         return HealthStatus(network, health)
 
 
-@reporter.step_deco("Stop random storage nodes")
-def stop_random_storage_nodes(number: int, nodes: list[StorageNode]) -> list[StorageNode]:
-    """
-    Shuts down the given number of randomly selected storage nodes.
-    Args:
-        number: the number of storage nodes to stop
-        nodes: the list of storage nodes to stop
-    Returns:
-        the list of nodes that were stopped
-    """
-    nodes_to_stop = random.sample(nodes, number)
-    for node in nodes_to_stop:
-        node.stop_service()
-    return nodes_to_stop
-
-
-@reporter.step_deco("Start storage node")
-def start_storage_nodes(nodes: list[StorageNode]) -> None:
-    """
-    The function starts specified storage nodes.
-    Args:
-       nodes: the list of nodes to start
-    """
-    for node in nodes:
-        node.start_service()
-
-
-@reporter.step_deco("Stop storage node")
-def stop_storage_nodes(nodes: list[StorageNode]) -> None:
-    """
-    The function starts specified storage nodes.
-    Args:
-       nodes: the list of nodes to start
-    """
-    for node in nodes:
-        node.stop_service()
-
-
 @reporter.step_deco("Get Locode from random storage node")
 def get_locode_from_random_node(cluster: Cluster) -> str:
     node = random.choice(cluster.services(StorageNode))
@@ -189,6 +151,7 @@ def exclude_node_from_network_map(
 
     time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
     tick_epoch(shell, cluster)
+    wait_for_epochs_align(shell, cluster)
 
     snapshot = get_netmap_snapshot(node=alive_node, shell=shell)
     assert (
@@ -327,25 +290,3 @@ def _run_control_command(node: StorageNode, command: str) -> None:
         f"--wallet {wallet_path} --config {wallet_config_path}"
     )
     return result.stdout
-
-
-@reporter.step_deco("Start services s3gate ")
-def start_s3gates(cluster: Cluster) -> None:
-    """
-    The function starts specified storage nodes.
-    Args:
-       cluster: cluster instance under test
-    """
-    for gate in cluster.services(S3Gate):
-        gate.start_service()
-
-
-@reporter.step_deco("Stop services s3gate ")
-def stop_s3gates(cluster: Cluster) -> None:
-    """
-    The function starts specified storage nodes.
-    Args:
-       cluster: cluster instance under test
-    """
-    for gate in cluster.services(S3Gate):
-        gate.stop_service()
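
With the thin start/stop wrappers removed, the same behaviour can be reached directly on the service objects (or via ClusterStateController). A sketch of the direct replacement for the deleted helpers, using only the calls those helpers made; the cluster fixture and node count are illustrative:

    import random

    # Replacement for stop_random_storage_nodes(2, ...): pick services and stop them directly.
    nodes_to_stop = random.sample(cluster.services(StorageNode), 2)
    for node in nodes_to_stop:
        node.stop_service()

    # Replacement for start_storage_nodes(...): start the same services again.
    for node in nodes_to_stop:
        node.start_service()
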
@@ -13,7 +13,7 @@ from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.resources.cli import NEOGO_EXECUTABLE
 from frostfs_testlib.resources.common import FROSTFS_CONTRACT, GAS_HASH, MORPH_BLOCK_TIME
 from frostfs_testlib.shell import Shell
-from frostfs_testlib.storage.dataclasses.frostfs_services import MainChain, MorphChain
+from frostfs_testlib.storage.dataclasses.frostfs_services import MorphChain
 from frostfs_testlib.utils import converting_utils, datetime_utils, wallet_utils
 
 reporter = get_reporter()
@@ -21,10 +21,8 @@ logger = logging.getLogger("NeoLogger")
 
 EMPTY_PASSWORD = ""
 TX_PERSIST_TIMEOUT = 15  # seconds
-ASSET_POWER_MAINCHAIN = 10**8
 ASSET_POWER_SIDECHAIN = 10**12
 
 
 def get_nns_contract_hash(morph_chain: MorphChain) -> str:
     return morph_chain.rpc_client.get_contract_state(1)["hash"]
@@ -41,33 +39,7 @@ def get_contract_hash(morph_chain: MorphChain, resolve_name: str, shell: Shell)
     stack_data = json.loads(out.stdout.replace("\n", ""))["stack"][0]["value"]
     return bytes.decode(base64.b64decode(stack_data[0]["value"]))
 
-
-@reporter.step_deco("Withdraw Mainnet Gas")
-def withdraw_mainnet_gas(shell: Shell, main_chain: MainChain, wlt: str, amount: int):
-    address = wallet_utils.get_last_address_from_wallet(wlt, EMPTY_PASSWORD)
-    scripthash = neo3_utils.address_to_script_hash(address)
-
-    neogo = NeoGo(shell=shell, neo_go_exec_path=NEOGO_EXECUTABLE)
-    out = neogo.contract.invokefunction(
-        wallet=wlt,
-        address=address,
-        rpc_endpoint=main_chain.get_endpoint(),
-        scripthash=FROSTFS_CONTRACT,
-        method="withdraw",
-        arguments=f"{scripthash} int:{amount}",
-        multisig_hash=f"{scripthash}:Global",
-        wallet_password="",
-    )
-
-    m = re.match(r"^Sent invocation transaction (\w{64})$", out.stdout)
-    if m is None:
-        raise Exception("Can not get Tx.")
-    tx = m.group(1)
-    if not transaction_accepted(main_chain, tx):
-        raise AssertionError(f"TX {tx} hasn't been processed")
-
-
-def transaction_accepted(main_chain: MainChain, tx_id: str):
+def transaction_accepted(morph_chain: MorphChain, tx_id: str):
     """
     This function returns True in case of accepted TX.
     Args:
@@ -79,8 +51,8 @@ def transaction_accepted(main_chain: MainChain, tx_id: str):
     try:
         for _ in range(0, TX_PERSIST_TIMEOUT):
             time.sleep(1)
-            neogo = NeoGo(shell=main_chain.host.get_shell(), neo_go_exec_path=NEOGO_EXECUTABLE)
-            resp = neogo.query.tx(tx_hash=tx_id, rpc_endpoint=main_chain.get_endpoint())
+            neogo = NeoGo(shell=morph_chain.host.get_shell(), neo_go_exec_path=NEOGO_EXECUTABLE)
+            resp = neogo.query.tx(tx_hash=tx_id, rpc_endpoint=morph_chain.get_endpoint())
             if resp is not None:
                 logger.info(f"TX is accepted in block: {resp}")
                 return True, resp
@@ -110,12 +82,11 @@ def get_balance(shell: Shell, morph_chain: MorphChain, wallet_path: str, wallet_
         logger.error(f"failed to get wallet balance: {out}")
         raise out
 
 
 @reporter.step_deco("Transfer Gas")
 def transfer_gas(
     shell: Shell,
     amount: int,
-    main_chain: MainChain,
+    morph_chain: MorphChain,
     wallet_from_path: Optional[str] = None,
     wallet_from_password: Optional[str] = None,
     address_from: Optional[str] = None,
@@ -138,11 +109,11 @@ def transfer_gas(
         address_to: The address of the wallet to transfer assets to.
         amount: Amount of gas to transfer.
     """
-    wallet_from_path = wallet_from_path or main_chain.get_wallet_path()
+    wallet_from_path = wallet_from_path or morph_chain.get_wallet_path()
     wallet_from_password = (
         wallet_from_password
         if wallet_from_password is not None
-        else main_chain.get_wallet_password()
+        else morph_chain.get_wallet_password()
     )
     address_from = address_from or wallet_utils.get_last_address_from_wallet(
         wallet_from_path, wallet_from_password
@@ -153,7 +124,7 @@ def transfer_gas(
 
     neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE)
     out = neogo.nep17.transfer(
-        rpc_endpoint=main_chain.get_endpoint(),
+        rpc_endpoint=morph_chain.get_endpoint(),
         wallet=wallet_from_path,
         wallet_password=wallet_from_password,
         amount=amount,
@@ -165,49 +136,11 @@ def transfer_gas(
     txid = out.stdout.strip().split("\n")[-1]
     if len(txid) != 64:
         raise Exception("Got no TXID after run the command")
-    if not transaction_accepted(main_chain, txid):
+    if not transaction_accepted(morph_chain, txid):
         raise AssertionError(f"TX {txid} hasn't been processed")
     time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
 
 
-@reporter.step_deco("FrostFS Deposit")
-def deposit_gas(
-    shell: Shell,
-    main_chain: MainChain,
-    amount: int,
-    wallet_from_path: str,
-    wallet_from_password: str,
-):
-    """
-    Transferring GAS from given wallet to FrostFS contract address.
-    """
-    # get FrostFS contract address
-    deposit_addr = converting_utils.contract_hash_to_address(FROSTFS_CONTRACT)
-    logger.info(f"FrostFS contract address: {deposit_addr}")
-    address_from = wallet_utils.get_last_address_from_wallet(
-        wallet_path=wallet_from_path, wallet_password=wallet_from_password
-    )
-    transfer_gas(
-        shell=shell,
-        main_chain=main_chain,
-        amount=amount,
-        wallet_from_path=wallet_from_path,
-        wallet_from_password=wallet_from_password,
-        address_to=deposit_addr,
-        address_from=address_from,
-    )
-
-
-@reporter.step_deco("Get Mainnet Balance")
-def get_mainnet_balance(main_chain: MainChain, address: str):
-    resp = main_chain.rpc_client.get_nep17_balances(address=address)
-    logger.info(f"Got getnep17balances response: {resp}")
-    for balance in resp["balance"]:
-        if balance["assethash"] == GAS_HASH:
-            return float(balance["amount"]) / ASSET_POWER_MAINCHAIN
-    return float(0)
-
-
 @reporter.step_deco("Get Sidechain Balance")
 def get_sidechain_balance(morph_chain: MorphChain, address: str):
     resp = morph_chain.rpc_client.get_nep17_balances(address=address)
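
After dropping the main chain, gas transfers go against the morph chain endpoint. A hedged sketch of a call with the renamed parameter; the module path, destination address, and amount are illustrative, while address_to itself is taken from the docstring shown above:

    from frostfs_testlib.steps.payment_neogo import transfer_gas  # assumed module path

    transfer_gas(
        shell=shell,
        amount=1_000,
        morph_chain=cluster.morph_chain[0],  # renamed from main_chain
        address_to=deposit_address,          # illustrative destination address
    )
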
@@ -67,6 +67,9 @@ def try_to_get_objects_and_expect_error(
 
 @reporter.step_deco("Set versioning status to '{status}' for bucket '{bucket}'")
 def set_bucket_versioning(s3_client: S3ClientWrapper, bucket: str, status: VersioningStatus):
+    if status == VersioningStatus.UNDEFINED:
+        return
+
     s3_client.get_bucket_versioning_status(bucket)
     s3_client.put_bucket_versioning(bucket, status=status)
     bucket_status = s3_client.get_bucket_versioning_status(bucket)
@@ -191,6 +194,7 @@ def init_s3_credentials(
     cluster: Cluster,
     policy: Optional[dict] = None,
     s3gates: Optional[list[S3Gate]] = None,
+    container_placement_policy: Optional[str] = None,
 ):
     gate_public_keys = []
     bucket = str(uuid.uuid4())
@@ -206,6 +210,7 @@ def init_s3_credentials(
         wallet_password=wallet.password,
         container_policy=policy,
         container_friendly_name=bucket,
+        container_placement_policy=container_placement_policy,
     ).stdout
     aws_access_key_id = str(
         re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group(
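
The new guard makes VersioningStatus.UNDEFINED an explicit no-op, so parametrized tests can pass a single value through without branching. A short sketch; enum members other than UNDEFINED are assumed from the existing wrapper:

    # No-op: bucket versioning configuration is left untouched.
    set_bucket_versioning(s3_client, bucket, VersioningStatus.UNDEFINED)

    # Regular path: the status is applied and read back, as before this change.
    set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
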
@@ -14,6 +14,7 @@ from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
+from frostfs_testlib.testing.readable import HumanReadableEnum
 from frostfs_testlib.utils import json_utils, wallet_utils
 
 reporter = get_reporter()
@@ -26,7 +27,7 @@ WRONG_VERB = "wrong verb of the session"
 INVALID_SIGNATURE = "invalid signature of the session data"
 
 
-class ObjectVerb(Enum):
+class ObjectVerb(HumanReadableEnum):
     PUT = "PUT"
     DELETE = "DELETE"
     GET = "GET"
@@ -36,7 +37,7 @@ class ObjectVerb(Enum):
     SEARCH = "SEARCH"
 
 
-class ContainerVerb(Enum):
+class ContainerVerb(HumanReadableEnum):
     CREATE = "PUT"
     DELETE = "DELETE"
     SETEACL = "SETEACL"
@@ -2,7 +2,6 @@ from frostfs_testlib.storage.constants import _FrostfsServicesNames
 from frostfs_testlib.storage.dataclasses.frostfs_services import (
     HTTPGate,
     InnerRing,
-    MainChain,
     MorphChain,
     S3Gate,
     StorageNode,
@@ -17,8 +16,6 @@ __class_registry.register_service(_FrostfsServicesNames.INNER_RING, InnerRing)
 __class_registry.register_service(_FrostfsServicesNames.MORPH_CHAIN, MorphChain)
 __class_registry.register_service(_FrostfsServicesNames.S3_GATE, S3Gate)
 __class_registry.register_service(_FrostfsServicesNames.HTTP_GATE, HTTPGate)
-# # TODO: Remove this since we are no longer have main chain
-__class_registry.register_service(_FrostfsServicesNames.MAIN_CHAIN, MainChain)
 
 
 def get_service_registry() -> ServiceRegistry:
@@ -17,6 +17,7 @@ from frostfs_testlib.storage.dataclasses.frostfs_services import (
     StorageNode,
 )
 from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass
+from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces
 from frostfs_testlib.storage.service_registry import ServiceRegistry
 
 reporter = get_reporter()
@@ -121,6 +122,40 @@ class ClusterNode:
             config.attributes[ConfigAttributes.SERVICE_NAME] for config in self.host.config.services
         ]
 
+    def get_all_interfaces(self) -> dict[str, str]:
+        return self.host.config.interfaces
+
+    def get_interface(self, interface: Interfaces) -> str:
+        return self.host.config.interfaces[interface.value]
+
+    def get_data_interfaces(self) -> list[str]:
+        return [
+            ip_address
+            for name_interface, ip_address in self.host.config.interfaces.items()
+            if "data" in name_interface
+        ]
+
+    def get_data_interface(self, search_interface: str) -> list[str]:
+        return [
+            self.host.config.interfaces[interface]
+            for interface in self.host.config.interfaces.keys()
+            if search_interface == interface
+        ]
+
+    def get_internal_interfaces(self) -> list[str]:
+        return [
+            ip_address
+            for name_interface, ip_address in self.host.config.interfaces.items()
+            if "internal" in name_interface
+        ]
+
+    def get_internal_interface(self, search_internal: str) -> list[str]:
+        return [
+            self.host.config.interfaces[interface]
+            for interface in self.host.config.interfaces.keys()
+            if search_internal == interface
+        ]
+
 
 class Cluster:
     """
@@ -130,6 +165,8 @@ class Cluster:
     default_rpc_endpoint: str
     default_s3_gate_endpoint: str
     default_http_gate_endpoint: str
+    default_http_hostname: str
+    default_s3_hostname: str
 
     def __init__(self, hosting: Hosting) -> None:
         self._hosting = hosting
@@ -138,6 +175,8 @@ class Cluster:
         self.default_rpc_endpoint = self.services(StorageNode)[0].get_rpc_endpoint()
         self.default_s3_gate_endpoint = self.services(S3Gate)[0].get_endpoint()
         self.default_http_gate_endpoint = self.services(HTTPGate)[0].get_endpoint()
+        self.default_http_hostname = self.services(StorageNode)[0].get_http_hostname()
+        self.default_s3_hostname = self.services(StorageNode)[0].get_s3_hostname()
 
     @property
     def hosts(self) -> list[Host]:
@@ -169,6 +208,42 @@ class Cluster:
     def morph_chain(self) -> list[MorphChain]:
         return self.services(MorphChain)
 
+    def nodes(self, services: list[ServiceClass]) -> list[ClusterNode]:
+        """
+        Resolve which cluster nodes hosting the specified services.
+
+        Args:
+            services: list of services to resolve hosting cluster nodes.
+
+        Returns:
+            list of cluster nodes which host specified services.
+        """
+
+        cluster_nodes = set()
+        for service in services:
+            cluster_nodes.update(
+                [node for node in self.cluster_nodes if node.service(type(service)) == service]
+            )
+
+        return list(cluster_nodes)
+
+    def node(self, service: ServiceClass) -> ClusterNode:
+        """
+        Resolve single cluster node hosting the specified service.
+
+        Args:
+            services: list of services to resolve hosting cluster nodes.
+
+        Returns:
+            list of cluster nodes which host specified services.
+        """
+
+        nodes = [node for node in self.cluster_nodes if node.service(type(service)) == service]
+        if not len(nodes):
+            raise RuntimeError(f"Cannot find service {service} on any node")
+
+        return nodes[0]
+
     def services(self, service_type: type[ServiceClass]) -> list[ServiceClass]:
         """
         Get all services in a cluster of specified type.
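
The new resolution helpers map service instances back to the ClusterNode that hosts them. A short sketch, assuming a populated Cluster fixture; variable names are illustrative:

    storage = cluster.services(StorageNode)[0]

    node_with_storage = cluster.node(storage)                    # single ClusterNode, raises if not found
    nodes_with_gates = cluster.nodes(cluster.services(S3Gate))   # de-duplicated list of hosting nodes

    # Interface helpers added to ClusterNode in the same change:
    data_ips = node_with_storage.get_data_interfaces()           # interfaces whose name contains "data"
    internal_ips = node_with_storage.get_internal_interfaces()   # interfaces whose name contains "internal"
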
@@ -4,13 +4,17 @@ class ConfigAttributes:
     WALLET_PATH = "wallet_path"
     WALLET_CONFIG = "wallet_config"
     CONFIG_PATH = "config_path"
+    SHARD_CONFIG_PATH = "shard_config_path"
     LOCAL_WALLET_PATH = "local_wallet_path"
     LOCAL_WALLET_CONFIG = "local_config_path"
     ENDPOINT_DATA_0 = "endpoint_data0"
     ENDPOINT_DATA_1 = "endpoint_data1"
     ENDPOINT_INTERNAL = "endpoint_internal0"
+    ENDPOINT_PROMETHEUS = "endpoint_prometheus"
     CONTROL_ENDPOINT = "control_endpoint"
     UN_LOCODE = "un_locode"
+    HTTP_HOSTNAME = "http_hostname"
+    S3_HOSTNAME = "s3_hostname"
 
 
 class _FrostfsServicesNames:
@@ -19,4 +23,3 @@ class _FrostfsServicesNames:
     HTTP_GATE = "http-gate"
     MORPH_CHAIN = "morph-chain"
     INNER_RING = "ir"
-    MAIN_CHAIN = "main-chain"
@@ -25,6 +25,7 @@ class BackgroundLoadController:
     load_params: LoadParams
     original_load_params: LoadParams
     verification_params: LoadParams
+    cluster_nodes: list[ClusterNode]
     nodes_under_load: list[ClusterNode]
     load_counter: int
     loaders_wallet: WalletInfo
@@ -38,12 +39,14 @@ class BackgroundLoadController:
         k6_dir: str,
         load_params: LoadParams,
         loaders_wallet: WalletInfo,
+        cluster_nodes: list[ClusterNode],
         nodes_under_load: list[ClusterNode],
         runner: ScenarioRunner,
     ) -> None:
         self.k6_dir = k6_dir
         self.original_load_params = load_params
         self.load_params = copy.deepcopy(self.original_load_params)
+        self.cluster_nodes = cluster_nodes
         self.nodes_under_load = nodes_under_load
         self.load_counter = 1
         self.loaders_wallet = loaders_wallet
@@ -52,10 +55,6 @@ class BackgroundLoadController:
         if load_params.endpoint_selection_strategy is None:
             raise RuntimeError("endpoint_selection_strategy should not be None")
 
-        self.endpoints = self._get_endpoints(
-            load_params.load_type, load_params.endpoint_selection_strategy
-        )
-
     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED, [])
     def _get_endpoints(
         self, load_type: LoadType, endpoint_selection_strategy: EndpointSelectionStrategy
@@ -80,17 +79,14 @@ class BackgroundLoadController:
             LoadType.S3: {
                 EndpointSelectionStrategy.ALL: list(
                     set(
-                        endpoint.replace("http://", "").replace("https://", "")
+                        endpoint
                         for node_under_load in self.nodes_under_load
                         for endpoint in node_under_load.service(S3Gate).get_all_endpoints()
                     )
                 ),
                 EndpointSelectionStrategy.FIRST: list(
                     set(
-                        node_under_load.service(S3Gate)
-                        .get_endpoint()
-                        .replace("http://", "")
-                        .replace("https://", "")
+                        node_under_load.service(S3Gate).get_endpoint()
                         for node_under_load in self.nodes_under_load
                     )
                 ),
@@ -102,7 +98,12 @@ class BackgroundLoadController:
     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
     @reporter.step_deco("Prepare load instances")
     def prepare(self):
-        self.runner.prepare(self.load_params, self.nodes_under_load, self.k6_dir)
+        self.endpoints = self._get_endpoints(
+            self.load_params.load_type, self.load_params.endpoint_selection_strategy
+        )
+        self.runner.prepare(
+            self.load_params, self.cluster_nodes, self.nodes_under_load, self.k6_dir
+        )
         self.runner.init_k6_instances(self.load_params, self.endpoints, self.k6_dir)
 
     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@@ -154,54 +155,65 @@ class BackgroundLoadController:
         load_report.add_summaries(self.load_summaries)
 
     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
-    @reporter.step_deco("Verify results of load")
+    @reporter.step_deco("Run post-load verification")
     def verify(self):
         try:
+            load_issues = self._collect_load_issues()
             if self.load_params.verify:
-                self.verification_params = LoadParams(
-                    verify_clients=self.load_params.verify_clients,
-                    scenario=LoadScenario.VERIFY,
-                    registry_file=self.load_params.registry_file,
-                    verify_time=self.load_params.verify_time,
-                    load_type=self.load_params.load_type,
-                    load_id=self.load_params.load_id,
-                    working_dir=self.load_params.working_dir,
-                    endpoint_selection_strategy=self.load_params.endpoint_selection_strategy,
-                    k6_process_allocation_strategy=self.load_params.k6_process_allocation_strategy,
-                    setup_timeout="1s",
-                )
-                self._run_verify_scenario()
-                verification_summaries = self._get_results()
-                self.verify_summaries(self.load_summaries, verification_summaries)
+                load_issues.extend(self._run_verify_scenario())
+
+            assert not load_issues, "\n".join(load_issues)
         finally:
             self._reset_for_consequent_load()
 
     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
-    @reporter.step_deco("Verify summaries from k6")
-    def verify_summaries(self, load_summaries: dict, verification_summaries: dict):
+    @reporter.step_deco("Collect load issues")
+    def _collect_load_issues(self):
         verifier = LoadVerifier(self.load_params)
-        for node_or_endpoint in load_summaries:
-            with reporter.step(f"Verify load summaries for {node_or_endpoint}"):
-                verifier.verify_summaries(
-                    load_summaries[node_or_endpoint], verification_summaries[node_or_endpoint]
-                )
+        return verifier.collect_load_issues(self.load_summaries)
 
     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
-    def wait_until_finish(self):
-        self.runner.wait_until_finish()
+    def wait_until_finish(self, soft_timeout: int = 0):
+        self.runner.wait_until_finish(soft_timeout)
 
     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
-    @reporter.step_deco("Run verify scenario")
-    def _run_verify_scenario(self):
+    @reporter.step_deco("Verify loaded objects")
+    def _run_verify_scenario(self) -> list[str]:
+        self.verification_params = LoadParams(
+            verify_clients=self.load_params.verify_clients,
+            scenario=LoadScenario.VERIFY,
+            read_from=self.load_params.read_from,
+            registry_file=self.load_params.registry_file,
+            verify_time=self.load_params.verify_time,
+            load_type=self.load_params.load_type,
+            load_id=self.load_params.load_id,
+            vu_init_time=0,
+            working_dir=self.load_params.working_dir,
+            endpoint_selection_strategy=self.load_params.endpoint_selection_strategy,
+            k6_process_allocation_strategy=self.load_params.k6_process_allocation_strategy,
+            setup_timeout="1s",
+        )
+
         if self.verification_params.verify_time is None:
             raise RuntimeError("verify_time should not be none")
 
         self.runner.init_k6_instances(self.verification_params, self.endpoints, self.k6_dir)
-        with reporter.step("Run verify load data"):
+        with reporter.step("Run verify scenario"):
             self.runner.start()
             self.runner.wait_until_finish()
 
+        with reporter.step("Collect verify issues"):
+            verification_summaries = self._get_results()
+            verifier = LoadVerifier(self.load_params)
+            return verifier.collect_verify_issues(self.load_summaries, verification_summaries)
+
     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
-    @reporter.step_deco("Get load results")
     def _get_results(self) -> dict:
-        return self.runner.get_results()
+        with reporter.step(f"Get {self.load_params.scenario.value} scenario results"):
+            return self.runner.get_results()
+
+    def __str__(self) -> str:
+        return self.load_params.__str__()
+
+    def __repr__(self) -> str:
+        return repr(self.load_params)
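
With endpoint resolution moved from the constructor into prepare() and verification reduced to collecting issue lists, the controller lifecycle now looks roughly as follows. This is a sketch: start() and the fixture names are assumptions, only the pieces visible in the diff above are confirmed:

    controller = BackgroundLoadController(
        k6_dir=k6_dir,
        load_params=load_params,
        loaders_wallet=loaders_wallet,
        cluster_nodes=cluster.cluster_nodes,   # new constructor argument
        nodes_under_load=nodes_under_load,
        runner=runner,
    )
    controller.prepare()                       # endpoints are resolved here now, not in __init__
    controller.start()                         # assumed to exist unchanged by this compare
    controller.wait_until_finish(soft_timeout=0)
    controller.verify()                        # asserts on the collected load/verify issues
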
@@ -1,12 +1,15 @@
+import datetime
 import time
-from concurrent.futures import ThreadPoolExecutor
 
 import frostfs_testlib.resources.optionals as optionals
+from frostfs_testlib.healthcheck.interfaces import Healthcheck
 from frostfs_testlib.reporter import get_reporter
-from frostfs_testlib.shell import CommandOptions, Shell
-from frostfs_testlib.steps import epoch
-from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode
+from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider
+from frostfs_testlib.steps.network import IfUpDownHelper, IpTablesHelper
+from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode
 from frostfs_testlib.storage.controllers.disk_controller import DiskController
+from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass
+from frostfs_testlib.testing import parallel
 from frostfs_testlib.testing.test_control import run_optionally, wait_for_success
 from frostfs_testlib.utils.failover_utils import (
     wait_all_storage_nodes_returned,
@@ -16,32 +19,59 @@ from frostfs_testlib.utils.failover_utils import (
 )
 
 reporter = get_reporter()
+if_up_down_helper = IfUpDownHelper()
 
 
 class ClusterStateController:
-    def __init__(self, shell: Shell, cluster: Cluster) -> None:
+    def __init__(self, shell: Shell, cluster: Cluster, healthcheck: Healthcheck) -> None:
         self.stopped_nodes: list[ClusterNode] = []
         self.detached_disks: dict[str, DiskController] = {}
-        self.stopped_storage_nodes: list[ClusterNode] = []
-        self.stopped_s3_gates: list[ClusterNode] = []
+        self.dropped_traffic: list[ClusterNode] = []
+        self.stopped_services: set[NodeBase] = set()
         self.cluster = cluster
+        self.healthcheck = healthcheck
         self.shell = shell
         self.suspended_services: dict[str, list[ClusterNode]] = {}
+        self.nodes_with_modified_interface: list[ClusterNode] = []
+
+    def _get_stopped_by_node(self, node: ClusterNode) -> set[NodeBase]:
+        stopped_by_node = [svc for svc in self.stopped_services if svc.host == node.host]
+        return set(stopped_by_node)
+
+    def _get_stopped_by_type(self, service_type: type[ServiceClass]) -> set[ServiceClass]:
+        stopped_by_type = [svc for svc in self.stopped_services if isinstance(svc, service_type)]
+        return set(stopped_by_type)
+
+    def _from_stopped_nodes(self, service_type: type[ServiceClass]) -> set[ServiceClass]:
+        stopped_on_nodes = set([node.service(service_type) for node in self.stopped_nodes])
+        return set(stopped_on_nodes)
+
+    def _get_online(self, service_type: type[ServiceClass]) -> set[ServiceClass]:
+        stopped_svc = self._get_stopped_by_type(service_type).union(self._from_stopped_nodes(service_type))
+        online_svc = set(self.cluster.services(service_type)) - stopped_svc
+        return online_svc
 
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
     @reporter.step_deco("Stop host of node {node}")
     def stop_node_host(self, node: ClusterNode, mode: str):
+        # Drop ssh connection for this node before shutdown
+        provider = SshConnectionProvider()
+        provider.drop(node.host_ip)
+
+        self.stopped_nodes.append(node)
         with reporter.step(f"Stop host {node.host.config.address}"):
             node.host.stop_host(mode=mode)
             wait_for_host_offline(self.shell, node.storage_node)
-        self.stopped_nodes.append(node)
 
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
     @reporter.step_deco("Shutdown whole cluster")
     def shutdown_cluster(self, mode: str, reversed_order: bool = False):
-        nodes = (
-            reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes
-        )
+        nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes
+
+        # Drop all ssh connections before shutdown
+        provider = SshConnectionProvider()
+        provider.drop_all()
+
         for node in nodes:
             with reporter.step(f"Stop host {node.host.config.address}"):
                 self.stopped_nodes.append(node)
@@ -50,26 +80,6 @@ class ClusterStateController:
         for node in nodes:
             wait_for_host_offline(self.shell, node.storage_node)
 
-    @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step_deco("Stop all storage services on cluster")
-    def stop_all_storage_services(self, reversed_order: bool = False):
-        nodes = (
-            reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes
-        )
-
-        for node in nodes:
-            self.stop_storage_service(node)
-
-    @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step_deco("Stop all S3 gates on cluster")
-    def stop_all_s3_gates(self, reversed_order: bool = False):
-        nodes = (
-            reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes
-        )
-
-        for node in nodes:
-            self.stop_s3_gate(node)
-
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
     @reporter.step_deco("Start host of node {node}")
     def start_node_host(self, node: ClusterNode):
@@ -89,13 +99,10 @@ class ClusterStateController:
         for node in nodes:
             with reporter.step(f"Start host {node.host.config.address}"):
                 node.host.start_host()
-                if node in self.stopped_storage_nodes:
-                    self.stopped_storage_nodes.remove(node)
-
-                if node in self.stopped_s3_gates:
-                    self.stopped_s3_gates.remove(node)
+                self.stopped_services.difference_update(self._get_stopped_by_node(node))
+
         self.stopped_nodes = []
-        wait_all_storage_nodes_returned(self.shell, self.cluster)
+        self.wait_after_storage_startup()
 
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
     @reporter.step_deco("Detach disk {device} at {mountpoint} on node {node}")
|
@reporter.step_deco("Detach disk {device} at {mountpoint} on node {node}")
|
||||||
|
@ -119,65 +126,134 @@ class ClusterStateController:
|
||||||
self.detached_disks = {}
|
self.detached_disks = {}
|
||||||
|
|
||||||
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
|
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
|
||||||
@reporter.step_deco("Stop storage service on {node}")
|
@reporter.step_deco("Stop all {service_type} services")
|
||||||
def stop_storage_service(self, node: ClusterNode):
|
def stop_services_of_type(self, service_type: type[ServiceClass], mask: bool = True):
|
||||||
node.storage_node.stop_service()
|
services = self.cluster.services(service_type)
|
||||||
self.stopped_storage_nodes.append(node)
|
self.stopped_services.update(services)
|
||||||
|
parallel([service.stop_service for service in services], mask=mask)
|
||||||
|
|
||||||
|
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
|
||||||
|
@reporter.step_deco("Start all {service_type} services")
|
||||||
|
def start_services_of_type(self, service_type: type[ServiceClass]):
|
||||||
|
services = self.cluster.services(service_type)
|
||||||
|
parallel([service.start_service for service in services])
|
||||||
|
self.stopped_services.difference_update(set(services))
|
||||||
|
|
||||||
|
if service_type == StorageNode:
|
||||||
|
self.wait_after_storage_startup()
|
||||||
|
|
||||||
|
@wait_for_success(600, 60)
|
||||||
|
def wait_s3gate(self, s3gate: S3Gate):
|
||||||
|
with reporter.step(f"Wait for {s3gate} reconnection"):
|
||||||
|
result = s3gate.get_metric("frostfs_s3_gw_pool_current_nodes")
|
||||||
|
assert 'address="127.0.0.1' in result.stdout, "S3Gate should connect to local storage node"
|
||||||
|
|
||||||
|
@reporter.step_deco("Wait for S3Gates reconnection to local storage")
|
||||||
|
def wait_s3gates(self):
|
||||||
|
online_s3gates = self._get_online(S3Gate)
|
||||||
|
parallel(self.wait_s3gate, online_s3gates)
|
||||||
|
|
||||||
|
@wait_for_success(600, 60)
|
||||||
|
def wait_tree_healthcheck(self):
|
||||||
|
nodes = self.cluster.nodes(self._get_online(StorageNode))
|
||||||
|
parallel(self.healthcheck.tree_healthcheck, nodes)
|
||||||
|
|
||||||
|
@reporter.step_deco("Wait for storage reconnection to the system")
|
||||||
|
def wait_after_storage_startup(self):
|
||||||
|
wait_all_storage_nodes_returned(self.shell, self.cluster)
|
||||||
|
self.wait_s3gates()
|
||||||
|
self.wait_tree_healthcheck()
|
||||||
|
|
||||||
|
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
|
||||||
|
@reporter.step_deco("Start all stopped services")
|
||||||
|
def start_all_stopped_services(self):
|
||||||
|
stopped_storages = self._get_stopped_by_type(StorageNode)
|
||||||
|
parallel([service.start_service for service in self.stopped_services])
|
||||||
|
self.stopped_services.clear()
|
||||||
|
|
||||||
|
if stopped_storages:
|
||||||
|
self.wait_after_storage_startup()
|
||||||
|
|
||||||
|
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
|
||||||
|
@reporter.step_deco("Stop {service_type} service on {node}")
|
||||||
|
def stop_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass], mask: bool = True):
|
||||||
|
service = node.service(service_type)
|
||||||
|
service.stop_service(mask)
|
||||||
|
self.stopped_services.add(service)
|
||||||
|
|
||||||
|
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
|
||||||
|
@reporter.step_deco("Start {service_type} service on {node}")
|
||||||
|
def start_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass]):
|
||||||
|
service = node.service(service_type)
|
||||||
|
service.start_service()
|
||||||
|
self.stopped_services.discard(service)
|
||||||
|
|
||||||
|
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
|
||||||
|
@reporter.step_deco("Start all stopped {service_type} services")
|
||||||
|
def start_stopped_services_of_type(self, service_type: type[ServiceClass]):
|
||||||
|
stopped_svc = self._get_stopped_by_type(service_type)
|
||||||
|
if not stopped_svc:
|
||||||
|
return
|
||||||
|
|
||||||
|
parallel([svc.start_service for svc in stopped_svc])
|
||||||
|
self.stopped_services.difference_update(stopped_svc)
|
||||||
|
|
||||||
|
if service_type == StorageNode:
|
||||||
|
self.wait_after_storage_startup()
|
||||||
|
|
||||||
|
# TODO: Deprecated
|
||||||
|
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
|
||||||
|
@reporter.step_deco("Stop all storage services on cluster")
|
||||||
|
def stop_all_storage_services(self, reversed_order: bool = False):
|
||||||
|
nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes
|
||||||
|
|
||||||
|
for node in nodes:
|
||||||
|
self.stop_service_of_type(node, StorageNode)
|
||||||
|
|
||||||
|
# TODO: Deprecated
|
||||||
|
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
|
||||||
|
@reporter.step_deco("Stop all S3 gates on cluster")
|
||||||
|
def stop_all_s3_gates(self, reversed_order: bool = False):
|
||||||
|
nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes
|
||||||
|
|
||||||
|
for node in nodes:
|
||||||
|
self.stop_service_of_type(node, S3Gate)
|
||||||
|
|
||||||
|
# TODO: Deprecated
|
||||||
|
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
|
||||||
|
@reporter.step_deco("Stop storage service on {node}")
|
||||||
|
def stop_storage_service(self, node: ClusterNode, mask: bool = True):
|
||||||
|
self.stop_service_of_type(node, StorageNode, mask)
|
||||||
|
|
||||||
|
# TODO: Deprecated
|
||||||
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
|
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
|
||||||
@reporter.step_deco("Start storage service on {node}")
|
@reporter.step_deco("Start storage service on {node}")
|
||||||
def start_storage_service(self, node: ClusterNode):
|
def start_storage_service(self, node: ClusterNode):
|
||||||
node.storage_node.start_service()
|
self.start_service_of_type(node, StorageNode)
|
||||||
self.stopped_storage_nodes.remove(node)
|
|
||||||
|
|
||||||
|
# TODO: Deprecated
|
||||||
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
|
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
|
||||||
@reporter.step_deco("Start stopped storage services")
|
@reporter.step_deco("Start stopped storage services")
|
||||||
def start_stopped_storage_services(self):
|
def start_stopped_storage_services(self):
|
||||||
if not self.stopped_storage_nodes:
|
self.start_stopped_services_of_type(StorageNode)
|
||||||
return
|
|
||||||
|
|
||||||
# In case if we stopped couple services, for example (s01-s04):
|
|
||||||
# After starting only s01, it may require connections to s02-s04, which is still down, and fail to start.
|
|
||||||
# Also, if something goes wrong here, we might skip s02-s04 start at all, and cluster will be left in a bad state.
|
|
||||||
# So in order to make sure that services are at least attempted to be started, using threads here.
|
|
||||||
with ThreadPoolExecutor(max_workers=len(self.stopped_storage_nodes)) as executor:
|
|
||||||
start_result = executor.map(self.start_storage_service, self.stopped_storage_nodes)
|
|
||||||
|
|
||||||
# Looks tricky, but if exception is raised in any thread, it will be "eaten" by ThreadPoolExecutor,
|
|
||||||
# But will be thrown here.
|
|
||||||
# Not ideal solution, but okay for now
|
|
||||||
for _ in start_result:
|
|
||||||
pass
|
|
||||||
|
|
||||||
wait_all_storage_nodes_returned(self.shell, self.cluster)
|
|
||||||
self.stopped_storage_nodes = []
|
|
||||||
|
|
||||||
|
# TODO: Deprecated
|
||||||
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
|
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
|
||||||
@reporter.step_deco("Stop s3 gate on {node}")
|
@reporter.step_deco("Stop s3 gate on {node}")
|
||||||
def stop_s3_gate(self, node: ClusterNode):
|
def stop_s3_gate(self, node: ClusterNode, mask: bool = True):
|
||||||
node.s3_gate.stop_service()
|
self.stop_service_of_type(node, S3Gate, mask)
|
||||||
self.stopped_s3_gates.append(node)
|
|
||||||
|
|
||||||
|
# TODO: Deprecated
|
||||||
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
|
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
|
||||||
@reporter.step_deco("Start s3 gate on {node}")
|
@reporter.step_deco("Start s3 gate on {node}")
|
||||||
def start_s3_gate(self, node: ClusterNode):
|
def start_s3_gate(self, node: ClusterNode):
|
||||||
node.s3_gate.start_service()
|
self.start_service_of_type(node, S3Gate)
|
||||||
self.stopped_s3_gates.remove(node)
|
|
||||||
|
|
||||||
|
# TODO: Deprecated
|
||||||
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
|
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
|
||||||
@reporter.step_deco("Start stopped S3 gates")
|
@reporter.step_deco("Start stopped S3 gates")
|
||||||
def start_stopped_s3_gates(self):
|
def start_stopped_s3_gates(self):
|
||||||
if not self.stopped_s3_gates:
|
self.start_stopped_services_of_type(S3Gate)
|
||||||
return
|
|
||||||
|
|
||||||
with ThreadPoolExecutor(max_workers=len(self.stopped_s3_gates)) as executor:
|
|
||||||
start_result = executor.map(self.start_s3_gate, self.stopped_s3_gates)
|
|
||||||
|
|
||||||
# Looks tricky, but if exception is raised in any thread, it will be "eaten" by ThreadPoolExecutor,
|
|
||||||
# But will be thrown here.
|
|
||||||
# Not ideal solution, but okay for now
|
|
||||||
for _ in start_result:
|
|
||||||
pass
|
|
||||||
|
|
||||||
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
|
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
|
||||||
@reporter.step_deco("Suspend {process_name} service in {node}")
|
@reporter.step_deco("Suspend {process_name} service in {node}")
|
||||||
|
@ -192,10 +268,8 @@ class ClusterStateController:
|
||||||
@reporter.step_deco("Resume {process_name} service in {node}")
|
@reporter.step_deco("Resume {process_name} service in {node}")
|
||||||
def resume_service(self, process_name: str, node: ClusterNode):
|
def resume_service(self, process_name: str, node: ClusterNode):
|
||||||
node.host.wait_success_resume_process(process_name)
|
node.host.wait_success_resume_process(process_name)
|
||||||
if self.suspended_services.get(process_name):
|
if self.suspended_services.get(process_name) and node in self.suspended_services[process_name]:
|
||||||
self.suspended_services[process_name].append(node)
|
self.suspended_services[process_name].remove(node)
|
||||||
else:
|
|
||||||
self.suspended_services[process_name] = [node]
|
|
||||||
|
|
||||||
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
|
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
|
||||||
@reporter.step_deco("Start suspend processes services")
|
@reporter.step_deco("Start suspend processes services")
|
||||||
|
@ -204,6 +278,62 @@ class ClusterStateController:
|
||||||
[node.host.wait_success_resume_process(process_name) for node in list_nodes]
|
[node.host.wait_success_resume_process(process_name) for node in list_nodes]
|
||||||
self.suspended_services = {}
|
self.suspended_services = {}
|
||||||
|
|
||||||
|
@reporter.step_deco("Drop traffic to {node}, with ports - {ports}, nodes - {block_nodes}")
|
||||||
|
def drop_traffic(
|
||||||
|
self,
|
||||||
|
mode: str,
|
||||||
|
node: ClusterNode,
|
||||||
|
wakeup_timeout: int,
|
||||||
|
ports: list[str] = None,
|
||||||
|
block_nodes: list[ClusterNode] = None,
|
||||||
|
) -> None:
|
||||||
|
allowed_modes = ["ports", "nodes"]
|
||||||
|
assert mode in allowed_modes
|
||||||
|
|
||||||
|
match mode:
|
||||||
|
case "ports":
|
||||||
|
IpTablesHelper.drop_input_traffic_to_port(node, ports)
|
||||||
|
case "nodes":
|
||||||
|
list_ip = self._parse_intefaces(block_nodes)
|
||||||
|
IpTablesHelper.drop_input_traffic_to_node(node, list_ip)
|
||||||
|
time.sleep(wakeup_timeout)
|
||||||
|
self.dropped_traffic.append(node)
|
||||||
|
|
||||||
|
@reporter.step_deco("Ping traffic")
|
||||||
|
def ping_traffic(
|
||||||
|
self,
|
||||||
|
node: ClusterNode,
|
||||||
|
nodes_list: list[ClusterNode],
|
||||||
|
expect_result: int,
|
||||||
|
) -> bool:
|
||||||
|
shell = node.host.get_shell()
|
||||||
|
options = CommandOptions(check=False)
|
||||||
|
ips = self._parse_intefaces(nodes_list)
|
||||||
|
for ip in ips:
|
||||||
|
code = shell.exec(f"ping {ip} -c 1", options).return_code
|
||||||
|
if code != expect_result:
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
|
||||||
|
@reporter.step_deco("Start traffic to {node}")
|
||||||
|
def restore_traffic(
|
||||||
|
self,
|
||||||
|
mode: str,
|
||||||
|
node: ClusterNode,
|
||||||
|
) -> None:
|
||||||
|
allowed_modes = ["ports", "nodes"]
|
||||||
|
assert mode in allowed_modes
|
||||||
|
|
||||||
|
match mode:
|
||||||
|
case "ports":
|
||||||
|
IpTablesHelper.restore_input_traffic_to_port(node=node)
|
||||||
|
case "nodes":
|
||||||
|
IpTablesHelper.restore_input_traffic_to_node(node=node)
|
||||||
|
|
||||||
|
@reporter.step_deco("Restore blocked nodes")
|
||||||
|
def restore_all_traffic(self):
|
||||||
|
parallel(self._restore_traffic_to_node, self.dropped_traffic)
|
||||||
|
|
||||||
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
|
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
|
||||||
@reporter.step_deco("Hard reboot host {node} via magic SysRq option")
|
@reporter.step_deco("Hard reboot host {node} via magic SysRq option")
|
||||||
def panic_reboot_host(self, node: ClusterNode, wait_for_return: bool = True):
|
def panic_reboot_host(self, node: ClusterNode, wait_for_return: bool = True):
|
||||||
|
@ -213,6 +343,10 @@ class ClusterStateController:
|
||||||
options = CommandOptions(close_stdin=True, timeout=1, check=False)
|
options = CommandOptions(close_stdin=True, timeout=1, check=False)
|
||||||
shell.exec('sudo sh -c "echo b > /proc/sysrq-trigger"', options)
|
shell.exec('sudo sh -c "echo b > /proc/sysrq-trigger"', options)
|
||||||
|
|
||||||
|
# Drop ssh connection for this node
|
||||||
|
provider = SshConnectionProvider()
|
||||||
|
provider.drop(node.host_ip)
|
||||||
|
|
||||||
if wait_for_return:
|
if wait_for_return:
|
||||||
# Let the things to be settled
|
# Let the things to be settled
|
||||||
# A little wait here to prevent ssh stuck during panic
|
# A little wait here to prevent ssh stuck during panic
|
||||||
|
@ -220,9 +354,66 @@ class ClusterStateController:
|
||||||
wait_for_host_online(self.shell, node.storage_node)
|
wait_for_host_online(self.shell, node.storage_node)
|
||||||
wait_for_node_online(node.storage_node)
|
wait_for_node_online(node.storage_node)
|
||||||
|
|
||||||
def _get_disk_controller(
|
@reporter.step_deco("Down {interface} to {nodes}")
|
||||||
self, node: StorageNode, device: str, mountpoint: str
|
def down_interface(self, nodes: list[ClusterNode], interface: str):
|
||||||
) -> DiskController:
|
for node in nodes:
|
||||||
|
if_up_down_helper.down_interface(node=node, interface=interface)
|
||||||
|
assert if_up_down_helper.check_state(node=node, interface=interface) == "DOWN"
|
||||||
|
self.nodes_with_modified_interface.append(node)
|
||||||
|
|
||||||
|
@reporter.step_deco("Up {interface} to {nodes}")
|
||||||
|
def up_interface(self, nodes: list[ClusterNode], interface: str):
|
||||||
|
for node in nodes:
|
||||||
|
if_up_down_helper.up_interface(node=node, interface=interface)
|
||||||
|
assert if_up_down_helper.check_state(node=node, interface=interface) == "UP"
|
||||||
|
if node in self.nodes_with_modified_interface:
|
||||||
|
self.nodes_with_modified_interface.remove(node)
|
||||||
|
|
||||||
|
@reporter.step_deco("Restore interface")
|
||||||
|
def restore_interfaces(self):
|
||||||
|
for node in self.nodes_with_modified_interface:
|
||||||
|
if_up_down_helper.up_all_interface(node)
|
||||||
|
|
||||||
|
@reporter.step_deco("Get node time")
|
||||||
|
def get_node_date(self, node: ClusterNode) -> datetime:
|
||||||
|
shell = node.host.get_shell()
|
||||||
|
return datetime.datetime.strptime(shell.exec("hwclock -r").stdout.strip(), "%Y-%m-%d %H:%M:%S.%f%z")
|
||||||
|
|
||||||
|
@reporter.step_deco("Set node time to {in_date}")
|
||||||
|
def change_node_date(self, node: ClusterNode, in_date: datetime) -> None:
|
||||||
|
shell = node.host.get_shell()
|
||||||
|
shell.exec(f"hwclock --set --date='{in_date}'")
|
||||||
|
shell.exec("hwclock --hctosys")
|
||||||
|
node_time = self.get_node_date(node)
|
||||||
|
with reporter.step(f"Verify difference between {node_time} and {in_date} is less than a minute"):
|
||||||
|
assert (self.get_node_date(node) - in_date) < datetime.timedelta(minutes=1)
|
||||||
|
|
||||||
|
@reporter.step_deco(f"Restore time")
|
||||||
|
def restore_node_date(self, node: ClusterNode) -> None:
|
||||||
|
shell = node.host.get_shell()
|
||||||
|
now_time = datetime.datetime.now(datetime.timezone.utc)
|
||||||
|
with reporter.step(f"Set {now_time} time"):
|
||||||
|
shell.exec(f"hwclock --set --date='{now_time}'")
|
||||||
|
shell.exec("hwclock --hctosys")
|
||||||
|
|
||||||
|
@reporter.step_deco("Change the synchronizer status to {status}")
|
||||||
|
def set_sync_date_all_nodes(self, status: str):
|
||||||
|
if status == "active":
|
||||||
|
parallel(self._enable_date_synchronizer, self.cluster.cluster_nodes)
|
||||||
|
return
|
||||||
|
parallel(self._disable_date_synchronizer, self.cluster.cluster_nodes)
|
||||||
|
|
||||||
|
def _enable_date_synchronizer(self, cluster_node: ClusterNode):
|
||||||
|
shell = cluster_node.host.get_shell()
|
||||||
|
shell.exec("timedatectl set-ntp true")
|
||||||
|
cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "active", 5)
|
||||||
|
|
||||||
|
def _disable_date_synchronizer(self, cluster_node: ClusterNode):
|
||||||
|
shell = cluster_node.host.get_shell()
|
||||||
|
shell.exec("timedatectl set-ntp false")
|
||||||
|
cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "inactive", 5)
|
||||||
|
|
||||||
|
def _get_disk_controller(self, node: StorageNode, device: str, mountpoint: str) -> DiskController:
|
||||||
disk_controller_id = DiskController.get_id(node, device)
|
disk_controller_id = DiskController.get_id(node, device)
|
||||||
if disk_controller_id in self.detached_disks.keys():
|
if disk_controller_id in self.detached_disks.keys():
|
||||||
disk_controller = self.detached_disks[disk_controller_id]
|
disk_controller = self.detached_disks[disk_controller_id]
|
||||||
|
@ -230,3 +421,16 @@ class ClusterStateController:
|
||||||
disk_controller = DiskController(node, device, mountpoint)
|
disk_controller = DiskController(node, device, mountpoint)
|
||||||
|
|
||||||
return disk_controller
|
return disk_controller
|
||||||
|
|
||||||
|
def _restore_traffic_to_node(self, node):
|
||||||
|
IpTablesHelper.restore_input_traffic_to_port(node)
|
||||||
|
IpTablesHelper.restore_input_traffic_to_node(node)
|
||||||
|
|
||||||
|
def _parse_intefaces(self, nodes: list[ClusterNode]):
|
||||||
|
interfaces = []
|
||||||
|
for node in nodes:
|
||||||
|
dict_interfaces = node.host.config.interfaces
|
||||||
|
for type, ip in dict_interfaces.items():
|
||||||
|
if "mgmt" not in type:
|
||||||
|
interfaces.append(ip)
|
||||||
|
return interfaces
|
||||||
|
|
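The reworked controller replaces the per-service bookkeeping lists with a single stopped_services set, so stopping and starting can be expressed per service type. A minimal usage sketch follows; the shell, cluster and healthcheck objects are assumed to come from test fixtures, and only calls shown in the diff above are used:

from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode

controller = ClusterStateController(shell, cluster, healthcheck)

# Stop every storage node at once; services are masked so systemd does not restart them.
controller.stop_services_of_type(StorageNode)

# Stop a single S3 gate on one node without masking it.
controller.stop_service_of_type(cluster.cluster_nodes[0], S3Gate, mask=False)

# Bring everything back; storage startup also waits for S3 gates to reconnect
# and for the tree healthcheck to pass (wait_after_storage_startup).
controller.start_all_stopped_services()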
@@ -3,6 +3,7 @@ from dataclasses import dataclass
 from enum import Enum
 from typing import Any, Dict, List, Optional, Union

+from frostfs_testlib.testing.readable import HumanReadableEnum
 from frostfs_testlib.utils import wallet_utils

 logger = logging.getLogger("NeoLogger")
@@ -10,7 +11,7 @@ EACL_LIFETIME = 100500
 FROSTFS_CONTRACT_CACHE_TIMEOUT = 30


-class EACLOperation(Enum):
+class EACLOperation(HumanReadableEnum):
     PUT = "put"
     GET = "get"
     HEAD = "head"
@@ -20,24 +21,24 @@ class EACLOperation(Enum):
     DELETE = "delete"


-class EACLAccess(Enum):
+class EACLAccess(HumanReadableEnum):
     ALLOW = "allow"
     DENY = "deny"


-class EACLRole(Enum):
+class EACLRole(HumanReadableEnum):
     OTHERS = "others"
     USER = "user"
     SYSTEM = "system"


-class EACLHeaderType(Enum):
+class EACLHeaderType(HumanReadableEnum):
     REQUEST = "req"  # Filter request headers
     OBJECT = "obj"  # Filter object headers
     SERVICE = "SERVICE"  # Filter service headers. These are not processed by FrostFS nodes and exist for service use only


-class EACLMatchType(Enum):
+class EACLMatchType(HumanReadableEnum):
     STRING_EQUAL = "="  # Return true if strings are equal
     STRING_NOT_EQUAL = "!="  # Return true if strings are different

@@ -110,28 +110,8 @@ class MorphChain(NodeBase):
     def label(self) -> str:
         return f"{self.name}: {self.get_endpoint()}"

-
-class MainChain(NodeBase):
-    """
-    Class represents main-chain consensus node in a cluster
-
-    Consensus node is not always the same as physical host:
-    It can be service running in a container or on physical host (or physical node, if you will):
-    For testing perspective, it's not relevant how it is actually running,
-    since frostfs network will still treat it as "node"
-    """
-
-    rpc_client: RPCClient
-
-    def construct(self):
-        self.rpc_client = RPCClient(self.get_endpoint())
-
-    def get_endpoint(self) -> str:
-        return self._get_attribute(ConfigAttributes.ENDPOINT_INTERNAL)
-
-    @property
-    def label(self) -> str:
-        return f"{self.name}: {self.get_endpoint()}"
-
+    def get_http_endpoint(self) -> str:
+        return self._get_attribute("http_endpoint")

 class StorageNode(NodeBase):
@@ -162,6 +142,12 @@ class StorageNode(NodeBase):
         )
         return health_metric in output

+    def get_shard_config_path(self) -> str:
+        return self._get_attribute(ConfigAttributes.SHARD_CONFIG_PATH)
+
+    def get_shards_config(self) -> tuple[str, dict]:
+        return self.get_config(self.get_shard_config_path())
+
     def get_control_endpoint(self) -> str:
         return self._get_attribute(ConfigAttributes.CONTROL_ENDPOINT)

@@ -171,6 +157,12 @@ class StorageNode(NodeBase):
     def get_data_directory(self) -> str:
         return self.host.get_data_directory(self.name)

+    def get_http_hostname(self) -> str:
+        return self._get_attribute(ConfigAttributes.HTTP_HOSTNAME)
+
+    def get_s3_hostname(self) -> str:
+        return self._get_attribute(ConfigAttributes.S3_HOSTNAME)
+
     def delete_blobovnicza(self):
         self.host.delete_blobovnicza(self.name)

@@ -1,17 +1,22 @@
-from abc import ABC, abstractmethod
+from abc import abstractmethod
 from dataclasses import dataclass
-from typing import Optional, Tuple, TypedDict, TypeVar
+from typing import Optional, TypedDict, TypeVar

 import yaml

 from frostfs_testlib.hosting.config import ServiceConfig
 from frostfs_testlib.hosting.interfaces import Host
+from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib.shell.interfaces import CommandResult
 from frostfs_testlib.storage.constants import ConfigAttributes
+from frostfs_testlib.testing.readable import HumanReadableABC
 from frostfs_testlib.utils import wallet_utils

+reporter = get_reporter()
+

 @dataclass
-class NodeBase(ABC):
+class NodeBase(HumanReadableABC):
     """
     Represents a node of some underlying service
     """
@@ -53,17 +58,36 @@ class NodeBase(ABC):
         return self._process_name

     def start_service(self):
-        self.host.start_service(self.name)
+        with reporter.step(f"Unmask {self.name} service on {self.host.config.address}"):
+            self.host.unmask_service(self.name)
+
+        with reporter.step(f"Start {self.name} service on {self.host.config.address}"):
+            self.host.start_service(self.name)

     @abstractmethod
     def service_healthcheck(self) -> bool:
         """Service healthcheck."""

-    def stop_service(self):
-        self.host.stop_service(self.name)
+    # TODO: Migrate to sub-class Metrcis (not yet exists :))
+    def get_metric(self, metric: str) -> CommandResult:
+        shell = self.host.get_shell()
+        result = shell.exec(f"curl -s {self.get_metrics_endpoint()} | grep -e '^{metric}'")
+        return result
+
+    def get_metrics_endpoint(self) -> str:
+        return self._get_attribute(ConfigAttributes.ENDPOINT_PROMETHEUS)
+
+    def stop_service(self, mask: bool = True):
+        if mask:
+            with reporter.step(f"Mask {self.name} service on {self.host.config.address}"):
+                self.host.mask_service(self.name)
+
+        with reporter.step(f"Stop {self.name} service on {self.host.config.address}"):
+            self.host.stop_service(self.name)

     def restart_service(self):
-        self.host.restart_service(self.name)
+        with reporter.step(f"Restart {self.name} service on {self.host.config.address}"):
+            self.host.restart_service(self.name)

     def get_wallet_password(self) -> str:
         return self._get_attribute(ConfigAttributes.WALLET_PASSWORD)
@@ -96,8 +120,10 @@ class NodeBase(ABC):
             ConfigAttributes.WALLET_CONFIG,
         )

-    def get_config(self) -> Tuple[str, dict]:
-        config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH)
+    def get_config(self, config_file_path: Optional[str] = None) -> tuple[str, dict]:
+        if config_file_path is None:
+            config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH)
+
         shell = self.host.get_shell()

         result = shell.exec(f"cat {config_file_path}")
@@ -106,8 +132,10 @@ class NodeBase(ABC):
         config = yaml.safe_load(config_text)
         return config_file_path, config

-    def save_config(self, new_config: dict) -> None:
-        config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH)
+    def save_config(self, new_config: dict, config_file_path: Optional[str] = None) -> None:
+        if config_file_path is None:
+            config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH)
+
         shell = self.host.get_shell()

         config_str = yaml.dump(new_config)
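NodeBase.get_metric shells out to the node host and greps the Prometheus endpoint for one metric family. An illustrative call; the storage_node object and the metric name are placeholders, not part of the change above:

# `storage_node` is assumed to be a StorageNode taken from the cluster fixture.
result = storage_node.get_metric("frostfs_node_object_counter")
if result.return_code == 0:
    print(result.stdout)  # raw exposition-format lines matching ^frostfs_node_object_counter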
src/frostfs_testlib/storage/dataclasses/object_size.py (new file)
@@ -0,0 +1,13 @@
+from dataclasses import dataclass
+
+
+@dataclass
+class ObjectSize:
+    name: str
+    value: int
+
+    def __str__(self) -> str:
+        return self.name
+
+    def __repr__(self) -> str:
+        return self.__str__()
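ObjectSize pairs a human-readable label with a byte count, so parametrized tests can print the label instead of a bare number. A small sketch:

sizes = [ObjectSize("simple", 1024), ObjectSize("complex", 5_000_000)]
for size in sizes:
    print(size.name, size.value)  # "simple 1024", then "complex 5000000"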
@@ -1,6 +1,9 @@
 from dataclasses import dataclass
+from enum import Enum
 from typing import Optional

+from frostfs_testlib.testing.readable import HumanReadableEnum
+

 @dataclass
 class ObjectRef:
@@ -23,3 +26,30 @@ class StorageObjectInfo(ObjectRef):
     attributes: Optional[list[dict[str, str]]] = None
     tombstone: Optional[str] = None
     locks: Optional[list[LockObjectInfo]] = None
+
+
+@dataclass
+class NodeNetmapInfo:
+    node_id: str = None
+    node_status: str = None
+    node_data_ips: list[str] = None
+    cluster_name: str = None
+    continent: str = None
+    country: str = None
+    country_code: str = None
+    external_address: list[str] = None
+    location: str = None
+    node: str = None
+    price: int = None
+    sub_div: str = None
+    sub_div_code: int = None
+    un_locode: str = None
+    role: str = None
+
+
+class Interfaces(HumanReadableEnum):
+    DATA_O: str = "data0"
+    DATA_1: str = "data1"
+    MGMT: str = "mgmt"
+    INTERNAL_0: str = "internal0"
+    INTERNAL_1: str = "internal1"
@@ -1,10 +1,13 @@
+import time
 from typing import Optional

 from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.steps import epoch
 from frostfs_testlib.storage.cluster import Cluster
 from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
+from frostfs_testlib.utils import datetime_utils

 reporter = get_reporter()

@@ -14,13 +17,24 @@ class ClusterTestBase:
     shell: Shell
     cluster: Cluster

-    @reporter.step_deco("Tick {epochs_to_tick} epochs")
-    def tick_epochs(self, epochs_to_tick: int, alive_node: Optional[StorageNode] = None):
+    @reporter.step_deco("Tick {epochs_to_tick} epochs, wait {wait_block} block")
+    def tick_epochs(
+        self,
+        epochs_to_tick: int,
+        alive_node: Optional[StorageNode] = None,
+        wait_block: int = None,
+    ):
         for _ in range(epochs_to_tick):
-            self.tick_epoch(alive_node)
+            self.tick_epoch(alive_node, wait_block)

-    def tick_epoch(self, alive_node: Optional[StorageNode] = None):
+    def tick_epoch(
+        self,
+        alive_node: Optional[StorageNode] = None,
+        wait_block: int = None,
+    ):
         epoch.tick_epoch(self.shell, self.cluster, alive_node=alive_node)
+        if wait_block:
+            time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * wait_block)

     def wait_for_epochs_align(self):
         epoch.wait_for_epochs_align(self.shell, self.cluster)
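With the new wait_block argument, tick_epoch sleeps MORPH_BLOCK_TIME multiplied by wait_block after ticking. Inside a ClusterTestBase subclass the call looks like this sketch:

self.tick_epochs(2, alive_node=None, wait_block=1)  # tick twice, waiting one morph block after each epoch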
@@ -42,7 +42,7 @@ def parallel(
     exceptions = [future.exception() for future in futures if future.exception()]
     if exceptions:
         message = "\n".join([str(e) for e in exceptions])
-        raise RuntimeError(f"The following exceptions occured during parallel run: {message}")
+        raise RuntimeError(f"The following exceptions occured during parallel run:\n {message}")
     return futures
src/frostfs_testlib/testing/readable.py (new file)
@@ -0,0 +1,36 @@
+from abc import ABCMeta
+from enum import Enum
+
+
+class HumanReadableEnum(Enum):
+    def __str__(self):
+        return self._name_
+
+    def __repr__(self):
+        return self._name_
+
+
+class HumanReadableABCMeta(ABCMeta):
+    def __str__(cls):
+        if "__repr_name__" in cls.__dict__:
+            return cls.__dict__["__repr_name__"]
+        return cls.__name__
+
+    def __repr__(cls):
+        if "__repr_name__" in cls.__dict__:
+            return cls.__dict__["__repr_name__"]
+        return cls.__name__
+
+
+class HumanReadableABC(metaclass=HumanReadableABCMeta):
+    @classmethod
+    def __str__(cls):
+        if "__repr_name__" in cls.__dict__:
+            return cls.__dict__["__repr_name__"]
+        return type(cls).__name__
+
+    @classmethod
+    def __repr__(cls):
+        if "__repr_name__" in cls.__dict__:
+            return cls.__dict__["__repr_name__"]
+        return type(cls).__name__
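The readable helpers only change how objects print; behaviour is otherwise unchanged. A quick check of the effect, using classes touched by this change set (expected strings taken from tests/test_dataclasses.py below):

from frostfs_testlib.storage.dataclasses.acl import EACLRole
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode

print(f"{EACLRole.OTHERS}")  # "OTHERS" rather than "EACLRole.OTHERS"
print(f"{StorageNode}")      # "StorageNode", or __repr_name__ when a subclass sets it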
@@ -3,4 +3,3 @@ import frostfs_testlib.utils.datetime_utils
 import frostfs_testlib.utils.json_utils
 import frostfs_testlib.utils.string_utils
 import frostfs_testlib.utils.wallet_utils
-from frostfs_testlib.utils.file_keeper import FileKeeper
@@ -5,18 +5,22 @@
 """
 Helper functions to use with `frostfs-cli`, `neo-go` and other CLIs.
 """
+import csv
 import json
 import logging
+import re
 import subprocess
 import sys
 from contextlib import suppress
 from datetime import datetime
+from io import StringIO
 from textwrap import shorten
-from typing import TypedDict, Union
+from typing import Dict, List, TypedDict, Union

 import pexpect

 from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetmapInfo

 reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")
@@ -24,57 +28,6 @@ COLOR_GREEN = "\033[92m"
 COLOR_OFF = "\033[0m"


-def _cmd_run(cmd: str, timeout: int = 90) -> str:
-    """
-    Runs given shell command <cmd>, in case of success returns its stdout,
-    in case of failure returns error message.
-    """
-    compl_proc = None
-    start_time = datetime.now()
-    try:
-        logger.info(f"{COLOR_GREEN}Executing command: {cmd}{COLOR_OFF}")
-        start_time = datetime.utcnow()
-        compl_proc = subprocess.run(
-            cmd,
-            check=True,
-            universal_newlines=True,
-            stdout=subprocess.PIPE,
-            stderr=subprocess.STDOUT,
-            timeout=timeout,
-            shell=True,
-        )
-        output = compl_proc.stdout
-        return_code = compl_proc.returncode
-        end_time = datetime.utcnow()
-        logger.info(f"{COLOR_GREEN}Output: {output}{COLOR_OFF}")
-        _attach_allure_log(cmd, output, return_code, start_time, end_time)
-
-        return output
-    except subprocess.CalledProcessError as exc:
-        logger.info(
-            f"Command: {cmd}\n" f"Error:\nreturn code: {exc.returncode} " f"\nOutput: {exc.output}"
-        )
-        end_time = datetime.now()
-        return_code, cmd_output = subprocess.getstatusoutput(cmd)
-        _attach_allure_log(cmd, cmd_output, return_code, start_time, end_time)
-
-        raise RuntimeError(
-            f"Command: {cmd}\n" f"Error:\nreturn code: {exc.returncode}\n" f"Output: {exc.output}"
-        ) from exc
-    except OSError as exc:
-        raise RuntimeError(f"Command: {cmd}\n" f"Output: {exc.strerror}") from exc
-    except Exception as exc:
-        return_code, cmd_output = subprocess.getstatusoutput(cmd)
-        end_time = datetime.now()
-        _attach_allure_log(cmd, cmd_output, return_code, start_time, end_time)
-        logger.info(
-            f"Command: {cmd}\n"
-            f"Error:\nreturn code: {return_code}\n"
-            f"Output: {exc.output.decode('utf-8') if type(exc.output) is bytes else exc.output}"
-        )
-        raise
-
-
 def _run_with_passwd(cmd: str) -> str:
     child = pexpect.spawn(cmd)
     child.delaybeforesend = 1
@@ -133,3 +86,64 @@ def log_command_execution(cmd: str, output: Union[str, TypedDict]) -> None:
     command_attachment = f"COMMAND: '{cmd}'\n" f"OUTPUT:\n {output}\n"
     with reporter.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'):
         reporter.attach(command_attachment, "Command execution")
+
+
+def parse_netmap_output(output: str) -> list[NodeNetmapInfo]:
+    """
+    The code will parse each line and return each node as dataclass.
+    """
+    netmap_nodes = output.split("Node ")[1:]
+    dataclasses_netmap = []
+    result_netmap = {}
+
+    regexes = {
+        "node_id": r"\d+: (?P<node_id>\w+)",
+        "node_data_ips": r"(?P<node_data_ips>/ip4/.+?)$",
+        "node_status": r"(?P<node_status>ONLINE|OFFLINE)",
+        "cluster_name": r"ClusterName: (?P<cluster_name>\w+)",
+        "continent": r"Continent: (?P<continent>\w+)",
+        "country": r"Country: (?P<country>\w+)",
+        "country_code": r"CountryCode: (?P<country_code>\w+)",
+        "external_address": r"ExternalAddr: (?P<external_address>/ip[4].+?)$",
+        "location": r"Location: (?P<location>\w+.*)",
+        "node": r"Node: (?P<node>\d+\.\d+\.\d+\.\d+)",
+        "price": r"Price: (?P<price>\d+)",
+        "sub_div": r"SubDiv: (?P<sub_div>.*)",
+        "sub_div_code": r"SubDivCode: (?P<sub_div_code>\w+)",
+        "un_locode": r"UN-LOCODE: (?P<un_locode>\w+.*)",
+        "role": r"role: (?P<role>\w+)",
+    }
+
+    for node in netmap_nodes:
+        for key, regex in regexes.items():
+            search_result = re.search(regex, node, flags=re.MULTILINE)
+            if key == "node_data_ips":
+                result_netmap[key] = search_result[key].strip().split(" ")
+                continue
+            if key == "external_address":
+                result_netmap[key] = search_result[key].strip().split(",")
+                continue
+            if search_result == None:
+                result_netmap[key] = None
+                continue
+            result_netmap[key] = search_result[key].strip()
+
+        dataclasses_netmap.append(NodeNetmapInfo(**result_netmap))
+
+    return dataclasses_netmap
+
+
+def parse_cmd_table(output: str, delimiter="|") -> list[dict[str, str]]:
+    parsing_output = []
+    reader = csv.reader(StringIO(output.strip()), delimiter=delimiter)
+    iter_reader = iter(reader)
+    header_row = next(iter_reader)
+    for row in iter_reader:
+        table = {}
+        for i in range(len(row)):
+            header = header_row[i].strip().lower().replace(" ", "_")
+            value = row[i].strip().lower()
+            if header:
+                table[header] = value
+        parsing_output.append(table)
+    return parsing_output
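parse_cmd_table turns pipe-delimited CLI tables into dictionaries keyed by normalized header names. A sketch with made-up input; the column names here are illustrative only, not taken from any real command:

sample = """
 Shard ID | Mode       | Errors
 abc123   | read-write | 0
 def456   | degraded   | 2
"""
rows = parse_cmd_table(sample)
# [{'shard_id': 'abc123', 'mode': 'read-write', 'errors': '0'},
#  {'shard_id': 'def456', 'mode': 'degraded', 'errors': '2'}]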
@@ -1,10 +1,23 @@
 import base64
 import binascii
 import json
+from typing import Tuple

 import base58


+def calc_unit(value: float, skip_units: int = 0) -> Tuple[float, str]:
+    units = ["B", "KiB", "MiB", "GiB", "TiB"]
+
+    for unit in units[skip_units:]:
+        if value < 1024:
+            return value, unit
+
+        value = value / 1024.0
+
+    return value, unit
+
+
 def str_to_ascii_hex(input: str) -> str:
     b = binascii.hexlify(input.encode())
     return str(b)[2:-1]
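calc_unit repeatedly divides by 1024 until the value drops below the next unit boundary; skip_units starts the scale higher up, which is useful when the input is already expressed in KiB. For example:

print(calc_unit(524288))             # (512.0, 'KiB')
print(calc_unit(512, skip_units=1))  # (512, 'KiB'), matching the "512 KiB" strings in tests/test_load_config.py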
tests/conftest.py (new file)
@@ -0,0 +1,5 @@
+import os
+import sys
+
+app_dir = os.path.join(os.getcwd(), "src")
+sys.path.insert(0, app_dir)
@@ -14,11 +14,7 @@ def format_error_details(error: Exception) -> str:
     Returns:
         String containing exception details.
     """
-    detail_lines = traceback.format_exception(
-        etype=type(error),
-        value=error,
-        tb=error.__traceback__,
-    )
+    detail_lines = traceback.format_exception(error)
     return "".join(detail_lines)
tests/test_dataclasses.py (new file)
@@ -0,0 +1,39 @@
+from typing import Any
+
+import pytest
+
+from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper
+from frostfs_testlib.storage.dataclasses.acl import EACLRole
+from frostfs_testlib.storage.dataclasses.frostfs_services import (
+    HTTPGate,
+    InnerRing,
+    MorphChain,
+    S3Gate,
+    StorageNode,
+)
+from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
+
+
+class TestDataclassesStr:
+    """Here we are testing important classes string representation."""
+
+    @pytest.mark.parametrize(
+        "obj, expected",
+        [
+            (Boto3ClientWrapper, "Boto3 client"),
+            (AwsCliClient, "AWS CLI"),
+            (ObjectSize("simple", 1), "simple object size"),
+            (ObjectSize("simple", 10), "simple object size"),
+            (ObjectSize("complex", 5000), "complex object size"),
+            (ObjectSize("complex", 5555), "complex object size"),
+            (StorageNode, "StorageNode"),
+            (MorphChain, "MorphChain"),
+            (S3Gate, "S3Gate"),
+            (HTTPGate, "HTTPGate"),
+            (InnerRing, "InnerRing"),
+            (EACLRole.OTHERS, "OTHERS"),
+        ],
+    )
+    def test_classes_string_representation(self, obj: Any, expected: str):
+        assert f"{obj}" == expected
+        assert repr(obj) == expected
595
tests/test_load_config.py
Normal file
595
tests/test_load_config.py
Normal file
|
@ -0,0 +1,595 @@
|
||||||
|
from dataclasses import Field, dataclass, fields, is_dataclass
|
||||||
|
from typing import Any, get_args
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from frostfs_testlib.load.load_config import (
|
||||||
|
EndpointSelectionStrategy,
|
||||||
|
LoadParams,
|
||||||
|
LoadScenario,
|
||||||
|
LoadType,
|
||||||
|
Preset,
|
||||||
|
ReadFrom,
|
||||||
|
)
|
||||||
|
from frostfs_testlib.load.runners import DefaultRunner
|
||||||
|
from frostfs_testlib.storage.cluster import ClusterNode
|
||||||
|
from frostfs_testlib.storage.controllers.background_load_controller import BackgroundLoadController
|
||||||
|
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
|
||||||
|
from frostfs_testlib.storage.dataclasses.node_base import NodeBase
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class MetaTestField:
|
||||||
|
field: Field
|
||||||
|
field_type: type
|
||||||
|
instance: Any
|
||||||
|
|
||||||
|
|
||||||
|
class TestLoadConfig:
|
||||||
|
@pytest.fixture
|
||||||
|
def set_empty(self, request: pytest.FixtureRequest):
|
||||||
|
# Workaround for verify
|
||||||
|
if "param" in request.__dict__ and request.param:
|
||||||
|
return request.param
|
||||||
|
|
||||||
|
return False
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def load_type(self, request: pytest.FixtureRequest):
|
||||||
|
# Workaround for verify
|
||||||
|
if "param" in request.__dict__ and request.param:
|
||||||
|
return request.param
|
||||||
|
|
||||||
|
return None
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def load_params(self, load_type: LoadType, set_empty: bool, request: pytest.FixtureRequest):
|
||||||
|
load_scenario = request.param
|
||||||
|
return self._get_filled_load_params(load_type, load_scenario, set_empty)
|
||||||
|
|
||||||
|
def test_load_params_only_load_type_required(self):
|
||||||
|
load_params = LoadParams(load_type=LoadType.S3)
|
||||||
|
expected = "s3"
|
||||||
|
assert repr(load_params) == expected
|
||||||
|
assert f"{load_params}" == expected
|
||||||
|
|
||||||
|
def test_load_params_initially_have_all_values_none(self):
|
||||||
|
load_params = LoadParams(load_type=LoadType.S3)
|
||||||
|
self._check_all_values_none(load_params, ["load_type", "scenario"])
|
||||||
|
|
||||||
|
def test_preset_initially_have_all_values_none(self):
|
||||||
|
preset = Preset()
|
||||||
|
self._check_all_values_none(preset)
|
||||||
|
|
||||||
|
@pytest.mark.parametrize("load_params", [LoadScenario.S3_CAR], indirect=True)
|
||||||
|
def test_string_representation_s3_car(self, load_params: LoadParams):
|
||||||
|
load_params.object_size = 524288
|
||||||
|
expected = "s3_car 512 MiB, write_rate=10, read_rate=9, delete_rate=11, preallocated_writers=20, preallocated_readers=20, preallocated_deleters=21"
|
||||||
|
assert f"{load_params}" == expected
|
||||||
|
assert repr(load_params) == expected
|
||||||
|
|
||||||
|
@pytest.mark.parametrize("load_params", [LoadScenario.gRPC], indirect=True)
|
||||||
|
def test_string_representation_grpc(self, load_params: LoadParams):
|
||||||
|
load_params.object_size = 512
|
||||||
|
expected = "grpc 512 KiB, writers=7, readers=7, deleters=8"
|
||||||
|
assert f"{load_params}" == expected
|
||||||
|
assert repr(load_params) == expected
|
||||||
|
|
||||||
|
@pytest.mark.parametrize("load_params", [LoadScenario.gRPC], indirect=True)
|
||||||
|
def test_load_controller_string_representation(self, load_params: LoadParams):
|
||||||
|
load_params.endpoint_selection_strategy = EndpointSelectionStrategy.ALL
|
||||||
|
load_params.object_size = 512
|
||||||
|
background_load_controller = BackgroundLoadController(
|
||||||
|
"tmp", load_params, "wallet", None, None, DefaultRunner(None)
|
||||||
|
)
|
||||||
|
expected = "grpc 512 KiB, writers=7, readers=7, deleters=8"
|
||||||
|
assert f"{background_load_controller}" == expected
|
||||||
|
assert repr(background_load_controller) == expected
|
||||||
|
|
||||||
|
def test_load_set_id_changes_fields(self):
|
||||||
|
load_params = LoadParams(load_type=LoadType.S3)
|
||||||
|
load_params.preset = Preset()
|
||||||
|
load_params.read_from = ReadFrom["REGISTRY"]
|
||||||
|
load_params.working_dir = "/tmp"
|
||||||
|
load_params.set_id("test_id")
|
||||||
|
|
||||||
|
assert load_params.registry_file == "/tmp/test_id_registry.bolt"
|
||||||
|
assert load_params.preset.pregen_json == "/tmp/test_id_prepare.json"
|
||||||
|
assert load_params.load_id == "test_id"
|
||||||
|
|
||||||
|
# No other values should be changed
|
||||||
|
self._check_all_values_none(
|
||||||
|
load_params,
|
||||||
|
[
|
||||||
|
"load_type",
|
||||||
|
"working_dir",
|
||||||
|
"load_id",
|
||||||
|
"registry_file",
|
||||||
|
"preset",
|
||||||
|
"scenario",
|
||||||
|
"read_from",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
self._check_all_values_none(load_params.preset, ["pregen_json", "scenario"])
|
||||||
|
|
||||||
|
@pytest.mark.parametrize("load_params", [LoadScenario.gRPC], indirect=True)
|
||||||
|
def test_argument_parsing_for_grpc_scenario(self, load_params: LoadParams):
|
||||||
|
expected_preset_args = [
|
||||||
|
"--size '11'",
|
||||||
|
"--preload_obj '13'",
|
||||||
|
"--out 'pregen_json'",
|
||||||
|
"--workers '7'",
|
||||||
|
"--containers '16'",
|
||||||
|
"--policy 'container_placement_policy'",
|
||||||
|
]
|
||||||
|
expected_env_vars = {
|
||||||
|
"DURATION": 9,
|
||||||
|
"WRITE_OBJ_SIZE": 11,
|
||||||
|
"REGISTRY_FILE": "registry_file",
|
||||||
|
"K6_MIN_ITERATION_DURATION": "min_iteration_duration",
|
||||||
|
"K6_SETUP_TIMEOUT": "setup_timeout",
|
||||||
|
"WRITERS": 7,
|
||||||
|
"READERS": 7,
|
||||||
|
"DELETERS": 8,
|
||||||
|
"PREGEN_JSON": "pregen_json",
|
||||||
|
"PREPARE_LOCALLY": True,
|
||||||
|
}
|
||||||
|
|
||||||
|
self._check_preset_params(load_params, expected_preset_args)
|
||||||
|
self._check_env_vars(load_params, expected_env_vars)
|
||||||
|
|
||||||
|
@pytest.mark.parametrize("load_params", [LoadScenario.gRPC_CAR], indirect=True)
|
||||||
|
def test_argument_parsing_for_grpc_car_scenario(self, load_params: LoadParams):
|
||||||
|
expected_preset_args = [
|
||||||
|
"--size '11'",
|
||||||
|
"--preload_obj '13'",
|
||||||
|
"--out 'pregen_json'",
|
||||||
|
"--workers '7'",
|
||||||
|
"--containers '16'",
|
||||||
|
"--policy 'container_placement_policy'",
|
||||||
|
]
|
||||||
|
expected_env_vars = {
|
||||||
|
"DURATION": 9,
|
||||||
|
"WRITE_OBJ_SIZE": 11,
|
||||||
|
"REGISTRY_FILE": "registry_file",
|
||||||
|
"K6_MIN_ITERATION_DURATION": "min_iteration_duration",
|
||||||
|
"K6_SETUP_TIMEOUT": "setup_timeout",
|
||||||
|
"MAX_WRITERS": 11,
|
||||||
|
"MAX_READERS": 11,
|
||||||
|
"MAX_DELETERS": 12,
|
||||||
|
"PRE_ALLOC_DELETERS": 21,
|
||||||
|
"PRE_ALLOC_READERS": 20,
|
||||||
|
"PRE_ALLOC_WRITERS": 20,
|
||||||
|
"PREGEN_JSON": "pregen_json",
|
||||||
|
"TIME_UNIT": "time_unit",
|
||||||
|
"WRITE_RATE": 10,
|
||||||
|
"READ_RATE": 9,
|
||||||
|
"DELETE_RATE": 11,
|
||||||
|
"PREPARE_LOCALLY": True,
|
||||||
|
}
|
||||||
|
|
||||||
|
self._check_preset_params(load_params, expected_preset_args)
|
||||||
|
self._check_env_vars(load_params, expected_env_vars)
|
||||||
|
|
||||||
|
    @pytest.mark.parametrize("load_params", [LoadScenario.S3], indirect=True)
    def test_argument_parsing_for_s3_scenario(self, load_params: LoadParams):
        expected_preset_args = [
            "--size '11'",
            "--preload_obj '13'",
            "--no-verify-ssl",
            "--out 'pregen_json'",
            "--workers '7'",
            "--buckets '13'",
            "--location 's3_location'",
        ]
        expected_env_vars = {
            "DURATION": 9,
            "WRITE_OBJ_SIZE": 11,
            "REGISTRY_FILE": "registry_file",
            "K6_MIN_ITERATION_DURATION": "min_iteration_duration",
            "K6_SETUP_TIMEOUT": "setup_timeout",
            "WRITERS": 7,
            "READERS": 7,
            "DELETERS": 8,
            "NO_VERIFY_SSL": True,
            "PREGEN_JSON": "pregen_json",
        }

        self._check_preset_params(load_params, expected_preset_args)
        self._check_env_vars(load_params, expected_env_vars)

    @pytest.mark.parametrize("load_params", [LoadScenario.S3_CAR], indirect=True)
    def test_argument_parsing_for_s3_car_scenario(self, load_params: LoadParams):
        expected_preset_args = [
            "--size '11'",
            "--preload_obj '13'",
            "--no-verify-ssl",
            "--out 'pregen_json'",
            "--workers '7'",
            "--buckets '13'",
            "--location 's3_location'",
        ]
        expected_env_vars = {
            "DURATION": 9,
            "WRITE_OBJ_SIZE": 11,
            "REGISTRY_FILE": "registry_file",
            "K6_MIN_ITERATION_DURATION": "min_iteration_duration",
            "K6_SETUP_TIMEOUT": "setup_timeout",
            "NO_VERIFY_SSL": True,
            "MAX_WRITERS": 11,
            "MAX_READERS": 11,
            "MAX_DELETERS": 12,
            "PRE_ALLOC_DELETERS": 21,
            "PRE_ALLOC_READERS": 20,
            "PRE_ALLOC_WRITERS": 20,
            "PREGEN_JSON": "pregen_json",
            "TIME_UNIT": "time_unit",
            "WRITE_RATE": 10,
            "READ_RATE": 9,
            "DELETE_RATE": 11,
            "PREPARE_LOCALLY": True,
        }

        self._check_preset_params(load_params, expected_preset_args)
        self._check_env_vars(load_params, expected_env_vars)

    @pytest.mark.parametrize("load_params", [LoadScenario.HTTP], indirect=True)
    def test_argument_parsing_for_http_scenario(self, load_params: LoadParams):
        expected_preset_args = [
            "--no-verify-ssl",
            "--size '11'",
            "--preload_obj '13'",
            "--out 'pregen_json'",
            "--workers '7'",
            "--containers '16'",
            "--policy 'container_placement_policy'",
        ]
        expected_env_vars = {
            "DURATION": 9,
            "WRITE_OBJ_SIZE": 11,
            "NO_VERIFY_SSL": True,
            "REGISTRY_FILE": "registry_file",
            "K6_MIN_ITERATION_DURATION": "min_iteration_duration",
            "K6_SETUP_TIMEOUT": "setup_timeout",
            "WRITERS": 7,
            "READERS": 7,
            "DELETERS": 8,
            "PREGEN_JSON": "pregen_json",
        }

        self._check_preset_params(load_params, expected_preset_args)
        self._check_env_vars(load_params, expected_env_vars)

    @pytest.mark.parametrize("load_params", [LoadScenario.LOCAL], indirect=True)
    def test_argument_parsing_for_local_scenario(self, load_params: LoadParams):
        expected_preset_args = [
            "--size '11'",
            "--preload_obj '13'",
            "--out 'pregen_json'",
            "--workers '7'",
            "--containers '16'",
            "--policy 'container_placement_policy'",
        ]
        expected_env_vars = {
            "CONFIG_FILE": "config_file",
            "DURATION": 9,
            "WRITE_OBJ_SIZE": 11,
            "REGISTRY_FILE": "registry_file",
            "K6_MIN_ITERATION_DURATION": "min_iteration_duration",
            "K6_SETUP_TIMEOUT": "setup_timeout",
            "WRITERS": 7,
            "READERS": 7,
            "DELETERS": 8,
            "PREGEN_JSON": "pregen_json",
        }

        self._check_preset_params(load_params, expected_preset_args)
        self._check_env_vars(load_params, expected_env_vars)

    @pytest.mark.parametrize(
        "load_params, load_type", [(LoadScenario.VERIFY, LoadType.S3)], indirect=True
    )
    def test_argument_parsing_for_s3_verify_scenario(self, load_params: LoadParams):
        expected_env_vars = {
            "CLIENTS": 14,
            "REGISTRY_FILE": "registry_file",
            "K6_SETUP_TIMEOUT": "setup_timeout",
            "NO_VERIFY_SSL": True,
            "TIME_LIMIT": 11,
        }

        self._check_env_vars(load_params, expected_env_vars)

    @pytest.mark.parametrize(
        "load_params, load_type", [(LoadScenario.VERIFY, LoadType.gRPC)], indirect=True
    )
    def test_argument_parsing_for_grpc_verify_scenario(self, load_params: LoadParams):
        expected_env_vars = {
            "CLIENTS": 14,
            "REGISTRY_FILE": "registry_file",
            "K6_SETUP_TIMEOUT": "setup_timeout",
            "NO_VERIFY_SSL": True,
            "TIME_LIMIT": 11,
        }

        self._check_env_vars(load_params, expected_env_vars)

    @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.gRPC, True)], indirect=True)
    def test_empty_argument_parsing_for_grpc_scenario(self, load_params: LoadParams):
        expected_preset_args = [
            "--size '0'",
            "--preload_obj '0'",
            "--out ''",
            "--workers '0'",
            "--containers '0'",
            "--policy ''",
        ]
        expected_env_vars = {
            "DURATION": 0,
            "WRITE_OBJ_SIZE": 0,
            "REGISTRY_FILE": "",
            "K6_MIN_ITERATION_DURATION": "",
            "K6_SETUP_TIMEOUT": "",
            "WRITERS": 0,
            "READERS": 0,
            "DELETERS": 0,
            "PREGEN_JSON": "",
            "PREPARE_LOCALLY": False,
        }

        self._check_preset_params(load_params, expected_preset_args)
        self._check_env_vars(load_params, expected_env_vars)

    @pytest.mark.parametrize(
        "load_params, set_empty", [(LoadScenario.gRPC_CAR, True)], indirect=True
    )
    def test_empty_argument_parsing_for_grpc_car_scenario(self, load_params: LoadParams):
        expected_preset_args = [
            "--size '0'",
            "--preload_obj '0'",
            "--out ''",
            "--workers '0'",
            "--containers '0'",
            "--policy ''",
        ]
        expected_env_vars = {
            "DURATION": 0,
            "WRITE_OBJ_SIZE": 0,
            "REGISTRY_FILE": "",
            "K6_MIN_ITERATION_DURATION": "",
            "K6_SETUP_TIMEOUT": "",
            "MAX_WRITERS": 0,
            "MAX_READERS": 0,
            "MAX_DELETERS": 0,
            "PRE_ALLOC_DELETERS": 0,
            "PRE_ALLOC_READERS": 0,
            "PRE_ALLOC_WRITERS": 0,
            "PREGEN_JSON": "",
            "TIME_UNIT": "",
            "WRITE_RATE": 0,
            "READ_RATE": 0,
            "DELETE_RATE": 0,
            "PREPARE_LOCALLY": False,
        }

        self._check_preset_params(load_params, expected_preset_args)
        self._check_env_vars(load_params, expected_env_vars)

    @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.S3, True)], indirect=True)
    def test_empty_argument_parsing_for_s3_scenario(self, load_params: LoadParams):
        expected_preset_args = [
            "--size '0'",
            "--preload_obj '0'",
            "--out ''",
            "--workers '0'",
            "--buckets '0'",
            "--location ''",
        ]
        expected_env_vars = {
            "DURATION": 0,
            "WRITE_OBJ_SIZE": 0,
            "REGISTRY_FILE": "",
            "K6_MIN_ITERATION_DURATION": "",
            "K6_SETUP_TIMEOUT": "",
            "WRITERS": 0,
            "READERS": 0,
            "DELETERS": 0,
            "NO_VERIFY_SSL": False,
            "PREGEN_JSON": "",
        }

        self._check_preset_params(load_params, expected_preset_args)
        self._check_env_vars(load_params, expected_env_vars)

    @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.S3_CAR, True)], indirect=True)
    def test_empty_argument_parsing_for_s3_car_scenario(self, load_params: LoadParams):
        expected_preset_args = [
            "--size '0'",
            "--preload_obj '0'",
            "--out ''",
            "--workers '0'",
            "--buckets '0'",
            "--location ''",
        ]
        expected_env_vars = {
            "DURATION": 0,
            "WRITE_OBJ_SIZE": 0,
            "REGISTRY_FILE": "",
            "K6_MIN_ITERATION_DURATION": "",
            "K6_SETUP_TIMEOUT": "",
            "NO_VERIFY_SSL": False,
            "MAX_WRITERS": 0,
            "MAX_READERS": 0,
            "MAX_DELETERS": 0,
            "PRE_ALLOC_DELETERS": 0,
            "PRE_ALLOC_READERS": 0,
            "PRE_ALLOC_WRITERS": 0,
            "PREGEN_JSON": "",
            "TIME_UNIT": "",
            "WRITE_RATE": 0,
            "READ_RATE": 0,
            "DELETE_RATE": 0,
        }

        self._check_preset_params(load_params, expected_preset_args)
        self._check_env_vars(load_params, expected_env_vars)

    @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.HTTP, True)], indirect=True)
    def test_empty_argument_parsing_for_http_scenario(self, load_params: LoadParams):
        expected_preset_args = [
            "--size '0'",
            "--preload_obj '0'",
            "--out ''",
            "--workers '0'",
            "--containers '0'",
            "--policy ''",
        ]
        expected_env_vars = {
            "DURATION": 0,
            "WRITE_OBJ_SIZE": 0,
            "NO_VERIFY_SSL": False,
            "REGISTRY_FILE": "",
            "K6_MIN_ITERATION_DURATION": "",
            "K6_SETUP_TIMEOUT": "",
            "WRITERS": 0,
            "READERS": 0,
            "DELETERS": 0,
            "PREGEN_JSON": "",
        }

        self._check_preset_params(load_params, expected_preset_args)
        self._check_env_vars(load_params, expected_env_vars)

    @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.LOCAL, True)], indirect=True)
    def test_empty_argument_parsing_for_local_scenario(self, load_params: LoadParams):
        expected_preset_args = [
            "--size '0'",
            "--preload_obj '0'",
            "--out ''",
            "--workers '0'",
            "--containers '0'",
            "--policy ''",
        ]
        expected_env_vars = {
            "CONFIG_FILE": "",
            "DURATION": 0,
            "WRITE_OBJ_SIZE": 0,
            "REGISTRY_FILE": "",
            "K6_MIN_ITERATION_DURATION": "",
            "K6_SETUP_TIMEOUT": "",
            "WRITERS": 0,
            "READERS": 0,
            "DELETERS": 0,
            "PREGEN_JSON": "",
        }

        self._check_preset_params(load_params, expected_preset_args)
        self._check_env_vars(load_params, expected_env_vars)

    @pytest.mark.parametrize(
        "load_params, load_type, set_empty",
        [(LoadScenario.VERIFY, LoadType.S3, True)],
        indirect=True,
    )
    def test_empty_argument_parsing_for_s3_verify_scenario(self, load_params: LoadParams):
        expected_env_vars = {
            "CLIENTS": 0,
            "REGISTRY_FILE": "",
            "K6_SETUP_TIMEOUT": "",
            "NO_VERIFY_SSL": False,
            "TIME_LIMIT": 0,
        }

        self._check_env_vars(load_params, expected_env_vars)

    @pytest.mark.parametrize(
        "load_params, load_type, set_empty",
        [(LoadScenario.VERIFY, LoadType.gRPC, True)],
        indirect=True,
    )
    def test_empty_argument_parsing_for_grpc_verify_scenario(self, load_params: LoadParams):
        expected_env_vars = {
            "CLIENTS": 0,
            "REGISTRY_FILE": "",
            "K6_SETUP_TIMEOUT": "",
            "NO_VERIFY_SSL": False,
            "TIME_LIMIT": 0,
        }

        self._check_env_vars(load_params, expected_env_vars)

    def _check_preset_params(self, load_params: LoadParams, expected_preset_args: list[str]):
        preset_parameters = load_params.get_preset_arguments()
        assert sorted(preset_parameters) == sorted(expected_preset_args)

    def _check_env_vars(self, load_params: LoadParams, expected_env_vars: dict[str, str]):
        env_vars = load_params.get_env_vars()
        assert env_vars == expected_env_vars

    def _check_all_values_none(self, dataclass, skip_fields=None):
        if skip_fields is None:
            skip_fields = []

        dataclass_fields = [field for field in fields(dataclass) if field.name not in skip_fields]
        for field in dataclass_fields:
            value = getattr(dataclass, field.name)
            assert value is None, f"{field.name} is not None"

    def _check_all_values_not_none(self, dataclass, skip_fields=None):
        if skip_fields is None:
            skip_fields = []

        dataclass_fields = [field for field in fields(dataclass) if field.name not in skip_fields]
        for field in dataclass_fields:
            value = getattr(dataclass, field.name)
            assert value is not None, f"{field.name} is None"

    def _get_filled_load_params(
        self, load_type: LoadType, load_scenario: LoadScenario, set_emtpy: bool = False
    ) -> LoadParams:
        load_type_map = {
            LoadScenario.S3: LoadType.S3,
            LoadScenario.S3_CAR: LoadType.S3,
            LoadScenario.gRPC: LoadType.gRPC,
            LoadScenario.gRPC_CAR: LoadType.gRPC,
            LoadScenario.LOCAL: LoadType.gRPC,
            LoadScenario.HTTP: LoadType.HTTP,
        }
        load_type = load_type_map[load_scenario] if not load_type else load_type

        load_params = LoadParams(load_type)
        load_params.scenario = load_scenario
        load_params.preset = Preset()

        meta_fields = self._get_meta_fields(load_params)
        for field in meta_fields:
            if (
                getattr(field.instance, field.field.name) is None
                and load_params.scenario in field.field.metadata["applicable_scenarios"]
            ):
                value_to_set_map = {
                    int: 0 if set_emtpy else len(field.field.name),
                    str: "" if set_emtpy else field.field.name,
                    bool: False if set_emtpy else True,
                }
                value_to_set = value_to_set_map[field.field_type]
                setattr(field.instance, field.field.name, value_to_set)

        return load_params

    def _get_actual_field_type(self, field: Field) -> type:
        return get_args(field.type)[0] if len(get_args(field.type)) else get_args(field.type)

    def _get_meta_fields(self, instance):
        data_fields = fields(instance)
        fields_with_data = [
            MetaTestField(field, self._get_actual_field_type(field), instance)
            for field in data_fields
            if field.metadata
        ]

        for field in data_fields:
            actual_field_type = self._get_actual_field_type(field)
            if is_dataclass(actual_field_type) and getattr(instance, field.name):
                fields_with_data += self._get_meta_fields(getattr(instance, field.name))

        return fields_with_data or []
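
The tests above rely on pytest's indirect parametrization: the values passed to `@pytest.mark.parametrize(..., indirect=True)` (a `LoadScenario`, and optionally a `LoadType` and a `set_empty` flag) are routed into a `load_params` fixture defined earlier in this module, which builds the object via `_get_filled_load_params`. The snippet below is only a minimal illustration of that routing mechanism, not the committed fixture; the fixture name and the string stand-in for the built object are assumptions made for the example.

```python
# Minimal sketch of pytest indirect parametrization (illustrative only).
# With indirect=True, the parametrize value arrives as request.param and the
# fixture converts it into the object the test function actually receives.
import pytest


@pytest.fixture
def load_params(request):
    scenario = request.param                      # e.g. LoadScenario.gRPC in the real tests
    return f"LoadParams built for {scenario}"     # stand-in for _get_filled_load_params()


@pytest.mark.parametrize("load_params", ["grpc"], indirect=True)
def test_receives_built_object(load_params):
    assert "grpc" in load_params
```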

@@ -1,50 +1,68 @@
 import os
-from unittest import SkipTest, TestCase
+
+import pytest
 
 from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput
-from frostfs_testlib.shell.ssh_shell import SSHShell
+from frostfs_testlib.shell.ssh_shell import SshConnectionProvider, SSHShell
 from helpers import format_error_details, get_output_lines
 
 
-def init_shell() -> SSHShell:
-    host = os.getenv("SSH_SHELL_HOST")
+def get_shell(host: str):
     port = os.getenv("SSH_SHELL_PORT", "22")
     login = os.getenv("SSH_SHELL_LOGIN")
-    private_key_path = os.getenv("SSH_SHELL_PRIVATE_KEY_PATH")
-    private_key_passphrase = os.getenv("SSH_SHELL_PRIVATE_KEY_PASSPHRASE")
+    password = os.getenv("SSH_SHELL_PASSWORD", "")
+    private_key_path = os.getenv("SSH_SHELL_PRIVATE_KEY_PATH", "")
+    private_key_passphrase = os.getenv("SSH_SHELL_PRIVATE_KEY_PASSPHRASE", "")
 
     if not all([host, login, private_key_path, private_key_passphrase]):
         # TODO: in the future we might use https://pypi.org/project/mock-ssh-server,
         # at the moment it is not suitable for us because of its issues with stdin
-        raise SkipTest("SSH connection is not configured")
+        pytest.skip("SSH connection is not configured")
 
     return SSHShell(
         host=host,
         port=port,
         login=login,
+        password=password,
         private_key_path=private_key_path,
         private_key_passphrase=private_key_passphrase,
     )
 
 
-class TestSSHShellInteractive(TestCase):
-    @classmethod
-    def setUpClass(cls):
-        cls.shell = init_shell()
-
-    def test_command_with_one_prompt(self):
+@pytest.fixture(scope="module")
+def shell() -> SSHShell:
+    return get_shell(host=os.getenv("SSH_SHELL_HOST"))
+
+
+@pytest.fixture(scope="module")
+def shell_same_host() -> SSHShell:
+    return get_shell(host=os.getenv("SSH_SHELL_HOST"))
+
+
+@pytest.fixture(scope="module")
+def shell_another_host() -> SSHShell:
+    return get_shell(host=os.getenv("SSH_SHELL_HOST_2"))
+
+
+@pytest.fixture(scope="function", autouse=True)
+def reset_connection():
+    provider = SshConnectionProvider()
+    provider.drop_all()
+
+
+class TestSSHShellInteractive:
+    def test_command_with_one_prompt(self, shell: SSHShell):
         script = "password = input('Password: '); print('\\n' + password)"
 
         inputs = [InteractiveInput(prompt_pattern="Password", input="test")]
-        result = self.shell.exec(
-            f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs)
-        )
+        result = shell.exec(f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs))
 
-        self.assertEqual(0, result.return_code)
-        self.assertEqual(["Password: test", "test"], get_output_lines(result))
-        self.assertEqual("", result.stderr)
+        assert result.return_code == 0
+        assert ["Password: test", "test"] == get_output_lines(result)
+        assert not result.stderr
 
-    def test_command_with_several_prompts(self):
+    def test_command_with_several_prompts(self, shell: SSHShell):
         script = (
             "input1 = input('Input1: '); print('\\n' + input1); "
             "input2 = input('Input2: '); print('\\n' + input2)"
@@ -54,86 +72,132 @@ class TestSSHShellInteractive(TestCase):
             InteractiveInput(prompt_pattern="Input2", input="test2"),
         ]
 
-        result = self.shell.exec(
-            f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs)
-        )
+        result = shell.exec(f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs))
 
-        self.assertEqual(0, result.return_code)
-        self.assertEqual(
-            ["Input1: test1", "test1", "Input2: test2", "test2"], get_output_lines(result)
-        )
-        self.assertEqual("", result.stderr)
+        assert result.return_code == 0
+        assert ["Input1: test1", "test1", "Input2: test2", "test2"] == get_output_lines(result)
+        assert not result.stderr
 
-    def test_invalid_command_with_check(self):
+    def test_invalid_command_with_check(self, shell: SSHShell):
         script = "invalid script"
         inputs = [InteractiveInput(prompt_pattern=".*", input="test")]
 
-        with self.assertRaises(RuntimeError) as raised:
-            self.shell.exec(f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs))
+        with pytest.raises(RuntimeError) as raised:
+            shell.exec(f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs))
 
-        error = format_error_details(raised.exception)
-        self.assertIn("SyntaxError", error)
-        self.assertIn("return code: 1", error)
+        error = format_error_details(raised.value)
+        assert "SyntaxError" in error
+        assert "return code: 1" in error
 
-    def test_invalid_command_without_check(self):
+    def test_invalid_command_without_check(self, shell: SSHShell):
         script = "invalid script"
         inputs = [InteractiveInput(prompt_pattern=".*", input="test")]
 
-        result = self.shell.exec(
+        result = shell.exec(
             f'python3 -c "{script}"',
             CommandOptions(interactive_inputs=inputs, check=False),
         )
-        self.assertIn("SyntaxError", result.stdout)
-        self.assertEqual(1, result.return_code)
+        assert "SyntaxError" in result.stdout
+        assert result.return_code == 1
 
-    def test_non_existing_binary(self):
+    def test_non_existing_binary(self, shell: SSHShell):
         inputs = [InteractiveInput(prompt_pattern=".*", input="test")]
 
-        with self.assertRaises(RuntimeError) as raised:
-            self.shell.exec("not-a-command", CommandOptions(interactive_inputs=inputs))
+        with pytest.raises(RuntimeError) as raised:
+            shell.exec("not-a-command", CommandOptions(interactive_inputs=inputs))
 
-        error = format_error_details(raised.exception)
-        self.assertIn("return code: 127", error)
+        error = format_error_details(raised.value)
+        assert "return code: 127" in error
 
 
-class TestSSHShellNonInteractive(TestCase):
-    @classmethod
-    def setUpClass(cls):
-        cls.shell = init_shell()
-
-    def test_correct_command(self):
+class TestSSHShellNonInteractive:
+    def test_correct_command(self, shell: SSHShell):
         script = "print('test')"
 
-        result = self.shell.exec(f'python3 -c "{script}"')
+        result = shell.exec(f'python3 -c "{script}"')
 
-        self.assertEqual(0, result.return_code)
-        self.assertEqual("test", result.stdout.strip())
-        self.assertEqual("", result.stderr)
+        assert result.return_code == 0
+        assert result.stdout.strip() == "test"
+        assert not result.stderr
 
-    def test_invalid_command_with_check(self):
+    def test_invalid_command_with_check(self, shell: SSHShell):
         script = "invalid script"
 
-        with self.assertRaises(RuntimeError) as raised:
-            self.shell.exec(f'python3 -c "{script}"')
+        with pytest.raises(RuntimeError) as raised:
+            shell.exec(f'python3 -c "{script}"')
 
-        error = format_error_details(raised.exception)
-        self.assertIn("Error", error)
-        self.assertIn("return code: 1", error)
+        error = format_error_details(raised.value)
+        assert "Error" in error
+        assert "return code: 1" in error
 
-    def test_invalid_command_without_check(self):
+    def test_invalid_command_without_check(self, shell: SSHShell):
         script = "invalid script"
 
-        result = self.shell.exec(f'python3 -c "{script}"', CommandOptions(check=False))
+        result = shell.exec(f'python3 -c "{script}"', CommandOptions(check=False))
 
-        self.assertEqual(1, result.return_code)
+        assert result.return_code == 1
         # TODO: we have inconsistency with local shell here, the local shell captures error info
         # in stdout while ssh shell captures it in stderr
-        self.assertIn("Error", result.stderr)
+        assert "Error" in result.stderr
 
-    def test_non_existing_binary(self):
-        with self.assertRaises(RuntimeError) as exc:
-            self.shell.exec("not-a-command")
+    def test_non_existing_binary(self, shell: SSHShell):
+        with pytest.raises(RuntimeError) as raised:
+            shell.exec("not-a-command")
 
-        error = format_error_details(exc.exception)
-        self.assertIn("Error", error)
-        self.assertIn("return code: 127", error)
+        error = format_error_details(raised.value)
+        assert "Error" in error
+        assert "return code: 127" in error
+
+
+class TestSSHShellConnection:
+    def test_connection_provider_is_singleton(self):
+        provider = SshConnectionProvider()
+        provider2 = SshConnectionProvider()
+        assert id(provider) == id(provider2)
+
+    def test_connection_provider_has_creds(self, shell: SSHShell):
+        provider = SshConnectionProvider()
+        assert len(provider.creds) == 1
+        assert len(provider.connections) == 0
+
+    def test_connection_provider_has_only_one_connection(self, shell: SSHShell):
+        provider = SshConnectionProvider()
+        assert len(provider.connections) == 0
+        shell.exec("echo 1")
+        assert len(provider.connections) == 1
+        shell.exec("echo 2")
+        assert len(provider.connections) == 1
+        shell.drop()
+        assert len(provider.connections) == 0
+
+    def test_connection_same_host(self, shell: SSHShell, shell_same_host: SSHShell):
+        provider = SshConnectionProvider()
+        assert len(provider.connections) == 0
+
+        shell.exec("echo 1")
+        assert len(provider.connections) == 1
+
+        shell_same_host.exec("echo 2")
+        assert len(provider.connections) == 1
+
+        shell.drop()
+        assert len(provider.connections) == 0
+
+        shell.exec("echo 3")
+        assert len(provider.connections) == 1
+
+    def test_connection_another_host(self, shell: SSHShell, shell_another_host: SSHShell):
+        provider = SshConnectionProvider()
+        assert len(provider.connections) == 0
+
+        shell.exec("echo 1")
+        assert len(provider.connections) == 1
+
+        shell_another_host.exec("echo 2")
+        assert len(provider.connections) == 2
+
+        shell.drop()
+        assert len(provider.connections) == 1
+
+        shell_another_host.drop()
+        assert len(provider.connections) == 0
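
The new `TestSSHShellConnection` tests exercise a connection provider that behaves as a process-wide singleton: repeated construction returns the same object, connections are cached per host and reused across shells pointing at the same host, and `drop`/`drop_all` evict them. The sketch below only restates that pattern under those assumptions; the class name `ConnectionProviderSketch` and its internals are illustrative and are not the `SshConnectionProvider` implementation from `frostfs_testlib.shell.ssh_shell`.

```python
# Hedged sketch of the singleton-plus-connection-cache pattern the tests check;
# not the library's actual SshConnectionProvider.
class ConnectionProviderSketch:
    _instance = None

    def __new__(cls):
        # Every call returns the same object, so creds and connections
        # are shared state across the whole test session.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance.creds = {}
            cls._instance.connections = {}
        return cls._instance

    def provide(self, host: str):
        # Reuse an existing connection per host instead of reconnecting.
        if host not in self.connections:
            self.connections[host] = object()  # stand-in for a real SSH client
        return self.connections[host]

    def drop(self, host: str):
        self.connections.pop(host, None)

    def drop_all(self):
        self.connections.clear()
```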