Forked from TrueCloudLab/frostfs-testlib

Compare commits: 7 commits, f8562da7e0 ... 9ab4def44f

Commits (SHA1):

- 9ab4def44f
- ed8f90dfc0
- ed70dada96
- 22647c6d59
- 61a1b28652
- 6519cfafc9
- 72bd467c53

31 changed files with 795 additions and 275 deletions
@@ -3,8 +3,8 @@
 First, thank you for contributing! We love and encourage pull requests from
 everyone. Please follow the guidelines:

-- Check the open [issues](https://github.com/TrueCloudLab/frostfs-testlib/issues) and
-  [pull requests](https://github.com/TrueCloudLab/frostfs-testlib/pulls) for existing
+- Check the open [issues](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/issues) and
+  [pull requests](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/pulls) for existing
   discussions.

 - Open an issue first, to discuss a new feature or enhancement.
@@ -26,8 +26,8 @@ Start by forking the `frostfs-testlib` repository, make changes in a branch and
 send a pull request. We encourage pull requests to discuss code changes. Here
 are the steps in details:

-### Set up your GitHub Repository
-Fork [FrostFS testlib upstream](https://github.com/TrueCloudLab/frostfs-testlib/fork) source
+### Set up your Git Repository
+Fork [FrostFS testlib upstream](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/forks) source
 repository to your own personal repository. Copy the URL of your fork and clone it:

 ```shell
@@ -37,7 +37,7 @@ $ git clone <url of your fork>
 ### Set up git remote as ``upstream``
 ```shell
 $ cd frostfs-testlib
-$ git remote add upstream https://github.com/TrueCloudLab/frostfs-testlib
+$ git remote add upstream https://git.frostfs.info/TrueCloudLab/frostfs-testlib
 $ git fetch upstream
 ```

@@ -99,8 +99,8 @@ $ git push origin feature/123-something_awesome
 ```

 ### Create a Pull Request
-Pull requests can be created via GitHub. Refer to [this
-document](https://help.github.com/articles/creating-a-pull-request/) for
+Pull requests can be created via Git. Refer to [this
+document](https://docs.codeberg.org/collaborating/pull-requests-and-git-flow/) for
 detailed steps on how to create a pull request. After a Pull Request gets peer
 reviewed and approved, it will be merged.
@@ -92,4 +92,4 @@ The library provides the following primary components:

 ## Contributing
-Any contributions to the library should conform to the [contribution guideline](https://github.com/TrueCloudLab/frostfs-testlib/blob/master/CONTRIBUTING.md).
+Any contributions to the library should conform to the [contribution guideline](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/src/branch/master/CONTRIBUTING.md).
@@ -36,7 +36,7 @@ requires-python = ">=3.10"
 dev = ["black", "bumpver", "isort", "pre-commit"]

 [project.urls]
-Homepage = "https://github.com/TrueCloudLab/frostfs-testlib"
+Homepage = "https://git.frostfs.info/TrueCloudLab/frostfs-testlib"

 [project.entry-points."frostfs.testlib.reporter"]
 allure = "frostfs_testlib.reporter.allure_handler:AllureHandler"
@@ -47,6 +47,10 @@ docker = "frostfs_testlib.hosting.docker_host:DockerHost"
 [project.entry-points."frostfs.testlib.healthcheck"]
 basic = "frostfs_testlib.healthcheck.basic_healthcheck:BasicHealthcheck"

+[project.entry-points."frostfs.testlib.csc_managers"]
+config = "frostfs_testlib.storage.controllers.state_managers.config_state_manager:ConfigStateManager"
+
+
 [tool.isort]
 profile = "black"
 src_paths = ["src", "tests"]
@@ -27,11 +27,7 @@ class FrostfsAdmMorph(CliCommand):
         """
         return self._execute(
             "morph deposit-notary",
-            **{
-                param: param_value
-                for param, param_value in locals().items()
-                if param not in ["self"]
-            },
+            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
         )

     def dump_balances(
@@ -56,11 +52,7 @@ class FrostfsAdmMorph(CliCommand):
         """
         return self._execute(
             "morph dump-balances",
-            **{
-                param: param_value
-                for param, param_value in locals().items()
-                if param not in ["self"]
-            },
+            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
         )

     def dump_config(self, rpc_endpoint: str) -> CommandResult:
@@ -74,11 +66,25 @@ class FrostfsAdmMorph(CliCommand):
         """
         return self._execute(
             "morph dump-config",
-            **{
-                param: param_value
-                for param, param_value in locals().items()
-                if param not in ["self"]
-            },
+            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
+        )
+
+    def set_config(
+        self, set_key_value: str, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None
+    ) -> CommandResult:
+        """Add/update global config value in the FrostFS network.
+
+        Args:
+            set_key_value: key1=val1 [key2=val2 ...]
+            alphabet_wallets: Path to alphabet wallets dir
+            rpc_endpoint: N3 RPC node endpoint
+
+        Returns:
+            Command's result.
+        """
+        return self._execute(
+            f"morph set-config {set_key_value}",
+            **{param: param_value for param, param_value in locals().items() if param not in ["self", "set_key_value"]},
         )

     def dump_containers(
@@ -101,11 +107,7 @@ class FrostfsAdmMorph(CliCommand):
         """
         return self._execute(
             "morph dump-containers",
-            **{
-                param: param_value
-                for param, param_value in locals().items()
-                if param not in ["self"]
-            },
+            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
         )

     def dump_hashes(self, rpc_endpoint: str) -> CommandResult:
@@ -119,11 +121,7 @@ class FrostfsAdmMorph(CliCommand):
         """
         return self._execute(
             "morph dump-hashes",
-            **{
-                param: param_value
-                for param, param_value in locals().items()
-                if param not in ["self"]
-            },
+            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
         )

     def force_new_epoch(
@@ -140,11 +138,7 @@ class FrostfsAdmMorph(CliCommand):
         """
         return self._execute(
             "morph force-new-epoch",
-            **{
-                param: param_value
-                for param, param_value in locals().items()
-                if param not in ["self"]
-            },
+            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
         )

     def generate_alphabet(
@@ -165,11 +159,7 @@ class FrostfsAdmMorph(CliCommand):
         """
         return self._execute(
             "morph generate-alphabet",
-            **{
-                param: param_value
-                for param, param_value in locals().items()
-                if param not in ["self"]
-            },
+            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
         )

     def generate_storage_wallet(
@@ -192,11 +182,7 @@ class FrostfsAdmMorph(CliCommand):
         """
         return self._execute(
             "morph generate-storage-wallet",
-            **{
-                param: param_value
-                for param, param_value in locals().items()
-                if param not in ["self"]
-            },
+            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
         )

     def init(
@@ -219,7 +205,7 @@ class FrostfsAdmMorph(CliCommand):
             container_alias_fee: Container alias fee (default 500).
             container_fee: Container registration fee (default 1000).
             contracts: Path to archive with compiled FrostFS contracts
-                (default fetched from latest github release).
+                (default fetched from latest git release).
             epoch_duration: Amount of side chain blocks in one FrostFS epoch (default 240).
             homomorphic_disabled: Disable object homomorphic hashing.
             local_dump: Path to the blocks dump file.
@@ -232,11 +218,7 @@ class FrostfsAdmMorph(CliCommand):
         """
         return self._execute(
             "morph init",
-            **{
-                param: param_value
-                for param, param_value in locals().items()
-                if param not in ["self"]
-            },
+            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
         )

     def refill_gas(
@@ -259,11 +241,7 @@ class FrostfsAdmMorph(CliCommand):
         """
         return self._execute(
             "morph refill-gas",
-            **{
-                param: param_value
-                for param, param_value in locals().items()
-                if param not in ["self"]
-            },
+            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
         )

     def restore_containers(
@@ -286,11 +264,7 @@ class FrostfsAdmMorph(CliCommand):
         """
         return self._execute(
             "morph restore-containers",
-            **{
-                param: param_value
-                for param, param_value in locals().items()
-                if param not in ["self"]
-            },
+            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
         )

     def set_policy(
@@ -340,7 +314,7 @@ class FrostfsAdmMorph(CliCommand):
         Args:
             alphabet_wallets: Path to alphabet wallets dir.
             contracts: Path to archive with compiled FrostFS contracts
-                (default fetched from latest github release).
+                (default fetched from latest git release).
             rpc_endpoint: N3 RPC node endpoint.

         Returns:
@@ -348,17 +322,13 @@ class FrostfsAdmMorph(CliCommand):
         """
         return self._execute(
             "morph update-contracts",
-            **{
-                param: param_value
-                for param, param_value in locals().items()
-                if param not in ["self"]
-            },
+            **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
         )

     def remove_nodes(
         self, node_netmap_keys: list[str], rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None
     ) -> CommandResult:
-        """ Move node to the Offline state in the candidates list
+        """Move node to the Offline state in the candidates list
         and tick an epoch to update the netmap using frostfs-adm

         Args:
@@ -371,7 +341,7 @@ class FrostfsAdmMorph(CliCommand):
         """
         if not len(node_netmap_keys):
             raise AttributeError("Got empty node_netmap_keys list")

         return self._execute(
             f"morph remove-nodes {' '.join(node_netmap_keys)}",
             **{
@@ -379,4 +349,4 @@ class FrostfsAdmMorph(CliCommand):
                 for param, param_value in locals().items()
                 if param not in ["self", "node_netmap_keys"]
             },
         )
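Every hunk above makes the same mechanical change: the multi-line dictionary comprehension that forwards a method's keyword arguments to `_execute` is collapsed onto a single line, and a new `set_config` wrapper is added. Below is a minimal, self-contained sketch of the `locals()`-forwarding idiom these wrappers rely on; the `_execute` stub and the `force_new_epoch` signature are illustrative only, not the library's actual implementation.

```python
from typing import Optional


def _execute(command: str, **params) -> None:
    # Stand-in for CliCommand._execute: turn the surviving keyword
    # arguments into CLI flags and show the command that would run.
    flags = " ".join(f"--{name.replace('_', '-')} {value}" for name, value in params.items() if value is not None)
    print(f"frostfs-adm {command} {flags}".strip())


def force_new_epoch(rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None) -> None:
    # Forward every parameter except "self"-like names in one expression,
    # the exact shape the refactored wrappers use.
    _execute(
        "morph force-new-epoch",
        **{param: value for param, value in locals().items() if param not in ["self"]},
    )


force_new_epoch(rpc_endpoint="http://localhost:30333")
# prints: frostfs-adm morph force-new-epoch --rpc-endpoint http://localhost:30333
```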
@@ -22,7 +22,7 @@ class FrostfsCliACL(CliCommand):
         Well-known system object headers start with '$Object:' prefix.
         User defined headers start without prefix.
         Read more about filter keys at:
-        http://github.com/TrueCloudLab/frostfs-api/blob/master/proto-docs/acl.md#message-eaclrecordfilter
+        https://git.frostfs.info/TrueCloudLab/frostfs-api/src/branch/master/proto-docs/acl.md#message-eaclrecord-filter
         Match is '=' for matching and '!=' for non-matching filter.
         Value is a valid unicode string corresponding to object or request header value.
@@ -3,6 +3,7 @@ from typing import Optional
 from frostfs_testlib.cli.frostfs_cli.accounting import FrostfsCliAccounting
 from frostfs_testlib.cli.frostfs_cli.acl import FrostfsCliACL
 from frostfs_testlib.cli.frostfs_cli.container import FrostfsCliContainer
+from frostfs_testlib.cli.frostfs_cli.control import FrostfsCliControl
 from frostfs_testlib.cli.frostfs_cli.netmap import FrostfsCliNetmap
 from frostfs_testlib.cli.frostfs_cli.object import FrostfsCliObject
 from frostfs_testlib.cli.frostfs_cli.session import FrostfsCliSession
@@ -25,6 +26,7 @@ class FrostfsCli:
     storagegroup: FrostfsCliStorageGroup
     util: FrostfsCliUtil
     version: FrostfsCliVersion
+    control: FrostfsCliControl

     def __init__(self, shell: Shell, frostfs_cli_exec_path: str, config_file: Optional[str] = None):
         self.accounting = FrostfsCliAccounting(shell, frostfs_cli_exec_path, config=config_file)
@@ -38,3 +40,4 @@ class FrostfsCli:
         self.util = FrostfsCliUtil(shell, frostfs_cli_exec_path, config=config_file)
         self.version = FrostfsCliVersion(shell, frostfs_cli_exec_path, config=config_file)
         self.tree = FrostfsCliTree(shell, frostfs_cli_exec_path, config=config_file)
+        self.control = FrostfsCliControl(shell, frostfs_cli_exec_path, config=config_file)
src/frostfs_testlib/cli/frostfs_cli/control.py (new file, 58 lines)
@@ -0,0 +1,58 @@
+from typing import Optional
+
+from frostfs_testlib.cli.cli_command import CliCommand
+from frostfs_testlib.shell import CommandResult
+
+
+class FrostfsCliControl(CliCommand):
+    def set_status(
+        self,
+        endpoint: str,
+        status: str,
+        wallet: Optional[str] = None,
+        force: Optional[bool] = None,
+        address: Optional[str] = None,
+        timeout: Optional[str] = None,
+    ) -> CommandResult:
+        """Set status of the storage node in FrostFS network map
+
+        Args:
+            wallet: Path to the wallet or binary key
+            address: Address of wallet account
+            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
+            force: Force turning to local maintenance
+            status: New netmap status keyword ('online', 'offline', 'maintenance')
+            timeout: Timeout for an operation (default 15s)
+
+        Returns:
+            Command`s result.
+        """
+        return self._execute(
+            "control set-status",
+            **{param: value for param, value in locals().items() if param not in ["self"]},
+        )
+
+    def healthcheck(
+        self,
+        endpoint: str,
+        wallet: Optional[str] = None,
+        address: Optional[str] = None,
+        timeout: Optional[str] = None,
+    ) -> CommandResult:
+        """Set status of the storage node in FrostFS network map
+
+        Args:
+            wallet: Path to the wallet or binary key
+            address: Address of wallet account
+            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
+            force: Force turning to local maintenance
+            status: New netmap status keyword ('online', 'offline', 'maintenance')
+            timeout: Timeout for an operation (default 15s)
+
+        Returns:
+            Command`s result.
+        """
+        return self._execute(
+            "control healthcheck",
+            **{param: value for param, value in locals().items() if param not in ["self"]},
+        )
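A hedged usage sketch for the new control command group, assuming a `FrostfsCli` instance wired up the way the previous hunks show. The import paths, shell type, executable path, wallet path and endpoint below are placeholders or assumptions, not values taken from this diff.

```python
# Sketch only: import paths and all endpoint/path values are assumed.
from frostfs_testlib.cli import FrostfsCli          # assumed import location
from frostfs_testlib.shell import LocalShell        # assumed export of LocalShell

cli = FrostfsCli(LocalShell(), "frostfs-cli", config_file="/etc/frostfs/cli-config.yml")

# Move a storage node to maintenance through its control endpoint.
result = cli.control.set_status(
    endpoint="localhost:8090",
    status="maintenance",
    wallet="/path/to/wallet.json",
    timeout="30s",
)
print(result.return_code)

# Probe the same node over the control endpoint.
cli.control.healthcheck(endpoint="localhost:8090", wallet="/path/to/wallet.json")
```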
src/frostfs_testlib/cli/netmap_parser.py (new file, 86 lines)
@@ -0,0 +1,86 @@
+import re
+
+from frostfs_testlib.storage.cluster import ClusterNode
+from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo
+
+
+class NetmapParser:
+    @staticmethod
+    def netinfo(output: str) -> NodeNetInfo:
+        regexes = {
+            "epoch": r"Epoch: (?P<epoch>\d+)",
+            "network_magic": r"Network magic: (?P<network_magic>.*$)",
+            "time_per_block": r"Time per block: (?P<time_per_block>\d+\w+)",
+            "container_fee": r"Container fee: (?P<container_fee>\d+)",
+            "epoch_duration": r"Epoch duration: (?P<epoch_duration>\d+)",
+            "inner_ring_candidate_fee": r"Inner Ring candidate fee: (?P<inner_ring_candidate_fee>\d+)",
+            "maximum_object_size": r"Maximum object size: (?P<maximum_object_size>\d+)",
+            "withdrawal_fee": r"Withdrawal fee: (?P<withdrawal_fee>\d+)",
+            "homomorphic_hashing_disabled": r"Homomorphic hashing disabled: (?P<homomorphic_hashing_disabled>true|false)",
+            "maintenance_mode_allowed": r"Maintenance mode allowed: (?P<maintenance_mode_allowed>true|false)",
+            "eigen_trust_alpha": r"EigenTrustAlpha: (?P<eigen_trust_alpha>\d+\w+$)",
+            "eigen_trust_iterations": r"EigenTrustIterations: (?P<eigen_trust_iterations>\d+)",
+        }
+        parse_result = {}
+
+        for key, regex in regexes.items():
+            search_result = re.search(regex, output, flags=re.MULTILINE)
+            if search_result == None:
+                parse_result[key] = None
+                continue
+            parse_result[key] = search_result[key].strip()
+
+        node_netinfo = NodeNetInfo(**parse_result)
+
+        return node_netinfo
+
+    @staticmethod
+    def snapshot_all_nodes(output: str) -> list[NodeNetmapInfo]:
+        """The code will parse each line and return each node as dataclass."""
+        netmap_nodes = output.split("Node ")[1:]
+        dataclasses_netmap = []
+        result_netmap = {}
+
+        regexes = {
+            "node_id": r"\d+: (?P<node_id>\w+)",
+            "node_data_ips": r"(?P<node_data_ips>/ip4/.+?)$",
+            "node_status": r"(?P<node_status>ONLINE|OFFLINE)",
+            "cluster_name": r"ClusterName: (?P<cluster_name>\w+)",
+            "continent": r"Continent: (?P<continent>\w+)",
+            "country": r"Country: (?P<country>\w+)",
+            "country_code": r"CountryCode: (?P<country_code>\w+)",
+            "external_address": r"ExternalAddr: (?P<external_address>/ip[4].+?)$",
+            "location": r"Location: (?P<location>\w+.*)",
+            "node": r"Node: (?P<node>\d+\.\d+\.\d+\.\d+)",
+            "price": r"Price: (?P<price>\d+)",
+            "sub_div": r"SubDiv: (?P<sub_div>.*)",
+            "sub_div_code": r"SubDivCode: (?P<sub_div_code>\w+)",
+            "un_locode": r"UN-LOCODE: (?P<un_locode>\w+.*)",
+            "role": r"role: (?P<role>\w+)",
+        }
+
+        for node in netmap_nodes:
+            for key, regex in regexes.items():
+                search_result = re.search(regex, node, flags=re.MULTILINE)
+                if key == "node_data_ips":
+                    result_netmap[key] = search_result[key].strip().split(" ")
+                    continue
+                if key == "external_address":
+                    result_netmap[key] = search_result[key].strip().split(",")
+                    continue
+                if search_result == None:
+                    result_netmap[key] = None
+                    continue
+                result_netmap[key] = search_result[key].strip()
+
+            dataclasses_netmap.append(NodeNetmapInfo(**result_netmap))
+
+        return dataclasses_netmap
+
+    @staticmethod
+    def snapshot_one_node(output: str, cluster_node: ClusterNode) -> NodeNetmapInfo | None:
+        snapshot_nodes = NetmapParser.snapshot_all_nodes(output=output)
+        snapshot_node = [node for node in snapshot_nodes if node.node == cluster_node.host_ip]
+        if not snapshot_node:
+            return None
+        return snapshot_node[0]
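A small sketch of how the new parser could be exercised. The sample text below is synthetic, shaped only to satisfy the regular expressions above, and the `NodeNetInfo` attribute names are assumed to mirror the regex group names.

```python
from frostfs_testlib.cli.netmap_parser import NetmapParser

# Synthetic `frostfs-cli netmap netinfo`-style output; every value is made up.
sample_netinfo = """
Epoch: 1024
Network magic: 70846610
Time per block: 1s
Epoch duration: 240
Inner Ring candidate fee: 0
Maximum object size: 67108864
Withdrawal fee: 0
Container fee: 1000
Homomorphic hashing disabled: true
Maintenance mode allowed: true
"""

info = NetmapParser.netinfo(sample_netinfo)
print(info.epoch, info.maximum_object_size)
# Fields that never appear in the text (e.g. EigenTrustAlpha) are parsed as None.
```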
@@ -1,4 +1,5 @@
-from frostfs_testlib.load.interfaces import Loader, ScenarioRunner
+from frostfs_testlib.load.interfaces.loader import Loader
+from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner
 from frostfs_testlib.load.load_config import (
     EndpointSelectionStrategy,
     K6ProcessAllocationStrategy,
@@ -11,4 +12,4 @@ from frostfs_testlib.load.load_config import (
 )
 from frostfs_testlib.load.load_report import LoadReport
 from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader
-from frostfs_testlib.load.runners import DefaultRunner, LocalRunner
+from frostfs_testlib.load.runners import DefaultRunner, LocalRunner, S3LocalRunner
src/frostfs_testlib/load/interfaces/loader.py (new file, 14 lines)
@@ -0,0 +1,14 @@
+from abc import ABC, abstractmethod
+
+from frostfs_testlib.shell.interfaces import Shell
+
+
+class Loader(ABC):
+    @abstractmethod
+    def get_shell(self) -> Shell:
+        """Get shell for the loader"""
+
+    @property
+    @abstractmethod
+    def ip(self):
+        """Get address of the loader"""
@@ -1,20 +1,8 @@
 from abc import ABC, abstractmethod

+from frostfs_testlib.load.k6 import K6
 from frostfs_testlib.load.load_config import LoadParams
-from frostfs_testlib.shell.interfaces import Shell
 from frostfs_testlib.storage.cluster import ClusterNode
-from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
-
-
-class Loader(ABC):
-    @abstractmethod
-    def get_shell(self) -> Shell:
-        """Get shell for the loader"""
-
-    @property
-    @abstractmethod
-    def ip(self):
-        """Get address of the loader"""


 class ScenarioRunner(ABC):
@@ -32,6 +20,10 @@ class ScenarioRunner(ABC):
     def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
         """Init K6 instances"""

+    @abstractmethod
+    def get_k6_instances(self) -> list[K6]:
+        """Get K6 instances"""
+
     @abstractmethod
     def start(self):
         """Start K6 instances"""
@@ -8,13 +8,8 @@ from time import sleep
 from typing import Any
 from urllib.parse import urlparse

-from frostfs_testlib.load.interfaces import Loader
-from frostfs_testlib.load.load_config import (
-    K6ProcessAllocationStrategy,
-    LoadParams,
-    LoadScenario,
-    LoadType,
-)
+from frostfs_testlib.load.interfaces.loader import Loader
+from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario, LoadType
 from frostfs_testlib.processes.remote_process import RemoteProcess
 from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.resources.common import STORAGE_USER_NAME
@@ -59,6 +54,7 @@ class K6:
         self.loader: Loader = loader
         self.shell: Shell = shell
         self.wallet = wallet
+        self.preset_output: str = ""
         self.summary_json: str = os.path.join(
             self.load_params.working_dir,
             f"{self.load_params.load_id}_{self.load_params.scenario.value}_summary.json",
@@ -101,10 +97,10 @@ class K6:
         command = " ".join(command_args)
         result = self.shell.exec(command)

-        assert (
-            result.return_code == EXIT_RESULT_CODE
-        ), f"Return code of preset is not zero: {result.stdout}"
-        return result.stdout.strip("\n")
+        assert result.return_code == EXIT_RESULT_CODE, f"Return code of preset is not zero: {result.stdout}"
+        self.preset_output = result.stdout.strip("\n")
+        return self.preset_output

     @reporter.step_deco("Generate K6 command")
     def _generate_env_variables(self) -> str:
@@ -113,31 +109,21 @@ class K6:
         env_vars[f"{self.load_params.load_type.value.upper()}_ENDPOINTS"] = ",".join(self.endpoints)
         env_vars["SUMMARY_JSON"] = self.summary_json

-        reporter.attach(
-            "\n".join(f"{param}: {value}" for param, value in env_vars.items()), "K6 ENV variables"
-        )
-        return " ".join(
-            [f"-e {param}='{value}'" for param, value in env_vars.items() if value is not None]
-        )
+        reporter.attach("\n".join(f"{param}: {value}" for param, value in env_vars.items()), "K6 ENV variables")
+        return " ".join([f"-e {param}='{value}'" for param, value in env_vars.items() if value is not None])

     def start(self) -> None:
-        with reporter.step(
-            f"Start load from loader {self.loader.ip} on endpoints {self.endpoints}"
-        ):
+        with reporter.step(f"Start load from loader {self.loader.ip} on endpoints {self.endpoints}"):
             self._start_time = int(datetime.utcnow().timestamp())
             command = (
                 f"{self._k6_dir}/k6 run {self._generate_env_variables()} "
                 f"{self._k6_dir}/scenarios/{self.load_params.scenario.value}.js"
             )
             user = STORAGE_USER_NAME if self.load_params.scenario == LoadScenario.LOCAL else None
-            self._k6_process = RemoteProcess.create(
-                command, self.shell, self.load_params.working_dir, user
-            )
+            self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir, user)

     def wait_until_finished(self, soft_timeout: int = 0) -> None:
-        with reporter.step(
-            f"Wait until load is finished from loader {self.loader.ip} on endpoints {self.endpoints}"
-        ):
+        with reporter.step(f"Wait until load is finished from loader {self.loader.ip} on endpoints {self.endpoints}"):
             if self.load_params.scenario == LoadScenario.VERIFY:
                 timeout = self.load_params.verify_time or 0
             else:
@@ -180,9 +166,11 @@ class K6:
         while timeout > 0:
             if not self._k6_process.running():
                 return
-            remaining_time_hours = f"{timeout//3600}h" if timeout//3600 != 0 else ""
-            remaining_time_minutes = f"{timeout//60%60}m" if timeout//60%60 != 0 else ""
-            logger.info(f"K6 is running. Remaining time {remaining_time_hours}{remaining_time_minutes}{timeout%60}s. Next check after {wait_interval} seconds...")
+            remaining_time_hours = f"{timeout//3600}h" if timeout // 3600 != 0 else ""
+            remaining_time_minutes = f"{timeout//60%60}m" if timeout // 60 % 60 != 0 else ""
+            logger.info(
+                f"K6 is running. Remaining time {remaining_time_hours}{remaining_time_minutes}{timeout%60}s. Next check after {wait_interval} seconds..."
+            )
             sleep(wait_interval)
             timeout -= min(timeout, wait_interval)
             wait_interval = max(
@@ -198,9 +186,7 @@ class K6:
             raise TimeoutError(f"Expected K6 to finish after {original_timeout} sec.")

     def get_results(self) -> Any:
-        with reporter.step(
-            f"Get load results from loader {self.loader.ip} on endpoints {self.endpoints}"
-        ):
+        with reporter.step(f"Get load results from loader {self.loader.ip} on endpoints {self.endpoints}"):
             self.__log_output()

             if not self.summary_json:
@@ -231,9 +217,7 @@ class K6:
             return False

     @reporter.step_deco("Wait until K6 process end")
-    @wait_for_success(
-        K6_STOP_SIGNAL_TIMEOUT, 15, False, False, "Can not stop K6 process within timeout"
-    )
+    @wait_for_success(K6_STOP_SIGNAL_TIMEOUT, 15, False, False, "Can not stop K6 process within timeout")
    def _wait_until_process_end(self):
        return self._k6_process.running()
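The reflowed logging call keeps the same remaining-time arithmetic as before; a tiny standalone check of how a timeout in seconds breaks down into the logged hours/minutes/seconds string:

```python
timeout = 3725  # seconds
remaining_time_hours = f"{timeout//3600}h" if timeout // 3600 != 0 else ""
remaining_time_minutes = f"{timeout//60%60}m" if timeout // 60 % 60 != 0 else ""
print(f"{remaining_time_hours}{remaining_time_minutes}{timeout%60}s")  # prints "1h2m5s"
```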
@@ -20,6 +20,7 @@ class LoadScenario(Enum):
     S3 = "s3"
     S3_CAR = "s3_car"
     S3_MULTIPART = "s3_multipart"
+    S3_LOCAL = "s3local"
     HTTP = "http"
     VERIFY = "verify"
     LOCAL = "local"
@@ -38,11 +39,12 @@ all_load_scenarios = [
     LoadScenario.S3_CAR,
     LoadScenario.gRPC_CAR,
     LoadScenario.LOCAL,
-    LoadScenario.S3_MULTIPART
+    LoadScenario.S3_MULTIPART,
+    LoadScenario.S3_LOCAL
 ]
 all_scenarios = all_load_scenarios.copy() + [LoadScenario.VERIFY]

-constant_vus_scenarios = [LoadScenario.gRPC, LoadScenario.S3, LoadScenario.HTTP, LoadScenario.LOCAL, LoadScenario.S3_MULTIPART]
+constant_vus_scenarios = [LoadScenario.gRPC, LoadScenario.S3, LoadScenario.HTTP, LoadScenario.LOCAL, LoadScenario.S3_MULTIPART, LoadScenario.S3_LOCAL]
 constant_arrival_rate_scenarios = [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR]

 grpc_preset_scenarios = [
@@ -51,7 +53,7 @@ grpc_preset_scenarios = [
     LoadScenario.gRPC_CAR,
     LoadScenario.LOCAL,
 ]
-s3_preset_scenarios = [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.S3_MULTIPART]
+s3_preset_scenarios = [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.S3_MULTIPART, LoadScenario.S3_LOCAL]


 @dataclass
@@ -172,9 +174,13 @@ class LoadParams:
     preset: Optional[Preset] = None
     # K6 download url
     k6_url: Optional[str] = None
+    # Requests module url
+    requests_module_url: Optional[str] = None
+    # aws cli download url
+    awscli_url: Optional[str] = None
     # No ssl verification flag
     no_verify_ssl: Optional[bool] = metadata_field(
-        [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.S3_MULTIPART, LoadScenario.VERIFY, LoadScenario.HTTP],
+        [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.S3_MULTIPART, LoadScenario.S3_LOCAL, LoadScenario.VERIFY, LoadScenario.HTTP],
         "no-verify-ssl",
         "NO_VERIFY_SSL",
         False,
@@ -283,7 +289,9 @@ class LoadParams:

     # ------- LOCAL SCENARIO PARAMS -------
     # Config file location (filled automatically)
-    config_file: Optional[str] = metadata_field([LoadScenario.LOCAL], None, "CONFIG_FILE", False)
+    config_file: Optional[str] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "CONFIG_FILE", False)
+    # Config directory location (filled automatically)
+    config_dir: Optional[str] = metadata_field([LoadScenario.S3_LOCAL], None, "CONFIG_DIR", False)

     def set_id(self, load_id):
         self.load_id = load_id
@@ -165,6 +165,14 @@ class S3Metrics(MetricsBase):
     _DELETE_ERRORS = "aws_obj_delete_fails"
     _DELETE_LATENCY = "aws_obj_delete_duration"

+class S3LocalMetrics(MetricsBase):
+    _WRITE_SUCCESS = "s3local_obj_put_total"
+    _WRITE_ERRORS = "s3local_obj_put_fails"
+    _WRITE_LATENCY = "s3local_obj_put_duration"
+
+    _READ_SUCCESS = "s3local_obj_get_total"
+    _READ_ERRORS = "s3local_obj_get_fails"
+    _READ_LATENCY = "s3local_obj_get_duration"
+
 class LocalMetrics(MetricsBase):
     _WRITE_SUCCESS = "local_obj_put_total"
@@ -197,6 +205,7 @@ def get_metrics_object(load_type: LoadScenario, summary: dict[str, Any]) -> MetricsBase:
         LoadScenario.S3: S3Metrics,
         LoadScenario.S3_CAR: S3Metrics,
         LoadScenario.S3_MULTIPART: S3Metrics,
+        LoadScenario.S3_LOCAL: S3LocalMetrics,
         LoadScenario.VERIFY: VerifyMetrics,
         LoadScenario.LOCAL: LocalMetrics,
     }
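With the extended mapping, a summary produced by the `s3local` scenario resolves to the new metrics class. A minimal sketch that only touches the class-level metric names shown above (the module path is an assumption):

```python
from frostfs_testlib.load.load_metrics import S3LocalMetrics  # module path assumed

# Counter names the s3local scenario reports to K6.
print(S3LocalMetrics._WRITE_SUCCESS)  # s3local_obj_put_total
print(S3LocalMetrics._READ_LATENCY)   # s3local_obj_get_duration
```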
@@ -97,6 +97,7 @@ class LoadReport:
             LoadScenario.gRPC_CAR: "open model",
             LoadScenario.S3_CAR: "open model",
             LoadScenario.LOCAL: "local fill",
+            LoadScenario.S3_LOCAL: "local fill"
         }

         return model_map[self.load_params.scenario]
@@ -1,4 +1,4 @@
-from frostfs_testlib.load.interfaces import Loader
+from frostfs_testlib.load.interfaces.loader import Loader
 from frostfs_testlib.resources.load_params import (
     LOAD_NODE_SSH_PASSWORD,
     LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE,
@@ -10,7 +10,8 @@ from urllib.parse import urlparse
 import yaml

 from frostfs_testlib.cli.frostfs_authmate.authmate import FrostfsAuthmate
-from frostfs_testlib.load.interfaces import Loader, ScenarioRunner
+from frostfs_testlib.load.interfaces.loader import Loader
+from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner
 from frostfs_testlib.load.k6 import K6
 from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadType
 from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader
@@ -18,11 +19,7 @@ from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.resources import optionals
 from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC
 from frostfs_testlib.resources.common import STORAGE_USER_NAME
-from frostfs_testlib.resources.load_params import (
-    BACKGROUND_LOAD_VUS_COUNT_DIVISOR,
-    LOAD_NODE_SSH_USER,
-    LOAD_NODES,
-)
+from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_VUS_COUNT_DIVISOR, LOAD_NODE_SSH_USER, LOAD_NODES
 from frostfs_testlib.shell.command_inspectors import SuInspector
 from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput
 from frostfs_testlib.storage.cluster import ClusterNode
@@ -54,6 +51,9 @@ class RunnerBase(ScenarioRunner):

         return any([future.result() for future in futures])

+    def get_k6_instances(self):
+        return self.k6_instances
+

 class DefaultRunner(RunnerBase):
     loaders: list[Loader]
@@ -83,14 +83,10 @@ class DefaultRunner(RunnerBase):

         with reporter.step("Init s3 client on loaders"):
             storage_node = nodes_under_load[0].service(StorageNode)
-            s3_public_keys = [
-                node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes
-            ]
+            s3_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes]
             grpc_peer = storage_node.get_rpc_endpoint()

-        parallel(
-            self._prepare_loader, self.loaders, load_params, grpc_peer, s3_public_keys, k6_dir
-        )
+        parallel(self._prepare_loader, self.loaders, load_params, grpc_peer, s3_public_keys, k6_dir)

     def _prepare_loader(
         self,
@@ -112,9 +108,9 @@ class DefaultRunner(RunnerBase):
             wallet_password=self.loaders_wallet.password,
         ).stdout
         aws_access_key_id = str(
-            re.search(
-                r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output
-            ).group("aws_access_key_id")
+            re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group(
+                "aws_access_key_id"
+            )
         )
         aws_secret_access_key = str(
             re.search(
@@ -125,9 +121,7 @@ class DefaultRunner(RunnerBase):

         configure_input = [
             InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id),
-            InteractiveInput(
-                prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key
-            ),
+            InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key),
             InteractiveInput(prompt_pattern=r".*", input=""),
             InteractiveInput(prompt_pattern=r".*", input=""),
         ]
@@ -144,16 +138,12 @@ class DefaultRunner(RunnerBase):
         }
         endpoints_generators = {
             K6ProcessAllocationStrategy.PER_LOAD_NODE: itertools.cycle([endpoints]),
-            K6ProcessAllocationStrategy.PER_ENDPOINT: itertools.cycle(
-                [[endpoint] for endpoint in endpoints]
-            ),
+            K6ProcessAllocationStrategy.PER_ENDPOINT: itertools.cycle([[endpoint] for endpoint in endpoints]),
         }
         k6_processes_count = k6_distribution_count[load_params.k6_process_allocation_strategy]
         endpoints_gen = endpoints_generators[load_params.k6_process_allocation_strategy]

-        distributed_load_params_list = self._get_distributed_load_params_list(
-            load_params, k6_processes_count
-        )
+        distributed_load_params_list = self._get_distributed_load_params_list(load_params, k6_processes_count)

         futures = parallel(
             self._init_k6_instance,
@@ -164,9 +154,7 @@ class DefaultRunner(RunnerBase):
         )
         self.k6_instances = [future.result() for future in futures]

-    def _init_k6_instance(
-        self, load_params_for_loader: LoadParams, loader: Loader, endpoints: list[str], k6_dir: str
-    ):
+    def _init_k6_instance(self, load_params_for_loader: LoadParams, loader: Loader, endpoints: list[str], k6_dir: str):
         shell = loader.get_shell()
         with reporter.step(f"Init K6 instance on {loader.ip} for endpoints {endpoints}"):
             with reporter.step(f"Make working directory"):
@@ -204,9 +192,7 @@ class DefaultRunner(RunnerBase):
                 and getattr(original_load_params, field.name) is not None
             ):
                 original_value = getattr(original_load_params, field.name)
-                distribution = self._get_distribution(
-                    math.ceil(original_value / divisor), workers_count
-                )
+                distribution = self._get_distribution(math.ceil(original_value / divisor), workers_count)
                 for i in range(workers_count):
                     setattr(distributed_load_params[i], field.name, distribution[i])

@@ -233,10 +219,7 @@ class DefaultRunner(RunnerBase):
         # Remainder of clients left to be distributed
         remainder = clients_count - clients_per_worker * workers_count

-        distribution = [
-            clients_per_worker + 1 if i < remainder else clients_per_worker
-            for i in range(workers_count)
-        ]
+        distribution = [clients_per_worker + 1 if i < remainder else clients_per_worker for i in range(workers_count)]
         return distribution

     def start(self):
@@ -245,9 +228,7 @@ class DefaultRunner(RunnerBase):
         parallel([k6.start for k6 in self.k6_instances])

         wait_after_start_time = datetime_utils.parse_time(load_params.setup_timeout) + 5
-        with reporter.step(
-            f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on"
-        ):
+        with reporter.step(f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on"):
             time.sleep(wait_after_start_time)

     def stop(self):
@@ -327,9 +308,7 @@ class LocalRunner(RunnerBase):
         with reporter.step("Update limits.conf"):
             limits_path = "/etc/security/limits.conf"
             self.file_keeper.add(cluster_node.storage_node, limits_path)
-            content = (
-                f"{STORAGE_USER_NAME} hard nofile 65536\n{STORAGE_USER_NAME} soft nofile 65536\n"
-            )
+            content = f"{STORAGE_USER_NAME} hard nofile 65536\n{STORAGE_USER_NAME} soft nofile 65536\n"
             shell.exec(f"echo '{content}' | sudo tee {limits_path}")

         with reporter.step("Download K6"):
@@ -339,9 +318,7 @@ class LocalRunner(RunnerBase):
            shell.exec(f"sudo chmod -R 777 {k6_dir}")

        with reporter.step("Create empty_passwd"):
-            self.wallet = WalletInfo(
-                f"{k6_dir}/scenarios/files/wallet.json", "", "/tmp/empty_passwd.yml"
-            )
+            self.wallet = WalletInfo(f"{k6_dir}/scenarios/files/wallet.json", "", "/tmp/empty_passwd.yml")
            content = yaml.dump({"password": ""})
            shell.exec(f'echo "{content}" | sudo tee {self.wallet.config_path}')
            shell.exec(f"sudo chmod -R 777 {self.wallet.config_path}")
@@ -383,15 +360,13 @@ class LocalRunner(RunnerBase):
     def start(self):
         load_params = self.k6_instances[0].load_params

-        self.cluster_state_controller.stop_all_s3_gates()
-        self.cluster_state_controller.stop_all_storage_services()
+        self.cluster_state_controller.stop_services_of_type(S3Gate)
+        self.cluster_state_controller.stop_services_of_type(StorageNode)

         parallel([k6.start for k6 in self.k6_instances])

         wait_after_start_time = datetime_utils.parse_time(load_params.setup_timeout) + 5
-        with reporter.step(
-            f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on"
-        ):
+        with reporter.step(f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on"):
             time.sleep(wait_after_start_time)

     @reporter.step_deco("Restore passwd on {cluster_node}")
@@ -408,8 +383,7 @@ class LocalRunner(RunnerBase):
         for k6_instance in self.k6_instances:
             k6_instance.stop()

-        self.cluster_state_controller.start_stopped_storage_services()
-        self.cluster_state_controller.start_stopped_s3_gates()
+        self.cluster_state_controller.start_all_stopped_services()

     def get_results(self) -> dict:
         results = {}
@@ -420,3 +394,124 @@ class LocalRunner(RunnerBase):
         parallel(self.restore_passwd_on_node, self.nodes_under_load)

         return results
+
+
+class S3LocalRunner(LocalRunner):
+    endpoints: list[str]
+    k6_dir: str
+
+    @reporter.step_deco("Run preset on loaders")
+    def preset(self):
+        LocalRunner.preset(self)
+        with reporter.step(f"Resolve containers in preset"):
+            parallel(self._resolve_containers_in_preset, self.k6_instances)
+
+    @reporter.step_deco("Resolve containers in preset")
+    def _resolve_containers_in_preset(self, k6_instance: K6):
+        k6_instance.shell.exec(
+            f"sudo {self.k6_dir}/scenarios/preset/resolve_containers_in_preset.py --endpoint {k6_instance.endpoints[0]} --preset_file {k6_instance.load_params.preset.pregen_json}"
+        )
+
+    @reporter.step_deco("Init k6 instances")
+    def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
+        self.k6_instances = []
+        futures = parallel(
+            self._init_k6_instance_,
+            self.loaders,
+            load_params,
+            endpoints,
+            k6_dir,
+        )
+        self.k6_instances = [future.result() for future in futures]
+
+    def _init_k6_instance_(self, loader: Loader, load_params: LoadParams, endpoints: list[str], k6_dir: str):
+        shell = loader.get_shell()
+        with reporter.step(f"Init K6 instance on {loader.ip} for endpoints {endpoints}"):
+            with reporter.step(f"Make working directory"):
+                shell.exec(f"sudo mkdir -p {load_params.working_dir}")
+                # If we chmod /home/<user_name> folder we can no longer ssh to the node
+                # !! IMPORTANT !!
+                if (
+                    load_params.working_dir
+                    and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}"
+                    and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}/"
+                ):
+                    shell.exec(f"sudo chmod -R 777 {load_params.working_dir}")
+
+        return K6(
+            load_params,
+            self.endpoints,
+            k6_dir,
+            shell,
+            loader,
+            self.wallet,
+        )
+
+    @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
+    @reporter.step_deco("Preparation steps")
+    def prepare(
+        self,
+        load_params: LoadParams,
+        cluster_nodes: list[ClusterNode],
+        nodes_under_load: list[ClusterNode],
+        k6_dir: str,
+    ):
+        self.k6_dir = k6_dir
+        with reporter.step("Init s3 client on loaders"):
+            storage_node = nodes_under_load[0].service(StorageNode)
+            s3_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes]
+            grpc_peer = storage_node.get_rpc_endpoint()
+
+        parallel(self.prepare_node, nodes_under_load, k6_dir, load_params, s3_public_keys, grpc_peer)
+
+    @reporter.step_deco("Prepare node {cluster_node}")
+    def prepare_node(
+        self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams, s3_public_keys: list[str], grpc_peer: str
+    ):
+        LocalRunner.prepare_node(self, cluster_node, k6_dir, load_params)
+        self.endpoints = cluster_node.s3_gate.get_all_endpoints()
+        shell = cluster_node.host.get_shell()
+
+        with reporter.step("Uninstall previous installation of aws cli"):
+            shell.exec(f"sudo rm -rf /usr/local/aws-cli")
+            shell.exec(f"sudo rm -rf /usr/local/bin/aws")
+            shell.exec(f"sudo rm -rf /usr/local/bin/aws_completer")
+
+        with reporter.step("Install aws cli"):
+            shell.exec(f"sudo curl {load_params.awscli_url} -o {k6_dir}/awscliv2.zip")
+            shell.exec(f"sudo unzip -q {k6_dir}/awscliv2.zip -d {k6_dir}")
+            shell.exec(f"sudo {k6_dir}/aws/install")
+
+        with reporter.step("Install requests python module"):
+            shell.exec(f"sudo apt-get -y install python3-pip")
+            shell.exec(f"sudo curl -so {k6_dir}/requests.tar.gz {load_params.requests_module_url}")
+            shell.exec(f"sudo python3 -m pip install -I {k6_dir}/requests.tar.gz")
+
+        with reporter.step(f"Init s3 client on {cluster_node.host_ip}"):
+            frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC)
+            issue_secret_output = frostfs_authmate_exec.secret.issue(
+                wallet=self.wallet.path,
+                peer=grpc_peer,
+                gate_public_key=s3_public_keys,
+                container_placement_policy=load_params.preset.container_placement_policy,
+                container_policy=f"{k6_dir}/scenarios/files/policy.json",
+                wallet_password=self.wallet.password,
+            ).stdout
+            aws_access_key_id = str(
+                re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group(
+                    "aws_access_key_id"
+                )
+            )
+            aws_secret_access_key = str(
+                re.search(
+                    r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)",
+                    issue_secret_output,
+                ).group("aws_secret_access_key")
+            )
+            configure_input = [
+                InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id),
+                InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key),
+                InteractiveInput(prompt_pattern=r".*", input=""),
+                InteractiveInput(prompt_pattern=r".*", input=""),
+            ]
+            shell.exec("aws configure", CommandOptions(interactive_inputs=configure_input))
@@ -17,3 +17,16 @@ def load_plugin(plugin_group: str, name: str) -> Any:
         return None
     plugin = plugins[name]
     return plugin.load()
+
+
+def load_all(group: str) -> Any:
+    """Loads all plugins using entry point specification.
+
+    Args:
+        plugin_group: Name of plugin group.
+
+    Returns:
+        Classes from specified group.
+    """
+    plugins = entry_points(group=group)
+    return [plugin.load() for plugin in plugins]
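As a side note, the new `load_all` helper complements `load_plugin`: instead of one named entry point it loads every class registered under a group. A minimal sketch, using the `frostfs.testlib.csc_managers` group that the cluster state controller consumes later in this diff:

```python
from frostfs_testlib.plugins import load_all

# Prints the loaded class for every entry point registered under the group.
for plugin_class in load_all(group="frostfs.testlib.csc_managers"):
    print(plugin_class)
```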
@@ -62,7 +62,8 @@ class LocalShell(Shell):
         if options.check and result.return_code != 0:
             raise RuntimeError(
                 f"Command: {command}\nreturn code: {result.return_code}\n"
-                f"Output: {result.stdout}"
+                f"Output: {result.stdout}\n"
+                f"Stderr: {result.stderr}\n"
             )
         return result

@@ -94,9 +95,7 @@ class LocalShell(Shell):
                 return_code=exc.returncode,
             )
             raise RuntimeError(
-                f"Command: {command}\nError:\n"
-                f"return code: {exc.returncode}\n"
-                f"output: {exc.output}"
+                f"Command: {command}\nError:\n" f"return code: {exc.returncode}\n" f"output: {exc.output}"
             ) from exc
         except OSError as exc:
             raise RuntimeError(f"Command: {command}\nOutput: {exc.strerror}") from exc
@@ -6,27 +6,11 @@ from functools import lru_cache, wraps
 from time import sleep
 from typing import ClassVar, Optional, Tuple

-from paramiko import (
-    AutoAddPolicy,
-    Channel,
-    ECDSAKey,
-    Ed25519Key,
-    PKey,
-    RSAKey,
-    SSHClient,
-    SSHException,
-    ssh_exception,
-)
+from paramiko import AutoAddPolicy, Channel, ECDSAKey, Ed25519Key, PKey, RSAKey, SSHClient, SSHException, ssh_exception
 from paramiko.ssh_exception import AuthenticationException

 from frostfs_testlib.reporter import get_reporter
-from frostfs_testlib.shell.interfaces import (
-    CommandInspector,
-    CommandOptions,
-    CommandResult,
-    Shell,
-    SshCredentials,
-)
+from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell, SshCredentials

 logger = logging.getLogger("frostfs.testlib.shell")
 reporter = get_reporter()
@@ -97,8 +81,7 @@ class SshConnectionProvider:
                 )
             else:
                 logger.info(
-                    f"Trying to connect to host {host} as {creds.ssh_login} using password "
-                    f"(attempt {attempt})"
+                    f"Trying to connect to host {host} as {creds.ssh_login} using password " f"(attempt {attempt})"
                 )
                 connection.connect(
                     hostname=host,
@@ -141,9 +124,7 @@ class HostIsNotAvailable(Exception):

 def log_command(func):
     @wraps(func)
-    def wrapper(
-        shell: "SSHShell", command: str, options: CommandOptions, *args, **kwargs
-    ) -> CommandResult:
+    def wrapper(shell: "SSHShell", command: str, options: CommandOptions, *args, **kwargs) -> CommandResult:
         command_info = command.removeprefix("$ProgressPreference='SilentlyContinue'\n")
         with reporter.step(command_info):
             logger.info(f'Execute command "{command}" on "{shell.host}"')
@@ -238,15 +219,13 @@ class SSHShell(Shell):

         if options.check and result.return_code != 0:
             raise RuntimeError(
-                f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}"
+                f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}\nStderr: {result.stderr}\n"
             )
         return result

     @log_command
     def _exec_interactive(self, command: str, options: CommandOptions) -> CommandResult:
-        stdin, stdout, stderr = self._connection.exec_command(
-            command, timeout=options.timeout, get_pty=True
-        )
+        stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout, get_pty=True)
         for interactive_input in options.interactive_inputs:
             input = interactive_input.input
             if not input.endswith("\n"):
@@ -169,7 +169,7 @@ def include_node_to_network_map(
     storage_node_set_status(node_to_include, status="online")

     # Per suggestion of @fyrchik we need to wait for 2 blocks after we set status and after tick epoch.
-    # First sleep can be omitted after https://github.com/TrueCloudLab/frostfs-node/issues/60 complete.
+    # First sleep can be omitted after https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/60 complete.
     time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2)
     tick_epoch(shell, cluster)
@@ -8,14 +8,10 @@ from frostfs_testlib.hosting import Host, Hosting
 from frostfs_testlib.hosting.config import ServiceConfig
 from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.storage import get_service_registry
+from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml
+from frostfs_testlib.storage.configuration.service_configuration import ServiceConfiguration
 from frostfs_testlib.storage.constants import ConfigAttributes
-from frostfs_testlib.storage.dataclasses.frostfs_services import (
-    HTTPGate,
-    InnerRing,
-    MorphChain,
-    S3Gate,
-    StorageNode,
-)
+from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode
 from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass
 from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces
 from frostfs_testlib.storage.service_registry import ServiceRegistry

@@ -93,6 +89,9 @@ class ClusterNode:
         config_str = yaml.dump(new_config)
         shell.exec(f"echo '{config_str}' | sudo tee {config_file_path}")

+    def config(self, service_type: type[ServiceClass]) -> ServiceConfigurationYml:
+        return ServiceConfiguration(self.service(service_type))
+
     def service(self, service_type: type[ServiceClass]) -> ServiceClass:
         """
         Get a service cluster node of specified type.

@@ -118,9 +117,7 @@ class ClusterNode:
         )

     def get_list_of_services(self) -> list[str]:
-        return [
-            config.attributes[ConfigAttributes.SERVICE_NAME] for config in self.host.config.services
-        ]
+        return [config.attributes[ConfigAttributes.SERVICE_NAME] for config in self.host.config.services]

     def get_all_interfaces(self) -> dict[str, str]:
         return self.host.config.interfaces

@@ -130,9 +127,7 @@ class ClusterNode:

     def get_data_interfaces(self) -> list[str]:
         return [
-            ip_address
-            for name_interface, ip_address in self.host.config.interfaces.items()
-            if "data" in name_interface
+            ip_address for name_interface, ip_address in self.host.config.interfaces.items() if "data" in name_interface
         ]

     def get_data_interface(self, search_interface: str) -> list[str]:

@@ -221,9 +216,7 @@ class Cluster:

         cluster_nodes = set()
         for service in services:
-            cluster_nodes.update(
-                [node for node in self.cluster_nodes if node.service(type(service)) == service]
-            )
+            cluster_nodes.update([node for node in self.cluster_nodes if node.service(type(service)) == service])

         return list(cluster_nodes)

@@ -331,8 +324,6 @@ class Cluster:
         return [node.get_endpoint() for node in nodes]

     def get_nodes_by_ip(self, ips: list[str]) -> list[ClusterNode]:
-        cluster_nodes = [
-            node for node in self.cluster_nodes if URL(node.morph_chain.get_endpoint()).host in ips
-        ]
+        cluster_nodes = [node for node in self.cluster_nodes if URL(node.morph_chain.get_endpoint()).host in ips]
         with reporter.step(f"Return cluster nodes - {cluster_nodes}"):
             return cluster_nodes
src/frostfs_testlib/storage/configuration/interfaces.py (new file, 65 lines)

@@ -0,0 +1,65 @@
from abc import ABC, abstractmethod
from typing import Any


class ServiceConfigurationYml(ABC):
    """
    Class to manipulate yml configuration for service
    """

    def _find_option(self, key: str, data: dict):
        tree = key.split(":")
        current = data
        for node in tree:
            if isinstance(current, list) and len(current) - 1 >= int(node):
                current = current[int(node)]
                continue

            if node not in current:
                return None

            current = current[node]

        return current

    def _set_option(self, key: str, value: Any, data: dict):
        tree = key.split(":")
        current = data
        for node in tree[:-1]:
            if isinstance(current, list) and len(current) - 1 >= int(node):
                current = current[int(node)]
                continue

            if node not in current:
                current[node] = {}

            current = current[node]

        current[tree[-1]] = value

    @abstractmethod
    def get(self, key: str) -> str:
        """
        Get parameter value from current configuration

        Args:
            key: key of the parameter in yaml format like 'storage:shard:default:resync_metabase'

        Returns:
            value of the parameter
        """

    @abstractmethod
    def set(self, values: dict[str, Any]):
        """
        Sets parameters to configuration

        Args:
            values: dict where key is the key of the parameter in yaml format like 'storage:shard:default:resync_metabase' and value is the value of the option to set
        """

    @abstractmethod
    def revert(self):
        """
        Revert changes
        """
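For orientation, the two protected helpers walk colon-separated keys and treat purely numeric segments as list indexes. A self-contained sketch with a throwaway in-memory subclass (introduced here only for illustration; real implementations talk to a host shell):

```python
from typing import Any

from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml


class _InMemoryConfig(ServiceConfigurationYml):
    """Trivial illustrative subclass backed by a plain dict."""

    def __init__(self) -> None:
        self.data: dict = {}

    def get(self, key: str) -> str:
        return self._find_option(key, self.data)

    def set(self, values: dict[str, Any]):
        for key, value in values.items():
            self._set_option(key, value, self.data)

    def revert(self):
        self.data = {}


cfg = _InMemoryConfig()
cfg.set({"storage:shard:default:resync_metabase": True})
# _set_option built the nested dicts, _find_option walks them back.
assert cfg.get("storage:shard:default:resync_metabase") is True
```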
@@ -0,0 +1,67 @@
import os
import re
from typing import Any

import yaml

from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.shell.interfaces import CommandOptions
from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml
from frostfs_testlib.storage.dataclasses.node_base import ServiceClass

reporter = get_reporter()


class ServiceConfiguration(ServiceConfigurationYml):
    def __init__(self, service: "ServiceClass") -> None:
        self.service = service
        self.shell = self.service.host.get_shell()
        self.confd_path = os.path.join(self.service.config_dir, "conf.d")
        self.custom_file = os.path.join(self.confd_path, "99_changes.yml")

    def _path_exists(self, path: str) -> bool:
        return not self.shell.exec(f"test -e {path}", options=CommandOptions(check=False)).return_code

    def _get_data_from_file(self, path: str) -> dict:
        content = self.shell.exec(f"cat {path}").stdout
        data = yaml.safe_load(content)
        return data

    def get(self, key: str) -> str:
        with reporter.step(f"Get {key} configuration value for {self.service}"):
            config_files = [self.service.main_config_path]

            if self._path_exists(self.confd_path):
                files = self.shell.exec(f"find {self.confd_path} -type f").stdout.strip().split()
                # Sorting files in backwards order from latest to first one
                config_files.extend(sorted(files, key=lambda x: -int(re.findall("^\d+", os.path.basename(x))[0])))

            result = None
            for file in files:
                data = self._get_data_from_file(file)
                result = self._find_option(key, data)
                if result is not None:
                    break

            return result

    def set(self, values: dict[str, Any]):
        with reporter.step(f"Change configuration for {self.service}"):
            if not self._path_exists(self.confd_path):
                self.shell.exec(f"mkdir {self.confd_path}")

            if self._path_exists(self.custom_file):
                data = self._get_data_from_file(self.custom_file)
            else:
                data = {}

            for key, value in values.items():
                self._set_option(key, value, data)

            content = yaml.dump(data)
            self.shell.exec(f"echo '{content}' | sudo tee {self.custom_file}")
            self.shell.exec(f"chmod 777 {self.custom_file}")

    def revert(self):
        with reporter.step(f"Revert changed options for {self.service}"):
            self.shell.exec(f"rm -rf {self.custom_file}")
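These classes combine with the `ClusterNode.config()` accessor added in cluster.py above. A minimal sketch, assuming an already constructed `ClusterNode` and the key layout documented in the interface:

```python
from frostfs_testlib.storage.cluster import ClusterNode, StorageNode


def override_resync_metabase(cluster_node: ClusterNode) -> None:
    # config() returns a ServiceConfiguration bound to the node's storage service.
    config = cluster_node.config(StorageNode)
    # set() writes overrides to <config_dir>/conf.d/99_changes.yml on the node.
    config.set({"storage:shard:default:resync_metabase": True})
    print(config.get("storage:shard:default:resync_metabase"))
    # revert() simply removes the override file again.
    config.revert()
```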
@@ -3,6 +3,7 @@ class ConfigAttributes:
     WALLET_PASSWORD = "wallet_password"
     WALLET_PATH = "wallet_path"
     WALLET_CONFIG = "wallet_config"
+    CONFIG_DIR = "service_config_dir"
     CONFIG_PATH = "config_path"
     SHARD_CONFIG_PATH = "shard_config_path"
     LOCAL_WALLET_PATH = "local_wallet_path"
@@ -2,13 +2,8 @@ import copy
 from typing import Optional

 import frostfs_testlib.resources.optionals as optionals
-from frostfs_testlib.load.interfaces import ScenarioRunner
+from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner
-from frostfs_testlib.load.load_config import (
-    EndpointSelectionStrategy,
-    LoadParams,
-    LoadScenario,
-    LoadType,
-)
+from frostfs_testlib.load.load_config import EndpointSelectionStrategy, LoadParams, LoadScenario, LoadType
 from frostfs_testlib.load.load_report import LoadReport
 from frostfs_testlib.load.load_verifiers import LoadVerifier
 from frostfs_testlib.reporter import get_reporter

@@ -56,9 +51,7 @@ class BackgroundLoadController:
            raise RuntimeError("endpoint_selection_strategy should not be None")

    @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED, [])
-    def _get_endpoints(
-        self, load_type: LoadType, endpoint_selection_strategy: EndpointSelectionStrategy
-    ):
+    def _get_endpoints(self, load_type: LoadType, endpoint_selection_strategy: EndpointSelectionStrategy):
        all_endpoints = {
            LoadType.gRPC: {
                EndpointSelectionStrategy.ALL: list(

@@ -85,10 +78,7 @@ class BackgroundLoadController:
                    )
                ),
                EndpointSelectionStrategy.FIRST: list(
-                    set(
-                        node_under_load.service(S3Gate).get_endpoint()
-                        for node_under_load in self.nodes_under_load
-                    )
+                    set(node_under_load.service(S3Gate).get_endpoint() for node_under_load in self.nodes_under_load)
                ),
            },
        }

@@ -98,12 +88,8 @@ class BackgroundLoadController:
    @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
    @reporter.step_deco("Prepare load instances")
    def prepare(self):
-        self.endpoints = self._get_endpoints(
-            self.load_params.load_type, self.load_params.endpoint_selection_strategy
-        )
-        self.runner.prepare(
-            self.load_params, self.cluster_nodes, self.nodes_under_load, self.k6_dir
-        )
+        self.endpoints = self._get_endpoints(self.load_params.load_type, self.load_params.endpoint_selection_strategy)
+        self.runner.prepare(self.load_params, self.cluster_nodes, self.nodes_under_load, self.k6_dir)
        self.runner.init_k6_instances(self.load_params, self.endpoints, self.k6_dir)

    @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@@ -1,9 +1,15 @@
 import datetime
 import time
+from typing import TypeVar

 import frostfs_testlib.resources.optionals as optionals
+from frostfs_testlib.cli import FrostfsAdm, FrostfsCli
+from frostfs_testlib.cli.netmap_parser import NetmapParser
 from frostfs_testlib.healthcheck.interfaces import Healthcheck
+from frostfs_testlib.plugins import load_all
 from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC
+from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG, MORPH_BLOCK_TIME
 from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider
 from frostfs_testlib.steps.network import IfUpDownHelper, IpTablesHelper
 from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode

@@ -11,6 +17,7 @@ from frostfs_testlib.storage.controllers.disk_controller import DiskController
 from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass
 from frostfs_testlib.testing import parallel
 from frostfs_testlib.testing.test_control import run_optionally, wait_for_success
+from frostfs_testlib.utils.datetime_utils import parse_time
 from frostfs_testlib.utils.failover_utils import (
     wait_all_storage_nodes_returned,
     wait_for_host_offline,

@@ -22,6 +29,14 @@ reporter = get_reporter()
 if_up_down_helper = IfUpDownHelper()


+class StateManager:
+    def __init__(self, cluster_state_controller: "ClusterStateController") -> None:
+        self.csc = cluster_state_controller
+
+
+StateManagerClass = TypeVar("StateManagerClass", bound=StateManager)
+
+
 class ClusterStateController:
     def __init__(self, shell: Shell, cluster: Cluster, healthcheck: Healthcheck) -> None:
         self.stopped_nodes: list[ClusterNode] = []

@@ -33,6 +48,18 @@ class ClusterStateController:
         self.shell = shell
         self.suspended_services: dict[str, list[ClusterNode]] = {}
         self.nodes_with_modified_interface: list[ClusterNode] = []
+        self.managers: list[StateManagerClass] = []
+
+        # TODO: move all functionality to managers
+        managers = set(load_all(group="frostfs.testlib.csc_managers"))
+        for manager in managers:
+            self.managers.append(manager(self))
+
+    def manager(self, manager_type: type[StateManagerClass]) -> StateManagerClass:
+        for manager in self.managers:
+            # Subclasses here for the future if we have overriding subclasses of base interface
+            if issubclass(type(manager), manager_type):
+                return manager

     def _get_stopped_by_node(self, node: ClusterNode) -> set[NodeBase]:
         stopped_by_node = [svc for svc in self.stopped_services if svc.host == node.host]

@@ -82,12 +109,14 @@ class ClusterStateController:

     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
     @reporter.step_deco("Start host of node {node}")
-    def start_node_host(self, node: ClusterNode):
+    def start_node_host(self, node: ClusterNode, tree_healthcheck: bool = True):
         with reporter.step(f"Start host {node.host.config.address}"):
             node.host.start_host()
         wait_for_host_online(self.shell, node.storage_node)
+        self.stopped_nodes.remove(node)
         wait_for_node_online(node.storage_node)
-        self.stopped_nodes.remove(node)
+        if tree_healthcheck:
+            self.wait_tree_healthcheck()

     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
     @reporter.step_deco("Start stopped hosts")

@@ -151,7 +180,8 @@ class ClusterStateController:
     @reporter.step_deco("Wait for S3Gates reconnection to local storage")
     def wait_s3gates(self):
         online_s3gates = self._get_online(S3Gate)
-        parallel(self.wait_s3gate, online_s3gates)
+        if online_s3gates:
+            parallel(self.wait_s3gate, online_s3gates)

     @wait_for_success(600, 60)
     def wait_tree_healthcheck(self):

@@ -336,7 +366,7 @@ class ClusterStateController:

     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
     @reporter.step_deco("Hard reboot host {node} via magic SysRq option")
-    def panic_reboot_host(self, node: ClusterNode, wait_for_return: bool = True):
+    def panic_reboot_host(self, node: ClusterNode, wait_for_return: bool = True, tree_healthcheck: bool = True):
         shell = node.host.get_shell()
         shell.exec('sudo sh -c "echo 1 > /proc/sys/kernel/sysrq"')

@@ -353,6 +383,8 @@ class ClusterStateController:
             time.sleep(10)
             wait_for_host_online(self.shell, node.storage_node)
             wait_for_node_online(node.storage_node)
+            if tree_healthcheck:
+                self.wait_tree_healthcheck()

     @reporter.step_deco("Down {interface} to {nodes}")
     def down_interface(self, nodes: list[ClusterNode], interface: str):

@@ -382,8 +414,8 @@ class ClusterStateController:
     @reporter.step_deco("Set node time to {in_date}")
     def change_node_date(self, node: ClusterNode, in_date: datetime) -> None:
         shell = node.host.get_shell()
-        shell.exec(f"hwclock --set --date='{in_date}'")
-        shell.exec("hwclock --hctosys")
+        shell.exec(f"date -s @{time.mktime(in_date.timetuple())}")
+        shell.exec("hwclock --systohc")
         node_time = self.get_node_date(node)
         with reporter.step(f"Verify difference between {node_time} and {in_date} is less than a minute"):
             assert (self.get_node_date(node) - in_date) < datetime.timedelta(minutes=1)

@@ -393,8 +425,8 @@ class ClusterStateController:
         shell = node.host.get_shell()
         now_time = datetime.datetime.now(datetime.timezone.utc)
         with reporter.step(f"Set {now_time} time"):
-            shell.exec(f"hwclock --set --date='{now_time}'")
-            shell.exec("hwclock --hctosys")
+            shell.exec(f"date -s @{time.mktime(now_time.timetuple())}")
+            shell.exec("hwclock --systohc")

     @reporter.step_deco("Change the synchronizer status to {status}")
     def set_sync_date_all_nodes(self, status: str):

@@ -403,6 +435,79 @@ class ClusterStateController:
             return
         parallel(self._disable_date_synchronizer, self.cluster.cluster_nodes)

+    @reporter.step_deco("Set MaintenanceModeAllowed - {status}")
+    def set_maintenance_mode_allowed(self, status: str, cluster_node: ClusterNode) -> None:
+        frostfs_adm = FrostfsAdm(
+            shell=cluster_node.host.get_shell(),
+            frostfs_adm_exec_path=FROSTFS_ADM_EXEC,
+            config_file=FROSTFS_ADM_CONFIG_PATH,
+        )
+        frostfs_adm.morph.set_config(set_key_value=f"MaintenanceModeAllowed={status}")
+
+    @reporter.step_deco("Set mode node to {status}")
+    def set_mode_node(self, cluster_node: ClusterNode, wallet: str, status: str, await_tick: bool = True) -> None:
+        rpc_endpoint = cluster_node.storage_node.get_rpc_endpoint()
+        control_endpoint = cluster_node.service(StorageNode).get_control_endpoint()
+
+        frostfs_adm, frostfs_cli, frostfs_cli_remote = self._get_cli(local_shell=self.shell, cluster_node=cluster_node)
+        node_netinfo = NetmapParser.netinfo(frostfs_cli.netmap.netinfo(rpc_endpoint=rpc_endpoint, wallet=wallet).stdout)
+
+        with reporter.step("If status maintenance, then check that the option is enabled"):
+            if node_netinfo.maintenance_mode_allowed == "false":
+                frostfs_adm.morph.set_config(set_key_value="MaintenanceModeAllowed=true")
+
+        with reporter.step(f"Change the status to {status}"):
+            frostfs_cli_remote.control.set_status(endpoint=control_endpoint, status=status)
+
+        if not await_tick:
+            return
+
+        with reporter.step("Tick 1 epoch, and await 2 block"):
+            frostfs_adm.morph.force_new_epoch()
+            time.sleep(parse_time(MORPH_BLOCK_TIME) * 2)
+
+        self.check_node_status(status=status, wallet=wallet, cluster_node=cluster_node)
+
+    @wait_for_success(80, 8)
+    @reporter.step_deco("Check status node, status - {status}")
+    def check_node_status(self, status: str, wallet: str, cluster_node: ClusterNode):
+        frostfs_cli = FrostfsCli(
+            shell=self.shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG
+        )
+        netmap = NetmapParser.snapshot_all_nodes(
+            frostfs_cli.netmap.snapshot(rpc_endpoint=cluster_node.storage_node.get_rpc_endpoint(), wallet=wallet).stdout
+        )
+        netmap = [node for node in netmap if cluster_node.host_ip == node.node]
+        if status == "offline":
+            assert cluster_node.host_ip not in netmap, f"{cluster_node.host_ip} not in Offline"
+        else:
+            assert netmap[0].node_status == status.upper(), f"Node state - {netmap[0].node_status} != {status} expect"
+
+    def _get_cli(self, local_shell: Shell, cluster_node: ClusterNode) -> tuple[FrostfsAdm, FrostfsCli, FrostfsCli]:
+        # TODO Move to service config
+        host = cluster_node.host
+        service_config = host.get_service_config(cluster_node.storage_node.name)
+        wallet_path = service_config.attributes["wallet_path"]
+        wallet_password = service_config.attributes["wallet_password"]
+
+        shell = host.get_shell()
+        wallet_config_path = f"/tmp/{cluster_node.storage_node.name}-config.yaml"
+        wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
+        shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
+
+        frostfs_adm = FrostfsAdm(
+            shell=shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH
+        )
+        frostfs_cli = FrostfsCli(
+            shell=local_shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG
+        )
+        frostfs_cli_remote = FrostfsCli(
+            shell=shell,
+            frostfs_cli_exec_path=FROSTFS_CLI_EXEC,
+            config_file=wallet_config_path,
+        )
+        return frostfs_adm, frostfs_cli, frostfs_cli_remote
+
     def _enable_date_synchronizer(self, cluster_node: ClusterNode):
         shell = cluster_node.host.get_shell()
         shell.exec("timedatectl set-ntp true")
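The new maintenance helpers chain together as follows; a hedged sketch, assuming `wallet` is a path to a wallet file accepted by frostfs-cli:

```python
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController


def put_node_into_maintenance(csc: ClusterStateController, node: ClusterNode, wallet: str) -> None:
    # set_mode_node() enables MaintenanceModeAllowed on the morph chain when it is
    # disabled, switches the node status via the control endpoint, ticks an epoch
    # and (with await_tick=True) verifies the netmap through check_node_status().
    csc.set_mode_node(cluster_node=node, wallet=wallet, status="maintenance")


def bring_node_back_online(csc: ClusterStateController, node: ClusterNode, wallet: str) -> None:
    csc.set_mode_node(cluster_node=node, wallet=wallet, status="online")
```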
@@ -0,0 +1,51 @@
from typing import Any

from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController, StateManager
from frostfs_testlib.storage.dataclasses.node_base import ServiceClass
from frostfs_testlib.testing import parallel

reporter = get_reporter()


class ConfigStateManager(StateManager):
    def __init__(self, cluster_state_controller: ClusterStateController) -> None:
        super().__init__(cluster_state_controller)
        self.services_with_changed_config: set[tuple[ClusterNode, ServiceClass]] = set()
        self.cluster = self.csc.cluster

    @reporter.step_deco("Change configuration for {service_type} on all nodes")
    def set_on_all_nodes(self, service_type: type[ServiceClass], values: dict[str, Any]):
        services = self.cluster.services(service_type)
        nodes = self.cluster.nodes(services)
        self.services_with_changed_config.update([(node, service_type) for node in nodes])

        self.csc.stop_services_of_type(service_type)
        parallel([node.config(service_type).set for node in nodes], values=values)
        self.csc.start_services_of_type(service_type)

    @reporter.step_deco("Change configuration for {service_type} on {node}")
    def set_on_node(self, node: ClusterNode, service_type: type[ServiceClass], values: dict[str, Any]):
        self.services_with_changed_config.add((node, service_type))

        self.csc.stop_service_of_type(node, service_type)
        node.config(service_type).set(values)
        self.csc.start_service_of_type(node, service_type)

    @reporter.step_deco("Revert all configuration changes")
    def revert_all(self):
        if not self.services_with_changed_config:
            return

        parallel(self._revert_svc, self.services_with_changed_config)
        self.services_with_changed_config.clear()

        self.csc.start_all_stopped_services()

    # TODO: parallel can't have multiple parallel_items :(
    @reporter.step_deco("Revert all configuration {node_and_service}")
    def _revert_svc(self, node_and_service: tuple[ClusterNode, ServiceClass]):
        node, service_type = node_and_service
        self.csc.stop_service_of_type(node, service_type)
        node.config(service_type).revert()
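A usage sketch for the new manager: the controller discovers it through the `frostfs.testlib.csc_managers` entry-point group, so tests retrieve it by type via `manager()`. The module path in the import below is not shown in this diff and is only an assumption:

```python
from typing import Any

from frostfs_testlib.storage.cluster import StorageNode
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
# Assumed module path; the diff does not show where ConfigStateManager lives.
from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager


def with_changed_storage_config(csc: ClusterStateController, values: dict[str, Any]) -> None:
    config_manager = csc.manager(ConfigStateManager)
    config_manager.set_on_all_nodes(StorageNode, values)
    # ... exercise the cluster with the changed configuration ...
    config_manager.revert_all()
```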
@@ -120,6 +120,15 @@ class NodeBase(HumanReadableABC):
             ConfigAttributes.WALLET_CONFIG,
         )

+    @property
+    def config_dir(self) -> str:
+        return self._get_attribute(ConfigAttributes.CONFIG_DIR)
+
+    @property
+    def main_config_path(self) -> str:
+        return self._get_attribute(ConfigAttributes.CONFIG_PATH)
+
+    # TODO: Deprecated
     def get_config(self, config_file_path: Optional[str] = None) -> tuple[str, dict]:
         if config_file_path is None:
             config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH)

@@ -132,6 +141,7 @@ class NodeBase(HumanReadableABC):
         config = yaml.safe_load(config_text)
         return config_file_path, config

+    # TODO: Deprecated
     def save_config(self, new_config: dict, config_file_path: Optional[str] = None) -> None:
         if config_file_path is None:
             config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH)

@@ -146,9 +156,7 @@ class NodeBase(HumanReadableABC):
         storage_wallet_pass = self.get_wallet_password()
         return wallet_utils.get_wallet_public_key(storage_wallet_path, storage_wallet_pass)

-    def _get_attribute(
-        self, attribute_name: str, default_attribute_name: Optional[str] = None
-    ) -> str:
+    def _get_attribute(self, attribute_name: str, default_attribute_name: Optional[str] = None) -> str:
         config = self.host.get_service_config(self.name)

         if attribute_name not in config.attributes:
@@ -1,5 +1,4 @@
 from dataclasses import dataclass
-from enum import Enum
 from typing import Optional

 from frostfs_testlib.testing.readable import HumanReadableEnum

@@ -28,10 +27,16 @@ class StorageObjectInfo(ObjectRef):
     locks: Optional[list[LockObjectInfo]] = None


+class ModeNode(HumanReadableEnum):
+    MAINTENANCE: str = "maintenance"
+    ONLINE: str = "online"
+    OFFLINE: str = "offline"
+
+
 @dataclass
 class NodeNetmapInfo:
     node_id: str = None
-    node_status: str = None
+    node_status: ModeNode = None
     node_data_ips: list[str] = None
     cluster_name: str = None
     continent: str = None

@@ -53,3 +58,19 @@ class Interfaces(HumanReadableEnum):
     MGMT: str = "mgmt"
     INTERNAL_0: str = "internal0"
     INTERNAL_1: str = "internal1"
+
+
+@dataclass
+class NodeNetInfo:
+    epoch: str = None
+    network_magic: str = None
+    time_per_block: str = None
+    container_fee: str = None
+    epoch_duration: str = None
+    inner_ring_candidate_fee: str = None
+    maximum_object_size: str = None
+    withdrawal_fee: str = None
+    homomorphic_hashing_disabled: str = None
+    maintenance_mode_allowed: str = None
+    eigen_trust_alpha: str = None
+    eigen_trust_iterations: str = None
@@ -12,6 +12,7 @@ from frostfs_testlib.steps.node_management import storage_node_healthcheck
 from frostfs_testlib.steps.storage_policy import get_nodes_with_object
 from frostfs_testlib.storage.cluster import Cluster, ClusterNode, NodeBase, StorageNode
 from frostfs_testlib.storage.dataclasses.frostfs_services import MorphChain
+from frostfs_testlib.testing.parallel import parallel
 from frostfs_testlib.testing.test_control import retry, wait_for_success
 from frostfs_testlib.utils.datetime_utils import parse_time

@@ -26,12 +27,17 @@ def ping_host(shell: Shell, host: Host):
     return shell.exec(f"ping {host.config.address} -c 1", options).return_code


+# TODO: Move to ClusterStateController
 @reporter.step_deco("Wait for storage nodes returned to cluster")
 def wait_all_storage_nodes_returned(shell: Shell, cluster: Cluster) -> None:
-    for node in cluster.services(StorageNode):
-        with reporter.step(f"Run health check for storage at '{node}'"):
-            wait_for_host_online(shell, node)
-            wait_for_node_online(node)
+    nodes = cluster.services(StorageNode)
+    parallel(_wait_for_storage_node, nodes, shell=shell)
+
+
+@reporter.step_deco("Run health check for storage at '{node}'")
+def _wait_for_storage_node(node: StorageNode, shell: Shell) -> None:
+    wait_for_host_online(shell, node)
+    wait_for_node_online(node)


 @retry(max_attempts=60, sleep_interval=5, expected_result=0)

@@ -64,10 +70,17 @@ def wait_for_node_online(node: StorageNode):
     except Exception as err:
         logger.warning(f"Node healthcheck fails with error {err}")
         return False
+    finally:
+        gather_socket_info(node)

     return health_check.health_status == "READY" and health_check.network_status == "ONLINE"


+@reporter.step_deco("Gather socket info for {node}")
+def gather_socket_info(node: StorageNode):
+    node.host.get_shell().exec("ss -tuln | grep 8080", CommandOptions(check=False))
+
+
 @reporter.step_deco("Check and return status of given service")
 def service_status(service: str, shell: Shell) -> str:
     return shell.exec(f"sudo systemctl is-active {service}").stdout.rstrip()

@@ -139,9 +152,7 @@ def multiple_restart(
     service_name = node.service(service_type).name
     for _ in range(count):
         node.host.restart_service(service_name)
-        logger.info(
-            f"Restart {service_systemctl_name}; sleep {sleep_interval} seconds and continue"
-        )
+        logger.info(f"Restart {service_systemctl_name}; sleep {sleep_interval} seconds and continue")
         sleep(sleep_interval)


@@ -164,9 +175,7 @@ def check_services_status(service_list: list[str], expected_status: str, shell:

 @reporter.step_deco("Wait for active status of passed service")
 @wait_for_success(60, 5)
-def wait_service_in_desired_state(
-    service: str, shell: Shell, expected_status: Optional[str] = "active"
-):
+def wait_service_in_desired_state(service: str, shell: Shell, expected_status: Optional[str] = "active"):
     real_status = service_status(service=service, shell=shell)
     assert (
         expected_status == real_status