Compare commits


1 commit

Commit 436c61f635: "science" (2023-11-24 13:40:11 +03:00)
85 changed files with 2106 additions and 3515 deletions


@ -1,21 +0,0 @@
name: DCO action
on: [pull_request]
jobs:
dco:
name: DCO
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: '1.21'
- name: Run commit format checker
uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
with:
from: 'origin/${{ github.event.pull_request.base.ref }}'

.github/CODEOWNERS vendored Normal file (1 line added)

@ -0,0 +1 @@
* @aprasolova @vdomnich-yadro @dansingjulia @yadro-vavdeev @abereziny

.github/workflows/dco.yml vendored Normal file (21 lines added)

@ -0,0 +1,21 @@
name: DCO check
on:
pull_request:
branches:
- master
jobs:
commits_check_job:
runs-on: ubuntu-latest
name: Commits Check
steps:
- name: Get PR Commits
id: 'get-pr-commits'
uses: tim-actions/get-pr-commits@master
with:
token: ${{ secrets.GITHUB_TOKEN }}
- name: DCO Check
uses: tim-actions/dco@master
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}


@ -51,26 +51,19 @@ basic = "frostfs_testlib.healthcheck.basic_healthcheck:BasicHealthcheck"
config = "frostfs_testlib.storage.controllers.state_managers.config_state_manager:ConfigStateManager" config = "frostfs_testlib.storage.controllers.state_managers.config_state_manager:ConfigStateManager"
[project.entry-points."frostfs.testlib.services"] [project.entry-points."frostfs.testlib.services"]
frostfs-storage = "frostfs_testlib.storage.dataclasses.frostfs_services:StorageNode" s = "frostfs_testlib.storage.dataclasses.frostfs_services:StorageNode"
frostfs-s3 = "frostfs_testlib.storage.dataclasses.frostfs_services:S3Gate" s3-gate = "frostfs_testlib.storage.dataclasses.frostfs_services:S3Gate"
frostfs-http = "frostfs_testlib.storage.dataclasses.frostfs_services:HTTPGate" http-gate = "frostfs_testlib.storage.dataclasses.frostfs_services:HTTPGate"
neo-go = "frostfs_testlib.storage.dataclasses.frostfs_services:MorphChain" morph-chain = "frostfs_testlib.storage.dataclasses.frostfs_services:MorphChain"
frostfs-ir = "frostfs_testlib.storage.dataclasses.frostfs_services:InnerRing" ir = "frostfs_testlib.storage.dataclasses.frostfs_services:InnerRing"
[project.entry-points."frostfs.testlib.credentials_providers"]
authmate = "frostfs_testlib.credentials.authmate_s3_provider:AuthmateS3CredentialsProvider"
wallet_factory = "frostfs_testlib.credentials.wallet_factory_provider:WalletFactoryProvider"
[project.entry-points."frostfs.testlib.bucket_cid_resolver"]
frostfs = "frostfs_testlib.s3.curl_bucket_resolver:CurlBucketContainerResolver"
[tool.isort] [tool.isort]
profile = "black" profile = "black"
src_paths = ["src", "tests"] src_paths = ["src", "tests"]
line_length = 140 line_length = 120
[tool.black] [tool.black]
line-length = 140 line-length = 120
target-version = ["py310"] target-version = ["py310"]
[tool.bumpver] [tool.bumpver]
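
The entry-point table above is what plugin loading keys off: the commit renames the service names (e.g. frostfs-storage becomes s). A minimal sketch of how such entry points are typically resolved at runtime; only the group name "frostfs.testlib.services" comes from the file itself, the loader function and its name are illustrative, not the repo's own load_plugin:

from importlib.metadata import entry_points

def load_service_class(name: str):
    # Python 3.10+ (matching target-version above) supports group filtering.
    for ep in entry_points(group="frostfs.testlib.services"):
        if ep.name == name:  # "s" on the new side, "frostfs-storage" on the old
            return ep.load()
    raise ValueError(f"unknown service entry point: {name}")

storage_node_cls = load_service_class("s")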


@ -1,5 +1,4 @@
from frostfs_testlib.cli.frostfs_adm import FrostfsAdm from frostfs_testlib.cli.frostfs_adm import FrostfsAdm
from frostfs_testlib.cli.frostfs_authmate import FrostfsAuthmate from frostfs_testlib.cli.frostfs_authmate import FrostfsAuthmate
from frostfs_testlib.cli.frostfs_cli import FrostfsCli from frostfs_testlib.cli.frostfs_cli import FrostfsCli
from frostfs_testlib.cli.generic_cli import GenericCli
from frostfs_testlib.cli.neogo import NeoGo, NetworkType from frostfs_testlib.cli.neogo import NeoGo, NetworkType


@ -8,7 +8,7 @@ class FrostfsCliContainer(CliCommand):
def create( def create(
self, self,
rpc_endpoint: str, rpc_endpoint: str,
wallet: Optional[str] = None, wallet: str,
address: Optional[str] = None, address: Optional[str] = None,
attributes: Optional[dict] = None, attributes: Optional[dict] = None,
basic_acl: Optional[str] = None, basic_acl: Optional[str] = None,
@ -57,14 +57,15 @@ class FrostfsCliContainer(CliCommand):
def delete( def delete(
self, self,
rpc_endpoint: str, rpc_endpoint: str,
wallet: str,
cid: str, cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None, address: Optional[str] = None,
await_mode: bool = False, await_mode: bool = False,
session: Optional[str] = None, session: Optional[str] = None,
ttl: Optional[int] = None, ttl: Optional[int] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
force: bool = False, force: bool = False,
timeout: Optional[str] = None,
) -> CommandResult: ) -> CommandResult:
""" """
Delete an existing container. Delete an existing container.
@ -80,6 +81,7 @@ class FrostfsCliContainer(CliCommand):
ttl: TTL value in request meta header (default 2). ttl: TTL value in request meta header (default 2).
wallet: WIF (NEP-2) string or path to the wallet or binary key. wallet: WIF (NEP-2) string or path to the wallet or binary key.
xhdr: Dict with request X-Headers. xhdr: Dict with request X-Headers.
timeout: Timeout for the operation (default 15s).
Returns: Returns:
Command's result. Command's result.
@ -93,8 +95,8 @@ class FrostfsCliContainer(CliCommand):
def get( def get(
self, self,
rpc_endpoint: str, rpc_endpoint: str,
wallet: str,
cid: str, cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None, address: Optional[str] = None,
await_mode: bool = False, await_mode: bool = False,
to: Optional[str] = None, to: Optional[str] = None,
@ -129,8 +131,8 @@ class FrostfsCliContainer(CliCommand):
def get_eacl( def get_eacl(
self, self,
rpc_endpoint: str, rpc_endpoint: str,
wallet: str,
cid: str, cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None, address: Optional[str] = None,
await_mode: bool = False, await_mode: bool = False,
to: Optional[str] = None, to: Optional[str] = None,
@ -166,7 +168,7 @@ class FrostfsCliContainer(CliCommand):
def list( def list(
self, self,
rpc_endpoint: str, rpc_endpoint: str,
wallet: Optional[str] = None, wallet: str,
address: Optional[str] = None, address: Optional[str] = None,
owner: Optional[str] = None, owner: Optional[str] = None,
ttl: Optional[int] = None, ttl: Optional[int] = None,
@ -197,8 +199,8 @@ class FrostfsCliContainer(CliCommand):
def list_objects( def list_objects(
self, self,
rpc_endpoint: str, rpc_endpoint: str,
wallet: str,
cid: str, cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None, address: Optional[str] = None,
ttl: Optional[int] = None, ttl: Optional[int] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
@ -227,8 +229,8 @@ class FrostfsCliContainer(CliCommand):
def set_eacl( def set_eacl(
self, self,
rpc_endpoint: str, rpc_endpoint: str,
wallet: str,
cid: str, cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None, address: Optional[str] = None,
await_mode: bool = False, await_mode: bool = False,
table: Optional[str] = None, table: Optional[str] = None,
@ -264,8 +266,8 @@ class FrostfsCliContainer(CliCommand):
def search_node( def search_node(
self, self,
rpc_endpoint: str, rpc_endpoint: str,
wallet: str,
cid: str, cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None, address: Optional[str] = None,
ttl: Optional[int] = None, ttl: Optional[int] = None,
from_file: Optional[str] = None, from_file: Optional[str] = None,
@ -296,5 +298,9 @@ class FrostfsCliContainer(CliCommand):
return self._execute( return self._execute(
f"container nodes {from_str}", f"container nodes {from_str}",
**{param: value for param, value in locals().items() if param not in ["self", "from_file", "from_str"]}, **{
param: value
for param, value in locals().items()
if param not in ["self", "from_file", "from_str"]
},
) )
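
Every wrapper in this file forwards its arguments through the same locals() filter seen above, so a new parameter such as timeout becomes a CLI flag automatically. A hedged sketch of what the receiving side plausibly does with those kwargs; render_flags is an illustration, not the repo's actual CliCommand._execute:

def render_flags(**kwargs) -> str:
    # None and False mean "flag not set"; True becomes a bare switch.
    parts = []
    for name, value in kwargs.items():
        if value is None or value is False:
            continue
        flag = "--" + name.replace("_", "-")
        parts.append(flag if value is True else f"{flag} '{value}'")
    return " ".join(parts)

# render_flags(rpc_endpoint="s01:8080", cid="abc", await_mode=True, ttl=None)
# -> "--rpc-endpoint 's01:8080' --cid 'abc' --await-mode"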


@ -39,12 +39,14 @@ class FrostfsCliControl(CliCommand):
address: Optional[str] = None, address: Optional[str] = None,
timeout: Optional[str] = None, timeout: Optional[str] = None,
) -> CommandResult: ) -> CommandResult:
"""Health check for FrostFS storage nodes """Set status of the storage node in FrostFS network map
Args: Args:
wallet: Path to the wallet or binary key wallet: Path to the wallet or binary key
address: Address of wallet account address: Address of wallet account
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>') endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
force: Force turning to local maintenance
status: New netmap status keyword ('online', 'offline', 'maintenance')
timeout: Timeout for an operation (default 15s) timeout: Timeout for an operation (default 15s)
Returns: Returns:
@ -54,28 +56,3 @@ class FrostfsCliControl(CliCommand):
"control healthcheck", "control healthcheck",
**{param: value for param, value in locals().items() if param not in ["self"]}, **{param: value for param, value in locals().items() if param not in ["self"]},
) )
def drop_objects(
self,
endpoint: str,
objects: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
wallet: Path to the wallet or binary key
address: Address of wallet account
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
objects: List of object addresses to be removed in string format
timeout: Timeout for an operation (default 15s)
Returns:
Command's result.
"""
return self._execute(
"control drop-objects",
**{param: value for param, value in locals().items() if param not in ["self"]},
)


@ -8,7 +8,7 @@ class FrostfsCliNetmap(CliCommand):
def epoch( def epoch(
self, self,
rpc_endpoint: str, rpc_endpoint: str,
wallet: Optional[str] = None, wallet: str,
address: Optional[str] = None, address: Optional[str] = None,
generate_key: bool = False, generate_key: bool = False,
ttl: Optional[int] = None, ttl: Optional[int] = None,
@ -38,7 +38,7 @@ class FrostfsCliNetmap(CliCommand):
def netinfo( def netinfo(
self, self,
rpc_endpoint: str, rpc_endpoint: str,
wallet: Optional[str] = None, wallet: str,
address: Optional[str] = None, address: Optional[str] = None,
generate_key: bool = False, generate_key: bool = False,
ttl: Optional[int] = None, ttl: Optional[int] = None,
@ -68,7 +68,7 @@ class FrostfsCliNetmap(CliCommand):
def nodeinfo( def nodeinfo(
self, self,
rpc_endpoint: str, rpc_endpoint: str,
wallet: Optional[str] = None, wallet: str,
address: Optional[str] = None, address: Optional[str] = None,
generate_key: bool = False, generate_key: bool = False,
json: bool = False, json: bool = False,
@ -100,7 +100,7 @@ class FrostfsCliNetmap(CliCommand):
def snapshot( def snapshot(
self, self,
rpc_endpoint: str, rpc_endpoint: str,
wallet: Optional[str] = None, wallet: str,
address: Optional[str] = None, address: Optional[str] = None,
generate_key: bool = False, generate_key: bool = False,
ttl: Optional[int] = None, ttl: Optional[int] = None,


@ -8,9 +8,9 @@ class FrostfsCliObject(CliCommand):
def delete( def delete(
self, self,
rpc_endpoint: str, rpc_endpoint: str,
wallet: str,
cid: str, cid: str,
oid: str, oid: str,
wallet: Optional[str] = None,
address: Optional[str] = None, address: Optional[str] = None,
bearer: Optional[str] = None, bearer: Optional[str] = None,
session: Optional[str] = None, session: Optional[str] = None,
@ -44,9 +44,9 @@ class FrostfsCliObject(CliCommand):
def get( def get(
self, self,
rpc_endpoint: str, rpc_endpoint: str,
wallet: str,
cid: str, cid: str,
oid: str, oid: str,
wallet: Optional[str] = None,
address: Optional[str] = None, address: Optional[str] = None,
bearer: Optional[str] = None, bearer: Optional[str] = None,
file: Optional[str] = None, file: Optional[str] = None,
@ -88,9 +88,9 @@ class FrostfsCliObject(CliCommand):
def hash( def hash(
self, self,
rpc_endpoint: str, rpc_endpoint: str,
wallet: str,
cid: str, cid: str,
oid: str, oid: str,
wallet: Optional[str] = None,
address: Optional[str] = None, address: Optional[str] = None,
bearer: Optional[str] = None, bearer: Optional[str] = None,
range: Optional[str] = None, range: Optional[str] = None,
@ -124,15 +124,17 @@ class FrostfsCliObject(CliCommand):
""" """
return self._execute( return self._execute(
"object hash", "object hash",
**{param: value for param, value in locals().items() if param not in ["self", "params"]}, **{
param: value for param, value in locals().items() if param not in ["self", "params"]
},
) )
def head( def head(
self, self,
rpc_endpoint: str, rpc_endpoint: str,
wallet: str,
cid: str, cid: str,
oid: str, oid: str,
wallet: Optional[str] = None,
address: Optional[str] = None, address: Optional[str] = None,
bearer: Optional[str] = None, bearer: Optional[str] = None,
file: Optional[str] = None, file: Optional[str] = None,
@ -176,9 +178,9 @@ class FrostfsCliObject(CliCommand):
def lock( def lock(
self, self,
rpc_endpoint: str, rpc_endpoint: str,
wallet: str,
cid: str, cid: str,
oid: str, oid: str,
wallet: Optional[str] = None,
lifetime: Optional[int] = None, lifetime: Optional[int] = None,
expire_at: Optional[int] = None, expire_at: Optional[int] = None,
address: Optional[str] = None, address: Optional[str] = None,
@ -216,9 +218,9 @@ class FrostfsCliObject(CliCommand):
def put( def put(
self, self,
rpc_endpoint: str, rpc_endpoint: str,
wallet: str,
cid: str, cid: str,
file: str, file: str,
wallet: Optional[str] = None,
address: Optional[str] = None, address: Optional[str] = None,
attributes: Optional[dict] = None, attributes: Optional[dict] = None,
bearer: Optional[str] = None, bearer: Optional[str] = None,
@ -267,10 +269,10 @@ class FrostfsCliObject(CliCommand):
def range( def range(
self, self,
rpc_endpoint: str, rpc_endpoint: str,
wallet: str,
cid: str, cid: str,
oid: str, oid: str,
range: str, range: str,
wallet: Optional[str] = None,
address: Optional[str] = None, address: Optional[str] = None,
bearer: Optional[str] = None, bearer: Optional[str] = None,
file: Optional[str] = None, file: Optional[str] = None,
@ -311,8 +313,8 @@ class FrostfsCliObject(CliCommand):
def search( def search(
self, self,
rpc_endpoint: str, rpc_endpoint: str,
wallet: str,
cid: str, cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None, address: Optional[str] = None,
bearer: Optional[str] = None, bearer: Optional[str] = None,
filters: Optional[list] = None, filters: Optional[list] = None,
@ -353,11 +355,11 @@ class FrostfsCliObject(CliCommand):
def nodes( def nodes(
self, self,
rpc_endpoint: str, rpc_endpoint: str,
wallet: str,
cid: str, cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None, address: Optional[str] = None,
bearer: Optional[str] = None, bearer: Optional[str] = None,
generate_key: Optional[bool] = None, generate_key: Optional = None,
oid: Optional[str] = None, oid: Optional[str] = None,
trace: bool = False, trace: bool = False,
root: bool = False, root: bool = False,


@ -9,6 +9,7 @@ class FrostfsCliSession(CliCommand):
self, self,
rpc_endpoint: str, rpc_endpoint: str,
wallet: str, wallet: str,
wallet_password: str,
out: str, out: str,
lifetime: Optional[int] = None, lifetime: Optional[int] = None,
address: Optional[str] = None, address: Optional[str] = None,
@ -29,7 +30,12 @@ class FrostfsCliSession(CliCommand):
Returns: Returns:
Command's result. Command's result.
""" """
return self._execute( return self._execute_with_password(
"session create", "session create",
**{param: value for param, value in locals().items() if param not in ["self"]}, wallet_password,
**{
param: value
for param, value in locals().items()
if param not in ["self", "wallet_password"]
},
) )


@ -39,10 +39,10 @@ class FrostfsCliShards(CliCommand):
def set_mode( def set_mode(
self, self,
endpoint: str, endpoint: str,
wallet: str,
wallet_password: str,
mode: str, mode: str,
id: Optional[list[str]], id: Optional[list[str]],
wallet: Optional[str] = None,
wallet_password: Optional[str] = None,
address: Optional[str] = None, address: Optional[str] = None,
all: bool = False, all: bool = False,
clear_errors: bool = False, clear_errors: bool = False,
@ -65,15 +65,14 @@ class FrostfsCliShards(CliCommand):
Returns: Returns:
Command's result. Command's result.
""" """
if not wallet_password:
return self._execute(
"control shards set-mode",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
return self._execute_with_password( return self._execute_with_password(
"control shards set-mode", "control shards set-mode",
wallet_password, wallet_password,
**{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]}, **{
param: value
for param, value in locals().items()
if param not in ["self", "wallet_password"]
},
) )
def dump( def dump(
@ -106,14 +105,18 @@ class FrostfsCliShards(CliCommand):
return self._execute_with_password( return self._execute_with_password(
"control shards dump", "control shards dump",
wallet_password, wallet_password,
**{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]}, **{
param: value
for param, value in locals().items()
if param not in ["self", "wallet_password"]
},
) )
def list( def list(
self, self,
endpoint: str, endpoint: str,
wallet: Optional[str] = None, wallet: str,
wallet_password: Optional[str] = None, wallet_password: str,
address: Optional[str] = None, address: Optional[str] = None,
json_mode: bool = False, json_mode: bool = False,
timeout: Optional[str] = None, timeout: Optional[str] = None,
@ -132,14 +135,12 @@ class FrostfsCliShards(CliCommand):
Returns: Returns:
Command's result. Command's result.
""" """
if not wallet_password:
return self._execute(
"control shards list",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
return self._execute_with_password( return self._execute_with_password(
"control shards list", "control shards list",
wallet_password, wallet_password,
**{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]}, **{
param: value
for param, value in locals().items()
if param not in ["self", "wallet_password"]
},
) )


@ -6,12 +6,12 @@ from frostfs_testlib.shell import CommandResult
class FrostfsCliUtil(CliCommand): class FrostfsCliUtil(CliCommand):
def sign_bearer_token( def sign_bearer_token(
self, self,
from_file: str, wallet: str,
to_file: str, from_file: str,
wallet: Optional[str] = None, to_file: str,
address: Optional[str] = None, address: Optional[str] = None,
json: Optional[bool] = False, json: Optional[bool] = False,
) -> CommandResult: ) -> CommandResult:
""" """
Sign bearer token to use it in requests. Sign bearer token to use it in requests.
@ -33,9 +33,9 @@ class FrostfsCliUtil(CliCommand):
def sign_session_token( def sign_session_token(
self, self,
wallet: str,
from_file: str, from_file: str,
to_file: str, to_file: str,
wallet: Optional[str] = None,
address: Optional[str] = None, address: Optional[str] = None,
) -> CommandResult: ) -> CommandResult:
""" """


@ -1,30 +0,0 @@
from typing import Optional
from frostfs_testlib.hosting.interfaces import Host
from frostfs_testlib.shell.interfaces import CommandOptions, Shell
class GenericCli(object):
def __init__(self, cli_name: str, host: Host) -> None:
self.host = host
self.cli_name = cli_name
def __call__(
self,
args: Optional[str] = "",
pipes: Optional[str] = "",
shell: Optional[Shell] = None,
options: Optional[CommandOptions] = None,
):
if not shell:
shell = self.host.get_shell()
cli_config = self.host.get_cli_config(self.cli_name, True)
extra_args = ""
exec_path = self.cli_name
if cli_config:
extra_args = " ".join(cli_config.extra_args)
exec_path = cli_config.exec_path
cmd = f"{exec_path} {args} {extra_args} {pipes}"
return shell.exec(cmd, options)
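
A short usage sketch for the GenericCli class deleted above; the host object and endpoint string are assumed to exist:

frostfs_cli = GenericCli("frostfs-cli", host)
# Falls back to host.get_shell() and the host's CLI config when available.
result = frostfs_cli("container list --rpc-endpoint s01.frostfs.devenv:8080")
print(result.stdout)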


@ -1,7 +1,7 @@
import re import re
from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo, NodeStatus from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo
class NetmapParser: class NetmapParser:
@ -44,7 +44,7 @@ class NetmapParser:
regexes = { regexes = {
"node_id": r"\d+: (?P<node_id>\w+)", "node_id": r"\d+: (?P<node_id>\w+)",
"node_data_ips": r"(?P<node_data_ips>/ip4/.+?)$", "node_data_ips": r"(?P<node_data_ips>/ip4/.+?)$",
"node_status": r"(?P<node_status>ONLINE|MAINTENANCE|OFFLINE)", "node_status": r"(?P<node_status>ONLINE|OFFLINE)",
"cluster_name": r"ClusterName: (?P<cluster_name>\w+)", "cluster_name": r"ClusterName: (?P<cluster_name>\w+)",
"continent": r"Continent: (?P<continent>\w+)", "continent": r"Continent: (?P<continent>\w+)",
"country": r"Country: (?P<country>\w+)", "country": r"Country: (?P<country>\w+)",
@ -62,17 +62,14 @@ class NetmapParser:
for node in netmap_nodes: for node in netmap_nodes:
for key, regex in regexes.items(): for key, regex in regexes.items():
search_result = re.search(regex, node, flags=re.MULTILINE) search_result = re.search(regex, node, flags=re.MULTILINE)
if search_result == None:
result_netmap[key] = None
continue
if key == "node_data_ips": if key == "node_data_ips":
result_netmap[key] = search_result[key].strip().split(" ") result_netmap[key] = search_result[key].strip().split(" ")
continue continue
if key == "external_address": if key == "external_address":
result_netmap[key] = search_result[key].strip().split(",") result_netmap[key] = search_result[key].strip().split(",")
continue continue
if key == "node_status": if search_result == None:
result_netmap[key] = NodeStatus(search_result[key].strip().lower()) result_netmap[key] = None
continue continue
result_netmap[key] = search_result[key].strip() result_netmap[key] = search_result[key].strip()
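
The hunk above narrows the status regex from ONLINE|MAINTENANCE|OFFLINE to ONLINE|OFFLINE, drops the NodeStatus conversion, and moves the None check after the IP/address branches. A small sketch of the removed behaviour; the sample fragment is an assumption, not captured netmap output:

import re

status_re = r"(?P<node_status>ONLINE|MAINTENANCE|OFFLINE)"
node_text = "MAINTENANCE"  # hypothetical fragment of `netmap snapshot` output
match = re.search(status_re, node_text, flags=re.MULTILINE)
if match is not None:
    status = match["node_status"].strip().lower()  # "maintenance"
    # old side: result_netmap["node_status"] = NodeStatus(status)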


@ -1,47 +0,0 @@
import re
from datetime import datetime
from typing import Optional
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsAuthmate
from frostfs_testlib.credentials.interfaces import S3Credentials, S3CredentialsProvider, User
from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC
from frostfs_testlib.shell import LocalShell
from frostfs_testlib.steps.cli.container import list_containers
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
class AuthmateS3CredentialsProvider(S3CredentialsProvider):
@reporter.step("Init S3 Credentials using Authmate CLI")
def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None) -> S3Credentials:
cluster_nodes: list[ClusterNode] = self.cluster.cluster_nodes
shell = LocalShell()
wallet = user.wallet
endpoint = cluster_node.storage_node.get_rpc_endpoint()
gate_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes]
# unique short bucket name
bucket = f"bucket-{hex(int(datetime.now().timestamp()*1000000))}"
frostfs_authmate: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC)
issue_secret_output = frostfs_authmate.secret.issue(
wallet=wallet.path,
peer=endpoint,
gate_public_key=gate_public_keys,
wallet_password=wallet.password,
container_policy=location_constraints,
container_friendly_name=bucket,
).stdout
aws_access_key_id = str(re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group("aws_access_key_id"))
aws_secret_access_key = str(
re.search(r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output).group("aws_secret_access_key")
)
cid = str(re.search(r"container_id.*:\s.(?P<container_id>\w*)", issue_secret_output).group("container_id"))
containers_list = list_containers(wallet, shell, endpoint)
assert cid in containers_list, f"Expected cid {cid} in {containers_list}"
user.s3_credentials = S3Credentials(aws_access_key_id, aws_secret_access_key)
return user.s3_credentials
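
The three regexes above scrape the authmate output. A hedged illustration of how they match; the sample text is an assumption consistent with the patterns, not real captured output:

import re

issue_secret_output = (
    "access_key_id: 'JxLk9mP2'\n"
    "secret_access_key: 'Qw8rTzV5'\n"
    "container_id: '6bQzX1'\n"
)
access_key = re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output)["aws_access_key_id"]
# access_key == "JxLk9mP2"; the `.` after `\s` consumes the opening quote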


@ -1,51 +0,0 @@
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Any, Optional
from frostfs_testlib.plugins import load_plugin
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
@dataclass
class S3Credentials:
access_key: str
secret_key: str
@dataclass
class User:
name: str
attributes: dict[str, Any] = field(default_factory=dict)
wallet: WalletInfo | None = None
s3_credentials: S3Credentials | None = None
class S3CredentialsProvider(ABC):
def __init__(self, cluster: Cluster) -> None:
self.cluster = cluster
@abstractmethod
def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None) -> S3Credentials:
raise NotImplementedError("Directly called abstract class?")
class GrpcCredentialsProvider(ABC):
def __init__(self, cluster: Cluster) -> None:
self.cluster = cluster
@abstractmethod
def provide(self, user: User, cluster_node: ClusterNode) -> WalletInfo:
raise NotImplementedError("Directly called abstract class?")
class CredentialsProvider(object):
S3: S3CredentialsProvider
GRPC: GrpcCredentialsProvider
def __init__(self, cluster: Cluster) -> None:
config = cluster.cluster_nodes[0].host.config
s3_cls = load_plugin("frostfs.testlib.credentials_providers", config.s3_creds_plugin_name)
self.S3 = s3_cls(cluster)
grpc_cls = load_plugin("frostfs.testlib.credentials_providers", config.grpc_creds_plugin_name)
self.GRPC = grpc_cls(cluster)
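
A usage sketch for the provider interfaces deleted above; cluster and user are assumed fixtures:

provider = CredentialsProvider(cluster)
node = cluster.cluster_nodes[0]
wallet = provider.GRPC.provide(user, node)  # returns WalletInfo
s3_creds = provider.S3.provide(user, node)  # returns S3Credentials(access_key, secret_key)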


@ -1,14 +0,0 @@
from frostfs_testlib import reporter
from frostfs_testlib.credentials.interfaces import GrpcCredentialsProvider, User
from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_PASS
from frostfs_testlib.shell.local_shell import LocalShell
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.dataclasses.wallet import WalletFactory, WalletInfo
class WalletFactoryProvider(GrpcCredentialsProvider):
@reporter.step("Init gRPC Credentials using wallet generation")
def provide(self, user: User, cluster_node: ClusterNode) -> WalletInfo:
wallet_factory = WalletFactory(ASSETS_DIR, LocalShell())
user.wallet = wallet_factory.create_wallet(file_name=user.name, password=DEFAULT_WALLET_PASS)
return user.wallet


@ -1,5 +1,5 @@
class Options: class Options:
DEFAULT_SHELL_TIMEOUT = 120 DEFAULT_SHELL_TIMEOUT = 90
@staticmethod @staticmethod
def get_default_shell_timeout(): def get_default_shell_timeout():


@ -6,9 +6,8 @@ from frostfs_testlib.healthcheck.interfaces import Healthcheck
from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
from frostfs_testlib.shell import CommandOptions from frostfs_testlib.shell import CommandOptions
from frostfs_testlib.steps.node_management import storage_node_healthcheck from frostfs_testlib.steps.node_management import storage_node_healthcheck
from frostfs_testlib.storage.cluster import ClusterNode, ServiceClass from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils.failover_utils import check_services_status
class BasicHealthcheck(Healthcheck): class BasicHealthcheck(Healthcheck):
@ -21,50 +20,34 @@ class BasicHealthcheck(Healthcheck):
assert not issues, "Issues found:\n" + "\n".join(issues) assert not issues, "Issues found:\n" + "\n".join(issues)
@wait_for_success(900, 30, title="Wait for full healthcheck for {cluster_node}") @wait_for_success(900, 30)
def full_healthcheck(self, cluster_node: ClusterNode): def full_healthcheck(self, cluster_node: ClusterNode):
checks = { checks = {
self.storage_healthcheck: {}, self.storage_healthcheck: {},
self._tree_healthcheck: {}, self._tree_healthcheck: {},
} }
self._perform(cluster_node, checks) with reporter.step(f"Perform full healthcheck for {cluster_node}"):
self._perform(cluster_node, checks)
@wait_for_success(900, 30, title="Wait for startup healthcheck on {cluster_node}") @wait_for_success(900, 30)
def startup_healthcheck(self, cluster_node: ClusterNode): def startup_healthcheck(self, cluster_node: ClusterNode):
checks = { checks = {
self.storage_healthcheck: {}, self.storage_healthcheck: {},
self._tree_healthcheck: {}, self._tree_healthcheck: {},
} }
self._perform(cluster_node, checks) with reporter.step(f"Perform startup healthcheck on {cluster_node}"):
self._perform(cluster_node, checks)
@wait_for_success(900, 30, title="Wait for storage healthcheck on {cluster_node}") @wait_for_success(900, 30)
def storage_healthcheck(self, cluster_node: ClusterNode) -> str | None: def storage_healthcheck(self, cluster_node: ClusterNode) -> str | None:
checks = { checks = {
self._storage_healthcheck: {}, self._storage_healthcheck: {},
} }
self._perform(cluster_node, checks) with reporter.step(f"Perform storage healthcheck on {cluster_node}"):
self._perform(cluster_node, checks)
@wait_for_success(120, 5, title="Wait for service healthcheck on {cluster_node}")
def services_healthcheck(self, cluster_node: ClusterNode):
svcs_to_check = cluster_node.services
checks = {
check_services_status: {
"service_list": svcs_to_check,
"expected_status": "active",
},
self._check_services: {"services": svcs_to_check},
}
self._perform(cluster_node, checks)
def _check_services(self, cluster_node: ClusterNode, services: list[ServiceClass]):
for svc in services:
result = svc.service_healthcheck()
if result == False:
return f"Service {svc.get_service_systemctl_name()} healthcheck failed on node {cluster_node}."
@reporter.step("Storage healthcheck on {cluster_node}") @reporter.step("Storage healthcheck on {cluster_node}")
def _storage_healthcheck(self, cluster_node: ClusterNode) -> str | None: def _storage_healthcheck(self, cluster_node: ClusterNode) -> str | None:


@ -15,7 +15,3 @@ class Healthcheck(ABC):
@abstractmethod @abstractmethod
def storage_healthcheck(self, cluster_node: ClusterNode): def storage_healthcheck(self, cluster_node: ClusterNode):
"""Perform storage service healthcheck on target cluster node""" """Perform storage service healthcheck on target cluster node"""
@abstractmethod
def services_healthcheck(self, cluster_node: ClusterNode):
"""Perform service status check on target cluster node"""


@ -10,7 +10,9 @@ class ParsedAttributes:
def parse(cls, attributes: dict[str, Any]): def parse(cls, attributes: dict[str, Any]):
# Pick attributes supported by the class # Pick attributes supported by the class
field_names = set(field.name for field in fields(cls)) field_names = set(field.name for field in fields(cls))
supported_attributes = {key: value for key, value in attributes.items() if key in field_names} supported_attributes = {
key: value for key, value in attributes.items() if key in field_names
}
return cls(**supported_attributes) return cls(**supported_attributes)
@ -27,7 +29,6 @@ class CLIConfig:
name: str name: str
exec_path: str exec_path: str
attributes: dict[str, str] = field(default_factory=dict) attributes: dict[str, str] = field(default_factory=dict)
extra_args: list[str] = field(default_factory=list)
@dataclass @dataclass
@ -62,14 +63,10 @@ class HostConfig:
plugin_name: str plugin_name: str
healthcheck_plugin_name: str healthcheck_plugin_name: str
address: str address: str
s3_creds_plugin_name: str = field(default="authmate")
grpc_creds_plugin_name: str = field(default="wallet_factory")
product: str = field(default="frostfs")
services: list[ServiceConfig] = field(default_factory=list) services: list[ServiceConfig] = field(default_factory=list)
clis: list[CLIConfig] = field(default_factory=list) clis: list[CLIConfig] = field(default_factory=list)
attributes: dict[str, str] = field(default_factory=dict) attributes: dict[str, str] = field(default_factory=dict)
interfaces: dict[str, str] = field(default_factory=dict) interfaces: dict[str, str] = field(default_factory=dict)
environment: dict[str, str] = field(default_factory=dict)
def __post_init__(self) -> None: def __post_init__(self) -> None:
self.services = [ServiceConfig(**service) for service in self.services or []] self.services = [ServiceConfig(**service) for service in self.services or []]
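
A worked mini-example for ParsedAttributes.parse shown above: unknown keys are filtered out rather than raising, so configs can carry extra attributes. The subclass here is hypothetical:

from dataclasses import dataclass

@dataclass
class ServiceAttributes(ParsedAttributes):  # hypothetical subclass
    start_timeout: int = 90

attrs = ServiceAttributes.parse({"start_timeout": 120, "unrelated": "x"})
# -> ServiceAttributes(start_timeout=120); "unrelated" never reaches __init__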


@ -152,7 +152,9 @@ class DockerHost(Host):
timeout=service_attributes.start_timeout, timeout=service_attributes.start_timeout,
) )
def wait_for_service_to_be_in_state(self, systemd_service_name: str, expected_state: str, timeout: int) -> None: def wait_for_service_to_be_in_state(
self, systemd_service_name: str, expected_state: str, timeout: int
) -> None:
raise NotImplementedError("Not implemented for docker") raise NotImplementedError("Not implemented for docker")
def get_data_directory(self, service_name: str) -> str: def get_data_directory(self, service_name: str) -> str:
@ -179,12 +181,6 @@ class DockerHost(Host):
def delete_pilorama(self, service_name: str) -> None: def delete_pilorama(self, service_name: str) -> None:
raise NotImplementedError("Not implemented for docker") raise NotImplementedError("Not implemented for docker")
def delete_file(self, file_path: str) -> None:
raise NotImplementedError("Not implemented for docker")
def is_file_exist(self, file_path: str) -> None:
raise NotImplementedError("Not implemented for docker")
def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None: def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None:
volume_path = self.get_data_directory(service_name) volume_path = self.get_data_directory(service_name)
@ -239,7 +235,6 @@ class DockerHost(Host):
since: Optional[datetime] = None, since: Optional[datetime] = None,
until: Optional[datetime] = None, until: Optional[datetime] = None,
unit: Optional[str] = None, unit: Optional[str] = None,
exclude_filter: Optional[str] = None,
) -> str: ) -> str:
client = self._get_docker_client() client = self._get_docker_client()
filtered_logs = "" filtered_logs = ""
@ -251,11 +246,8 @@ class DockerHost(Host):
logger.info(f"Got exception while dumping logs of '{container_name}': {exc}") logger.info(f"Got exception while dumping logs of '{container_name}': {exc}")
continue continue
if exclude_filter:
filtered_logs = filtered_logs.replace(exclude_filter, "")
matches = re.findall(filter_regex, filtered_logs, re.IGNORECASE + re.MULTILINE) matches = re.findall(filter_regex, filtered_logs, re.IGNORECASE + re.MULTILINE)
found = list(matches) found = list(matches)
if found: if found:
filtered_logs += f"{container_name}:\n{os.linesep.join(found)}" filtered_logs += f"{container_name}:\n{os.linesep.join(found)}"
@ -309,7 +301,9 @@ class DockerHost(Host):
return container return container
return None return None
def _wait_for_container_to_be_in_state(self, container_name: str, expected_state: str, timeout: int) -> None: def _wait_for_container_to_be_in_state(
self, container_name: str, expected_state: str, timeout: int
) -> None:
iterations = 10 iterations = 10
iteration_wait_time = timeout / iterations iteration_wait_time = timeout / iterations


@ -5,7 +5,6 @@ from typing import Optional
from frostfs_testlib.hosting.config import CLIConfig, HostConfig, ServiceConfig from frostfs_testlib.hosting.config import CLIConfig, HostConfig, ServiceConfig
from frostfs_testlib.shell.interfaces import Shell from frostfs_testlib.shell.interfaces import Shell
from frostfs_testlib.testing.readable import HumanReadableEnum from frostfs_testlib.testing.readable import HumanReadableEnum
from frostfs_testlib.testing.test_control import retry
class HostStatus(HumanReadableEnum): class HostStatus(HumanReadableEnum):
@ -26,7 +25,9 @@ class Host(ABC):
def __init__(self, config: HostConfig) -> None: def __init__(self, config: HostConfig) -> None:
self._config = config self._config = config
self._service_config_by_name = {service_config.name: service_config for service_config in config.services} self._service_config_by_name = {
service_config.name: service_config for service_config in config.services
}
self._cli_config_by_name = {cli_config.name: cli_config for cli_config in config.clis} self._cli_config_by_name = {cli_config.name: cli_config for cli_config in config.clis}
@property @property
@ -54,7 +55,7 @@ class Host(ABC):
raise ValueError(f"Unknown service name: '{service_name}'") raise ValueError(f"Unknown service name: '{service_name}'")
return service_config return service_config
def get_cli_config(self, cli_name: str, allow_empty: bool = False) -> CLIConfig: def get_cli_config(self, cli_name: str) -> CLIConfig:
"""Returns config of CLI tool with specified name. """Returns config of CLI tool with specified name.
The CLI must be located on this host. The CLI must be located on this host.
@ -66,7 +67,7 @@ class Host(ABC):
Config of the CLI tool. Config of the CLI tool.
""" """
cli_config = self._cli_config_by_name.get(cli_name) cli_config = self._cli_config_by_name.get(cli_name)
if cli_config is None and not allow_empty: if cli_config is None:
raise ValueError(f"Unknown CLI name: '{cli_name}'") raise ValueError(f"Unknown CLI name: '{cli_name}'")
return cli_config return cli_config
@ -219,22 +220,12 @@ class Host(ABC):
""" """
@abstractmethod @abstractmethod
def delete_file(self, file_path: str) -> None: def delete_pilorama(self, service_name: str) -> None:
""" """
Deletes file with provided file path Deletes all pilorama.db files in the node.
Args: Args:
file_path: full path to the file to delete service_name: Name of storage node service.
"""
@abstractmethod
def is_file_exist(self, file_path: str) -> bool:
"""
Checks if file exist
Args:
file_path: full path to the file to check
""" """
@ -296,7 +287,6 @@ class Host(ABC):
since: Optional[datetime] = None, since: Optional[datetime] = None,
until: Optional[datetime] = None, until: Optional[datetime] = None,
unit: Optional[str] = None, unit: Optional[str] = None,
exclude_filter: Optional[str] = None,
) -> str: ) -> str:
"""Get logs from host filtered by regex. """Get logs from host filtered by regex.
@ -332,7 +322,9 @@ class Host(ABC):
""" """
@abstractmethod @abstractmethod
def wait_for_service_to_be_in_state(self, systemd_service_name: str, expected_state: str, timeout: int) -> None: def wait_for_service_to_be_in_state(
self, systemd_service_name: str, expected_state: str, timeout: int
) -> None:
""" """
Waits for service to be in specified state. Waits for service to be in specified state.
@ -342,23 +334,3 @@ class Host(ABC):
timeout: Seconds to wait timeout: Seconds to wait
""" """
def down_interface(self, interface: str) -> None:
shell = self.get_shell()
shell.exec(f"ip link set {interface} down")
def up_interface(self, interface: str) -> None:
shell = self.get_shell()
shell.exec(f"ip link set {interface} up")
def check_state(self, interface: str) -> str:
shell = self.get_shell()
return shell.exec(f"ip link show {interface} | sed -z 's/.*state \(.*\) mode .*/\\1/'").stdout.strip()
@retry(max_attempts=5, sleep_interval=5, expected_result="UP")
def check_state_up(self, interface: str) -> str:
return self.check_state(interface=interface)
@retry(max_attempts=5, sleep_interval=5, expected_result="DOWN")
def check_state_down(self, interface: str) -> str:
return self.check_state(interface=interface)
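
A usage sketch for the interface helpers removed above; host is any Host implementation and eth1 is an assumed interface name:

host.down_interface("eth1")
assert host.check_state_down("eth1") == "DOWN"  # retried up to 5 times, 5s apart
host.up_interface("eth1")
assert host.check_state_up("eth1") == "UP"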


@ -1,96 +0,0 @@
from dataclasses import dataclass, field
from frostfs_testlib.load.load_config import LoadParams, LoadScenario
from frostfs_testlib.load.load_metrics import get_metrics_object
@dataclass
class SummarizedErorrs:
total: int = field(default_factory=int)
percent: float = field(default_factory=float)
threshold: float = field(default_factory=float)
by_node: dict[str, int] = field(default_factory=dict)
def calc_stats(self, operations):
self.total += sum(self.by_node.values())
if not operations:
return
self.percent = self.total / operations * 100
@dataclass
class SummarizedLatencies:
avg: float = field(default_factory=float)
min: float = field(default_factory=float)
max: float = field(default_factory=float)
by_node: dict[str, dict[str, int]] = field(default_factory=dict)
def calc_stats(self):
if not self.by_node:
return
avgs = [lt["avg"] for lt in self.by_node.values()]
self.avg = sum(avgs) / len(avgs)
minimal = [lt["min"] for lt in self.by_node.values()]
self.min = min(minimal)
maximum = [lt["max"] for lt in self.by_node.values()]
self.max = max(maximum)
@dataclass
class SummarizedStats:
threads: int = field(default_factory=int)
requested_rate: int = field(default_factory=int)
operations: int = field(default_factory=int)
rate: float = field(default_factory=float)
throughput: float = field(default_factory=float)
latencies: SummarizedLatencies = field(default_factory=SummarizedLatencies)
errors: SummarizedErorrs = field(default_factory=SummarizedErorrs)
total_bytes: int = field(default_factory=int)
passed: bool = True
def calc_stats(self):
self.errors.calc_stats(self.operations)
self.latencies.calc_stats()
self.passed = self.errors.percent <= self.errors.threshold
@staticmethod
def collect(load_params: LoadParams, load_summaries: dict) -> dict[str, "SummarizedStats"]:
if load_params.scenario in [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR]:
delete_vus = max(load_params.preallocated_deleters or 0, load_params.max_deleters or 0)
write_vus = max(load_params.preallocated_writers or 0, load_params.max_writers or 0)
read_vus = max(load_params.preallocated_readers or 0, load_params.max_readers or 0)
else:
write_vus = load_params.writers
read_vus = load_params.readers
delete_vus = load_params.deleters
summarized = {
"Write": SummarizedStats(threads=write_vus, requested_rate=load_params.write_rate),
"Read": SummarizedStats(threads=read_vus, requested_rate=load_params.read_rate),
"Delete": SummarizedStats(threads=delete_vus, requested_rate=load_params.delete_rate),
}
for node_key, load_summary in load_summaries.items():
metrics = get_metrics_object(load_params.scenario, load_summary)
for operation in metrics.operations:
target = summarized[operation._NAME]
if not operation.total_iterations:
continue
target.operations += operation.total_iterations
target.rate += operation.rate
target.latencies.by_node[node_key] = operation.latency
target.throughput += operation.throughput
target.errors.threshold = load_params.error_threshold
target.total_bytes = operation.total_bytes
if operation.failed_iterations:
target.errors.by_node[node_key] = operation.failed_iterations
for operation in summarized.values():
operation.calc_stats()
return summarized
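
A worked example for the deleted stats classes, assuming the definitions above: two nodes report failed iterations and calc_stats derives the totals and the pass/fail verdict.

errors = SummarizedErorrs(threshold=1.0, by_node={"node1": 3, "node2": 2})
errors.calc_stats(operations=1000)
# errors.total == 5, errors.percent == 0.5 -> under the 1.0% threshold, so passed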


@ -4,24 +4,24 @@ import math
import os import os
from dataclasses import dataclass from dataclasses import dataclass
from datetime import datetime from datetime import datetime
from threading import Event
from time import sleep from time import sleep
from typing import Any from typing import Any
from urllib.parse import urlparse from urllib.parse import urlparse
from frostfs_testlib import reporter
from frostfs_testlib.credentials.interfaces import User
from frostfs_testlib.load.interfaces.loader import Loader from frostfs_testlib.load.interfaces.loader import Loader
from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario, LoadType from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario, LoadType
from frostfs_testlib.processes.remote_process import RemoteProcess from frostfs_testlib.processes.remote_process import RemoteProcess
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.common import STORAGE_USER_NAME from frostfs_testlib.resources.common import STORAGE_USER_NAME
from frostfs_testlib.resources.load_params import K6_STOP_SIGNAL_TIMEOUT, K6_TEARDOWN_PERIOD from frostfs_testlib.resources.load_params import K6_STOP_SIGNAL_TIMEOUT, K6_TEARDOWN_PERIOD
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.testing.test_control import wait_for_success
EXIT_RESULT_CODE = 0 EXIT_RESULT_CODE = 0
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
reporter = get_reporter()
@dataclass @dataclass
@ -35,6 +35,7 @@ class LoadResults:
class K6: class K6:
_k6_process: RemoteProcess _k6_process: RemoteProcess
_start_time: datetime
def __init__( def __init__(
self, self,
@ -43,16 +44,16 @@ class K6:
k6_dir: str, k6_dir: str,
shell: Shell, shell: Shell,
loader: Loader, loader: Loader,
user: User, wallet: WalletInfo,
): ):
if load_params.scenario is None: if load_params.scenario is None:
raise RuntimeError("Scenario should not be none") raise RuntimeError("Scenario should not be none")
self.load_params = load_params self.load_params: LoadParams = load_params
self.endpoints = endpoints self.endpoints = endpoints
self.loader = loader self.loader: Loader = loader
self.shell = shell self.shell: Shell = shell
self.user = user self.wallet = wallet
self.preset_output: str = "" self.preset_output: str = ""
self.summary_json: str = os.path.join( self.summary_json: str = os.path.join(
self.load_params.working_dir, self.load_params.working_dir,
@ -61,27 +62,6 @@ class K6:
self._k6_dir: str = k6_dir self._k6_dir: str = k6_dir
command = (
f"{self._generate_env_variables()}{self._k6_dir}/k6 run {self._generate_k6_variables()} "
f"{self._k6_dir}/scenarios/{self.load_params.scenario.value}.js"
)
remote_user = STORAGE_USER_NAME if self.load_params.scenario == LoadScenario.LOCAL else None
process_id = self.load_params.load_id if self.load_params.scenario != LoadScenario.VERIFY else f"{self.load_params.load_id}_verify"
self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir, remote_user, process_id)
def _get_fill_percents(self):
fill_percents = self.shell.exec("df -H --output=source,pcent,target | grep frostfs | grep data").stdout.split("\n")
return [line.split() for line in fill_percents][:-1]
def check_fill_percent(self):
fill_percents = self._get_fill_percents()
percent_mean = 0
for line in fill_percents:
percent_mean += float(line[1].split("%")[0])
percent_mean = percent_mean / len(fill_percents)
logger.info(f"{self.loader.ip} mean fill percent is {percent_mean}")
return percent_mean >= self.load_params.fill_percent
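
A worked example for the removed fill-percent check; the parsed df rows below are assumptions matching the `--output=source,pcent,target` format used above:

fill_percents = [
    ["/dev/sda1", "72%", "/srv/frostfs/data0"],
    ["/dev/sdb1", "78%", "/srv/frostfs/data1"],
]
percent_mean = sum(float(line[1].split("%")[0]) for line in fill_percents) / len(fill_percents)
# percent_mean == 75.0; k6 load stops once this reaches load_params.fill_percent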
@property @property
def process_dir(self) -> str: def process_dir(self) -> str:
return self._k6_process.process_dir return self._k6_process.process_dir
@ -100,8 +80,8 @@ class K6:
preset_grpc: [ preset_grpc: [
preset_grpc, preset_grpc,
f"--endpoint {','.join(self.endpoints)}", f"--endpoint {','.join(self.endpoints)}",
f"--wallet {self.user.wallet.path} ", f"--wallet {self.wallet.path} ",
f"--config {self.user.wallet.config_path} ", f"--config {self.wallet.config_path} ",
], ],
preset_s3: [ preset_s3: [
preset_s3, preset_s3,
@ -122,9 +102,9 @@ class K6:
self.preset_output = result.stdout.strip("\n") self.preset_output = result.stdout.strip("\n")
return self.preset_output return self.preset_output
@reporter.step("Generate K6 variables") @reporter.step_deco("Generate K6 command")
def _generate_k6_variables(self) -> str: def _generate_env_variables(self) -> str:
env_vars = self.load_params.get_k6_vars() env_vars = self.load_params.get_env_vars()
env_vars[f"{self.load_params.load_type.value.upper()}_ENDPOINTS"] = ",".join(self.endpoints) env_vars[f"{self.load_params.load_type.value.upper()}_ENDPOINTS"] = ",".join(self.endpoints)
env_vars["SUMMARY_JSON"] = self.summary_json env_vars["SUMMARY_JSON"] = self.summary_json
@ -132,39 +112,31 @@ class K6:
reporter.attach("\n".join(f"{param}: {value}" for param, value in env_vars.items()), "K6 ENV variables") reporter.attach("\n".join(f"{param}: {value}" for param, value in env_vars.items()), "K6 ENV variables")
return " ".join([f"-e {param}='{value}'" for param, value in env_vars.items() if value is not None]) return " ".join([f"-e {param}='{value}'" for param, value in env_vars.items() if value is not None])
@reporter.step("Generate env variables")
def _generate_env_variables(self) -> str:
env_vars = self.load_params.get_env_vars()
if not env_vars:
return ""
reporter.attach("\n".join(f"{param}: {value}" for param, value in env_vars.items()), "ENV variables")
return " ".join([f"{param}='{value}'" for param, value in env_vars.items() if value is not None]) + " "
def get_start_time(self) -> datetime:
return datetime.fromtimestamp(self._k6_process.start_time())
def get_end_time(self) -> datetime:
return datetime.fromtimestamp(self._k6_process.end_time())
def start(self) -> None: def start(self) -> None:
with reporter.step(f"Start load from loader {self.loader.ip} on endpoints {self.endpoints}"): with reporter.step(f"Start load from loader {self.loader.ip} on endpoints {self.endpoints}"):
self._k6_process.start() self._start_time = int(datetime.utcnow().timestamp())
command = (
f"{self._k6_dir}/k6 run {self._generate_env_variables()} "
f"{self._k6_dir}/scenarios/{self.load_params.scenario.value}.js"
)
user = STORAGE_USER_NAME if self.load_params.scenario == LoadScenario.LOCAL else None
self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir, user)
def wait_until_finished(self, event: Event, soft_timeout: int = 0) -> None: def wait_until_finished(self, soft_timeout: int = 0) -> None:
with reporter.step(f"Wait until load is finished from loader {self.loader.ip} on endpoints {self.endpoints}"): with reporter.step(f"Wait until load is finished from loader {self.loader.ip} on endpoints {self.endpoints}"):
if self.load_params.scenario == LoadScenario.VERIFY: if self.load_params.scenario == LoadScenario.VERIFY:
timeout = self.load_params.verify_time or 0 timeout = self.load_params.verify_time or 0
else: else:
timeout = self.load_params.load_time or 0 timeout = self.load_params.load_time or 0
start_time = int(self.get_start_time().timestamp())
current_time = int(datetime.utcnow().timestamp()) current_time = int(datetime.utcnow().timestamp())
working_time = current_time - start_time working_time = current_time - self._start_time
remaining_time = timeout - working_time remaining_time = timeout - working_time
setup_teardown_time = ( setup_teardown_time = (
int(K6_TEARDOWN_PERIOD) + self.load_params.get_init_time() + int(self.load_params.setup_timeout.replace("s", "").strip()) int(K6_TEARDOWN_PERIOD)
+ self.load_params.get_init_time()
+ int(self.load_params.setup_timeout.replace("s", "").strip())
) )
remaining_time_including_setup_and_teardown = remaining_time + setup_teardown_time remaining_time_including_setup_and_teardown = remaining_time + setup_teardown_time
timeout = remaining_time_including_setup_and_teardown timeout = remaining_time_including_setup_and_teardown
@ -175,7 +147,7 @@ class K6:
original_timeout = timeout original_timeout = timeout
timeouts = { timeouts = {
"K6 start time": start_time, "K6 start time": self._start_time,
"Current time": current_time, "Current time": current_time,
"K6 working time": working_time, "K6 working time": working_time,
"Remaining time for load": remaining_time, "Remaining time for load": remaining_time,
@ -191,23 +163,9 @@ class K6:
wait_interval = min_wait_interval wait_interval = min_wait_interval
if self._k6_process is None: if self._k6_process is None:
assert "No k6 instances were executed" assert "No k6 instances were executed"
while timeout > 0: while timeout > 0:
if not self.load_params.fill_percent is None:
with reporter.step(f"Check the percentage of filling of all data disks on the node"):
if self.check_fill_percent():
logger.info(f"Stopping load on because disks is filled more then {self.load_params.fill_percent}%")
event.set()
self.stop()
return
if event.is_set():
self.stop()
return
if not self._k6_process.running(): if not self._k6_process.running():
return return
remaining_time_hours = f"{timeout//3600}h" if timeout // 3600 != 0 else "" remaining_time_hours = f"{timeout//3600}h" if timeout // 3600 != 0 else ""
remaining_time_minutes = f"{timeout//60%60}m" if timeout // 60 % 60 != 0 else "" remaining_time_minutes = f"{timeout//60%60}m" if timeout // 60 % 60 != 0 else ""
logger.info( logger.info(
@ -258,7 +216,7 @@ class K6:
return self._k6_process.running() return self._k6_process.running()
return False return False
@reporter.step("Wait until K6 process end") @reporter.step_deco("Wait until K6 process end")
@wait_for_success(K6_STOP_SIGNAL_TIMEOUT, 15, False, False, "Can not stop K6 process within timeout") @wait_for_success(K6_STOP_SIGNAL_TIMEOUT, 15, False, False, "Can not stop K6 process within timeout")
def _wait_until_process_end(self): def _wait_until_process_end(self):
return self._k6_process.running() return self._k6_process.running()


@ -3,28 +3,11 @@ import os
from dataclasses import dataclass, field, fields, is_dataclass from dataclasses import dataclass, field, fields, is_dataclass
from enum import Enum from enum import Enum
from types import MappingProxyType from types import MappingProxyType
from typing import Any, Callable, Optional, get_args from typing import Any, Optional, get_args
from frostfs_testlib.utils.converting_utils import calc_unit from frostfs_testlib.utils.converting_utils import calc_unit
def convert_time_to_seconds(time: int | str | None) -> int:
if time is None:
return None
if str(time).isdigit():
seconds = int(time)
else:
days, hours, minutes = 0, 0, 0
if "d" in time:
days, time = time.split("d")
if "h" in time:
hours, time = time.split("h")
if "min" in time:
minutes = time.replace("min", "")
seconds = int(days) * 86400 + int(hours) * 3600 + int(minutes) * 60
return seconds
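
Worked conversions for the removed helper above:

# convert_time_to_seconds(90)          -> 90     (bare digits pass through)
# convert_time_to_seconds("1d2h30min") -> 95400  (86400 + 7200 + 1800)
# convert_time_to_seconds(None)        -> None   (despite the int annotation)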
class LoadType(Enum): class LoadType(Enum):
gRPC = "grpc" gRPC = "grpc"
S3 = "s3" S3 = "s3"
@ -57,18 +40,11 @@ all_load_scenarios = [
LoadScenario.gRPC_CAR, LoadScenario.gRPC_CAR,
LoadScenario.LOCAL, LoadScenario.LOCAL,
LoadScenario.S3_MULTIPART, LoadScenario.S3_MULTIPART,
LoadScenario.S3_LOCAL, LoadScenario.S3_LOCAL
] ]
all_scenarios = all_load_scenarios.copy() + [LoadScenario.VERIFY] all_scenarios = all_load_scenarios.copy() + [LoadScenario.VERIFY]
constant_vus_scenarios = [ constant_vus_scenarios = [LoadScenario.gRPC, LoadScenario.S3, LoadScenario.HTTP, LoadScenario.LOCAL, LoadScenario.S3_MULTIPART, LoadScenario.S3_LOCAL]
LoadScenario.gRPC,
LoadScenario.S3,
LoadScenario.HTTP,
LoadScenario.LOCAL,
LoadScenario.S3_MULTIPART,
LoadScenario.S3_LOCAL,
]
constant_arrival_rate_scenarios = [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR] constant_arrival_rate_scenarios = [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR]
grpc_preset_scenarios = [ grpc_preset_scenarios = [
@ -93,19 +69,15 @@ def metadata_field(
scenario_variable: Optional[str] = None, scenario_variable: Optional[str] = None,
string_repr: Optional[bool] = True, string_repr: Optional[bool] = True,
distributed: Optional[bool] = False, distributed: Optional[bool] = False,
formatter: Optional[Callable] = None,
env_variable: Optional[str] = None,
): ):
return field( return field(
default=None, default=None,
metadata={ metadata={
"applicable_scenarios": applicable_scenarios, "applicable_scenarios": applicable_scenarios,
"preset_argument": preset_param, "preset_argument": preset_param,
"scenario_variable": scenario_variable, "env_variable": scenario_variable,
"string_repr": string_repr, "string_repr": string_repr,
"distributed": distributed, "distributed": distributed,
"formatter": formatter,
"env_variable": env_variable,
}, },
) )
@ -149,14 +121,16 @@ class Preset:
pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON", False) pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON", False)
# Workers count for preset # Workers count for preset
workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None, False) workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None, False)
# Acl for container/buckets
acl: Optional[str] = metadata_field(all_load_scenarios, "acl", None, False)
# ------ GRPC ------ # ------ GRPC ------
# Amount of containers which should be created # Amount of containers which should be created
containers_count: Optional[int] = metadata_field(grpc_preset_scenarios, "containers", None, False) containers_count: Optional[int] = metadata_field(
grpc_preset_scenarios, "containers", None, False
)
# Container placement policy for containers for gRPC # Container placement policy for containers for gRPC
container_placement_policy: Optional[str] = metadata_field(grpc_preset_scenarios, "policy", None, False) container_placement_policy: Optional[str] = metadata_field(
grpc_preset_scenarios, "policy", None, False
)
# ------ S3 ------ # ------ S3 ------
# Amount of buckets which should be created # Amount of buckets which should be created
@ -170,19 +144,6 @@ class Preset:
# Flag to control preset errors # Flag to control preset errors
ignore_errors: Optional[bool] = metadata_field(all_load_scenarios, "ignore-errors", None, False) ignore_errors: Optional[bool] = metadata_field(all_load_scenarios, "ignore-errors", None, False)
# Flag to ensure created containers store data on local endpoints
local: Optional[bool] = metadata_field(grpc_preset_scenarios, "local", None, False)
@dataclass
class PrometheusParams:
# Prometheus server URL
server_url: Optional[str] = metadata_field(all_load_scenarios, env_variable="K6_PROMETHEUS_RW_SERVER_URL", string_repr=False)
# Prometheus trend stats
trend_stats: Optional[str] = metadata_field(all_load_scenarios, env_variable="K6_PROMETHEUS_RW_TREND_STATS", string_repr=False)
# Additional tags
metrics_tags: Optional[str] = metadata_field(all_load_scenarios, None, "METRIC_TAGS", False)
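The deleted PrometheusParams block maps onto k6's experimental Prometheus remote-write output. A hedged sketch of wiring those variables into a k6 launch; K6_PROMETHEUS_RW_SERVER_URL and K6_PROMETHEUS_RW_TREND_STATS are k6's own environment variables, while the function and paths below are illustrative only:

import os
import subprocess

def run_k6_with_prometheus(script: str, server_url: str, trend_stats: str = "p(95),p(99)") -> None:
    # Export k6's remote-write settings, then run with the prometheus output.
    env = dict(os.environ)
    env["K6_PROMETHEUS_RW_SERVER_URL"] = server_url
    env["K6_PROMETHEUS_RW_TREND_STATS"] = trend_stats
    subprocess.run(["k6", "run", "-o", "experimental-prometheus-rw", script], env=env, check=True)

# run_k6_with_prometheus("scenario.js", "http://prometheus:9090/api/v1/write")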
@dataclass @dataclass
class LoadParams: class LoadParams:
@ -219,48 +180,31 @@ class LoadParams:
awscli_url: Optional[str] = None awscli_url: Optional[str] = None
# No ssl verification flag # No ssl verification flag
no_verify_ssl: Optional[bool] = metadata_field( no_verify_ssl: Optional[bool] = metadata_field(
[ [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.S3_MULTIPART, LoadScenario.S3_LOCAL, LoadScenario.VERIFY, LoadScenario.HTTP],
LoadScenario.S3,
LoadScenario.S3_CAR,
LoadScenario.S3_MULTIPART,
LoadScenario.S3_LOCAL,
LoadScenario.VERIFY,
LoadScenario.HTTP,
],
"no-verify-ssl", "no-verify-ssl",
"NO_VERIFY_SSL", "NO_VERIFY_SSL",
False, False,
) )
# Percentage of filling of all data disks on all nodes
fill_percent: Optional[float] = None
# If set, the payload is generated on the fly and is not fully read into memory.
streaming: Optional[int] = metadata_field(all_load_scenarios, None, "STREAMING", False)
# Output format
output: Optional[str] = metadata_field(all_load_scenarios, None, "K6_OUT", False)
# Prometheus params
prometheus: Optional[PrometheusParams] = None
# ------- COMMON SCENARIO PARAMS ------- # ------- COMMON SCENARIO PARAMS -------
# Load time is the maximum duration for k6 to give load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value. # Load time is the maximum duration for k6 to give load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value.
load_time: Optional[int] = metadata_field(all_load_scenarios, None, "DURATION", False, formatter=convert_time_to_seconds) load_time: Optional[int] = metadata_field(all_load_scenarios, None, "DURATION", False)
# Object size in KB for load and preset. # Object size in KB for load and preset.
object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE", False) object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE", False)
# For read operations, controls which set objects are read from # For read operations, controls which set objects are read from
read_from: Optional[ReadFrom] = None read_from: Optional[ReadFrom] = None
# For read operations done from REGISTRY, controls how long an object must live before it is used for a read operation
read_age: Optional[int] = metadata_field(all_load_scenarios, None, "READ_AGE", False)
# Output registry K6 file. Filled automatically. # Output registry K6 file. Filled automatically.
registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE", False) registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE", False)
# Used to reuse a custom registry file left over from another load run
custom_registry: Optional[str] = None
# Flag to force a fresh registry file, removing any leftover one before the run
force_fresh_registry: Optional[bool] = None
# Specifies the minimum duration of every single execution (i.e. iteration). # Specifies the minimum duration of every single execution (i.e. iteration).
# Any iterations that are shorter than this value will cause that VU to # Any iterations that are shorter than this value will cause that VU to
# sleep for the remainder of the time until the specified minimum duration is reached. # sleep for the remainder of the time until the specified minimum duration is reached.
min_iteration_duration: Optional[str] = metadata_field(all_load_scenarios, None, "K6_MIN_ITERATION_DURATION", False) min_iteration_duration: Optional[str] = metadata_field(
all_load_scenarios, None, "K6_MIN_ITERATION_DURATION", False
)
# Prepare/cut objects locally on client before sending # Prepare/cut objects locally on client before sending
prepare_locally: Optional[bool] = metadata_field([LoadScenario.gRPC, LoadScenario.gRPC_CAR], None, "PREPARE_LOCALLY", False) prepare_locally: Optional[bool] = metadata_field(
[LoadScenario.gRPC, LoadScenario.gRPC_CAR], None, "PREPARE_LOCALLY", False
)
# Specifies K6 setupTimeout time. Currently hardcoded in xk6 as 5 seconds for all scenarios # Specifies K6 setupTimeout time. Currently hardcoded in xk6 as 5 seconds for all scenarios
# https://k6.io/docs/using-k6/k6-options/reference/#setup-timeout # https://k6.io/docs/using-k6/k6-options/reference/#setup-timeout
setup_timeout: Optional[str] = metadata_field(all_scenarios, None, "K6_SETUP_TIMEOUT", False) setup_timeout: Optional[str] = metadata_field(all_scenarios, None, "K6_SETUP_TIMEOUT", False)
@ -281,77 +225,85 @@ class LoadParams:
# ------- CONSTANT ARRIVAL RATE SCENARIO PARAMS ------- # ------- CONSTANT ARRIVAL RATE SCENARIO PARAMS -------
# Number of iterations to start during each timeUnit period for write. # Number of iterations to start during each timeUnit period for write.
write_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "WRITE_RATE", True, True) write_rate: Optional[int] = metadata_field(
constant_arrival_rate_scenarios, None, "WRITE_RATE", True, True
)
# Number of iterations to start during each timeUnit period for read. # Number of iterations to start during each timeUnit period for read.
read_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "READ_RATE", True, True) read_rate: Optional[int] = metadata_field(
constant_arrival_rate_scenarios, None, "READ_RATE", True, True
)
# Number of iterations to start during each timeUnit period for delete. # Number of iterations to start during each timeUnit period for delete.
delete_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "DELETE_RATE", True, True) delete_rate: Optional[int] = metadata_field(
constant_arrival_rate_scenarios, None, "DELETE_RATE", True, True
)
# Amount of preAllocatedVUs for write operations. # Amount of preAllocatedVUs for write operations.
preallocated_writers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_WRITERS", True, True) preallocated_writers: Optional[int] = metadata_field(
constant_arrival_rate_scenarios, None, "PRE_ALLOC_WRITERS", True, True
)
# Amount of maxVUs for write operations. # Amount of maxVUs for write operations.
max_writers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_WRITERS", False, True) max_writers: Optional[int] = metadata_field(
constant_arrival_rate_scenarios, None, "MAX_WRITERS", False, True
)
# Amount of preAllocatedVUs for read operations. # Amount of preAllocatedVUs for read operations.
preallocated_readers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_READERS", True, True) preallocated_readers: Optional[int] = metadata_field(
constant_arrival_rate_scenarios, None, "PRE_ALLOC_READERS", True, True
)
# Amount of maxVUs for read operations. # Amount of maxVUs for read operations.
max_readers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_READERS", False, True) max_readers: Optional[int] = metadata_field(
constant_arrival_rate_scenarios, None, "MAX_READERS", False, True
)
# Amount of preAllocatedVUs for delete operations. # Amount of preAllocatedVUs for delete operations.
preallocated_deleters: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_DELETERS", True, True) preallocated_deleters: Optional[int] = metadata_field(
constant_arrival_rate_scenarios, None, "PRE_ALLOC_DELETERS", True, True
)
# Amount of maxVUs for delete operations. # Amount of maxVUs for delete operations.
max_deleters: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_DELETERS", False, True) max_deleters: Optional[int] = metadata_field(
constant_arrival_rate_scenarios, None, "MAX_DELETERS", False, True
)
# Multipart # Multipart
# Number of parts to upload in parallel # Number of parts to upload in parallel
writers_multipart: Optional[int] = metadata_field([LoadScenario.S3_MULTIPART], None, "WRITERS_MULTIPART", False, True) writers_multipart: Optional[int] = metadata_field(
[LoadScenario.S3_MULTIPART], None, "WRITERS_MULTIPART", False, True
)
# Part size must be greater than 5 MB # Part size must be greater than 5 MB
write_object_part_size: Optional[int] = metadata_field([LoadScenario.S3_MULTIPART], None, "WRITE_OBJ_PART_SIZE", False) write_object_part_size: Optional[int] = metadata_field([LoadScenario.S3_MULTIPART], None, "WRITE_OBJ_PART_SIZE", False)
# Period of time to apply the rate value. # Period of time to apply the rate value.
time_unit: Optional[str] = metadata_field(constant_arrival_rate_scenarios, None, "TIME_UNIT", False) time_unit: Optional[str] = metadata_field(
constant_arrival_rate_scenarios, None, "TIME_UNIT", False
)
# ------- VERIFY SCENARIO PARAMS ------- # ------- VERIFY SCENARIO PARAMS -------
# Maximum verification time for k6 to verify objects. Default is BACKGROUND_LOAD_MAX_VERIFY_TIME (3600). # Maximum verification time for k6 to verify objects. Default is BACKGROUND_LOAD_MAX_VERIFY_TIME (3600).
verify_time: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "TIME_LIMIT", False) verify_time: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "TIME_LIMIT", False)
# Amount of verification VUs. # Amount of verification VUs.
verify_clients: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "CLIENTS", True, False) verify_clients: Optional[int] = metadata_field(
[LoadScenario.VERIFY], None, "CLIENTS", True, False
)
# ------- LOCAL SCENARIO PARAMS ------- # ------- LOCAL SCENARIO PARAMS -------
# Config file location (filled automatically) # Config file location (filled automatically)
config_file: Optional[str] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "CONFIG_FILE", False) config_file: Optional[str] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "CONFIG_FILE", False)
# Config directory location (filled automatically) # Config directory location (filled automatically)
config_dir: Optional[str] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "CONFIG_DIR", False) config_dir: Optional[str] = metadata_field([LoadScenario.S3_LOCAL], None, "CONFIG_DIR", False)
def set_id(self, load_id): def set_id(self, load_id):
self.load_id = load_id self.load_id = load_id
if self.read_from == ReadFrom.REGISTRY: if self.read_from == ReadFrom.REGISTRY:
self.registry_file = os.path.join(self.working_dir, f"{load_id}_registry.bolt") self.registry_file = os.path.join(self.working_dir, f"{load_id}_registry.bolt")
# For now it's okay to have it this way
if self.custom_registry is not None:
self.registry_file = self.custom_registry
if self.read_from == ReadFrom.PRESET: if self.read_from == ReadFrom.PRESET:
self.registry_file = None self.registry_file = None
if self.preset: if self.preset:
self.preset.pregen_json = os.path.join(self.working_dir, f"{load_id}_prepare.json") self.preset.pregen_json = os.path.join(self.working_dir, f"{load_id}_prepare.json")
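set_id() ties every per-run artifact to the load ID. The same naming scheme, shown standalone with example values:

import os

def registry_path(working_dir: str, load_id: str) -> str:
    # <working_dir>/<load_id>_registry.bolt, as assigned in set_id()
    return os.path.join(working_dir, f"{load_id}_registry.bolt")

def pregen_json_path(working_dir: str, load_id: str) -> str:
    # <working_dir>/<load_id>_prepare.json, as assigned to preset.pregen_json
    return os.path.join(working_dir, f"{load_id}_prepare.json")

print(registry_path("/tmp/load", "run42"))    # /tmp/load/run42_registry.bolt
print(pregen_json_path("/tmp/load", "run42"))  # /tmp/load/run42_prepare.json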
def get_k6_vars(self):
env_vars = {
meta_field.metadata["scenario_variable"]: meta_field.value
for meta_field in self._get_meta_fields(self)
if self.scenario in meta_field.metadata["applicable_scenarios"]
and meta_field.metadata["scenario_variable"]
and meta_field.value is not None
}
return env_vars
def get_env_vars(self): def get_env_vars(self):
env_vars = { env_vars = {
meta_field.metadata["env_variable"]: meta_field.value meta_field.metadata["env_variable"]: meta_field.value
@ -389,8 +341,10 @@ class LoadParams:
return math.ceil(self._get_total_vus() * self.vu_init_time) return math.ceil(self._get_total_vus() * self.vu_init_time)
def _get_total_vus(self) -> int: def _get_total_vus(self) -> int:
vu_fields = ["writers", "preallocated_writers", "readers", "preallocated_readers"] vu_fields = ["writers", "preallocated_writers"]
data_fields = [getattr(self, field.name) or 0 for field in fields(self) if field.name in vu_fields] data_fields = [
getattr(self, field.name) or 0 for field in fields(self) if field.name in vu_fields
]
return sum(data_fields) return sum(data_fields)
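_get_total_vus() sums a whitelist of dataclass fields while treating None as zero. A self-contained sketch of the pattern:

from dataclasses import dataclass, fields
from typing import Optional

@dataclass
class VuCounts:
    writers: Optional[int] = None
    preallocated_writers: Optional[int] = None
    readers: Optional[int] = None
    preallocated_readers: Optional[int] = None

def total_vus(counts: VuCounts) -> int:
    # None values contribute 0 via `or 0`
    names = {"writers", "preallocated_writers", "readers", "preallocated_readers"}
    return sum(getattr(counts, f.name) or 0 for f in fields(counts) if f.name in names)

print(total_vus(VuCounts(writers=4, readers=2)))  # 6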
def _get_applicable_fields(self): def _get_applicable_fields(self):
@ -421,31 +375,14 @@ class LoadParams:
] ]
for field in data_fields: for field in data_fields:
actual_field_type = get_args(field.type)[0] if len(get_args(field.type)) else get_args(field.type) actual_field_type = (
get_args(field.type)[0] if len(get_args(field.type)) else get_args(field.type)
)
if is_dataclass(actual_field_type) and getattr(instance, field.name): if is_dataclass(actual_field_type) and getattr(instance, field.name):
fields_with_data += LoadParams._get_meta_fields(getattr(instance, field.name)) fields_with_data += LoadParams._get_meta_fields(getattr(instance, field.name))
return fields_with_data or [] return fields_with_data or []
def _get_field_formatter(self, field_name: str) -> Callable | None:
data_fields = fields(self)
formatters = [
field.metadata["formatter"]
for field in data_fields
if field.name == field_name and "formatter" in field.metadata and field.metadata["formatter"] != None
]
if formatters:
return formatters[0]
return None
def __setattr__(self, field_name, value):
formatter = self._get_field_formatter(field_name)
if formatter:
value = formatter(value)
super().__setattr__(field_name, value)
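The deleted formatter hook normalizes values at assignment time through __setattr__; convert_time_to_seconds is one such formatter. A sketch under the assumption that it accepts plain seconds or "1h30m"-style strings (the library's accepted formats may differ):

import re
from typing import Optional, Union

def convert_time_to_seconds(value: Union[int, str, None]) -> Optional[int]:
    # Assumed behavior: ints and None pass through, digit strings are cast,
    # "<n>h<n>m<n>s" strings are converted to seconds.
    if value is None or isinstance(value, int):
        return value
    if str(value).isdigit():
        return int(value)
    match = re.fullmatch(r"(?:(\d+)h)?(?:(\d+)m)?(?:(\d+)s)?", str(value))
    if not match or not any(match.groups()):
        raise ValueError(f"Unsupported duration: {value!r}")
    hours, minutes, seconds = (int(g) if g else 0 for g in match.groups())
    return hours * 3600 + minutes * 60 + seconds

print(convert_time_to_seconds("1h30m"))  # 5400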
def __str__(self) -> str: def __str__(self) -> str:
load_type_str = self.scenario.value if self.scenario else self.load_type.value load_type_str = self.scenario.value if self.scenario else self.load_type.value
# TODO: migrate load_params defaults to testlib # TODO: migrate load_params defaults to testlib
@ -456,7 +393,9 @@ class LoadParams:
static_params = [f"{load_type_str}"] static_params = [f"{load_type_str}"]
dynamic_params = [ dynamic_params = [
f"{meta_field.name}={meta_field.value}" for meta_field in self._get_applicable_fields() if meta_field.metadata["string_repr"] f"{meta_field.name}={meta_field.value}"
for meta_field in self._get_applicable_fields()
if meta_field.metadata["string_repr"]
] ]
params = ", ".join(static_params + dynamic_params) params = ", ".join(static_params + dynamic_params)

View file

@ -1,47 +1,95 @@
from abc import ABC from abc import ABC
from typing import Any, Optional from typing import Any
from frostfs_testlib.load.load_config import LoadScenario from frostfs_testlib.load.load_config import LoadScenario
class OperationMetric(ABC): class MetricsBase(ABC):
_NAME = "" _WRITE_SUCCESS = ""
_SUCCESS = "" _WRITE_ERRORS = ""
_ERRORS = "" _WRITE_THROUGHPUT = "data_sent"
_THROUGHPUT = "" _WRITE_LATENCY = ""
_LATENCY = ""
_READ_SUCCESS = ""
_READ_ERRORS = ""
_READ_LATENCY = ""
_READ_THROUGHPUT = "data_received"
_DELETE_SUCCESS = ""
_DELETE_LATENCY = ""
_DELETE_ERRORS = ""
def __init__(self, summary) -> None: def __init__(self, summary) -> None:
self.summary = summary self.summary = summary
self.metrics = summary["metrics"] self.metrics = summary["metrics"]
@property @property
def total_iterations(self) -> int: def write_total_iterations(self) -> int:
return self._get_metric(self._SUCCESS) + self._get_metric(self._ERRORS) return self._get_metric(self._WRITE_SUCCESS) + self._get_metric(self._WRITE_ERRORS)
@property @property
def success_iterations(self) -> int: def write_success_iterations(self) -> int:
return self._get_metric(self._SUCCESS) return self._get_metric(self._WRITE_SUCCESS)
@property @property
def latency(self) -> dict: def write_latency(self) -> dict:
return self._get_metric(self._LATENCY) return self._get_metric(self._WRITE_LATENCY)
@property @property
def rate(self) -> float: def write_rate(self) -> float:
return self._get_metric_rate(self._SUCCESS) return self._get_metric_rate(self._WRITE_SUCCESS)
@property @property
def failed_iterations(self) -> int: def write_failed_iterations(self) -> int:
return self._get_metric(self._ERRORS) return self._get_metric(self._WRITE_ERRORS)
@property @property
def throughput(self) -> float: def write_throughput(self) -> float:
return self._get_metric_rate(self._THROUGHPUT) return self._get_metric_rate(self._WRITE_THROUGHPUT)
@property @property
def total_bytes(self) -> float: def read_total_iterations(self) -> int:
return self._get_metric(self._THROUGHPUT) return self._get_metric(self._READ_SUCCESS) + self._get_metric(self._READ_ERRORS)
@property
def read_success_iterations(self) -> int:
return self._get_metric(self._READ_SUCCESS)
@property
def read_latency(self) -> dict:
return self._get_metric(self._READ_LATENCY)
@property
def read_rate(self) -> int:
return self._get_metric_rate(self._READ_SUCCESS)
@property
def read_failed_iterations(self) -> int:
return self._get_metric(self._READ_ERRORS)
@property
def read_throughput(self) -> float:
return self._get_metric_rate(self._READ_THROUGHPUT)
@property
def delete_total_iterations(self) -> int:
return self._get_metric(self._DELETE_SUCCESS) + self._get_metric(self._DELETE_ERRORS)
@property
def delete_success_iterations(self) -> int:
return self._get_metric(self._DELETE_SUCCESS)
@property
def delete_latency(self) -> dict:
return self._get_metric(self._DELETE_LATENCY)
@property
def delete_failed_iterations(self) -> int:
return self._get_metric(self._DELETE_ERRORS)
@property
def delete_rate(self) -> int:
return self._get_metric_rate(self._DELETE_SUCCESS)
def _get_metric(self, metric: str) -> int: def _get_metric(self, metric: str) -> int:
metrics_method_map = { metrics_method_map = {
@ -56,7 +104,9 @@ class OperationMetric(ABC):
metric = self.metrics[metric] metric = self.metrics[metric]
metric_type = metric["type"] metric_type = metric["type"]
if metric_type not in metrics_method_map: if metric_type not in metrics_method_map:
raise Exception(f"Unsupported metric type: {metric_type}, supported: {metrics_method_map.keys()}") raise Exception(
f"Unsupported metric type: {metric_type}, supported: {metrics_method_map.keys()}"
)
return metrics_method_map[metric_type](metric) return metrics_method_map[metric_type](metric)
@ -69,7 +119,9 @@ class OperationMetric(ABC):
metric = self.metrics[metric] metric = self.metrics[metric]
metric_type = metric["type"] metric_type = metric["type"]
if metric_type not in metrics_method_map: if metric_type not in metrics_method_map:
raise Exception(f"Unsupported rate metric type: {metric_type}, supported: {metrics_method_map.keys()}") raise Exception(
f"Unsupported rate metric type: {metric_type}, supported: {metrics_method_map.keys()}"
)
return metrics_method_map[metric_type](metric) return metrics_method_map[metric_type](metric)
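Both _get_metric and _get_metric_rate dispatch on the k6 metric type. A sketch assuming the usual k6 end-of-test summary layout ({"metrics": {"<name>": {"type": ..., "values": {...}}}}); the sample numbers are made up:

summary = {
    "metrics": {
        "frostfs_obj_put_total": {"type": "counter", "values": {"count": 120, "rate": 2.0}},
        "frostfs_obj_put_duration": {"type": "trend", "values": {"avg": 12.5, "p(95)": 30.1}},
    }
}

def get_metric(metrics: dict, name: str):
    # Pick the value extractor based on the declared metric type.
    metric = metrics[name]
    handlers = {
        "counter": lambda m: m["values"]["count"],
        "gauge": lambda m: m["values"]["value"],
        "trend": lambda m: m["values"],
    }
    metric_type = metric["type"]
    if metric_type not in handlers:
        raise Exception(f"Unsupported metric type: {metric_type}, supported: {handlers.keys()}")
    return handlers[metric_type](metric)

print(get_metric(summary["metrics"], "frostfs_obj_put_total"))     # 120
print(get_metric(summary["metrics"], "frostfs_obj_put_duration"))  # {'avg': 12.5, 'p(95)': 30.1}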
@ -86,145 +138,63 @@ class OperationMetric(ABC):
return metric["values"] return metric["values"]
class WriteOperationMetric(OperationMetric):
_NAME = "Write"
_SUCCESS = ""
_ERRORS = ""
_THROUGHPUT = "data_sent"
_LATENCY = ""
class ReadOperationMetric(OperationMetric):
_NAME = "Read"
_SUCCESS = ""
_ERRORS = ""
_THROUGHPUT = "data_received"
_LATENCY = ""
class DeleteOperationMetric(OperationMetric):
_NAME = "Delete"
_SUCCESS = ""
_ERRORS = ""
_THROUGHPUT = ""
_LATENCY = ""
class GrpcWriteOperationMetric(WriteOperationMetric):
_SUCCESS = "frostfs_obj_put_success"
_ERRORS = "frostfs_obj_put_fails"
_LATENCY = "frostfs_obj_put_duration"
class GrpcReadOperationMetric(ReadOperationMetric):
_SUCCESS = "frostfs_obj_get_success"
_ERRORS = "frostfs_obj_get_fails"
_LATENCY = "frostfs_obj_get_duration"
class GrpcDeleteOperationMetric(DeleteOperationMetric):
_SUCCESS = "frostfs_obj_delete_success"
_ERRORS = "frostfs_obj_delete_fails"
_LATENCY = "frostfs_obj_delete_duration"
class S3WriteOperationMetric(WriteOperationMetric):
_SUCCESS = "aws_obj_put_success"
_ERRORS = "aws_obj_put_fails"
_LATENCY = "aws_obj_put_duration"
class S3ReadOperationMetric(ReadOperationMetric):
_SUCCESS = "aws_obj_get_success"
_ERRORS = "aws_obj_get_fails"
_LATENCY = "aws_obj_get_duration"
class S3DeleteOperationMetric(DeleteOperationMetric):
_SUCCESS = "aws_obj_delete_success"
_ERRORS = "aws_obj_delete_fails"
_LATENCY = "aws_obj_delete_duration"
class S3LocalWriteOperationMetric(WriteOperationMetric):
_SUCCESS = "s3local_obj_put_success"
_ERRORS = "s3local_obj_put_fails"
_LATENCY = "s3local_obj_put_duration"
class S3LocalReadOperationMetric(ReadOperationMetric):
_SUCCESS = "s3local_obj_get_success"
_ERRORS = "s3local_obj_get_fails"
_LATENCY = "s3local_obj_get_duration"
class LocalWriteOperationMetric(WriteOperationMetric):
_SUCCESS = "local_obj_put_success"
_ERRORS = "local_obj_put_fails"
_LATENCY = "local_obj_put_duration"
class LocalReadOperationMetric(ReadOperationMetric):
_SUCCESS = "local_obj_get_success"
_ERRORS = "local_obj_get_fails"
class LocalDeleteOperationMetric(DeleteOperationMetric):
_SUCCESS = "local_obj_delete_success"
_ERRORS = "local_obj_delete_fails"
class VerifyReadOperationMetric(ReadOperationMetric):
_SUCCESS = "verified_obj"
_ERRORS = "invalid_obj"
class MetricsBase(ABC):
def __init__(self) -> None:
self.write: Optional[WriteOperationMetric] = None
self.read: Optional[ReadOperationMetric] = None
self.delete: Optional[DeleteOperationMetric] = None
@property
def operations(self) -> list[OperationMetric]:
return [metric for metric in [self.write, self.read, self.delete] if metric is not None]
class GrpcMetrics(MetricsBase): class GrpcMetrics(MetricsBase):
def __init__(self, summary) -> None: _WRITE_SUCCESS = "frostfs_obj_put_total"
super().__init__() _WRITE_ERRORS = "frostfs_obj_put_fails"
self.write = GrpcWriteOperationMetric(summary) _WRITE_LATENCY = "frostfs_obj_put_duration"
self.read = GrpcReadOperationMetric(summary)
self.delete = GrpcDeleteOperationMetric(summary) _READ_SUCCESS = "frostfs_obj_get_total"
_READ_ERRORS = "frostfs_obj_get_fails"
_READ_LATENCY = "frostfs_obj_get_duration"
_DELETE_SUCCESS = "frostfs_obj_delete_total"
_DELETE_ERRORS = "frostfs_obj_delete_fails"
_DELETE_LATENCY = "frostfs_obj_delete_duration"
class S3Metrics(MetricsBase): class S3Metrics(MetricsBase):
def __init__(self, summary) -> None: _WRITE_SUCCESS = "aws_obj_put_total"
super().__init__() _WRITE_ERRORS = "aws_obj_put_fails"
self.write = S3WriteOperationMetric(summary) _WRITE_LATENCY = "aws_obj_put_duration"
self.read = S3ReadOperationMetric(summary)
self.delete = S3DeleteOperationMetric(summary)
_READ_SUCCESS = "aws_obj_get_total"
_READ_ERRORS = "aws_obj_get_fails"
_READ_LATENCY = "aws_obj_get_duration"
_DELETE_SUCCESS = "aws_obj_delete_total"
_DELETE_ERRORS = "aws_obj_delete_fails"
_DELETE_LATENCY = "aws_obj_delete_duration"
class S3LocalMetrics(MetricsBase): class S3LocalMetrics(MetricsBase):
def __init__(self, summary) -> None: _WRITE_SUCCESS = "s3local_obj_put_total"
super().__init__() _WRITE_ERRORS = "s3local_obj_put_fails"
self.write = S3LocalWriteOperationMetric(summary) _WRITE_LATENCY = "s3local_obj_put_duration"
self.read = S3LocalReadOperationMetric(summary)
_READ_SUCCESS = "s3local_obj_get_total"
_READ_ERRORS = "s3local_obj_get_fails"
_READ_LATENCY = "s3local_obj_get_duration"
class LocalMetrics(MetricsBase): class LocalMetrics(MetricsBase):
def __init__(self, summary) -> None: _WRITE_SUCCESS = "local_obj_put_total"
super().__init__() _WRITE_ERRORS = "local_obj_put_fails"
self.write = LocalWriteOperationMetric(summary) _WRITE_LATENCY = "local_obj_put_duration"
self.read = LocalReadOperationMetric(summary)
self.delete = LocalDeleteOperationMetric(summary) _READ_SUCCESS = "local_obj_get_total"
_READ_ERRORS = "local_obj_get_fails"
_DELETE_SUCCESS = "local_obj_delete_total"
_DELETE_ERRORS = "local_obj_delete_fails"
class VerifyMetrics(MetricsBase): class VerifyMetrics(MetricsBase):
def __init__(self, summary) -> None: _WRITE_SUCCESS = "N/A"
super().__init__() _WRITE_ERRORS = "N/A"
self.read = VerifyReadOperationMetric(summary)
_READ_SUCCESS = "verified_obj"
_READ_ERRORS = "invalid_obj"
_DELETE_SUCCESS = "N/A"
_DELETE_ERRORS = "N/A"
def get_metrics_object(load_type: LoadScenario, summary: dict[str, Any]) -> MetricsBase: def get_metrics_object(load_type: LoadScenario, summary: dict[str, Any]) -> MetricsBase:

View file

@ -3,8 +3,8 @@ from typing import Optional
import yaml import yaml
from frostfs_testlib.load.interfaces.summarized import SummarizedStats
from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario
from frostfs_testlib.load.load_metrics import get_metrics_object
from frostfs_testlib.utils.converting_utils import calc_unit from frostfs_testlib.utils.converting_utils import calc_unit
@ -17,15 +17,11 @@ class LoadReport:
self.start_time: Optional[datetime] = None self.start_time: Optional[datetime] = None
self.end_time: Optional[datetime] = None self.end_time: Optional[datetime] = None
def set_start_time(self, time: datetime = None): def set_start_time(self):
if time is None: self.start_time = datetime.utcnow()
time = datetime.utcnow()
self.start_time = time
def set_end_time(self, time: datetime = None): def set_end_time(self):
if time is None: self.end_time = datetime.utcnow()
time = datetime.utcnow()
self.end_time = time
def add_summaries(self, load_summaries: dict): def add_summaries(self, load_summaries: dict):
self.load_summaries_list.append(load_summaries) self.load_summaries_list.append(load_summaries)
@ -35,7 +31,6 @@ class LoadReport:
def get_report_html(self): def get_report_html(self):
report_sections = [ report_sections = [
[self.load_params, self._get_load_id_section_html],
[self.load_test, self._get_load_params_section_html], [self.load_test, self._get_load_params_section_html],
[self.load_summaries_list, self._get_totals_section_html], [self.load_summaries_list, self._get_totals_section_html],
[self.end_time, self._get_test_time_html], [self.end_time, self._get_test_time_html],
@ -49,7 +44,9 @@ class LoadReport:
return html return html
def _get_load_params_section_html(self) -> str: def _get_load_params_section_html(self) -> str:
params: str = yaml.safe_dump([self.load_test], sort_keys=False, indent=2, explicit_start=True) params: str = yaml.safe_dump(
[self.load_test], sort_keys=False, indent=2, explicit_start=True
)
params = params.replace("\n", "<br>").replace(" ", "&nbsp;") params = params.replace("\n", "<br>").replace(" ", "&nbsp;")
section_html = f"""<h3>Scenario params</h3> section_html = f"""<h3>Scenario params</h3>
@ -58,17 +55,8 @@ class LoadReport:
return section_html return section_html
def _get_load_id_section_html(self) -> str:
section_html = f"""<h3>Load ID: {self.load_params.load_id}</h3>
<hr>"""
return section_html
def _get_test_time_html(self) -> str: def _get_test_time_html(self) -> str:
if not self.start_time or not self.end_time: html = f"""<h3>Scenario duration in UTC time (from agent)</h3>
return ""
html = f"""<h3>Scenario duration</h3>
{self.start_time} - {self.end_time}<br> {self.start_time} - {self.end_time}<br>
<hr> <hr>
""" """
@ -109,57 +97,73 @@ class LoadReport:
LoadScenario.gRPC_CAR: "open model", LoadScenario.gRPC_CAR: "open model",
LoadScenario.S3_CAR: "open model", LoadScenario.S3_CAR: "open model",
LoadScenario.LOCAL: "local fill", LoadScenario.LOCAL: "local fill",
LoadScenario.S3_LOCAL: "local fill", LoadScenario.S3_LOCAL: "local fill"
} }
return model_map[self.load_params.scenario] return model_map[self.load_params.scenario]
def _get_operations_sub_section_html(self, operation_type: str, stats: SummarizedStats): def _get_operations_sub_section_html(
self,
operation_type: str,
total_operations: int,
requested_rate_str: str,
vus_str: str,
total_rate: float,
throughput: float,
errors: dict[str, int],
latency: dict[str, dict],
):
throughput_html = "" throughput_html = ""
if stats.throughput > 0: if throughput > 0:
throughput, unit = calc_unit(stats.throughput) throughput, unit = calc_unit(throughput)
throughput_html = self._row("Throughput", f"{throughput:.2f} {unit}/sec") throughput_html = self._row("Throughput", f"{throughput:.2f} {unit}/sec")
bytes_html = ""
if stats.total_bytes > 0:
total_bytes, total_bytes_unit = calc_unit(stats.total_bytes)
bytes_html = self._row("Total transferred", f"{total_bytes:.2f} {total_bytes_unit}")
per_node_errors_html = "" per_node_errors_html = ""
for node_key, errors in stats.errors.by_node.items(): total_errors = 0
if self.load_params.k6_process_allocation_strategy == K6ProcessAllocationStrategy.PER_ENDPOINT: if errors:
per_node_errors_html += self._row(f"At {node_key}", errors) total_errors: int = 0
for node_key, errors in errors.items():
total_errors += errors
if (
self.load_params.k6_process_allocation_strategy
== K6ProcessAllocationStrategy.PER_ENDPOINT
):
per_node_errors_html += self._row(f"At {node_key}", errors)
latency_html = "" latency_html = ""
for node_key, latencies in stats.latencies.by_node.items(): if latency:
latency_values = "N/A" for node_key, latency_dict in latency.items():
if latencies: latency_values = "N/A"
latency_values = "" if latency_dict:
for param_name, param_val in latencies.items(): latency_values = ""
latency_values += f"{param_name}={param_val:.2f}ms " for param_name, param_val in latency_dict.items():
latency_values += f"{param_name}={param_val:.2f}ms "
latency_html += self._row(f"{operation_type} latency {node_key.split(':')[0]}", latency_values) latency_html += self._row(
f"{operation_type} latency {node_key.split(':')[0]}", latency_values
)
object_size, object_size_unit = calc_unit(self.load_params.object_size, 1) object_size, object_size_unit = calc_unit(self.load_params.object_size, 1)
duration = self._seconds_to_formatted_duration(self.load_params.load_time) duration = self._seconds_to_formatted_duration(self.load_params.load_time)
model = self._get_model_string() model = self._get_model_string()
requested_rate_str = f"{stats.requested_rate}op/sec" if stats.requested_rate else ""
# write 8KB 15h49m 50op/sec 50th open model/closed model/min_iteration duration=1s - 1.636MB/s 199.57451/s # write 8KB 15h49m 50op/sec 50th open model/closed model/min_iteration duration=1s - 1.636MB/s 199.57451/s
short_summary = f"{operation_type} {object_size}{object_size_unit} {duration} {requested_rate_str} {stats.threads}th {model} - {throughput:.2f}{unit}/s {stats.rate:.2f}/s" short_summary = f"{operation_type} {object_size}{object_size_unit} {duration} {requested_rate_str} {vus_str} {model} - {throughput:.2f}{unit}/s {total_rate:.2f}/s"
errors_percent = 0
if total_operations:
errors_percent = total_errors / total_operations * 100.0
html = f""" html = f"""
<table border="1" cellpadding="5px"><tbody> <table border="1" cellpadding="5px"><tbody>
<tr><th colspan="2" bgcolor="gainsboro">{short_summary}</th></tr> <tr><th colspan="2" bgcolor="gainsboro">{short_summary}</th></tr>
<tr><th colspan="2" bgcolor="gainsboro">Metrics</th></tr> <tr><th colspan="2" bgcolor="gainsboro">Metrics</th></tr>
{self._row("Total operations", stats.operations)} {self._row("Total operations", total_operations)}
{self._row("OP/sec", f"{stats.rate:.2f}")} {self._row("OP/sec", f"{total_rate:.2f}")}
{bytes_html}
{throughput_html} {throughput_html}
{latency_html} {latency_html}
<tr><th colspan="2" bgcolor="gainsboro">Errors</th></tr> <tr><th colspan="2" bgcolor="gainsboro">Errors</th></tr>
{per_node_errors_html} {per_node_errors_html}
{self._row("Total", f"{stats.errors.total} ({stats.errors.percent:.2f}%)")} {self._row("Total", f"{total_errors} ({errors_percent:.2f}%)")}
{self._row("Threshold", f"{stats.errors.threshold:.2f}%")} {self._row("Threshold", f"{self.load_params.error_threshold:.2f}%")}
</tbody></table><br><hr> </tbody></table><br><hr>
""" """
@ -167,12 +171,121 @@ class LoadReport:
def _get_totals_section_html(self): def _get_totals_section_html(self):
html = "" html = ""
for i in range(len(self.load_summaries_list)): for i, load_summaries in enumerate(self.load_summaries_list, 1):
html += f"<h3>Load Results for load #{i+1}</h3>" html += f"<h3>Load Results for load #{i}</h3>"
summarized = SummarizedStats.collect(self.load_params, self.load_summaries_list[i]) write_operations = 0
for operation_type, stats in summarized.items(): write_op_sec = 0
if stats.operations: write_throughput = 0
html += self._get_operations_sub_section_html(operation_type, stats) write_latency = {}
write_errors = {}
requested_write_rate = self.load_params.write_rate
requested_write_rate_str = (
f"{requested_write_rate}op/sec" if requested_write_rate else ""
)
read_operations = 0
read_op_sec = 0
read_throughput = 0
read_latency = {}
read_errors = {}
requested_read_rate = self.load_params.read_rate
requested_read_rate_str = f"{requested_read_rate}op/sec" if requested_read_rate else ""
delete_operations = 0
delete_op_sec = 0
delete_latency = {}
delete_errors = {}
requested_delete_rate = self.load_params.delete_rate
requested_delete_rate_str = (
f"{requested_delete_rate}op/sec" if requested_delete_rate else ""
)
if self.load_params.scenario in [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR]:
delete_vus = max(
self.load_params.preallocated_deleters or 0, self.load_params.max_deleters or 0
)
write_vus = max(
self.load_params.preallocated_writers or 0, self.load_params.max_writers or 0
)
read_vus = max(
self.load_params.preallocated_readers or 0, self.load_params.max_readers or 0
)
else:
write_vus = self.load_params.writers
read_vus = self.load_params.readers
delete_vus = self.load_params.deleters
write_vus_str = f"{write_vus}th"
read_vus_str = f"{read_vus}th"
delete_vus_str = f"{delete_vus}th"
write_section_required = False
read_section_required = False
delete_section_required = False
for node_key, load_summary in load_summaries.items():
metrics = get_metrics_object(self.load_params.scenario, load_summary)
write_operations += metrics.write_total_iterations
if write_operations:
write_section_required = True
write_op_sec += metrics.write_rate
write_latency[node_key] = metrics.write_latency
write_throughput += metrics.write_throughput
if metrics.write_failed_iterations:
write_errors[node_key] = metrics.write_failed_iterations
read_operations += metrics.read_total_iterations
if read_operations:
read_section_required = True
read_op_sec += metrics.read_rate
read_throughput += metrics.read_throughput
read_latency[node_key] = metrics.read_latency
if metrics.read_failed_iterations:
read_errors[node_key] = metrics.read_failed_iterations
delete_operations += metrics.delete_total_iterations
if delete_operations:
delete_section_required = True
delete_op_sec += metrics.delete_rate
delete_latency[node_key] = metrics.delete_latency
if metrics.delete_failed_iterations:
delete_errors[node_key] = metrics.delete_failed_iterations
if write_section_required:
html += self._get_operations_sub_section_html(
"Write",
write_operations,
requested_write_rate_str,
write_vus_str,
write_op_sec,
write_throughput,
write_errors,
write_latency,
)
if read_section_required:
html += self._get_operations_sub_section_html(
"Read",
read_operations,
requested_read_rate_str,
read_vus_str,
read_op_sec,
read_throughput,
read_errors,
read_latency,
)
if delete_section_required:
html += self._get_operations_sub_section_html(
"Delete",
delete_operations,
requested_delete_rate_str,
delete_vus_str,
delete_op_sec,
0,
delete_errors,
delete_latency,
)
return html return html

View file

@ -1,7 +1,11 @@
from frostfs_testlib import reporter import logging
from frostfs_testlib.load.interfaces.summarized import SummarizedStats
from frostfs_testlib.load.load_config import LoadParams, LoadScenario from frostfs_testlib.load.load_config import LoadParams, LoadScenario
from frostfs_testlib.load.load_metrics import get_metrics_object from frostfs_testlib.load.load_metrics import get_metrics_object
from frostfs_testlib.reporter import get_reporter
reporter = get_reporter()
logger = logging.getLogger("NeoLogger")
class LoadVerifier: class LoadVerifier:
@ -9,16 +13,66 @@ class LoadVerifier:
self.load_params = load_params self.load_params = load_params
def collect_load_issues(self, load_summaries: dict[str, dict]) -> list[str]: def collect_load_issues(self, load_summaries: dict[str, dict]) -> list[str]:
summarized = SummarizedStats.collect(self.load_params, load_summaries) write_operations = 0
write_errors = 0
read_operations = 0
read_errors = 0
delete_operations = 0
delete_errors = 0
writers = self.load_params.writers or self.load_params.preallocated_writers or 0
readers = self.load_params.readers or self.load_params.preallocated_readers or 0
deleters = self.load_params.deleters or self.load_params.preallocated_deleters or 0
for load_summary in load_summaries.values():
metrics = get_metrics_object(self.load_params.scenario, load_summary)
if writers:
write_operations += metrics.write_total_iterations
write_errors += metrics.write_failed_iterations
if readers:
read_operations += metrics.read_total_iterations
read_errors += metrics.read_failed_iterations
if deleters:
delete_operations += metrics.delete_total_iterations
delete_errors += metrics.delete_failed_iterations
issues = [] issues = []
if writers and not write_operations:
issues.append("No write operations were performed")
if readers and not read_operations:
issues.append("No read operations were performed")
if deleters and not delete_operations:
issues.append("No delete operations were performed")
for operation_type, stats in summarized.items(): if (
if stats.threads and not stats.operations: write_operations
issues.append(f"No any {operation_type.lower()} operation was performed") and writers
and write_errors / write_operations * 100 > self.load_params.error_threshold
if stats.errors.percent > stats.errors.threshold: ):
rate_str = self._get_rate_str(stats.errors.percent) issues.append(
issues.append(f"{operation_type} errors exceeded threshold: {rate_str} > {stats.errors.threshold}%") f"Write error rate is greater than threshold: {write_errors / write_operations * 100} > {self.load_params.error_threshold}"
)
if (
read_operations
and readers
and read_errors / read_operations * 100 > self.load_params.error_threshold
):
issues.append(
f"Read error rate is greater than threshold: {read_errors / read_operations * 100} > {self.load_params.error_threshold}"
)
if (
delete_operations
and deleters
and delete_errors / delete_operations * 100 > self.load_params.error_threshold
):
issues.append(
f"Delete error rate is greater than threshold: {delete_errors / delete_operations * 100} > {self.load_params.error_threshold}"
)
return issues return issues
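Both versions above apply the same rule: an operation type raises an issue when its error percentage exceeds the configured threshold. A condensed sketch of that check:

from typing import Optional

def error_rate_issue(op: str, operations: int, errors: int, threshold: float) -> Optional[str]:
    # No operations means nothing to rate; otherwise compare the percentage.
    if not operations:
        return None
    percent = errors / operations * 100.0
    if percent > threshold:
        return f"{op} errors exceeded threshold: {percent:.2f}% > {threshold:.2f}%"
    return None

print(error_rate_issue("Write", 1000, 25, 2.0))  # Write errors exceeded threshold: 2.50% > 2.00%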
@ -35,10 +89,9 @@ class LoadVerifier:
) )
return verify_issues return verify_issues
def _get_rate_str(self, rate: float, minimal: float = 0.01) -> str: def _collect_verify_issues_on_process(
return f"{rate:.2f}%" if rate >= minimal else f"~{minimal}%" self, label, load_summary, verification_summary
) -> list[str]:
def _collect_verify_issues_on_process(self, label, load_summary, verification_summary) -> list[str]:
issues = [] issues = []
load_metrics = get_metrics_object(self.load_params.scenario, load_summary) load_metrics = get_metrics_object(self.load_params.scenario, load_summary)
@ -49,13 +102,13 @@ class LoadVerifier:
delete_success = 0 delete_success = 0
if deleters > 0: if deleters > 0:
delete_success = load_metrics.delete.success_iterations delete_success = load_metrics.delete_success_iterations
if verification_summary: if verification_summary:
verify_metrics = get_metrics_object(LoadScenario.VERIFY, verification_summary) verify_metrics = get_metrics_object(LoadScenario.VERIFY, verification_summary)
verified_objects = verify_metrics.read.success_iterations verified_objects = verify_metrics.read_success_iterations
invalid_objects = verify_metrics.read.failed_iterations invalid_objects = verify_metrics.read_failed_iterations
total_left_objects = load_metrics.write.success_iterations - delete_success total_left_objects = load_metrics.write_success_iterations - delete_success
# Due to interruptions, the number of verified objects may fall short of the written total by up to the writers count # Due to interruptions, the number of verified objects may fall short of the written total by up to the writers count
if abs(total_left_objects - verified_objects) > writers: if abs(total_left_objects - verified_objects) > writers:

View file

@ -1,20 +1,23 @@
import copy import copy
import itertools import itertools
import math import math
import re
import time import time
from dataclasses import fields from dataclasses import fields
from threading import Event
from typing import Optional from typing import Optional
from urllib.parse import urlparse from urllib.parse import urlparse
from frostfs_testlib import reporter import yaml
from frostfs_testlib.credentials.interfaces import S3Credentials, User
from frostfs_testlib.cli.frostfs_authmate.authmate import FrostfsAuthmate
from frostfs_testlib.load.interfaces.loader import Loader from frostfs_testlib.load.interfaces.loader import Loader
from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner
from frostfs_testlib.load.k6 import K6 from frostfs_testlib.load.k6 import K6
from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadType from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadType
from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources import optionals from frostfs_testlib.resources import optionals
from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC
from frostfs_testlib.resources.common import STORAGE_USER_NAME from frostfs_testlib.resources.common import STORAGE_USER_NAME
from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_VUS_COUNT_DIVISOR, LOAD_NODE_SSH_USER, LOAD_NODES from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_VUS_COUNT_DIVISOR, LOAD_NODE_SSH_USER, LOAD_NODES
from frostfs_testlib.shell.command_inspectors import SuInspector from frostfs_testlib.shell.command_inspectors import SuInspector
@ -22,23 +25,25 @@ from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput
from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing import parallel, run_optionally from frostfs_testlib.testing import parallel, run_optionally
from frostfs_testlib.testing.test_control import retry from frostfs_testlib.testing.test_control import retry
from frostfs_testlib.utils import datetime_utils from frostfs_testlib.utils import datetime_utils
from frostfs_testlib.utils.file_keeper import FileKeeper from frostfs_testlib.utils.file_keeper import FileKeeper
reporter = get_reporter()
class RunnerBase(ScenarioRunner): class RunnerBase(ScenarioRunner):
k6_instances: list[K6] k6_instances: list[K6]
@reporter.step("Run preset on loaders") @reporter.step_deco("Run preset on loaders")
def preset(self): def preset(self):
parallel([k6.preset for k6 in self.k6_instances]) parallel([k6.preset for k6 in self.k6_instances])
@reporter.step("Wait until load finish") @reporter.step_deco("Wait until load finish")
def wait_until_finish(self, soft_timeout: int = 0): def wait_until_finish(self, soft_timeout: int = 0):
event = Event() parallel([k6.wait_until_finished for k6 in self.k6_instances], soft_timeout=soft_timeout)
parallel([k6.wait_until_finished for k6 in self.k6_instances], event=event, soft_timeout=soft_timeout)
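The parallel() helper used throughout fans a bound method out across all k6 instances. A rough stand-in built on the standard library (the real helper also threads through kwargs such as event and soft_timeout):

from concurrent.futures import ThreadPoolExecutor

def parallel_sketch(callables):
    # Submit every callable to a thread pool and surface the first exception.
    with ThreadPoolExecutor() as pool:
        futures = [pool.submit(fn) for fn in callables]
        return [future.result() for future in futures]

# parallel_sketch([k6.preset for k6 in k6_instances])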
@property @property
def is_running(self): def is_running(self):
@ -52,20 +57,20 @@ class RunnerBase(ScenarioRunner):
class DefaultRunner(RunnerBase): class DefaultRunner(RunnerBase):
loaders: list[Loader] loaders: list[Loader]
user: User loaders_wallet: WalletInfo
def __init__( def __init__(
self, self,
user: User, loaders_wallet: WalletInfo,
load_ip_list: Optional[list[str]] = None, load_ip_list: Optional[list[str]] = None,
) -> None: ) -> None:
if load_ip_list is None: if load_ip_list is None:
load_ip_list = LOAD_NODES load_ip_list = LOAD_NODES
self.loaders = RemoteLoader.from_ip_list(load_ip_list) self.loaders = RemoteLoader.from_ip_list(load_ip_list)
self.user = user self.loaders_wallet = loaders_wallet
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@reporter.step("Preparation steps") @reporter.step_deco("Preparation steps")
def prepare( def prepare(
self, self,
load_params: LoadParams, load_params: LoadParams,
@ -73,37 +78,56 @@ class DefaultRunner(RunnerBase):
nodes_under_load: list[ClusterNode], nodes_under_load: list[ClusterNode],
k6_dir: str, k6_dir: str,
): ):
if load_params.force_fresh_registry and load_params.custom_registry:
with reporter.step("Forcing fresh registry files"):
parallel(self._force_fresh_registry, self.loaders, load_params)
if load_params.load_type != LoadType.S3: if load_params.load_type != LoadType.S3:
return return
with reporter.step("Init s3 client on loaders"): with reporter.step("Init s3 client on loaders"):
s3_credentials = self.user.s3_credentials storage_node = nodes_under_load[0].service(StorageNode)
parallel(self._aws_configure_on_loader, self.loaders, s3_credentials) s3_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes]
grpc_peer = storage_node.get_rpc_endpoint()
def _force_fresh_registry(self, loader: Loader, load_params: LoadParams): parallel(self._prepare_loader, self.loaders, load_params, grpc_peer, s3_public_keys, k6_dir)
with reporter.step(f"Forcing fresh registry on {loader.ip}"):
shell = loader.get_shell()
shell.exec(f"rm -f {load_params.registry_file}")
def _aws_configure_on_loader( def _prepare_loader(
self, self,
loader: Loader, loader: Loader,
s3_credentials: S3Credentials, load_params: LoadParams,
grpc_peer: str,
s3_public_keys: list[str],
k6_dir: str,
): ):
with reporter.step(f"Aws configure on {loader.ip}"): with reporter.step(f"Init s3 client on {loader.ip}"):
shell = loader.get_shell()
frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC)
issue_secret_output = frostfs_authmate_exec.secret.issue(
wallet=self.loaders_wallet.path,
peer=grpc_peer,
gate_public_key=s3_public_keys,
container_placement_policy=load_params.preset.container_placement_policy,
container_policy=f"{k6_dir}/scenarios/files/policy.json",
wallet_password=self.loaders_wallet.password,
).stdout
aws_access_key_id = str(
re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group(
"aws_access_key_id"
)
)
aws_secret_access_key = str(
re.search(
r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)",
issue_secret_output,
).group("aws_secret_access_key")
)
configure_input = [ configure_input = [
InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=s3_credentials.access_key), InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id),
InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=s3_credentials.secret_key), InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key),
InteractiveInput(prompt_pattern=r".*", input=""), InteractiveInput(prompt_pattern=r".*", input=""),
InteractiveInput(prompt_pattern=r".*", input=""), InteractiveInput(prompt_pattern=r".*", input=""),
] ]
loader.get_shell().exec("aws configure", CommandOptions(interactive_inputs=configure_input)) shell.exec("aws configure", CommandOptions(interactive_inputs=configure_input))
@reporter.step("Init k6 instances") @reporter.step_deco("Init k6 instances")
def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
self.k6_instances = [] self.k6_instances = []
cycled_loaders = itertools.cycle(self.loaders) cycled_loaders = itertools.cycle(self.loaders)
@ -143,10 +167,12 @@ class DefaultRunner(RunnerBase):
k6_dir, k6_dir,
shell, shell,
loader, loader,
self.user, self.loaders_wallet,
) )
def _get_distributed_load_params_list(self, original_load_params: LoadParams, workers_count: int) -> list[LoadParams]: def _get_distributed_load_params_list(
self, original_load_params: LoadParams, workers_count: int
) -> list[LoadParams]:
divisor = int(BACKGROUND_LOAD_VUS_COUNT_DIVISOR) divisor = int(BACKGROUND_LOAD_VUS_COUNT_DIVISOR)
distributed_load_params: list[LoadParams] = [] distributed_load_params: list[LoadParams] = []
@ -231,23 +257,21 @@ class LocalRunner(RunnerBase):
loaders: list[Loader] loaders: list[Loader]
cluster_state_controller: ClusterStateController cluster_state_controller: ClusterStateController
file_keeper: FileKeeper file_keeper: FileKeeper
user: User wallet: WalletInfo
def __init__( def __init__(
self, self,
cluster_state_controller: ClusterStateController, cluster_state_controller: ClusterStateController,
file_keeper: FileKeeper, file_keeper: FileKeeper,
nodes_under_load: list[ClusterNode], nodes_under_load: list[ClusterNode],
user: User,
) -> None: ) -> None:
self.cluster_state_controller = cluster_state_controller self.cluster_state_controller = cluster_state_controller
self.file_keeper = file_keeper self.file_keeper = file_keeper
self.loaders = [NodeLoader(node) for node in nodes_under_load] self.loaders = [NodeLoader(node) for node in nodes_under_load]
self.nodes_under_load = nodes_under_load self.nodes_under_load = nodes_under_load
self.user = user
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@reporter.step("Preparation steps") @reporter.step_deco("Preparation steps")
def prepare( def prepare(
self, self,
load_params: LoadParams, load_params: LoadParams,
@ -274,7 +298,7 @@ class LocalRunner(RunnerBase):
return True return True
@reporter.step("Prepare node {cluster_node}") @reporter.step_deco("Prepare node {cluster_node}")
def prepare_node(self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams): def prepare_node(self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams):
shell = cluster_node.host.get_shell() shell = cluster_node.host.get_shell()
@ -290,14 +314,16 @@ class LocalRunner(RunnerBase):
with reporter.step("Download K6"): with reporter.step("Download K6"):
shell.exec(f"sudo rm -rf {k6_dir};sudo mkdir {k6_dir}") shell.exec(f"sudo rm -rf {k6_dir};sudo mkdir {k6_dir}")
shell.exec(f"sudo curl -so {k6_dir}/k6.tar.gz {load_params.k6_url}") shell.exec(f"sudo curl -so {k6_dir}/k6.tar.gz {load_params.k6_url}")
shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz --strip-components 2 -C {k6_dir}") shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz -C {k6_dir}")
shell.exec(f"sudo chmod -R 777 {k6_dir}") shell.exec(f"sudo chmod -R 777 {k6_dir}")
with reporter.step("chmod 777 wallet related files on loader"): with reporter.step("Create empty_passwd"):
shell.exec(f"sudo chmod -R 777 {self.user.wallet.config_path}") self.wallet = WalletInfo(f"{k6_dir}/scenarios/files/wallet.json", "", "/tmp/empty_passwd.yml")
shell.exec(f"sudo chmod -R 777 {self.user.wallet.path}") content = yaml.dump({"password": ""})
shell.exec(f'echo "{content}" | sudo tee {self.wallet.config_path}')
shell.exec(f"sudo chmod -R 777 {self.wallet.config_path}")
@reporter.step("Init k6 instances") @reporter.step_deco("Init k6 instances")
def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
self.k6_instances = [] self.k6_instances = []
futures = parallel( futures = parallel(
@ -328,7 +354,7 @@ class LocalRunner(RunnerBase):
k6_dir, k6_dir,
shell, shell,
loader, loader,
self.user, self.wallet,
) )
def start(self): def start(self):
@ -343,12 +369,12 @@ class LocalRunner(RunnerBase):
with reporter.step(f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on"): with reporter.step(f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on"):
time.sleep(wait_after_start_time) time.sleep(wait_after_start_time)
@reporter.step("Restore passwd on {cluster_node}") @reporter.step_deco("Restore passwd on {cluster_node}")
def restore_passwd_on_node(self, cluster_node: ClusterNode): def restore_passwd_on_node(self, cluster_node: ClusterNode):
shell = cluster_node.host.get_shell() shell = cluster_node.host.get_shell()
shell.exec("sudo chattr -i /etc/passwd") shell.exec("sudo chattr -i /etc/passwd")
@reporter.step("Lock passwd on {cluster_node}") @reporter.step_deco("Lock passwd on {cluster_node}")
def lock_passwd_on_node(self, cluster_node: ClusterNode): def lock_passwd_on_node(self, cluster_node: ClusterNode):
shell = cluster_node.host.get_shell() shell = cluster_node.host.get_shell()
shell.exec("sudo chattr +i /etc/passwd") shell.exec("sudo chattr +i /etc/passwd")
@ -374,19 +400,19 @@ class S3LocalRunner(LocalRunner):
endpoints: list[str] endpoints: list[str]
k6_dir: str k6_dir: str
@reporter.step("Run preset on loaders") @reporter.step_deco("Run preset on loaders")
def preset(self): def preset(self):
LocalRunner.preset(self) LocalRunner.preset(self)
with reporter.step(f"Resolve containers in preset"): with reporter.step(f"Resolve containers in preset"):
parallel(self._resolve_containers_in_preset, self.k6_instances) parallel(self._resolve_containers_in_preset, self.k6_instances)
@reporter.step("Resolve containers in preset") @reporter.step_deco("Resolve containers in preset")
def _resolve_containers_in_preset(self, k6_instance: K6): def _resolve_containers_in_preset(self, k6_instance: K6):
k6_instance.shell.exec( k6_instance.shell.exec(
f"sudo {self.k6_dir}/scenarios/preset/resolve_containers_in_preset.py --endpoint {k6_instance.endpoints[0]} --preset_file {k6_instance.load_params.preset.pregen_json}" f"sudo {self.k6_dir}/scenarios/preset/resolve_containers_in_preset.py --endpoint {k6_instance.endpoints[0]} --preset_file {k6_instance.load_params.preset.pregen_json}"
) )
@reporter.step("Init k6 instances") @reporter.step_deco("Init k6 instances")
def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
self.k6_instances = [] self.k6_instances = []
futures = parallel( futures = parallel(
@ -418,11 +444,11 @@ class S3LocalRunner(LocalRunner):
k6_dir, k6_dir,
shell, shell,
loader, loader,
self.user, self.wallet,
) )
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@reporter.step("Preparation steps") @reporter.step_deco("Preparation steps")
def prepare( def prepare(
self, self,
load_params: LoadParams, load_params: LoadParams,
@ -431,10 +457,17 @@ class S3LocalRunner(LocalRunner):
k6_dir: str, k6_dir: str,
): ):
self.k6_dir = k6_dir self.k6_dir = k6_dir
parallel(self.prepare_node, nodes_under_load, k6_dir, load_params, cluster_nodes) with reporter.step("Init s3 client on loaders"):
storage_node = nodes_under_load[0].service(StorageNode)
s3_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes]
grpc_peer = storage_node.get_rpc_endpoint()
@reporter.step("Prepare node {cluster_node}") parallel(self.prepare_node, nodes_under_load, k6_dir, load_params, s3_public_keys, grpc_peer)
def prepare_node(self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams, cluster_nodes: list[ClusterNode]):
@reporter.step_deco("Prepare node {cluster_node}")
def prepare_node(
self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams, s3_public_keys: list[str], grpc_peer: str
):
LocalRunner.prepare_node(self, cluster_node, k6_dir, load_params) LocalRunner.prepare_node(self, cluster_node, k6_dir, load_params)
self.endpoints = cluster_node.s3_gate.get_all_endpoints() self.endpoints = cluster_node.s3_gate.get_all_endpoints()
shell = cluster_node.host.get_shell() shell = cluster_node.host.get_shell()
@ -455,9 +488,29 @@ class S3LocalRunner(LocalRunner):
shell.exec(f"sudo python3 -m pip install -I {k6_dir}/requests.tar.gz") shell.exec(f"sudo python3 -m pip install -I {k6_dir}/requests.tar.gz")
with reporter.step(f"Init s3 client on {cluster_node.host_ip}"): with reporter.step(f"Init s3 client on {cluster_node.host_ip}"):
frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC)
issue_secret_output = frostfs_authmate_exec.secret.issue(
wallet=self.wallet.path,
peer=grpc_peer,
gate_public_key=s3_public_keys,
container_placement_policy=load_params.preset.container_placement_policy,
container_policy=f"{k6_dir}/scenarios/files/policy.json",
wallet_password=self.wallet.password,
).stdout
aws_access_key_id = str(
re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group(
"aws_access_key_id"
)
)
aws_secret_access_key = str(
re.search(
r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)",
issue_secret_output,
).group("aws_secret_access_key")
)
configure_input = [ configure_input = [
InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=self.user.s3_credentials.access_key), InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id),
InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=self.user.s3_credentials.secret_key), InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key),
InteractiveInput(prompt_pattern=r".*", input=""), InteractiveInput(prompt_pattern=r".*", input=""),
InteractiveInput(prompt_pattern=r".*", input=""), InteractiveInput(prompt_pattern=r".*", input=""),
] ]
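For context, a minimal self-contained sketch of the credential-parsing step restored above: frostfs-authmate issues the secret and the keys are pulled out of its stdout with named-group regexes. The sample_output below is an assumption about the output shape (quoted values on their own lines), inferred from the regexes themselves; the key values are dummies.

import re

sample_output = """
access_key_id: "EC3tyWpTEKfGNS888PFBpwQzZTrnwDXReGjgAxa8Em1h"
secret_access_key: "1b8274b1750f83e608a1d6b80fe1e11d11f91b8c38ad8793"
"""

aws_access_key_id = re.search(
    r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", sample_output
).group("aws_access_key_id")
aws_secret_access_key = re.search(
    r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", sample_output
).group("aws_secret_access_key")

# the '.' after \s consumes the opening quote, so only the bare key is captured
assert aws_access_key_id.startswith("EC3")
print(aws_access_key_id, aws_secret_access_key)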

View file

@ -8,15 +8,17 @@ from tenacity import retry
from tenacity.stop import stop_after_attempt from tenacity.stop import stop_after_attempt
from tenacity.wait import wait_fixed from tenacity.wait import wait_fixed
from frostfs_testlib import reporter from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.shell.command_inspectors import SuInspector from frostfs_testlib.shell.command_inspectors import SuInspector
from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions
reporter = get_reporter()
class RemoteProcess: class RemoteProcess:
def __init__( def __init__(
self, cmd: str, process_dir: str, shell: Shell, cmd_inspector: Optional[CommandInspector], proc_id: str self, cmd: str, process_dir: str, shell: Shell, cmd_inspector: Optional[CommandInspector]
): ):
self.process_dir = process_dir self.process_dir = process_dir
self.cmd = cmd self.cmd = cmd
@ -24,23 +26,15 @@ class RemoteProcess:
self.stderr_last_line_number = 0 self.stderr_last_line_number = 0
self.pid: Optional[str] = None self.pid: Optional[str] = None
self.proc_rc: Optional[int] = None self.proc_rc: Optional[int] = None
self.proc_start_time: Optional[int] = None
self.proc_end_time: Optional[int] = None
self.saved_stdout: Optional[str] = None self.saved_stdout: Optional[str] = None
self.saved_stderr: Optional[str] = None self.saved_stderr: Optional[str] = None
self.shell = shell self.shell = shell
self.proc_id: str = proc_id
self.cmd_inspectors: list[CommandInspector] = [cmd_inspector] if cmd_inspector else [] self.cmd_inspectors: list[CommandInspector] = [cmd_inspector] if cmd_inspector else []
@classmethod @classmethod
@reporter.step("Create remote process") @reporter.step_deco("Create remote process")
def create( def create(
cls, cls, command: str, shell: Shell, working_dir: str = "/tmp", user: Optional[str] = None
command: str,
shell: Shell,
working_dir: str = "/tmp",
user: Optional[str] = None,
proc_id: Optional[str] = None,
) -> RemoteProcess: ) -> RemoteProcess:
""" """
Create a process on a remote host. Create a process on a remote host.
@ -52,7 +46,6 @@ class RemoteProcess:
stderr: contains script errors stderr: contains script errors
stdout: contains script output stdout: contains script output
user: user on whose behalf the command will be executed user: user on whose behalf the command will be executed
proc_id: process string identifier
Args: Args:
shell: Shell instance shell: Shell instance
@ -62,32 +55,20 @@ class RemoteProcess:
Returns: Returns:
RemoteProcess instance for further examination RemoteProcess instance for further examination
""" """
if proc_id is None:
proc_id = f"{uuid.uuid4()}"
cmd_inspector = SuInspector(user) if user else None cmd_inspector = SuInspector(user) if user else None
remote_process = cls( remote_process = cls(
cmd=command, cmd=command,
process_dir=os.path.join(working_dir, f"proc_{proc_id}"), process_dir=os.path.join(working_dir, f"proc_{uuid.uuid4()}"),
shell=shell, shell=shell,
cmd_inspector=cmd_inspector, cmd_inspector=cmd_inspector,
proc_id=proc_id,
) )
remote_process._create_process_dir()
remote_process._generate_command_script(command)
remote_process._start_process()
remote_process.pid = remote_process._get_pid()
return remote_process return remote_process
@reporter.step("Start remote process") @reporter.step_deco("Get process stdout")
def start(self):
"""
Starts a process on a remote host.
"""
self._create_process_dir()
self._generate_command_script()
self._start_process()
self.pid = self._get_pid()
@reporter.step("Get process stdout")
def stdout(self, full: bool = False) -> str: def stdout(self, full: bool = False) -> str:
""" """
Method to get process stdout, either fresh info or full. Method to get process stdout, either fresh info or full.
@ -119,7 +100,7 @@ class RemoteProcess:
return resulted_stdout return resulted_stdout
return "" return ""
@reporter.step("Get process stderr") @reporter.step_deco("Get process stderr")
def stderr(self, full: bool = False) -> str: def stderr(self, full: bool = False) -> str:
""" """
Method to get process stderr, either fresh info or full. Method to get process stderr, either fresh info or full.
@ -150,59 +131,28 @@ class RemoteProcess:
return resulted_stderr return resulted_stderr
return "" return ""
@reporter.step("Get process rc") @reporter.step_deco("Get process rc")
def rc(self) -> Optional[int]: def rc(self) -> Optional[int]:
if self.proc_rc is not None: if self.proc_rc is not None:
return self.proc_rc return self.proc_rc
result = self._cat_proc_file("rc")
if not result:
return None
self.proc_rc = int(result)
return self.proc_rc
@reporter.step("Get process start time")
def start_time(self) -> Optional[int]:
if self.proc_start_time is not None:
return self.proc_start_time
result = self._cat_proc_file("start_time")
if not result:
return None
self.proc_start_time = int(result)
return self.proc_start_time
@reporter.step("Get process end time")
def end_time(self) -> Optional[int]:
if self.proc_end_time is not None:
return self.proc_end_time
result = self._cat_proc_file("end_time")
if not result:
return None
self.proc_end_time = int(result)
return self.proc_end_time
def _cat_proc_file(self, file: str) -> Optional[str]:
terminal = self.shell.exec( terminal = self.shell.exec(
f"cat {self.process_dir}/{file}", f"cat {self.process_dir}/rc",
CommandOptions(check=False, extra_inspectors=self.cmd_inspectors, no_log=True), CommandOptions(check=False, extra_inspectors=self.cmd_inspectors, no_log=True),
) )
if "No such file or directory" in terminal.stderr: if "No such file or directory" in terminal.stderr:
return None return None
elif terminal.stderr or terminal.return_code != 0: elif terminal.stderr or terminal.return_code != 0:
raise AssertionError(f"cat process {file} was not successful: {terminal.stderr}") raise AssertionError(f"cat process rc was not successful: {terminal.stderr}")
return terminal.stdout self.proc_rc = int(terminal.stdout)
return self.proc_rc
@reporter.step("Check if process is running") @reporter.step_deco("Check if process is running")
def running(self) -> bool: def running(self) -> bool:
return self.rc() is None return self.rc() is None
@reporter.step("Send signal to process") @reporter.step_deco("Send signal to process")
def send_signal(self, signal: int) -> None: def send_signal(self, signal: int) -> None:
kill_res = self.shell.exec( kill_res = self.shell.exec(
f"kill -{signal} {self.pid}", f"kill -{signal} {self.pid}",
@ -211,23 +161,27 @@ class RemoteProcess:
if "No such process" in kill_res.stderr: if "No such process" in kill_res.stderr:
return return
if kill_res.return_code: if kill_res.return_code:
raise AssertionError(f"Signal {signal} not sent. Return code of kill: {kill_res.return_code}") raise AssertionError(
f"Signal {signal} not sent. Return code of kill: {kill_res.return_code}"
)
@reporter.step("Stop process") @reporter.step_deco("Stop process")
def stop(self) -> None: def stop(self) -> None:
self.send_signal(15) self.send_signal(15)
@reporter.step("Kill process") @reporter.step_deco("Kill process")
def kill(self) -> None: def kill(self) -> None:
self.send_signal(9) self.send_signal(9)
@reporter.step("Clear process directory") @reporter.step_deco("Clear process directory")
def clear(self) -> None: def clear(self) -> None:
if self.process_dir == "/": if self.process_dir == "/":
raise AssertionError(f"Invalid path to delete: {self.process_dir}") raise AssertionError(f"Invalid path to delete: {self.process_dir}")
self.shell.exec(f"rm -rf {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)) self.shell.exec(
f"rm -rf {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)
)
@reporter.step("Start remote process") @reporter.step_deco("Start remote process")
def _start_process(self) -> None: def _start_process(self) -> None:
self.shell.exec( self.shell.exec(
f"nohup {self.process_dir}/command.sh </dev/null " f"nohup {self.process_dir}/command.sh </dev/null "
@ -236,34 +190,40 @@ class RemoteProcess:
CommandOptions(extra_inspectors=self.cmd_inspectors), CommandOptions(extra_inspectors=self.cmd_inspectors),
) )
@reporter.step("Create process directory") @reporter.step_deco("Create process directory")
def _create_process_dir(self) -> None: def _create_process_dir(self) -> None:
self.shell.exec(f"mkdir -p {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)) self.shell.exec(
self.shell.exec(f"chmod 777 {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)) f"mkdir -p {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)
terminal = self.shell.exec(f"realpath {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)) )
self.shell.exec(
f"chmod 777 {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)
)
terminal = self.shell.exec(
f"realpath {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)
)
self.process_dir = terminal.stdout.strip() self.process_dir = terminal.stdout.strip()
@reporter.step("Get pid") @reporter.step_deco("Get pid")
@retry(wait=wait_fixed(10), stop=stop_after_attempt(5), reraise=True) @retry(wait=wait_fixed(10), stop=stop_after_attempt(5), reraise=True)
def _get_pid(self) -> str: def _get_pid(self) -> str:
terminal = self.shell.exec(f"cat {self.process_dir}/pid", CommandOptions(extra_inspectors=self.cmd_inspectors)) terminal = self.shell.exec(
f"cat {self.process_dir}/pid", CommandOptions(extra_inspectors=self.cmd_inspectors)
)
assert terminal.stdout, f"invalid pid: {terminal.stdout}" assert terminal.stdout, f"invalid pid: {terminal.stdout}"
return terminal.stdout.strip() return terminal.stdout.strip()
@reporter.step("Generate command script") @reporter.step_deco("Generate command script")
def _generate_command_script(self) -> None: def _generate_command_script(self, command: str) -> None:
command = self.cmd.replace('"', '\\"').replace("\\", "\\\\") command = command.replace('"', '\\"').replace("\\", "\\\\")
script = ( script = (
f"#!/bin/bash\n" f"#!/bin/bash\n"
f"cd {self.process_dir}\n" f"cd {self.process_dir}\n"
f"date +%s > {self.process_dir}/start_time\n"
f"{command} &\n" f"{command} &\n"
f"pid=\$!\n" f"pid=\$!\n"
f"cd {self.process_dir}\n" f"cd {self.process_dir}\n"
f"echo \$pid > {self.process_dir}/pid\n" f"echo \$pid > {self.process_dir}/pid\n"
f"wait \$pid\n" f"wait \$pid\n"
f"echo $? > {self.process_dir}/rc\n" f"echo $? > {self.process_dir}/rc"
f"date +%s > {self.process_dir}/end_time\n"
) )
self.shell.exec( self.shell.exec(

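To make the mechanics above concrete, here is a distilled, local-only sketch of the wrapper-script technique both versions of RemoteProcess rely on: generate a command.sh that backgrounds the payload and persists its pid, return code and (in the left-hand version) start/end timestamps to files, launch it detached via nohup, then poll the rc file instead of holding a channel open. Paths and the polling loop are illustrative, not the testlib's exact code.

import os
import subprocess
import tempfile
import time

process_dir = tempfile.mkdtemp(prefix="proc_")
command = "sleep 1"

script = (
    "#!/bin/bash\n"
    f"cd {process_dir}\n"
    f"date +%s > {process_dir}/start_time\n"
    f"{command} &\n"
    "pid=$!\n"
    f"echo $pid > {process_dir}/pid\n"
    "wait $pid\n"
    f"echo $? > {process_dir}/rc\n"
    f"date +%s > {process_dir}/end_time\n"
)
script_path = os.path.join(process_dir, "command.sh")
with open(script_path, "w") as f:
    f.write(script)
os.chmod(script_path, 0o755)

# mirrors _start_process(): nohup plus fully detached stdio
subprocess.Popen(["nohup", script_path], stdin=subprocess.DEVNULL,
                 stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

rc_file = os.path.join(process_dir, "rc")
while not os.path.exists(rc_file):  # rc() returns None while the file is absent
    time.sleep(0.2)
with open(rc_file) as f:
    print("return code:", f.read().strip())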
View file

@ -1,9 +1,6 @@
from typing import Any from frostfs_testlib.reporter.allure_handler import AllureHandler, StepLogger
from frostfs_testlib.reporter.allure_handler import AllureHandler
from frostfs_testlib.reporter.interfaces import ReporterHandler from frostfs_testlib.reporter.interfaces import ReporterHandler
from frostfs_testlib.reporter.reporter import Reporter from frostfs_testlib.reporter.reporter import Reporter
from frostfs_testlib.reporter.steps_logger import StepsLogger
__reporter = Reporter() __reporter = Reporter()
@ -22,7 +19,3 @@ def get_reporter() -> Reporter:
def step(title: str): def step(title: str):
return __reporter.step(title) return __reporter.step(title)
def attach(content: Any, file_name: str):
return __reporter.attach(content, file_name)

View file

@ -1,6 +1,8 @@
import logging
import os import os
from contextlib import AbstractContextManager, ContextDecorator from contextlib import AbstractContextManager, ContextDecorator
from textwrap import shorten from textwrap import shorten
from types import TracebackType
from typing import Any, Callable from typing import Any, Callable
import allure import allure
@ -9,6 +11,42 @@ from allure import attachment_type
from frostfs_testlib.reporter.interfaces import ReporterHandler from frostfs_testlib.reporter.interfaces import ReporterHandler
class StepLoggerContext(AbstractContextManager, Callable):
INDENT = 0
def __init__(self, title: str):
self.title = title
self.logger = logging.getLogger("NeoLogger")
def __enter__(self) -> Any:
indent = ">" * StepLoggerContext.INDENT * 2
self.logger.info(f"{indent}> {self.title}")
StepLoggerContext.INDENT += 1
def __exit__(
self,
__exc_type: type[BaseException] | None,
__exc_value: BaseException | None,
__traceback: TracebackType | None,
) -> bool | None:
StepLoggerContext.INDENT -= 1
indent = "<" * StepLoggerContext.INDENT * 2
self.logger.info(f"{indent}< {self.title}")
class StepLogger(ReporterHandler):
"""Handler that prints steps to log."""
def step(self, name: str) -> AbstractContextManager | ContextDecorator:
return StepLoggerContext(name)
def step_decorator(self, name: str) -> Callable:
return StepLoggerContext(name)
def attach(self, body: Any, file_name: str) -> None:
pass
class AllureHandler(ReporterHandler): class AllureHandler(ReporterHandler):
"""Handler that stores test artifacts in Allure report.""" """Handler that stores test artifacts in Allure report."""
@ -21,14 +59,9 @@ class AllureHandler(ReporterHandler):
def attach(self, body: Any, file_name: str) -> None: def attach(self, body: Any, file_name: str) -> None:
attachment_name, extension = os.path.splitext(file_name) attachment_name, extension = os.path.splitext(file_name)
if extension.startswith("."):
extension = extension[1:]
attachment_type = self._resolve_attachment_type(extension) attachment_type = self._resolve_attachment_type(extension)
if os.path.exists(body): allure.attach(body, attachment_name, attachment_type, extension)
allure.attach.file(body, file_name, attachment_type, extension)
else:
allure.attach(body, attachment_name, attachment_type, extension)
def _resolve_attachment_type(self, extension: str) -> attachment_type: def _resolve_attachment_type(self, extension: str) -> attachment_type:
"""Try to find matching Allure attachment type by extension. """Try to find matching Allure attachment type by extension.

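The left-hand attach() adds one behavioural branch worth noting: when the body is a path that exists on disk, the artifact is stored via allure.attach.file instead of inlining the content. A short usage sketch, assuming the left-hand module layout (from frostfs_testlib import reporter) with allure-python-commons installed; the path and contents are made up.

from frostfs_testlib import reporter

# literal body -> inlined attachment, type resolved from the .log extension
reporter.attach("some log text", "session.log")

# existing path -> attached as a file via allure.attach.file
with open("/tmp/dump.json", "w") as f:
    f.write("{}")
reporter.attach("/tmp/dump.json", "dump.json")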
View file

@ -5,7 +5,6 @@ from typing import Any, Callable, Optional
from frostfs_testlib.plugins import load_plugin from frostfs_testlib.plugins import load_plugin
from frostfs_testlib.reporter.interfaces import ReporterHandler from frostfs_testlib.reporter.interfaces import ReporterHandler
from frostfs_testlib.utils.func_utils import format_by_args
@contextmanager @contextmanager
@ -64,8 +63,7 @@ class Reporter:
def wrapper(*a, **kw): def wrapper(*a, **kw):
resulting_func = func resulting_func = func
for handler in self.handlers: for handler in self.handlers:
parsed_name = format_by_args(func, name, *a, **kw) decorator = handler.step_decorator(name)
decorator = handler.step_decorator(parsed_name)
resulting_func = decorator(resulting_func) resulting_func = decorator(resulting_func)
return resulting_func(*a, **kw) return resulting_func(*a, **kw)
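The left-hand wrapper formats the step title on every call via format_by_args, which is what lets decorators such as @reporter.step("Prepare node {cluster_node}") interpolate the actual argument. Below is a hypothetical stand-in illustrating that behaviour; the real frostfs_testlib.utils.func_utils implementation may differ.

import inspect


def format_by_args(func, title: str, *args, **kwargs) -> str:
    # bind the call arguments to the function signature, then format them
    # into the step title's {placeholders}
    bound = inspect.signature(func).bind(*args, **kwargs)
    bound.apply_defaults()
    return title.format(**bound.arguments)


def prepare_node(cluster_node, k6_dir):
    ...


print(format_by_args(prepare_node, "Prepare node {cluster_node}", "node-01", "/tmp/k6"))
# -> Prepare node node-01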

View file

@ -1,56 +0,0 @@
import logging
import threading
from contextlib import AbstractContextManager, ContextDecorator
from functools import wraps
from types import TracebackType
from typing import Any, Callable
from frostfs_testlib.reporter.interfaces import ReporterHandler
class StepsLogger(ReporterHandler):
"""Handler that prints steps to log."""
def step(self, name: str) -> AbstractContextManager | ContextDecorator:
return StepLoggerContext(name)
def step_decorator(self, name: str) -> Callable:
return StepLoggerContext(name)
def attach(self, body: Any, file_name: str) -> None:
pass
class StepLoggerContext(AbstractContextManager):
INDENT = {}
def __init__(self, title: str):
self.title = title
self.logger = logging.getLogger("NeoLogger")
self.thread = threading.get_ident()
if self.thread not in StepLoggerContext.INDENT:
StepLoggerContext.INDENT[self.thread] = 1
def __enter__(self) -> Any:
indent = ">" * StepLoggerContext.INDENT[self.thread]
self.logger.info(f"[{self.thread}] {indent} {self.title}")
StepLoggerContext.INDENT[self.thread] += 1
def __exit__(
self,
__exc_type: type[BaseException] | None,
__exc_value: BaseException | None,
__traceback: TracebackType | None,
) -> bool | None:
StepLoggerContext.INDENT[self.thread] -= 1
indent = "<" * StepLoggerContext.INDENT[self.thread]
self.logger.info(f"[{self.thread}] {indent} {self.title}")
def __call__(self, func):
@wraps(func)
def impl(*a, **kw):
with self:
return func(*a, **kw)
return impl
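A condensed, runnable sketch of what the per-thread bookkeeping above buys over the older global-INDENT variant in allure_handler: each thread keeps its own depth counter, so steps logged from parallel() workers stay correctly nested. Class name and log format are simplified here.

import logging
import threading
from contextlib import AbstractContextManager

logging.basicConfig(level=logging.INFO, format="%(message)s")


class Step(AbstractContextManager):
    INDENT: dict[int, int] = {}

    def __init__(self, title: str):
        self.title = title
        self.thread = threading.get_ident()
        Step.INDENT.setdefault(self.thread, 1)

    def __enter__(self):
        logging.info(f"[{self.thread}] {'>' * Step.INDENT[self.thread]} {self.title}")
        Step.INDENT[self.thread] += 1

    def __exit__(self, *exc):
        Step.INDENT[self.thread] -= 1
        logging.info(f"[{self.thread}] {'<' * Step.INDENT[self.thread]} {self.title}")


with Step("outer step"):
    with Step("inner step"):
        pass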

View file

@ -9,4 +9,4 @@ FROSTFS_ADM_EXEC = os.getenv("FROSTFS_ADM_EXEC", "frostfs-adm")
# Config for frostfs-adm utility. Optional if tests are running against devenv # Config for frostfs-adm utility. Optional if tests are running against devenv
FROSTFS_ADM_CONFIG_PATH = os.getenv("FROSTFS_ADM_CONFIG_PATH") FROSTFS_ADM_CONFIG_PATH = os.getenv("FROSTFS_ADM_CONFIG_PATH")
CLI_DEFAULT_TIMEOUT = os.getenv("CLI_DEFAULT_TIMEOUT", "100s") CLI_DEFAULT_TIMEOUT = os.getenv("CLI_DEFAULT_TIMEOUT", None)

File diff suppressed because it is too large

View file

@ -13,14 +13,17 @@ from botocore.config import Config
from botocore.exceptions import ClientError from botocore.exceptions import ClientError
from mypy_boto3_s3 import S3Client from mypy_boto3_s3 import S3Client
from frostfs_testlib import reporter from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME from frostfs_testlib.resources.common import (
ASSETS_DIR,
MAX_REQUEST_ATTEMPTS,
RETRY_MODE,
S3_SYNC_WAIT_TIME,
)
from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict
from frostfs_testlib.utils.cli_utils import log_command_execution from frostfs_testlib.utils.cli_utils import log_command_execution
# TODO: Refactor this code to use shell instead of _cmd_run reporter = get_reporter()
from frostfs_testlib.utils.cli_utils import _configure_aws_cli
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
# Disable warnings on self-signed certificate which the # Disable warnings on self-signed certificate which the
@ -43,14 +46,11 @@ def report_error(func):
class Boto3ClientWrapper(S3ClientWrapper): class Boto3ClientWrapper(S3ClientWrapper):
__repr_name__: str = "Boto3 client" __repr_name__: str = "Boto3 client"
@reporter.step("Configure S3 client (boto3)") @reporter.step_deco("Configure S3 client (boto3)")
@report_error @report_error
def __init__( def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None:
self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1"
) -> None:
self.boto3_client: S3Client = None self.boto3_client: S3Client = None
self.session = boto3.Session() self.session = boto3.Session()
self.region = region
self.config = Config( self.config = Config(
retries={ retries={
"max_attempts": MAX_REQUEST_ATTEMPTS, "max_attempts": MAX_REQUEST_ATTEMPTS,
@ -60,10 +60,9 @@ class Boto3ClientWrapper(S3ClientWrapper):
self.access_key_id: str = access_key_id self.access_key_id: str = access_key_id
self.secret_access_key: str = secret_access_key self.secret_access_key: str = secret_access_key
self.s3gate_endpoint: str = "" self.s3gate_endpoint: str = ""
self.boto3_iam_client: S3Client = None
self.set_endpoint(s3gate_endpoint) self.set_endpoint(s3gate_endpoint)
@reporter.step("Set endpoint S3 to {s3gate_endpoint}") @reporter.step_deco("Set endpoint S3 to {s3gate_endpoint}")
def set_endpoint(self, s3gate_endpoint: str): def set_endpoint(self, s3gate_endpoint: str):
if self.s3gate_endpoint == s3gate_endpoint: if self.s3gate_endpoint == s3gate_endpoint:
return return
@ -74,23 +73,11 @@ class Boto3ClientWrapper(S3ClientWrapper):
service_name="s3", service_name="s3",
aws_access_key_id=self.access_key_id, aws_access_key_id=self.access_key_id,
aws_secret_access_key=self.secret_access_key, aws_secret_access_key=self.secret_access_key,
region_name=self.region,
config=self.config, config=self.config,
endpoint_url=s3gate_endpoint, endpoint_url=s3gate_endpoint,
verify=False, verify=False,
) )
@reporter.step("Set endpoint IAM to {iam_endpoint}")
def set_iam_endpoint(self, iam_endpoint: str):
self.boto3_iam_client = self.session.client(
service_name="iam",
aws_access_key_id=self.access_key_id,
aws_secret_access_key=self.secret_access_key,
endpoint_url=iam_endpoint,
verify=False,)
def _to_s3_param(self, param: str): def _to_s3_param(self, param: str):
replacement_map = { replacement_map = {
"Acl": "ACL", "Acl": "ACL",
@ -103,7 +90,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
return result return result
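_to_s3_param works together with the {**locals()}-filtering comprehensions used by the object methods below: snake_case Python arguments go in, CamelCase boto3 kwargs come out, and self plus unset (None) values are dropped. A distilled sketch with an abbreviated replacement map; the real map covers more acronyms.

def _to_s3_param(param: str) -> str:
    replacement_map = {"Acl": "ACL", "Cors": "CORS"}
    result = param.title().replace("_", "")
    for find, replace in replacement_map.items():
        result = result.replace(find, replace)
    return result


def head_object_params(bucket: str, key: str, version_id=None) -> dict:
    # locals() here is the enclosing function's scope: the outermost iterable
    # of a comprehension is evaluated before the comprehension's own scope
    return {
        _to_s3_param(param): value
        for param, value in locals().items()
        if param not in ["self"] and value is not None
    }


print(head_object_params("my-bucket", "my-key"))
# -> {'Bucket': 'my-bucket', 'Key': 'my-key'}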
# BUCKET METHODS # # BUCKET METHODS #
@reporter.step("Create bucket S3") @reporter.step_deco("Create bucket S3")
@report_error @report_error
def create_bucket( def create_bucket(
self, self,
@ -131,14 +118,16 @@ class Boto3ClientWrapper(S3ClientWrapper):
elif grant_full_control: elif grant_full_control:
params.update({"GrantFullControl": grant_full_control}) params.update({"GrantFullControl": grant_full_control})
if location_constraint: if location_constraint:
params.update({"CreateBucketConfiguration": {"LocationConstraint": location_constraint}}) params.update(
{"CreateBucketConfiguration": {"LocationConstraint": location_constraint}}
)
s3_bucket = self.boto3_client.create_bucket(**params) s3_bucket = self.boto3_client.create_bucket(**params)
log_command_execution(f"Created S3 bucket {bucket}", s3_bucket) log_command_execution(f"Created S3 bucket {bucket}", s3_bucket)
sleep(S3_SYNC_WAIT_TIME * 10) sleep(S3_SYNC_WAIT_TIME)
return bucket return bucket
@reporter.step("List buckets S3") @reporter.step_deco("List buckets S3")
@report_error @report_error
def list_buckets(self) -> list[str]: def list_buckets(self) -> list[str]:
found_buckets = [] found_buckets = []
@ -151,20 +140,20 @@ class Boto3ClientWrapper(S3ClientWrapper):
return found_buckets return found_buckets
@reporter.step("Delete bucket S3") @reporter.step_deco("Delete bucket S3")
@report_error @report_error
def delete_bucket(self, bucket: str) -> None: def delete_bucket(self, bucket: str) -> None:
response = self.boto3_client.delete_bucket(Bucket=bucket) response = self.boto3_client.delete_bucket(Bucket=bucket)
log_command_execution("S3 Delete bucket result", response) log_command_execution("S3 Delete bucket result", response)
sleep(S3_SYNC_WAIT_TIME * 10) sleep(S3_SYNC_WAIT_TIME)
@reporter.step("Head bucket S3") @reporter.step_deco("Head bucket S3")
@report_error @report_error
def head_bucket(self, bucket: str) -> None: def head_bucket(self, bucket: str) -> None:
response = self.boto3_client.head_bucket(Bucket=bucket) response = self.boto3_client.head_bucket(Bucket=bucket)
log_command_execution("S3 Head bucket result", response) log_command_execution("S3 Head bucket result", response)
@reporter.step("Put bucket versioning status") @reporter.step_deco("Put bucket versioning status")
@report_error @report_error
def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None:
response = self.boto3_client.put_bucket_versioning( response = self.boto3_client.put_bucket_versioning(
@ -172,7 +161,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
) )
log_command_execution("S3 Set bucket versioning to", response) log_command_execution("S3 Set bucket versioning to", response)
@reporter.step("Get bucket versioning status") @reporter.step_deco("Get bucket versioning status")
@report_error @report_error
def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]:
response = self.boto3_client.get_bucket_versioning(Bucket=bucket) response = self.boto3_client.get_bucket_versioning(Bucket=bucket)
@ -180,7 +169,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
log_command_execution("S3 Got bucket versioning status", response) log_command_execution("S3 Got bucket versioning status", response)
return status return status
@reporter.step("Put bucket tagging") @reporter.step_deco("Put bucket tagging")
@report_error @report_error
def put_bucket_tagging(self, bucket: str, tags: list) -> None: def put_bucket_tagging(self, bucket: str, tags: list) -> None:
tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
@ -188,27 +177,27 @@ class Boto3ClientWrapper(S3ClientWrapper):
response = self.boto3_client.put_bucket_tagging(Bucket=bucket, Tagging=tagging) response = self.boto3_client.put_bucket_tagging(Bucket=bucket, Tagging=tagging)
log_command_execution("S3 Put bucket tagging", response) log_command_execution("S3 Put bucket tagging", response)
@reporter.step("Get bucket tagging") @reporter.step_deco("Get bucket tagging")
@report_error @report_error
def get_bucket_tagging(self, bucket: str) -> list: def get_bucket_tagging(self, bucket: str) -> list:
response = self.boto3_client.get_bucket_tagging(Bucket=bucket) response = self.boto3_client.get_bucket_tagging(Bucket=bucket)
log_command_execution("S3 Get bucket tagging", response) log_command_execution("S3 Get bucket tagging", response)
return response.get("TagSet") return response.get("TagSet")
@reporter.step("Get bucket acl") @reporter.step_deco("Get bucket acl")
@report_error @report_error
def get_bucket_acl(self, bucket: str) -> list: def get_bucket_acl(self, bucket: str) -> list:
response = self.boto3_client.get_bucket_acl(Bucket=bucket) response = self.boto3_client.get_bucket_acl(Bucket=bucket)
log_command_execution("S3 Get bucket acl", response) log_command_execution("S3 Get bucket acl", response)
return response.get("Grants") return response.get("Grants")
@reporter.step("Delete bucket tagging") @reporter.step_deco("Delete bucket tagging")
@report_error @report_error
def delete_bucket_tagging(self, bucket: str) -> None: def delete_bucket_tagging(self, bucket: str) -> None:
response = self.boto3_client.delete_bucket_tagging(Bucket=bucket) response = self.boto3_client.delete_bucket_tagging(Bucket=bucket)
log_command_execution("S3 Delete bucket tagging", response) log_command_execution("S3 Delete bucket tagging", response)
@reporter.step("Put bucket ACL") @reporter.step_deco("Put bucket ACL")
@report_error @report_error
def put_bucket_acl( def put_bucket_acl(
self, self,
@ -225,56 +214,60 @@ class Boto3ClientWrapper(S3ClientWrapper):
response = self.boto3_client.put_bucket_acl(**params) response = self.boto3_client.put_bucket_acl(**params)
log_command_execution("S3 ACL bucket result", response) log_command_execution("S3 ACL bucket result", response)
@reporter.step("Put object lock configuration") @reporter.step_deco("Put object lock configuration")
@report_error @report_error
def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict:
response = self.boto3_client.put_object_lock_configuration(Bucket=bucket, ObjectLockConfiguration=configuration) response = self.boto3_client.put_object_lock_configuration(
Bucket=bucket, ObjectLockConfiguration=configuration
)
log_command_execution("S3 put_object_lock_configuration result", response) log_command_execution("S3 put_object_lock_configuration result", response)
return response return response
@reporter.step("Get object lock configuration") @reporter.step_deco("Get object lock configuration")
@report_error @report_error
def get_object_lock_configuration(self, bucket: str) -> dict: def get_object_lock_configuration(self, bucket: str) -> dict:
response = self.boto3_client.get_object_lock_configuration(Bucket=bucket) response = self.boto3_client.get_object_lock_configuration(Bucket=bucket)
log_command_execution("S3 get_object_lock_configuration result", response) log_command_execution("S3 get_object_lock_configuration result", response)
return response.get("ObjectLockConfiguration") return response.get("ObjectLockConfiguration")
@reporter.step("Get bucket policy") @reporter.step_deco("Get bucket policy")
@report_error @report_error
def get_bucket_policy(self, bucket: str) -> str: def get_bucket_policy(self, bucket: str) -> str:
response = self.boto3_client.get_bucket_policy(Bucket=bucket) response = self.boto3_client.get_bucket_policy(Bucket=bucket)
log_command_execution("S3 get_bucket_policy result", response) log_command_execution("S3 get_bucket_policy result", response)
return response.get("Policy") return response.get("Policy")
@reporter.step("Put bucket policy") @reporter.step_deco("Put bucket policy")
@report_error @report_error
def put_bucket_policy(self, bucket: str, policy: dict) -> None: def put_bucket_policy(self, bucket: str, policy: dict) -> None:
response = self.boto3_client.put_bucket_policy(Bucket=bucket, Policy=json.dumps(policy)) response = self.boto3_client.put_bucket_policy(Bucket=bucket, Policy=json.dumps(policy))
log_command_execution("S3 put_bucket_policy result", response) log_command_execution("S3 put_bucket_policy result", response)
return response return response
@reporter.step("Get bucket cors") @reporter.step_deco("Get bucket cors")
@report_error @report_error
def get_bucket_cors(self, bucket: str) -> dict: def get_bucket_cors(self, bucket: str) -> dict:
response = self.boto3_client.get_bucket_cors(Bucket=bucket) response = self.boto3_client.get_bucket_cors(Bucket=bucket)
log_command_execution("S3 get_bucket_cors result", response) log_command_execution("S3 get_bucket_cors result", response)
return response.get("CORSRules") return response.get("CORSRules")
@reporter.step("Get bucket location") @reporter.step_deco("Get bucket location")
@report_error @report_error
def get_bucket_location(self, bucket: str) -> str: def get_bucket_location(self, bucket: str) -> str:
response = self.boto3_client.get_bucket_location(Bucket=bucket) response = self.boto3_client.get_bucket_location(Bucket=bucket)
log_command_execution("S3 get_bucket_location result", response) log_command_execution("S3 get_bucket_location result", response)
return response.get("LocationConstraint") return response.get("LocationConstraint")
@reporter.step("Put bucket cors") @reporter.step_deco("Put bucket cors")
@report_error @report_error
def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None:
response = self.boto3_client.put_bucket_cors(Bucket=bucket, CORSConfiguration=cors_configuration) response = self.boto3_client.put_bucket_cors(
Bucket=bucket, CORSConfiguration=cors_configuration
)
log_command_execution("S3 put_bucket_cors result", response) log_command_execution("S3 put_bucket_cors result", response)
return response return response
@reporter.step("Delete bucket cors") @reporter.step_deco("Delete bucket cors")
@report_error @report_error
def delete_bucket_cors(self, bucket: str) -> None: def delete_bucket_cors(self, bucket: str) -> None:
response = self.boto3_client.delete_bucket_cors(Bucket=bucket) response = self.boto3_client.delete_bucket_cors(Bucket=bucket)
@ -283,7 +276,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
# END OF BUCKET METHODS # # END OF BUCKET METHODS #
# OBJECT METHODS # # OBJECT METHODS #
@reporter.step("List objects S3 v2") @reporter.step_deco("List objects S3 v2")
@report_error @report_error
def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
response = self.boto3_client.list_objects_v2(Bucket=bucket) response = self.boto3_client.list_objects_v2(Bucket=bucket)
@ -294,7 +287,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response if full_output else obj_list return response if full_output else obj_list
@reporter.step("List objects S3") @reporter.step_deco("List objects S3")
@report_error @report_error
def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
response = self.boto3_client.list_objects(Bucket=bucket) response = self.boto3_client.list_objects(Bucket=bucket)
@ -305,21 +298,21 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response if full_output else obj_list return response if full_output else obj_list
@reporter.step("List objects versions S3") @reporter.step_deco("List objects versions S3")
@report_error @report_error
def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict:
response = self.boto3_client.list_object_versions(Bucket=bucket) response = self.boto3_client.list_object_versions(Bucket=bucket)
log_command_execution("S3 List objects versions result", response) log_command_execution("S3 List objects versions result", response)
return response if full_output else response.get("Versions", []) return response if full_output else response.get("Versions", [])
@reporter.step("List objects delete markers S3") @reporter.step_deco("List objects delete markers S3")
@report_error @report_error
def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: def list_delete_markers(self, bucket: str, full_output: bool = False) -> list:
response = self.boto3_client.list_object_versions(Bucket=bucket) response = self.boto3_client.list_object_versions(Bucket=bucket)
log_command_execution("S3 List objects delete markers result", response) log_command_execution("S3 List objects delete markers result", response)
return response if full_output else response.get("DeleteMarkers", []) return response if full_output else response.get("DeleteMarkers", [])
@reporter.step("Put object S3") @reporter.step_deco("Put object S3")
@report_error @report_error
def put_object( def put_object(
self, self,
@ -350,7 +343,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
log_command_execution("S3 Put object result", response) log_command_execution("S3 Put object result", response)
return response.get("VersionId") return response.get("VersionId")
@reporter.step("Head object S3") @reporter.step_deco("Head object S3")
@report_error @report_error
def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict:
params = { params = {
@ -362,7 +355,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
log_command_execution("S3 Head object result", response) log_command_execution("S3 Head object result", response)
return response return response
@reporter.step("Delete object S3") @reporter.step_deco("Delete object S3")
@report_error @report_error
def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict:
params = { params = {
@ -372,10 +365,10 @@ class Boto3ClientWrapper(S3ClientWrapper):
} }
response = self.boto3_client.delete_object(**params) response = self.boto3_client.delete_object(**params)
log_command_execution("S3 Delete object result", response) log_command_execution("S3 Delete object result", response)
sleep(S3_SYNC_WAIT_TIME * 10) sleep(S3_SYNC_WAIT_TIME)
return response return response
@reporter.step("Delete objects S3") @reporter.step_deco("Delete objects S3")
@report_error @report_error
def delete_objects(self, bucket: str, keys: list[str]) -> dict: def delete_objects(self, bucket: str, keys: list[str]) -> dict:
response = self.boto3_client.delete_objects(Bucket=bucket, Delete=_make_objs_dict(keys)) response = self.boto3_client.delete_objects(Bucket=bucket, Delete=_make_objs_dict(keys))
@ -383,10 +376,10 @@ class Boto3ClientWrapper(S3ClientWrapper):
assert ( assert (
"Errors" not in response "Errors" not in response
), f'The following objects have not been deleted: {[err_info["Key"] for err_info in response["Errors"]]}.\nError Message: {response["Errors"]["Message"]}' ), f'The following objects have not been deleted: {[err_info["Key"] for err_info in response["Errors"]]}.\nError Message: {response["Errors"]["Message"]}'
sleep(S3_SYNC_WAIT_TIME * 10) sleep(S3_SYNC_WAIT_TIME)
return response return response
@reporter.step("Delete object versions S3") @reporter.step_deco("Delete object versions S3")
@report_error @report_error
def delete_object_versions(self, bucket: str, object_versions: list) -> dict: def delete_object_versions(self, bucket: str, object_versions: list) -> dict:
# Build deletion list in S3 format # Build deletion list in S3 format
@ -403,7 +396,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
log_command_execution("S3 Delete objects result", response) log_command_execution("S3 Delete objects result", response)
return response return response
@reporter.step("Delete object versions S3 without delete markers") @reporter.step_deco("Delete object versions S3 without delete markers")
@report_error @report_error
def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None:
# Delete objects without creating delete markers # Delete objects without creating delete markers
@ -413,7 +406,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
) )
log_command_execution("S3 Delete object result", response) log_command_execution("S3 Delete object result", response)
@reporter.step("Put object ACL") @reporter.step_deco("Put object ACL")
@report_error @report_error
def put_object_acl( def put_object_acl(
self, self,
@ -426,7 +419,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
# pytest.skip("Method put_object_acl is not supported by boto3 client") # pytest.skip("Method put_object_acl is not supported by boto3 client")
raise NotImplementedError("Unsupported for boto3 client") raise NotImplementedError("Unsupported for boto3 client")
@reporter.step("Get object ACL") @reporter.step_deco("Get object ACL")
@report_error @report_error
def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list:
params = { params = {
@ -438,7 +431,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
log_command_execution("S3 ACL objects result", response) log_command_execution("S3 ACL objects result", response)
return response.get("Grants") return response.get("Grants")
@reporter.step("Copy object S3") @reporter.step_deco("Copy object S3")
@report_error @report_error
def copy_object( def copy_object(
self, self,
@ -467,7 +460,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
log_command_execution("S3 Copy objects result", response) log_command_execution("S3 Copy objects result", response)
return key return key
@reporter.step("Get object S3") @reporter.step_deco("Get object S3")
@report_error @report_error
def get_object( def get_object(
self, self,
@ -485,7 +478,8 @@ class Boto3ClientWrapper(S3ClientWrapper):
params = { params = {
self._to_s3_param(param): value self._to_s3_param(param): value
for param, value in {**locals(), **{"Range": range_str}}.items() for param, value in {**locals(), **{"Range": range_str}}.items()
if param not in ["self", "object_range", "full_output", "range_str", "filename"] and value is not None if param not in ["self", "object_range", "full_output", "range_str", "filename"]
and value is not None
} }
response = self.boto3_client.get_object(**params) response = self.boto3_client.get_object(**params)
log_command_execution("S3 Get objects result", response) log_command_execution("S3 Get objects result", response)
@ -497,7 +491,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
chunk = response["Body"].read(1024) chunk = response["Body"].read(1024)
return response if full_output else filename return response if full_output else filename
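Hypothetical call sites for the ranged download above, assuming an already-configured Boto3ClientWrapper instance and an existing bucket/key: object_range is turned into an HTTP "bytes=start-end" Range header, and the body is streamed to a file under ASSETS_DIR unless the raw response is requested.

first_kb_path = s3_client.get_object("my-bucket", "my-key", object_range=(0, 1023))
raw_response = s3_client.get_object("my-bucket", "my-key", full_output=True)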
@reporter.step("Create multipart upload S3") @reporter.step_deco("Create multipart upload S3")
@report_error @report_error
def create_multipart_upload(self, bucket: str, key: str) -> str: def create_multipart_upload(self, bucket: str, key: str) -> str:
response = self.boto3_client.create_multipart_upload(Bucket=bucket, Key=key) response = self.boto3_client.create_multipart_upload(Bucket=bucket, Key=key)
@ -506,7 +500,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response["UploadId"] return response["UploadId"]
@reporter.step("List multipart uploads S3") @reporter.step_deco("List multipart uploads S3")
@report_error @report_error
def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]:
response = self.boto3_client.list_multipart_uploads(Bucket=bucket) response = self.boto3_client.list_multipart_uploads(Bucket=bucket)
@ -514,15 +508,19 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response.get("Uploads") return response.get("Uploads")
@reporter.step("Abort multipart upload S3") @reporter.step_deco("Abort multipart upload S3")
@report_error @report_error
def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None:
response = self.boto3_client.abort_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id) response = self.boto3_client.abort_multipart_upload(
Bucket=bucket, Key=key, UploadId=upload_id
)
log_command_execution("S3 Abort multipart upload", response) log_command_execution("S3 Abort multipart upload", response)
@reporter.step("Upload part S3") @reporter.step_deco("Upload part S3")
@report_error @report_error
def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: def upload_part(
self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str
) -> str:
with open(filepath, "rb") as put_file: with open(filepath, "rb") as put_file:
body = put_file.read() body = put_file.read()
@ -538,9 +536,11 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response["ETag"] return response["ETag"]
@reporter.step("Upload copy part S3") @reporter.step_deco("Upload copy part S3")
@report_error @report_error
def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: def upload_part_copy(
self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str
) -> str:
response = self.boto3_client.upload_part_copy( response = self.boto3_client.upload_part_copy(
UploadId=upload_id, UploadId=upload_id,
Bucket=bucket, Bucket=bucket,
@ -549,11 +549,13 @@ class Boto3ClientWrapper(S3ClientWrapper):
CopySource=copy_source, CopySource=copy_source,
) )
log_command_execution("S3 Upload copy part", response) log_command_execution("S3 Upload copy part", response)
assert response.get("CopyPartResult", []).get("ETag"), f"Expected ETag in response:\n{response}" assert response.get("CopyPartResult", []).get(
"ETag"
), f"Expected ETag in response:\n{response}"
return response["CopyPartResult"]["ETag"] return response["CopyPartResult"]["ETag"]
@reporter.step("List parts S3") @reporter.step_deco("List parts S3")
@report_error @report_error
def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]:
response = self.boto3_client.list_parts(UploadId=upload_id, Bucket=bucket, Key=key) response = self.boto3_client.list_parts(UploadId=upload_id, Bucket=bucket, Key=key)
@ -562,7 +564,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
return response["Parts"] return response["Parts"]
@reporter.step("Complete multipart upload S3") @reporter.step_deco("Complete multipart upload S3")
@report_error @report_error
def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None:
parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts] parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts]
@ -571,7 +573,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
) )
log_command_execution("S3 Complete multipart upload", response) log_command_execution("S3 Complete multipart upload", response)
@reporter.step("Put object retention") @reporter.step_deco("Put object retention")
@report_error @report_error
def put_object_retention( def put_object_retention(
self, self,
@ -589,7 +591,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
response = self.boto3_client.put_object_retention(**params) response = self.boto3_client.put_object_retention(**params)
log_command_execution("S3 Put object retention ", response) log_command_execution("S3 Put object retention ", response)
@reporter.step("Put object legal hold") @reporter.step_deco("Put object legal hold")
@report_error @report_error
def put_object_legal_hold( def put_object_legal_hold(
self, self,
@ -607,15 +609,15 @@ class Boto3ClientWrapper(S3ClientWrapper):
response = self.boto3_client.put_object_legal_hold(**params) response = self.boto3_client.put_object_legal_hold(**params)
log_command_execution("S3 Put object legal hold ", response) log_command_execution("S3 Put object legal hold ", response)
@reporter.step("Put object tagging") @reporter.step_deco("Put object tagging")
@report_error @report_error
def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = '') -> None: def put_object_tagging(self, bucket: str, key: str, tags: list) -> None:
tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
tagging = {"TagSet": tags} tagging = {"TagSet": tags}
response = self.boto3_client.put_object_tagging(Bucket=bucket, Key=key, Tagging=tagging, VersionId=version_id) response = self.boto3_client.put_object_tagging(Bucket=bucket, Key=key, Tagging=tagging)
log_command_execution("S3 Put object tagging", response) log_command_execution("S3 Put object tagging", response)
@reporter.step("Get object tagging") @reporter.step_deco("Get object tagging")
@report_error @report_error
def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list:
params = { params = {
@ -627,13 +629,13 @@ class Boto3ClientWrapper(S3ClientWrapper):
log_command_execution("S3 Get object tagging", response) log_command_execution("S3 Get object tagging", response)
return response.get("TagSet") return response.get("TagSet")
@reporter.step("Delete object tagging") @reporter.step_deco("Delete object tagging")
@report_error @report_error
def delete_object_tagging(self, bucket: str, key: str) -> None: def delete_object_tagging(self, bucket: str, key: str) -> None:
response = self.boto3_client.delete_object_tagging(Bucket=bucket, Key=key) response = self.boto3_client.delete_object_tagging(Bucket=bucket, Key=key)
log_command_execution("S3 Delete object tagging", response) log_command_execution("S3 Delete object tagging", response)
@reporter.step("Get object attributes") @reporter.step_deco("Get object attributes")
@report_error @report_error
def get_object_attributes( def get_object_attributes(
self, self,
@ -648,7 +650,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
logger.warning("Method get_object_attributes is not supported by boto3 client") logger.warning("Method get_object_attributes is not supported by boto3 client")
return {} return {}
@reporter.step("Sync directory S3") @reporter.step_deco("Sync directory S3")
@report_error @report_error
def sync( def sync(
self, self,
@ -659,7 +661,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
) -> dict: ) -> dict:
raise NotImplementedError("Sync is not supported for boto3 client") raise NotImplementedError("Sync is not supported for boto3 client")
@reporter.step("CP directory S3") @reporter.step_deco("CP directory S3")
@report_error @report_error
def cp( def cp(
self, self,
@ -671,287 +673,3 @@ class Boto3ClientWrapper(S3ClientWrapper):
raise NotImplementedError("Cp is not supported for boto3 client") raise NotImplementedError("Cp is not supported for boto3 client")
# END OBJECT METHODS # # END OBJECT METHODS #
# IAM METHODS #
# Some methods don't have checks because boto3 is silent in some cases (delete, attach, etc.)
@reporter.step("Adds the specified user to the specified group")
def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict:
response = self.boto3_iam_client.add_user_to_group(UserName=user_name, GroupName=group_name)
return response
@reporter.step("Attaches the specified managed policy to the specified IAM group")
def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict:
response = self.boto3_iam_client.attach_group_policy(GroupName=group_name, PolicyArn=policy_arn)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@reporter.step("Attaches the specified managed policy to the specified user")
def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict:
response = self.boto3_iam_client.attach_user_policy(UserName=user_name, PolicyArn=policy_arn)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@reporter.step("Creates a new AWS secret access key and access key ID for the specified user")
def iam_create_access_key(self, user_name: str) -> dict:
response = self.boto3_iam_client.create_access_key(UserName=user_name)
access_key_id = response["AccessKey"].get("AccessKeyId")
secret_access_key = response["AccessKey"].get("SecretAccessKey")
assert access_key_id, f"Expected AccessKeyId in response:\n{response}"
assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}"
return access_key_id, secret_access_key
@reporter.step("Creates a new group")
def iam_create_group(self, group_name: str) -> dict:
response = self.boto3_iam_client.create_group(GroupName=group_name)
assert response.get("Group"), f"Expected Group in response:\n{response}"
assert response["Group"].get("GroupName") == group_name, f"GroupName should be equal to {group_name}"
return response
@reporter.step("Creates a new managed policy for your AWS account")
def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict:
response = self.boto3_iam_client.create_policy(PolicyName=policy_name, PolicyDocument=json.dumps(policy_document))
assert response.get("Policy"), f"Expected Policy in response:\n{response}"
assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}"
return response
@reporter.step("Creates a new IAM user for your AWS account")
def iam_create_user(self, user_name: str) -> dict:
response = self.boto3_iam_client.create_user(UserName=user_name)
assert response.get("User"), f"Expected User in response:\n{response}"
assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}"
return response
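The removed IAM helpers in this section compose into a typical provisioning flow; a sketch with placeholder names and a stock AWS-style policy ARN, assuming set_iam_endpoint() has already been called on the wrapper.

s3_client.iam_create_user("test-user")
access_key_id, secret_access_key = s3_client.iam_create_access_key("test-user")
s3_client.iam_attach_user_policy("test-user", "arn:aws:iam::aws:policy/AmazonS3FullAccess")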
@reporter.step("Deletes the access key pair associated with the specified IAM user")
def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict:
response = self.boto3_iam_client.delete_access_key(AccessKeyId=access_key_id, UserName=user_name)
return response
@reporter.step("Deletes the specified IAM group")
def iam_delete_group(self, group_name: str) -> dict:
response = self.boto3_iam_client.delete_group(GroupName=group_name)
return response
@reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group")
def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict:
response = self.boto3_iam_client.delete_group_policy(GroupName=group_name, PolicyName=policy_name)
return response
@reporter.step("Deletes the specified managed policy")
def iam_delete_policy(self, policy_arn: str) -> dict:
response = self.boto3_iam_client.delete_policy(PolicyArn=policy_arn)
return response
@reporter.step("Deletes the specified IAM user")
def iam_delete_user(self, user_name: str) -> dict:
response = self.boto3_iam_client.delete_user(UserName=user_name)
return response
@reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user")
def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict:
response = self.boto3_iam_client.delete_user_policy(UserName=user_name, PolicyName=policy_name)
return response
@reporter.step("Removes the specified managed policy from the specified IAM group")
def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict:
response = self.boto3_iam_client.detach_group_policy(GroupName=group_name, PolicyArn=policy_arn)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@reporter.step("Removes the specified managed policy from the specified user")
def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict:
response = self.boto3_iam_client.detach_user_policy(UserName=user_name, PolicyArn=policy_arn)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@reporter.step("Returns a list of IAM users that are in the specified IAM group")
def iam_get_group(self, group_name: str) -> dict:
response = self.boto3_iam_client.get_group(GroupName=group_name)
assert response.get("Group").get("GroupName") == group_name, f"GroupName should be equal to {group_name}"
return response
@reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group")
def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict:
response = self.boto3_iam_client.get_group_policy(GroupName=group_name, PolicyName=policy_name)
return response
@reporter.step("Retrieves information about the specified managed policy")
def iam_get_policy(self, policy_arn: str) -> dict:
response = self.boto3_iam_client.get_policy(PolicyArn=policy_arn)
assert response.get("Policy"), f"Expected Policy in response:\n{response}"
assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}"
return response
@reporter.step("Retrieves information about the specified version of the specified managed policy")
def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict:
response = self.boto3_iam_client.get_policy_version(PolicyArn=policy_arn, VersionId=version_id)
assert response.get("PolicyVersion"), f"Expected PolicyVersion in response:\n{response}"
assert response["PolicyVersion"].get("VersionId") == version_id, f"VersionId should be equal to {version_id}"
return response
@reporter.step("Retrieves information about the specified IAM user")
def iam_get_user(self, user_name: str) -> dict:
response = self.boto3_iam_client.get_user(UserName=user_name)
assert response.get("User"), f"Expected User in response:\n{response}"
assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}"
return response
@reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user")
def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict:
response = self.boto3_iam_client.get_user_policy(UserName=user_name, PolicyName=policy_name)
assert response.get("UserName"), f"Expected UserName in response:\n{response}"
return response
@reporter.step("Returns information about the access key IDs associated with the specified IAM user")
def iam_list_access_keys(self, user_name: str) -> dict:
response = self.boto3_iam_client.list_access_keys(UserName=user_name)
return response
@reporter.step("Lists all managed policies that are attached to the specified IAM group")
def iam_list_attached_group_policies(self, group_name: str) -> dict:
response = self.boto3_iam_client.list_attached_group_policies(GroupName=group_name)
assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}"
return response
@reporter.step("Lists all managed policies that are attached to the specified IAM user")
def iam_list_attached_user_policies(self, user_name: str) -> dict:
response = self.boto3_iam_client.list_attached_user_policies(UserName=user_name)
assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}"
return response
@reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to")
def iam_list_entities_for_policy(self, policy_arn: str) -> dict:
response = self.boto3_iam_client.list_entities_for_policy(PolicyArn=policy_arn)
assert response.get("PolicyGroups"), f"Expected PolicyGroups in response:\n{response}"
assert response.get("PolicyUsers"), f"Expected PolicyUsers in response:\n{response}"
return response
@reporter.step("Lists the names of the inline policies that are embedded in the specified IAM group")
def iam_list_group_policies(self, group_name: str) -> dict:
response = self.boto3_iam_client.list_group_policies(GroupName=group_name)
assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}"
return response
@reporter.step("Lists the IAM groups")
def iam_list_groups(self) -> dict:
response = self.boto3_iam_client.list_groups()
assert response.get("Groups"), f"Expected Groups in response:\n{response}"
return response
@reporter.step("Lists the IAM groups that the specified IAM user belongs to")
def iam_list_groups_for_user(self, user_name: str) -> dict:
response = self.boto3_iam_client.list_groups_for_user(UserName=user_name)
assert response.get("Groups"), f"Expected Groups in response:\n{response}"
return response
@reporter.step("Lists all the managed policies that are available in your AWS account")
def iam_list_policies(self) -> dict:
response = self.boto3_iam_client.list_policies()
assert response.get("Policies"), f"Expected Policies in response:\n{response}"
return response
@reporter.step("Lists information about the versions of the specified managed policy")
def iam_list_policy_versions(self, policy_arn: str) -> dict:
response = self.boto3_iam_client.list_policy_versions(PolicyArn=policy_arn)
assert response.get("Versions"), f"Expected Versions in response:\n{response}"
return response
@reporter.step("Lists the names of the inline policies embedded in the specified IAM user")
def iam_list_user_policies(self, user_name: str) -> dict:
response = self.boto3_iam_client.list_user_policies(UserName=user_name)
assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}"
return response
@reporter.step("Lists the IAM users")
def iam_list_users(self) -> dict:
response = self.boto3_iam_client.list_users()
assert response.get("Users"), f"Expected Users in response:\n{response}"
return response
@reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group")
def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict:
response = self.boto3_iam_client.put_group_policy(GroupName=group_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document))
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user")
def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict:
response = self.boto3_iam_client.put_user_policy(UserName=user_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document))
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@reporter.step("Removes the specified user from the specified group")
def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict:
response = self.boto3_iam_client.remove_user_from_group(GroupName=group_name, UserName=user_name)
return response
@reporter.step("Updates the name and/or the path of the specified IAM group")
def iam_update_group(self, group_name: str, new_name: str, new_path: Optional[str] = None) -> dict:
response = self.boto3_iam_client.update_group(GroupName=group_name, NewGroupName=new_name, NewPath=new_path or "/")
return response
@reporter.step("Updates the name and/or the path of the specified IAM user")
def iam_update_user(self, user_name: str, new_name: str, new_path: Optional[str] = None) -> dict:
response = self.boto3_iam_client.update_user(UserName=user_name, NewUserName=new_name, NewPath=new_path or "/")
return response
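Taken together, these wrappers cover a full IAM lifecycle. A minimal usage sketch, assuming `client` is an instance of the S3 client wrapper these methods belong to (instantiation is out of scope here):

    # Create a user, embed an inline policy, and issue an access key.
    client.iam_create_user(user_name="test-user")
    policy = {
        "Version": "2012-10-17",
        "Statement": [{"Effect": "Allow", "Action": "s3:ListAllMyBuckets", "Resource": "*"}],
    }
    client.iam_put_user_policy("test-user", "list-buckets", policy)
    access_key = client.iam_create_access_key(user_name="test-user")

    # Tear down in reverse order: keys and inline policies must go before the user.
    client.iam_delete_access_key(access_key["AccessKey"]["AccessKeyId"], "test-user")
    client.iam_delete_user_policy("test-user", "list-buckets")
    client.iam_delete_user("test-user")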

View file

@ -1,16 +0,0 @@
import re
from frostfs_testlib.cli.generic_cli import GenericCli
from frostfs_testlib.s3.interfaces import BucketContainerResolver
from frostfs_testlib.storage.cluster import ClusterNode
class CurlBucketContainerResolver(BucketContainerResolver):
def resolve(self, node: ClusterNode, bucket_name: str, **kwargs: dict) -> str:
curl = GenericCli("curl", node.host)
output = curl(f"-I http://127.0.0.1:8084/{bucket_name}")
pattern = r"X-Container-Id: (\S+)"
cid = re.findall(pattern, output.stdout)
if cid:
return cid[0]
return None
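The deleted resolver above boils down to one HEAD request against the local HTTP gate; a standalone sketch of the same trick (the gate address and header name are taken from the code above, the helper name is hypothetical):

    import re
    import subprocess

    def resolve_container_id(bucket_name: str) -> str | None:
        # HEAD the bucket via the local HTTP gate and grep the container ID
        # out of the X-Container-Id response header.
        headers = subprocess.run(
            ["curl", "-I", f"http://127.0.0.1:8084/{bucket_name}"],
            capture_output=True, text=True, check=True,
        ).stdout
        match = re.search(r"X-Container-Id: (\S+)", headers)
        return match.group(1) if match else None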

View file

@ -1,8 +1,7 @@
from abc import ABC, abstractmethod from abc import abstractmethod
from datetime import datetime from datetime import datetime
from typing import Literal, Optional, Union from typing import Literal, Optional, Union
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.testing.readable import HumanReadableABC, HumanReadableEnum from frostfs_testlib.testing.readable import HumanReadableABC, HumanReadableEnum
@ -32,25 +31,9 @@ ACL_COPY = [
] ]
class BucketContainerResolver(ABC):
@abstractmethod
def resolve(self, node: ClusterNode, bucket_name: str, **kwargs: dict) -> str:
"""
Resolve Container ID from bucket name
Args:
node: node from where we want to resolve
bucket_name: name of the bucket
**kwargs: any other required params
Returns: Container ID
"""
raise NotImplementedError("Call from abstract class")
class S3ClientWrapper(HumanReadableABC): class S3ClientWrapper(HumanReadableABC):
@abstractmethod @abstractmethod
def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str, region: str) -> None: def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None:
pass pass
@abstractmethod @abstractmethod
@ -313,11 +296,15 @@ class S3ClientWrapper(HumanReadableABC):
abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.""" abort a given multipart upload multiple times in order to completely free all storage consumed by all parts."""
@abstractmethod @abstractmethod
def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: def upload_part(
self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str
) -> str:
"""Uploads a part in a multipart upload.""" """Uploads a part in a multipart upload."""
@abstractmethod @abstractmethod
def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: def upload_part_copy(
self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str
) -> str:
"""Uploads a part by copying data from an existing object as data source.""" """Uploads a part by copying data from an existing object as data source."""
@abstractmethod @abstractmethod
@ -395,154 +382,3 @@ class S3ClientWrapper(HumanReadableABC):
"""cp directory TODO: Add proper description""" """cp directory TODO: Add proper description"""
# END OF OBJECT METHODS # # END OF OBJECT METHODS #
# IAM METHODS #
@abstractmethod
def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict:
'''Adds the specified user to the specified group'''
@abstractmethod
def iam_attach_group_policy(self, group: str, policy_arn: str) -> dict:
'''Attaches the specified managed policy to the specified IAM group'''
@abstractmethod
def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict:
'''Attaches the specified managed policy to the specified user'''
@abstractmethod
def iam_create_access_key(self, user_name: str) -> dict:
'''Creates a new AWS secret access key and access key ID for the specified user'''
@abstractmethod
def iam_create_group(self, group_name: str) -> dict:
'''Creates a new group'''
@abstractmethod
def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict:
'''Creates a new managed policy for your AWS account'''
@abstractmethod
def iam_create_user(self, user_name: str) -> dict:
'''Creates a new IAM user for your AWS account'''
@abstractmethod
def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict:
'''Deletes the access key pair associated with the specified IAM user'''
@abstractmethod
def iam_delete_group(self, group_name: str) -> dict:
'''Deletes the specified IAM group'''
@abstractmethod
def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict:
'''Deletes the specified inline policy that is embedded in the specified IAM group'''
@abstractmethod
def iam_delete_policy(self, policy_arn: str) -> dict:
'''Deletes the specified managed policy'''
@abstractmethod
def iam_delete_user(self, user_name: str) -> dict:
'''Deletes the specified IAM user'''
@abstractmethod
def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict:
'''Deletes the specified inline policy that is embedded in the specified IAM user'''
@abstractmethod
def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict:
'''Removes the specified managed policy from the specified IAM group'''
@abstractmethod
def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict:
'''Removes the specified managed policy from the specified user'''
@abstractmethod
def iam_get_group(self, group_name: str) -> dict:
'''Returns a list of IAM users that are in the specified IAM group'''
@abstractmethod
def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict:
'''Retrieves the specified inline policy document that is embedded in the specified IAM group'''
@abstractmethod
def iam_get_policy(self, policy_arn: str) -> dict:
'''Retrieves information about the specified managed policy'''
@abstractmethod
def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict:
'''Retrieves information about the specified version of the specified managed policy'''
@abstractmethod
def iam_get_user(self, user_name: str) -> dict:
'''Retrieves information about the specified IAM user'''
@abstractmethod
def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict:
'''Retrieves the specified inline policy document that is embedded in the specified IAM user'''
@abstractmethod
def iam_list_access_keys(self, user_name: str) -> dict:
'''Returns information about the access key IDs associated with the specified IAM user'''
@abstractmethod
def iam_list_attached_group_policies(self, group_name: str) -> dict:
'''Lists all managed policies that are attached to the specified IAM group'''
@abstractmethod
def iam_list_attached_user_policies(self, user_name: str) -> dict:
'''Lists all managed policies that are attached to the specified IAM user'''
@abstractmethod
def iam_list_entities_for_policy(self, policy_arn: str) -> dict:
'''Lists all IAM users, groups, and roles that the specified managed policy is attached to'''
@abstractmethod
def iam_list_group_policies(self, group_name: str) -> dict:
'''Lists the names of the inline policies that are embedded in the specified IAM group'''
@abstractmethod
def iam_list_groups(self) -> dict:
'''Lists the IAM groups'''
@abstractmethod
def iam_list_groups_for_user(self, user_name: str) -> dict:
'''Lists the IAM groups that the specified IAM user belongs to'''
@abstractmethod
def iam_list_policies(self) -> dict:
'''Lists all the managed policies that are available in your AWS account'''
@abstractmethod
def iam_list_policy_versions(self, policy_arn: str) -> dict:
'''Lists information about the versions of the specified managed policy'''
@abstractmethod
def iam_list_user_policies(self, user_name: str) -> dict:
'''Lists the names of the inline policies embedded in the specified IAM user'''
@abstractmethod
def iam_list_users(self) -> dict:
'''Lists the IAM users'''
@abstractmethod
def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict:
'''Adds or updates an inline policy document that is embedded in the specified IAM group'''
@abstractmethod
def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict:
'''Adds or updates an inline policy document that is embedded in the specified IAM user'''
@abstractmethod
def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict:
'''Removes the specified user from the specified group'''
@abstractmethod
def iam_update_group(self, group_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict:
'''Updates the name and/or the path of the specified IAM group'''
@abstractmethod
def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict:
'''Updates the name and/or the path of the specified IAM user'''
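On the concrete side, each of these abstract methods maps one-to-one onto a boto3 IAM call, as in the Boto3 client shown earlier in this diff. A sketch of that pattern, with a hypothetical mixin name and assuming the IAM client is pointed at the same gateway endpoint as S3:

    import boto3

    class Boto3IamMixin:
        def __init__(self, endpoint: str, access_key_id: str, secret_access_key: str) -> None:
            # One boto3 IAM client per wrapper instance, aimed at the gateway.
            self.boto3_iam_client = boto3.client(
                "iam",
                endpoint_url=endpoint,
                aws_access_key_id=access_key_id,
                aws_secret_access_key=secret_access_key,
            )

        def iam_list_users(self) -> dict:
            # Thin passthrough: the interface method is the boto3 call.
            return self.boto3_iam_client.list_users()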

View file

@ -6,10 +6,11 @@ from typing import IO, Optional
import pexpect import pexpect
from frostfs_testlib import reporter from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell
logger = logging.getLogger("frostfs.testlib.shell") logger = logging.getLogger("frostfs.testlib.shell")
reporter = get_reporter()
class LocalShell(Shell): class LocalShell(Shell):

View file

@ -9,10 +9,11 @@ from typing import ClassVar, Optional, Tuple
from paramiko import AutoAddPolicy, Channel, ECDSAKey, Ed25519Key, PKey, RSAKey, SSHClient, SSHException, ssh_exception from paramiko import AutoAddPolicy, Channel, ECDSAKey, Ed25519Key, PKey, RSAKey, SSHClient, SSHException, ssh_exception
from paramiko.ssh_exception import AuthenticationException from paramiko.ssh_exception import AuthenticationException
from frostfs_testlib import reporter from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell, SshCredentials from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell, SshCredentials
logger = logging.getLogger("frostfs.testlib.shell") logger = logging.getLogger("frostfs.testlib.shell")
reporter = get_reporter()
class SshConnectionProvider: class SshConnectionProvider:
@ -185,7 +186,6 @@ class SSHShell(Shell):
private_key_passphrase: Optional[str] = None, private_key_passphrase: Optional[str] = None,
port: str = "22", port: str = "22",
command_inspectors: Optional[list[CommandInspector]] = None, command_inspectors: Optional[list[CommandInspector]] = None,
custom_environment: Optional[dict] = None
) -> None: ) -> None:
super().__init__() super().__init__()
self.connection_provider = SshConnectionProvider() self.connection_provider = SshConnectionProvider()
@ -197,8 +197,6 @@ class SSHShell(Shell):
self.command_inspectors = command_inspectors or [] self.command_inspectors = command_inspectors or []
self.environment = custom_environment
@property @property
def _connection(self): def _connection(self):
return self.connection_provider.provide(self.host, self.port) return self.connection_provider.provide(self.host, self.port)
@ -227,7 +225,7 @@ class SSHShell(Shell):
@log_command @log_command
def _exec_interactive(self, command: str, options: CommandOptions) -> CommandResult: def _exec_interactive(self, command: str, options: CommandOptions) -> CommandResult:
stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout, get_pty=True, environment=self.environment) stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout, get_pty=True)
for interactive_input in options.interactive_inputs: for interactive_input in options.interactive_inputs:
input = interactive_input.input input = interactive_input.input
if not input.endswith("\n"): if not input.endswith("\n"):
@ -254,7 +252,7 @@ class SSHShell(Shell):
@log_command @log_command
def _exec_non_interactive(self, command: str, options: CommandOptions) -> CommandResult: def _exec_non_interactive(self, command: str, options: CommandOptions) -> CommandResult:
try: try:
stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout, environment=self.environment) stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout)
if options.close_stdin: if options.close_stdin:
stdin.close() stdin.close()
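The custom_environment plumbing removed here rides on paramiko's `environment` parameter to `exec_command`; a standalone sketch with hypothetical host and credentials:

    from paramiko import AutoAddPolicy, SSHClient

    client = SSHClient()
    client.set_missing_host_key_policy(AutoAddPolicy())
    client.connect("10.0.0.1", port=22, username="user", password="secret")

    # The server must whitelist the variable names via AcceptEnv in sshd_config,
    # otherwise the environment dict is silently dropped by OpenSSH.
    stdin, stdout, stderr = client.exec_command(
        "echo $TRACE_ID", timeout=30, environment={"TRACE_ID": "abc123"}
    )
    print(stdout.read().decode())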

View file

@ -8,23 +8,29 @@ from typing import List, Optional, Union
import base58 import base58
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
from frostfs_testlib.resources.common import ASSETS_DIR from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.dataclasses.acl import EACL_LIFETIME, FROSTFS_CONTRACT_CACHE_TIMEOUT, EACLPubKey, EACLRole, EACLRule from frostfs_testlib.storage.dataclasses.acl import (
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo EACL_LIFETIME,
FROSTFS_CONTRACT_CACHE_TIMEOUT,
EACLPubKey,
EACLRole,
EACLRule,
)
from frostfs_testlib.utils import wallet_utils from frostfs_testlib.utils import wallet_utils
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@reporter.step("Get extended ACL") @reporter.step_deco("Get extended ACL")
def get_eacl(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str) -> Optional[str]: def get_eacl(wallet_path: str, cid: str, shell: Shell, endpoint: str) -> Optional[str]:
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
try: try:
result = cli.container.get_eacl(rpc_endpoint=endpoint, cid=cid) result = cli.container.get_eacl(wallet=wallet_path, rpc_endpoint=endpoint, cid=cid)
except RuntimeError as exc: except RuntimeError as exc:
logger.info("Extended ACL table is not set for this container") logger.info("Extended ACL table is not set for this container")
logger.info(f"Got exception while getting eacl: {exc}") logger.info(f"Got exception while getting eacl: {exc}")
@ -34,17 +40,18 @@ def get_eacl(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str) -> Optio
return result.stdout return result.stdout
@reporter.step("Set extended ACL") @reporter.step_deco("Set extended ACL")
def set_eacl( def set_eacl(
wallet: WalletInfo, wallet_path: str,
cid: str, cid: str,
eacl_table_path: str, eacl_table_path: str,
shell: Shell, shell: Shell,
endpoint: str, endpoint: str,
session_token: Optional[str] = None, session_token: Optional[str] = None,
) -> None: ) -> None:
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
cli.container.set_eacl( cli.container.set_eacl(
wallet=wallet_path,
rpc_endpoint=endpoint, rpc_endpoint=endpoint,
cid=cid, cid=cid,
table=eacl_table_path, table=eacl_table_path,
@ -60,7 +67,7 @@ def _encode_cid_for_eacl(cid: str) -> str:
def create_eacl(cid: str, rules_list: List[EACLRule], shell: Shell) -> str: def create_eacl(cid: str, rules_list: List[EACLRule], shell: Shell) -> str:
table_file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"eacl_table_{str(uuid.uuid4())}.json") table_file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"eacl_table_{str(uuid.uuid4())}.json")
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
cli.acl.extended_create(cid=cid, out=table_file_path, rule=rules_list) cli.acl.extended_create(cid=cid, out=table_file_path, rule=rules_list)
with open(table_file_path, "r") as file: with open(table_file_path, "r") as file:
@ -71,7 +78,7 @@ def create_eacl(cid: str, rules_list: List[EACLRule], shell: Shell) -> str:
def form_bearertoken_file( def form_bearertoken_file(
wallet: WalletInfo, wif: str,
cid: str, cid: str,
eacl_rule_list: List[Union[EACLRule, EACLPubKey]], eacl_rule_list: List[Union[EACLRule, EACLPubKey]],
shell: Shell, shell: Shell,
@ -86,7 +93,7 @@ def form_bearertoken_file(
enc_cid = _encode_cid_for_eacl(cid) if cid else None enc_cid = _encode_cid_for_eacl(cid) if cid else None
file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
eacl = get_eacl(wallet, cid, shell, endpoint) eacl = get_eacl(wif, cid, shell, endpoint)
json_eacl = dict() json_eacl = dict()
if eacl: if eacl:
eacl = eacl.replace("eACL: ", "").split("Signature")[0] eacl = eacl.replace("eACL: ", "").split("Signature")[0]
@ -127,7 +134,7 @@ def form_bearertoken_file(
if sign: if sign:
sign_bearer( sign_bearer(
shell=shell, shell=shell,
wallet=wallet, wallet_path=wif,
eacl_rules_file_from=file_path, eacl_rules_file_from=file_path,
eacl_rules_file_to=file_path, eacl_rules_file_to=file_path,
json=True, json=True,
@ -158,19 +165,27 @@ def eacl_rules(access: str, verbs: list, user: str) -> list[str]:
return rules return rules
def sign_bearer(shell: Shell, wallet: WalletInfo, eacl_rules_file_from: str, eacl_rules_file_to: str, json: bool) -> None: def sign_bearer(
frostfscli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) shell: Shell, wallet_path: str, eacl_rules_file_from: str, eacl_rules_file_to: str, json: bool
frostfscli.util.sign_bearer_token(eacl_rules_file_from, eacl_rules_file_to, json=json) ) -> None:
frostfscli = FrostfsCli(
shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG
)
frostfscli.util.sign_bearer_token(
wallet=wallet_path, from_file=eacl_rules_file_from, to_file=eacl_rules_file_to, json=json
)
@reporter.step("Wait for eACL cache expired") @reporter.step_deco("Wait for eACL cache expired")
def wait_for_cache_expired(): def wait_for_cache_expired():
sleep(FROSTFS_CONTRACT_CACHE_TIMEOUT) sleep(FROSTFS_CONTRACT_CACHE_TIMEOUT)
return return
@reporter.step("Return bearer token in base64 to caller") @reporter.step_deco("Return bearer token in base64 to caller")
def bearer_token_base64_from_file(bearer_path: str) -> str: def bearer_token_base64_from_file(
bearer_path: str,
) -> str:
with open(bearer_path, "rb") as file: with open(bearer_path, "rb") as file:
signed = file.read() signed = file.read()
return base64.b64encode(signed).decode("utf-8") return base64.b64encode(signed).decode("utf-8")
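Put together, the module supports a create-set-wait eACL flow. A minimal sketch using the signatures on the incoming side of this diff, assuming `wallet_path`, `cid`, `shell`, and `endpoint` come from fixtures and that EACLAccess and EACLOperation live alongside the classes imported above:

    from frostfs_testlib.storage.dataclasses.acl import EACLAccess, EACLOperation, EACLRole, EACLRule

    # Deny GET for OTHERS on the container, push the table, then wait out
    # the FrostFS contract cache before asserting the new table is visible.
    rules = [EACLRule(access=EACLAccess.DENY, role=EACLRole.OTHERS, operation=EACLOperation.GET)]
    table_path = create_eacl(cid, rules, shell)
    set_eacl(wallet_path, cid, table_path, shell, endpoint)
    wait_for_cache_expired()
    assert get_eacl(wallet_path, cid, shell, endpoint) is not None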

View file

@ -5,11 +5,10 @@ from dataclasses import dataclass
from time import sleep from time import sleep
from typing import Optional, Union from typing import Optional, Union
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.plugins import load_plugin from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
from frostfs_testlib.s3.interfaces import BucketContainerResolver from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.object import put_object, put_object_to_random_node from frostfs_testlib.steps.cli.object import put_object, put_object_to_random_node
from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.cluster import Cluster, ClusterNode
@ -18,13 +17,14 @@ from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.utils import json_utils from frostfs_testlib.utils import json_utils
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@dataclass @dataclass
class StorageContainerInfo: class StorageContainerInfo:
id: str id: str
wallet: WalletInfo wallet_file: WalletInfo
class StorageContainer: class StorageContainer:
@ -41,10 +41,13 @@ class StorageContainer:
def get_id(self) -> str: def get_id(self) -> str:
return self.storage_container_info.id return self.storage_container_info.id
def get_wallet(self) -> str: def get_wallet_path(self) -> str:
return self.storage_container_info.wallet return self.storage_container_info.wallet_file.path
@reporter.step("Generate new object and put in container") def get_wallet_config_path(self) -> str:
return self.storage_container_info.wallet_file.config_path
@reporter.step_deco("Generate new object and put in container")
def generate_object( def generate_object(
self, self,
size: int, size: int,
@ -57,34 +60,37 @@ class StorageContainer:
file_hash = get_file_hash(file_path) file_hash = get_file_hash(file_path)
container_id = self.get_id() container_id = self.get_id()
wallet = self.get_wallet() wallet_path = self.get_wallet_path()
wallet_config = self.get_wallet_config_path()
with reporter.step(f"Put object with size {size} to container {container_id}"): with reporter.step(f"Put object with size {size} to container {container_id}"):
if endpoint: if endpoint:
object_id = put_object( object_id = put_object(
wallet=wallet, wallet=wallet_path,
path=file_path, path=file_path,
cid=container_id, cid=container_id,
expire_at=expire_at, expire_at=expire_at,
shell=self.shell, shell=self.shell,
endpoint=endpoint, endpoint=endpoint,
bearer=bearer_token, bearer=bearer_token,
wallet_config=wallet_config,
) )
else: else:
object_id = put_object_to_random_node( object_id = put_object_to_random_node(
wallet=wallet, wallet=wallet_path,
path=file_path, path=file_path,
cid=container_id, cid=container_id,
expire_at=expire_at, expire_at=expire_at,
shell=self.shell, shell=self.shell,
cluster=self.cluster, cluster=self.cluster,
bearer=bearer_token, bearer=bearer_token,
wallet_config=wallet_config,
) )
storage_object = StorageObjectInfo( storage_object = StorageObjectInfo(
container_id, container_id,
object_id, object_id,
size=size, size=size,
wallet=wallet, wallet_file_path=wallet_path,
file_path=file_path, file_path=file_path,
file_hash=file_hash, file_hash=file_hash,
) )
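A usage sketch for the class above; constructor argument order and the `current_epoch` value are assumptions, with `shell` and `cluster` coming from fixtures:

    # Wrap an existing container and drop a 1 KiB object into it.
    container = StorageContainer(StorageContainerInfo(id=cid, wallet_file=wallet_info), shell, cluster)
    storage_object = container.generate_object(size=1024, expire_at=current_epoch + 10)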
@ -95,18 +101,18 @@ class StorageContainer:
DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X" DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X" SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X"
REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X" REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X"
DEFAULT_EC_PLACEMENT_RULE = "EC 3.1"
@reporter.step("Create Container") @reporter.step_deco("Create Container")
def create_container( def create_container(
wallet: WalletInfo, wallet: str,
shell: Shell, shell: Shell,
endpoint: str, endpoint: str,
rule: str = DEFAULT_PLACEMENT_RULE, rule: str = DEFAULT_PLACEMENT_RULE,
basic_acl: str = "", basic_acl: str = "",
attributes: Optional[dict] = None, attributes: Optional[dict] = None,
session_token: str = "", session_token: str = "",
session_wallet: str = "",
name: Optional[str] = None, name: Optional[str] = None,
options: Optional[dict] = None, options: Optional[dict] = None,
await_mode: bool = True, await_mode: bool = True,
@ -117,7 +123,7 @@ def create_container(
A wrapper for `frostfs-cli container create` call. A wrapper for `frostfs-cli container create` call.
Args: Args:
wallet (WalletInfo): a wallet on whose behalf a container is created wallet (str): a wallet on whose behalf a container is created
rule (optional, str): placement rule for container rule (optional, str): placement rule for container
basic_acl (optional, str): an ACL for container, will be basic_acl (optional, str): an ACL for container, will be
appended to `--basic-acl` key appended to `--basic-acl` key
@ -139,9 +145,10 @@ def create_container(
(str): CID of the created container (str): CID of the created container
""" """
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
result = cli.container.create( result = cli.container.create(
rpc_endpoint=endpoint, rpc_endpoint=endpoint,
wallet=session_wallet if session_wallet else wallet,
policy=rule, policy=rule,
basic_acl=basic_acl, basic_acl=basic_acl,
attributes=attributes, attributes=attributes,
@ -162,17 +169,23 @@ def create_container(
return cid return cid
def wait_for_container_creation(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str, attempts: int = 15, sleep_interval: int = 1): def wait_for_container_creation(
wallet: str, cid: str, shell: Shell, endpoint: str, attempts: int = 15, sleep_interval: int = 1
):
for _ in range(attempts): for _ in range(attempts):
containers = list_containers(wallet, shell, endpoint) containers = list_containers(wallet, shell, endpoint)
if cid in containers: if cid in containers:
return return
logger.info(f"There is no {cid} in {containers} yet; sleep {sleep_interval} and continue") logger.info(f"There is no {cid} in {containers} yet; sleep {sleep_interval} and continue")
sleep(sleep_interval) sleep(sleep_interval)
raise RuntimeError(f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting") raise RuntimeError(
f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting"
)
def wait_for_container_deletion(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str, attempts: int = 30, sleep_interval: int = 1): def wait_for_container_deletion(
wallet: str, cid: str, shell: Shell, endpoint: str, attempts: int = 30, sleep_interval: int = 1
):
for _ in range(attempts): for _ in range(attempts):
try: try:
get_container(wallet, cid, shell=shell, endpoint=endpoint) get_container(wallet, cid, shell=shell, endpoint=endpoint)
@ -185,28 +198,30 @@ def wait_for_container_deletion(wallet: WalletInfo, cid: str, shell: Shell, endp
raise AssertionError(f"Expected container deleted during {attempts * sleep_interval} sec.") raise AssertionError(f"Expected container deleted during {attempts * sleep_interval} sec.")
@reporter.step("List Containers") @reporter.step_deco("List Containers")
def list_containers(wallet: WalletInfo, shell: Shell, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT) -> list[str]: def list_containers(
wallet: str, shell: Shell, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT
) -> list[str]:
""" """
A wrapper for `frostfs-cli container list` call. It returns all the A wrapper for `frostfs-cli container list` call. It returns all the
available containers for the given wallet. available containers for the given wallet.
Args: Args:
wallet (WalletInfo): a wallet on whose behalf we list the containers wallet (str): a wallet on whose behalf we list the containers
shell: executor for cli command shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
timeout: Timeout for the operation. timeout: Timeout for the operation.
Returns: Returns:
(list): list of containers (list): list of containers
""" """
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
result = cli.container.list(rpc_endpoint=endpoint, timeout=timeout) result = cli.container.list(rpc_endpoint=endpoint, wallet=wallet, timeout=timeout)
logger.info(f"Containers: \n{result}") logger.info(f"Containers: \n{result}")
return result.stdout.split() return result.stdout.split()
@reporter.step("List Objects in container") @reporter.step_deco("List Objects in container")
def list_objects( def list_objects(
wallet: WalletInfo, wallet: str,
shell: Shell, shell: Shell,
container_id: str, container_id: str,
endpoint: str, endpoint: str,
@ -216,7 +231,7 @@ def list_objects(
A wrapper for `frostfs-cli container list-objects` call. It returns all the A wrapper for `frostfs-cli container list-objects` call. It returns all the
available objects in container. available objects in container.
Args: Args:
wallet (WalletInfo): a wallet on whose behalf we list the containers objects wallet (str): a wallet on whose behalf we list the containers objects
shell: executor for cli command shell: executor for cli command
container_id: cid of container container_id: cid of container
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
@ -224,15 +239,17 @@ def list_objects(
Returns: Returns:
(list): list of objects in the container (list): list of objects in the container
""" """
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
result = cli.container.list_objects(rpc_endpoint=endpoint, cid=container_id, timeout=timeout) result = cli.container.list_objects(
rpc_endpoint=endpoint, wallet=wallet, cid=container_id, timeout=timeout
)
logger.info(f"Container objects: \n{result}") logger.info(f"Container objects: \n{result}")
return result.stdout.split() return result.stdout.split()
@reporter.step("Get Container") @reporter.step_deco("Get Container")
def get_container( def get_container(
wallet: WalletInfo, wallet: str,
cid: str, cid: str,
shell: Shell, shell: Shell,
endpoint: str, endpoint: str,
@ -243,7 +260,7 @@ def get_container(
A wrapper for `frostfs-cli container get` call. It extracts container's A wrapper for `frostfs-cli container get` call. It extracts container's
attributes and rearranges them into a more compact view. attributes and rearranges them into a more compact view.
Args: Args:
wallet (WalletInfo): path to a wallet on whose behalf we get the container wallet (str): path to a wallet on whose behalf we get the container
cid (str): ID of the container to get cid (str): ID of the container to get
shell: executor for cli command shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
@ -253,8 +270,10 @@ def get_container(
(dict, str): dict of container attributes (dict, str): dict of container attributes
""" """
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
result = cli.container.get(rpc_endpoint=endpoint, cid=cid, json_mode=json_mode, timeout=timeout) result = cli.container.get(
rpc_endpoint=endpoint, wallet=wallet, cid=cid, json_mode=json_mode, timeout=timeout
)
if not json_mode: if not json_mode:
return result.stdout return result.stdout
@ -268,37 +287,40 @@ def get_container(
return container_info return container_info
@reporter.step("Delete Container") @reporter.step_deco("Delete Container")
# TODO: make the error message about a non-found container more user-friendly # TODO: make the error message about a non-found container more user-friendly
def delete_container( def delete_container(
wallet: WalletInfo, wallet: str,
cid: str, cid: str,
shell: Shell, shell: Shell,
endpoint: str, endpoint: str,
force: bool = False, force: bool = False,
session_token: Optional[str] = None, session_token: Optional[str] = None,
await_mode: bool = False, await_mode: bool = False,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> None: ) -> None:
""" """
A wrapper for `frostfs-cli container delete` call. A wrapper for `frostfs-cli container delete` call.
Args: Args:
await_mode: Block execution until container is removed. wallet (str): path to a wallet on whose behalf we delete the container
wallet (WalletInfo): path to a wallet on whose behalf we delete the container
cid (str): ID of the container to delete cid (str): ID of the container to delete
shell: executor for cli command shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
force (bool): do not check whether container contains locks and remove immediately force (bool): do not check whether container contains locks and remove immediately
session_token: a path to session token file session_token: a path to session token file
timeout: Timeout for the operation.
This function doesn't return anything. This function doesn't return anything.
""" """
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
cli.container.delete( cli.container.delete(
wallet=wallet,
cid=cid, cid=cid,
rpc_endpoint=endpoint, rpc_endpoint=endpoint,
force=force, force=force,
session=session_token, session=session_token,
await_mode=await_mode, await_mode=await_mode,
timeout=timeout,
) )
@ -328,24 +350,29 @@ def _parse_cid(output: str) -> str:
return splitted[1] return splitted[1]
@reporter.step("Search container by name") @reporter.step_deco("Search container by name")
def search_container_by_name(name: str, node: ClusterNode): def search_container_by_name(wallet: str, name: str, shell: Shell, endpoint: str):
resolver_cls = load_plugin("frostfs.testlib.bucket_cid_resolver", node.host.config.product) list_cids = list_containers(wallet, shell, endpoint)
resolver: BucketContainerResolver = resolver_cls() for cid in list_cids:
return resolver.resolve(node, name) cont_info = get_container(wallet, cid, shell, endpoint, True)
if cont_info.get("attributes", {}).get("Name", None) == name:
return cid
return None
@reporter.step("Search for nodes with a container") @reporter.step_deco("Search for nodes with a container")
def search_nodes_with_container( def search_nodes_with_container(
wallet: WalletInfo, wallet: str,
cid: str, cid: str,
shell: Shell, shell: Shell,
endpoint: str, endpoint: str,
cluster: Cluster, cluster: Cluster,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> list[ClusterNode]: ) -> list[ClusterNode]:
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
result = cli.container.search_node(rpc_endpoint=endpoint, cid=cid, timeout=timeout) result = cli.container.search_node(
rpc_endpoint=endpoint, wallet=wallet, cid=cid, timeout=timeout
)
pattern = r"[0-9]+(?:\.[0-9]+){3}" pattern = r"[0-9]+(?:\.[0-9]+){3}"
nodes_ip = list(set(re.findall(pattern, result.stdout))) nodes_ip = list(set(re.findall(pattern, result.stdout)))
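An end-to-end sketch of the container steps in this file, using the signatures on the incoming side of this diff (`wallet` is a wallet path there; `shell`, `endpoint`, and `cluster` are assumed fixtures):

    # Create a container, wait for it to persist, locate its nodes, delete it.
    cid = create_container(wallet, shell, endpoint, rule=DEFAULT_PLACEMENT_RULE, basic_acl="public-read-write")
    wait_for_container_creation(wallet, cid, shell, endpoint)
    nodes = search_nodes_with_container(wallet, cid, shell, endpoint, cluster)
    delete_container(wallet, cid, shell, endpoint)
    wait_for_container_deletion(wallet, cid, shell, endpoint)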

View file

@ -5,23 +5,23 @@ import re
import uuid import uuid
from typing import Any, Optional from typing import Any, Optional
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.cli.neogo import NeoGo from frostfs_testlib.cli.neogo import NeoGo
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE
from frostfs_testlib.resources.common import ASSETS_DIR from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.utils import json_utils from frostfs_testlib.utils import json_utils
from frostfs_testlib.utils.cli_utils import parse_cmd_table, parse_netmap_output from frostfs_testlib.utils.cli_utils import parse_cmd_table, parse_netmap_output
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
reporter = get_reporter()
@reporter.step("Get object from random node") @reporter.step_deco("Get object from random node")
def get_object_from_random_node( def get_object_from_random_node(
wallet: WalletInfo, wallet: str,
cid: str, cid: str,
oid: str, oid: str,
shell: Shell, shell: Shell,
@ -29,6 +29,7 @@ def get_object_from_random_node(
bearer: Optional[str] = None, bearer: Optional[str] = None,
write_object: Optional[str] = None, write_object: Optional[str] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
wallet_config: Optional[str] = None,
no_progress: bool = True, no_progress: bool = True,
session: Optional[str] = None, session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
@ -44,6 +45,7 @@ def get_object_from_random_node(
cluster: cluster object cluster: cluster object
bearer (optional, str): path to Bearer Token file, appends to `--bearer` key bearer (optional, str): path to Bearer Token file, appends to `--bearer` key
write_object (optional, str): path to downloaded file, appends to `--file` key write_object (optional, str): path to downloaded file, appends to `--file` key
wallet_config(optional, str): path to the wallet config
no_progress(optional, bool): do not show progress bar no_progress(optional, bool): do not show progress bar
xhdr (optional, dict): Request X-Headers in form of Key=Value xhdr (optional, dict): Request X-Headers in form of Key=Value
session (optional, dict): path to a JSON-encoded container session token session (optional, dict): path to a JSON-encoded container session token
@ -61,15 +63,16 @@ def get_object_from_random_node(
bearer, bearer,
write_object, write_object,
xhdr, xhdr,
wallet_config,
no_progress, no_progress,
session, session,
timeout, timeout,
) )
@reporter.step("Get object from {endpoint}") @reporter.step_deco("Get object from {endpoint}")
def get_object( def get_object(
wallet: WalletInfo, wallet: str,
cid: str, cid: str,
oid: str, oid: str,
shell: Shell, shell: Shell,
@ -77,6 +80,7 @@ def get_object(
bearer: Optional[str] = None, bearer: Optional[str] = None,
write_object: Optional[str] = None, write_object: Optional[str] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
wallet_config: Optional[str] = None,
no_progress: bool = True, no_progress: bool = True,
session: Optional[str] = None, session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
@ -85,13 +89,14 @@ def get_object(
GET from FrostFS. GET from FrostFS.
Args: Args:
wallet (WalletInfo): wallet on whose behalf GET is done wallet (str): wallet on whose behalf GET is done
cid (str): ID of Container where we get the Object from cid (str): ID of Container where we get the Object from
oid (str): Object ID oid (str): Object ID
shell: executor for cli command shell: executor for cli command
bearer: path to Bearer Token file, appends to `--bearer` key bearer: path to Bearer Token file, appends to `--bearer` key
write_object: path to downloaded file, appends to `--file` key write_object: path to downloaded file, appends to `--file` key
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
wallet_config(optional, str): path to the wallet config
no_progress(optional, bool): do not show progress bar no_progress(optional, bool): do not show progress bar
xhdr (optional, dict): Request X-Headers in form of Key=Value xhdr (optional, dict): Request X-Headers in form of Key=Value
session (optional, dict): path to a JSON-encoded container session token session (optional, dict): path to a JSON-encoded container session token
@ -104,9 +109,10 @@ def get_object(
write_object = str(uuid.uuid4()) write_object = str(uuid.uuid4())
file_path = os.path.join(ASSETS_DIR, write_object) file_path = os.path.join(ASSETS_DIR, write_object)
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
cli.object.get( cli.object.get(
rpc_endpoint=endpoint, rpc_endpoint=endpoint,
wallet=wallet,
cid=cid, cid=cid,
oid=oid, oid=oid,
file=file_path, file=file_path,
@ -120,15 +126,16 @@ def get_object(
return file_path return file_path
@reporter.step("Get Range Hash from {endpoint}") @reporter.step_deco("Get Range Hash from {endpoint}")
def get_range_hash( def get_range_hash(
wallet: WalletInfo, wallet: str,
cid: str, cid: str,
oid: str, oid: str,
range_cut: str, range_cut: str,
shell: Shell, shell: Shell,
endpoint: str, endpoint: str,
bearer: Optional[str] = None, bearer: Optional[str] = None,
wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
session: Optional[str] = None, session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
@ -145,15 +152,17 @@ def get_range_hash(
range_cut: Range to take hash from in the form offset1:length1,..., range_cut: Range to take hash from in the form offset1:length1,...,
value to pass to the `--range` parameter value to pass to the `--range` parameter
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
wallet_config: path to the wallet config
xhdr: Request X-Headers in form of Key=Values xhdr: Request X-Headers in form of Key=Values
session: Filepath to a JSON- or binary-encoded token of the object RANGEHASH session. session: Filepath to a JSON- or binary-encoded token of the object RANGEHASH session.
timeout: Timeout for the operation. timeout: Timeout for the operation.
Returns: Returns:
None None
""" """
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
result = cli.object.hash( result = cli.object.hash(
rpc_endpoint=endpoint, rpc_endpoint=endpoint,
wallet=wallet,
cid=cid, cid=cid,
oid=oid, oid=oid,
range=range_cut, range=range_cut,
@ -167,9 +176,9 @@ def get_range_hash(
return result.stdout.split(":")[1].strip() return result.stdout.split(":")[1].strip()
@reporter.step("Put object to random node") @reporter.step_deco("Put object to random node")
def put_object_to_random_node( def put_object_to_random_node(
wallet: WalletInfo, wallet: str,
path: str, path: str,
cid: str, cid: str,
shell: Shell, shell: Shell,
@ -178,6 +187,7 @@ def put_object_to_random_node(
copies_number: Optional[int] = None, copies_number: Optional[int] = None,
attributes: Optional[dict] = None, attributes: Optional[dict] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
wallet_config: Optional[str] = None,
expire_at: Optional[int] = None, expire_at: Optional[int] = None,
no_progress: bool = True, no_progress: bool = True,
session: Optional[str] = None, session: Optional[str] = None,
@ -196,6 +206,7 @@ def put_object_to_random_node(
copies_number: Number of copies of the object to store within the RPC call copies_number: Number of copies of the object to store within the RPC call
attributes: User attributes in form of Key1=Value1,Key2=Value2 attributes: User attributes in form of Key1=Value1,Key2=Value2
cluster: cluster under test cluster: cluster under test
wallet_config: path to the wallet config
no_progress: do not show progress bar no_progress: do not show progress bar
expire_at: Last epoch in the life of the object expire_at: Last epoch in the life of the object
xhdr: Request X-Headers in form of Key=Value xhdr: Request X-Headers in form of Key=Value
@ -216,6 +227,7 @@ def put_object_to_random_node(
copies_number, copies_number,
attributes, attributes,
xhdr, xhdr,
wallet_config,
expire_at, expire_at,
no_progress, no_progress,
session, session,
@ -223,9 +235,9 @@ def put_object_to_random_node(
) )
@reporter.step("Put object at {endpoint} in container {cid}") @reporter.step_deco("Put object at {endpoint} in container {cid}")
def put_object( def put_object(
wallet: WalletInfo, wallet: str,
path: str, path: str,
cid: str, cid: str,
shell: Shell, shell: Shell,
@ -234,6 +246,7 @@ def put_object(
copies_number: Optional[int] = None, copies_number: Optional[int] = None,
attributes: Optional[dict] = None, attributes: Optional[dict] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
wallet_config: Optional[str] = None,
expire_at: Optional[int] = None, expire_at: Optional[int] = None,
no_progress: bool = True, no_progress: bool = True,
session: Optional[str] = None, session: Optional[str] = None,
@ -251,6 +264,7 @@ def put_object(
copies_number: Number of copies of the object to store within the RPC call copies_number: Number of copies of the object to store within the RPC call
attributes: User attributes in form of Key1=Value1,Key2=Value2 attributes: User attributes in form of Key1=Value1,Key2=Value2
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
wallet_config: path to the wallet config
no_progress: do not show progress bar no_progress: do not show progress bar
expire_at: Last epoch in the life of the object expire_at: Last epoch in the life of the object
xhdr: Request X-Headers in form of Key=Value xhdr: Request X-Headers in form of Key=Value
@ -260,9 +274,10 @@ def put_object(
(str): ID of uploaded Object (str): ID of uploaded Object
""" """
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
result = cli.object.put( result = cli.object.put(
rpc_endpoint=endpoint, rpc_endpoint=endpoint,
wallet=wallet,
file=path, file=path,
cid=cid, cid=cid,
attributes=attributes, attributes=attributes,
@ -281,14 +296,15 @@ def put_object(
return oid.strip() return oid.strip()
@reporter.step("Delete object {cid}/{oid} from {endpoint}") @reporter.step_deco("Delete object {cid}/{oid} from {endpoint}")
def delete_object( def delete_object(
wallet: WalletInfo, wallet: str,
cid: str, cid: str,
oid: str, oid: str,
shell: Shell, shell: Shell,
endpoint: str, endpoint: str,
bearer: str = "", bearer: str = "",
wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
session: Optional[str] = None, session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
@ -303,6 +319,7 @@ def delete_object(
shell: executor for cli command shell: executor for cli command
bearer: path to Bearer Token file, appends to `--bearer` key bearer: path to Bearer Token file, appends to `--bearer` key
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
wallet_config: path to the wallet config
xhdr: Request X-Headers in form of Key=Value xhdr: Request X-Headers in form of Key=Value
session: path to a JSON-encoded container session token session: path to a JSON-encoded container session token
timeout: Timeout for the operation. timeout: Timeout for the operation.
@ -310,9 +327,10 @@ def delete_object(
(str): Tombstone ID (str): Tombstone ID
""" """
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
result = cli.object.delete( result = cli.object.delete(
rpc_endpoint=endpoint, rpc_endpoint=endpoint,
wallet=wallet,
cid=cid, cid=cid,
oid=oid, oid=oid,
bearer=bearer, bearer=bearer,
@ -326,14 +344,15 @@ def delete_object(
return tombstone.strip() return tombstone.strip()
@reporter.step("Get Range") @reporter.step_deco("Get Range")
def get_range( def get_range(
wallet: WalletInfo, wallet: str,
cid: str, cid: str,
oid: str, oid: str,
range_cut: str, range_cut: str,
shell: Shell, shell: Shell,
endpoint: str, endpoint: str,
wallet_config: Optional[str] = None,
bearer: str = "", bearer: str = "",
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
session: Optional[str] = None, session: Optional[str] = None,
@ -350,6 +369,7 @@ def get_range(
shell: executor for cli command shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
bearer: path to Bearer Token file, appends to `--bearer` key bearer: path to Bearer Token file, appends to `--bearer` key
wallet_config: path to the wallet config
xhdr: Request X-Headers in form of Key=Value xhdr: Request X-Headers in form of Key=Value
session: path to a JSON-encoded container session token session: path to a JSON-encoded container session token
timeout: Timeout for the operation. timeout: Timeout for the operation.
@ -358,9 +378,10 @@ def get_range(
""" """
range_file_path = os.path.join(ASSETS_DIR, str(uuid.uuid4())) range_file_path = os.path.join(ASSETS_DIR, str(uuid.uuid4()))
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
cli.object.range( cli.object.range(
rpc_endpoint=endpoint, rpc_endpoint=endpoint,
wallet=wallet,
cid=cid, cid=cid,
oid=oid, oid=oid,
range=range_cut, range=range_cut,
@ -376,9 +397,9 @@ def get_range(
return range_file_path, content return range_file_path, content
@reporter.step("Lock Object") @reporter.step_deco("Lock Object")
def lock_object( def lock_object(
wallet: WalletInfo, wallet: str,
cid: str, cid: str,
oid: str, oid: str,
shell: Shell, shell: Shell,
@ -388,6 +409,7 @@ def lock_object(
address: Optional[str] = None, address: Optional[str] = None,
bearer: Optional[str] = None, bearer: Optional[str] = None,
session: Optional[str] = None, session: Optional[str] = None,
wallet_config: Optional[str] = None,
ttl: Optional[int] = None, ttl: Optional[int] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
@ -414,12 +436,13 @@ def lock_object(
Lock object ID Lock object ID
""" """
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
result = cli.object.lock( result = cli.object.lock(
rpc_endpoint=endpoint, rpc_endpoint=endpoint,
lifetime=lifetime, lifetime=lifetime,
expire_at=expire_at, expire_at=expire_at,
address=address, address=address,
wallet=wallet,
cid=cid, cid=cid,
oid=oid, oid=oid,
bearer=bearer, bearer=bearer,
@ -435,15 +458,16 @@ def lock_object(
return oid.strip() return oid.strip()
@reporter.step("Search object") @reporter.step_deco("Search object")
def search_object( def search_object(
wallet: WalletInfo, wallet: str,
cid: str, cid: str,
shell: Shell, shell: Shell,
endpoint: str, endpoint: str,
bearer: str = "", bearer: str = "",
filters: Optional[dict] = None, filters: Optional[dict] = None,
expected_objects_list: Optional[list] = None, expected_objects_list: Optional[list] = None,
wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
session: Optional[str] = None, session: Optional[str] = None,
phy: bool = False, phy: bool = False,
@ -461,6 +485,7 @@ def search_object(
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
filters: key=value pairs to filter Objects filters: key=value pairs to filter Objects
expected_objects_list: a list of ObjectIDs to compare found Objects with expected_objects_list: a list of ObjectIDs to compare found Objects with
wallet_config: path to the wallet config
xhdr: Request X-Headers in form of Key=Value xhdr: Request X-Headers in form of Key=Value
session: path to a JSON-encoded container session token session: path to a JSON-encoded container session token
phy: Search physically stored objects. phy: Search physically stored objects.
@ -471,13 +496,16 @@ def search_object(
list of found ObjectIDs list of found ObjectIDs
""" """
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
result = cli.object.search( result = cli.object.search(
rpc_endpoint=endpoint, rpc_endpoint=endpoint,
wallet=wallet,
cid=cid, cid=cid,
bearer=bearer, bearer=bearer,
xhdr=xhdr, xhdr=xhdr,
filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()] if filters else None, filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()]
if filters
else None,
session=session, session=session,
phy=phy, phy=phy,
root=root, root=root,
@ -488,18 +516,25 @@ def search_object(
if expected_objects_list: if expected_objects_list:
if sorted(found_objects) == sorted(expected_objects_list): if sorted(found_objects) == sorted(expected_objects_list):
logger.info(f"Found objects list '{found_objects}' " f"is equal for expected list '{expected_objects_list}'") logger.info(
f"Found objects list '{found_objects}' "
f"is equal for expected list '{expected_objects_list}'"
)
else: else:
logger.warning(f"Found object list {found_objects} " f"is not equal to expected list '{expected_objects_list}'") logger.warning(
f"Found object list {found_objects} "
f"is not equal to expected list '{expected_objects_list}'"
)
return found_objects return found_objects
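A rough sketch of a filtered search under the same assumptions; note that expected_objects_list is only compared with a log message, it does not fail the test:

from frostfs_testlib.steps.cli.object import search_object

found = search_object(
    wallet,
    cid,
    shell,
    endpoint,
    filters={"FileName": "cat.jpg"},   # rendered as "FileName EQ cat.jpg" for the CLI
    expected_objects_list=[oid],       # mismatches are logged as warnings only
)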
@reporter.step("Get netmap netinfo") @reporter.step_deco("Get netmap netinfo")
def get_netmap_netinfo( def get_netmap_netinfo(
wallet: WalletInfo, wallet: str,
shell: Shell, shell: Shell,
endpoint: str, endpoint: str,
wallet_config: Optional[str] = None,
address: Optional[str] = None, address: Optional[str] = None,
ttl: Optional[int] = None, ttl: Optional[int] = None,
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
@ -509,7 +544,7 @@ def get_netmap_netinfo(
Get netmap netinfo output from node Get netmap netinfo output from node
Args: Args:
wallet (WalletInfo): wallet on whose behalf request is done wallet (str): wallet on whose behalf request is done
shell: executor for cli command shell: executor for cli command
endpoint (optional, str): FrostFS endpoint to send request to, appends to `--rpc-endpoint` key endpoint (optional, str): FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
address: Address of wallet account address: Address of wallet account
@ -522,8 +557,9 @@ def get_netmap_netinfo(
(dict): dict of parsed command output (dict): dict of parsed command output
""" """
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
output = cli.netmap.netinfo( output = cli.netmap.netinfo(
wallet=wallet,
rpc_endpoint=endpoint, rpc_endpoint=endpoint,
address=address, address=address,
ttl=ttl, ttl=ttl,
@ -545,9 +581,9 @@ def get_netmap_netinfo(
return settings return settings
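A short sketch of the netinfo helper; it shells out to `frostfs-cli netmap netinfo` and parses the output into a plain dict, so the exact keys depend on the CLI version (module path assumed, as above):

from frostfs_testlib.steps.cli.object import get_netmap_netinfo

netinfo = get_netmap_netinfo(wallet=wallet, shell=shell, endpoint=endpoint)
print(netinfo)  # parsed network settings such as epoch duration and fees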
@reporter.step("Head object") @reporter.step_deco("Head object")
def head_object( def head_object(
wallet: WalletInfo, wallet: str,
cid: str, cid: str,
oid: str, oid: str,
shell: Shell, shell: Shell,
@ -557,6 +593,7 @@ def head_object(
json_output: bool = True, json_output: bool = True,
is_raw: bool = False, is_raw: bool = False,
is_direct: bool = False, is_direct: bool = False,
wallet_config: Optional[str] = None,
session: Optional[str] = None, session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
): ):
@ -564,7 +601,7 @@ def head_object(
HEAD an Object. HEAD an Object.
Args: Args:
wallet (WalletInfo): wallet on whose behalf HEAD is done wallet (str): wallet on whose behalf HEAD is done
cid (str): ID of Container where we get the Object from cid (str): ID of Container where we get the Object from
oid (str): ObjectID to HEAD oid (str): ObjectID to HEAD
shell: executor for cli command shell: executor for cli command
@ -576,6 +613,7 @@ def head_object(
turns into `--raw` key turns into `--raw` key
is_direct(optional, bool): send request directly to the node or not; this flag is_direct(optional, bool): send request directly to the node or not; this flag
turns into `--ttl 1` key turns into `--ttl 1` key
wallet_config(optional, str): path to the wallet config
xhdr (optional, dict): Request X-Headers in form of Key=Value xhdr (optional, dict): Request X-Headers in form of Key=Value
session (optional, dict): path to a JSON-encoded container session token session (optional, dict): path to a JSON-encoded container session token
timeout: Timeout for the operation. timeout: Timeout for the operation.
@ -586,9 +624,10 @@ def head_object(
(str): HEAD response as a plain text (str): HEAD response as a plain text
""" """
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
result = cli.object.head( result = cli.object.head(
rpc_endpoint=endpoint, rpc_endpoint=endpoint,
wallet=wallet,
cid=cid, cid=cid,
oid=oid, oid=oid,
bearer=bearer, bearer=bearer,
@ -638,8 +677,8 @@ def head_object(
return json_utils.decode_simple_header(decoded) return json_utils.decode_simple_header(decoded)
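An illustration of the is_raw/is_direct knobs documented above (sketch, same assumed fixtures):

from frostfs_testlib.steps.cli.object import head_object

# Regular HEAD, parsed from the CLI's JSON output:
header = head_object(wallet, cid, oid, shell, endpoint=endpoint)

# Raw HEAD sent straight to one node (`--raw` plus `--ttl 1`), handy for inspecting split objects:
raw_header = head_object(wallet, cid, oid, shell, endpoint=endpoint, is_raw=True, is_direct=True)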
@reporter.step("Run neo-go dump-keys") @reporter.step_deco("Run neo-go dump-keys")
def neo_go_dump_keys(shell: Shell, wallet: WalletInfo) -> dict: def neo_go_dump_keys(shell: Shell, wallet: str) -> dict:
""" """
Run neo-go dump keys command Run neo-go dump keys command
@ -663,7 +702,7 @@ def neo_go_dump_keys(shell: Shell, wallet: WalletInfo) -> dict:
return {address_id: wallet_key} return {address_id: wallet_key}
@reporter.step("Run neo-go query height") @reporter.step_deco("Run neo-go query height")
def neo_go_query_height(shell: Shell, endpoint: str) -> dict: def neo_go_query_height(shell: Shell, endpoint: str) -> dict:
""" """
Run neo-go query height command Run neo-go query height command
@ -695,27 +734,26 @@ def neo_go_query_height(shell: Shell, endpoint: str) -> dict:
} }
@reporter.step("Search object nodes") @reporter.step_deco("Search object nodes")
def get_object_nodes( def get_object_nodes(
cluster: Cluster, cluster: Cluster,
wallet: str,
cid: str, cid: str,
oid: str, oid: str,
alive_node: ClusterNode, shell: Shell,
endpoint: str,
bearer: str = "", bearer: str = "",
xhdr: Optional[dict] = None, xhdr: Optional[dict] = None,
is_direct: bool = False, is_direct: bool = False,
verify_presence_all: bool = False, verify_presence_all: bool = False,
wallet_config: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> list[ClusterNode]: ) -> list[ClusterNode]:
shell = alive_node.host.get_shell() cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
endpoint = alive_node.storage_node.get_rpc_endpoint()
wallet = alive_node.storage_node.get_remote_wallet_path()
wallet_config = alive_node.storage_node.get_remote_wallet_config_path()
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config)
result_object_nodes = cli.object.nodes( result_object_nodes = cli.object.nodes(
rpc_endpoint=endpoint, rpc_endpoint=endpoint,
wallet=wallet,
cid=cid, cid=cid,
oid=oid, oid=oid,
bearer=bearer, bearer=bearer,
@ -727,7 +765,9 @@ def get_object_nodes(
parsing_output = parse_cmd_table(result_object_nodes.stdout, "|") parsing_output = parse_cmd_table(result_object_nodes.stdout, "|")
list_object_nodes = [ list_object_nodes = [
node for node in parsing_output if node["should_contain_object"] == "true" and node["actually_contains_object"] == "true" node
for node in parsing_output
if node["should_contain_object"] == "true" and node["actually_contains_object"] == "true"
] ]
netmap_nodes_list = parse_netmap_output( netmap_nodes_list = parse_netmap_output(
@ -744,7 +784,10 @@ def get_object_nodes(
] ]
result = [ result = [
cluster_node for netmap_node in netmap_nodes for cluster_node in cluster.cluster_nodes if netmap_node.node == cluster_node.host_ip cluster_node
for netmap_node in netmap_nodes
for cluster_node in cluster.cluster_nodes
if netmap_node.node == cluster_node.host_ip
] ]
return result return result
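A sketch of the left-hand (alive_node based) variant, assuming a Cluster fixture; the helper cross-references `frostfs-cli object nodes` output with the netmap and returns the ClusterNodes that should hold the object and actually do:

nodes_with_object = get_object_nodes(
    cluster=cluster,
    cid=cid,
    oid=oid,
    alive_node=cluster.cluster_nodes[0],  # any reachable node to run the CLI from
)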

View file

@ -12,14 +12,15 @@
import logging import logging
from typing import Optional, Tuple from typing import Optional, Tuple
from frostfs_testlib import reporter from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.object import head_object from frostfs_testlib.steps.cli.object import head_object
from frostfs_testlib.storage.cluster import Cluster, StorageNode from frostfs_testlib.storage.cluster import Cluster, StorageNode
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@ -44,7 +45,7 @@ def get_storage_object_chunks(
with reporter.step(f"Get complex object chunks (f{storage_object.oid})"): with reporter.step(f"Get complex object chunks (f{storage_object.oid})"):
split_object_id = get_link_object( split_object_id = get_link_object(
storage_object.wallet, storage_object.wallet_file_path,
storage_object.cid, storage_object.cid,
storage_object.oid, storage_object.oid,
shell, shell,
@ -53,7 +54,7 @@ def get_storage_object_chunks(
timeout=timeout, timeout=timeout,
) )
head = head_object( head = head_object(
storage_object.wallet, storage_object.wallet_file_path,
storage_object.cid, storage_object.cid,
split_object_id, split_object_id,
shell, shell,
@ -96,7 +97,7 @@ def get_complex_object_split_ranges(
chunks_ids = get_storage_object_chunks(storage_object, shell, cluster) chunks_ids = get_storage_object_chunks(storage_object, shell, cluster)
for chunk_id in chunks_ids: for chunk_id in chunks_ids:
head = head_object( head = head_object(
storage_object.wallet, storage_object.wallet_file_path,
storage_object.cid, storage_object.cid,
chunk_id, chunk_id,
shell, shell,
@ -112,14 +113,15 @@ def get_complex_object_split_ranges(
return ranges return ranges
@reporter.step("Get Link Object") @reporter.step_deco("Get Link Object")
def get_link_object( def get_link_object(
wallet: WalletInfo, wallet: str,
cid: str, cid: str,
oid: str, oid: str,
shell: Shell, shell: Shell,
nodes: list[StorageNode], nodes: list[StorageNode],
bearer: str = "", bearer: str = "",
wallet_config: str = DEFAULT_WALLET_CONFIG,
is_direct: bool = True, is_direct: bool = True,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
): ):
@ -153,6 +155,7 @@ def get_link_object(
is_raw=True, is_raw=True,
is_direct=is_direct, is_direct=is_direct,
bearer=bearer, bearer=bearer,
wallet_config=wallet_config,
timeout=timeout, timeout=timeout,
) )
if resp["link"]: if resp["link"]:
@ -163,9 +166,9 @@ def get_link_object(
return None return None
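A sketch of resolving a complex object's link object under the same assumptions; the helper heads the object directly on each node and returns the first reported link object ID, or None:

from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode

link_oid = get_link_object(
    storage_object.wallet,        # wallet_file_path in the right-hand variant
    storage_object.cid,
    storage_object.oid,
    shell,
    nodes=cluster.services(StorageNode),
)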
@reporter.step("Get Last Object") @reporter.step_deco("Get Last Object")
def get_last_object( def get_last_object(
wallet: WalletInfo, wallet: str,
cid: str, cid: str,
oid: str, oid: str,
shell: Shell, shell: Shell,

View file

@ -2,9 +2,15 @@ import logging
from time import sleep from time import sleep
from typing import Optional from typing import Optional
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsAdm, FrostfsCli, NeoGo from frostfs_testlib.cli import FrostfsAdm, FrostfsCli, NeoGo
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import (
CLI_DEFAULT_TIMEOUT,
FROSTFS_ADM_CONFIG_PATH,
FROSTFS_ADM_EXEC,
FROSTFS_CLI_EXEC,
NEOGO_EXECUTABLE,
)
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.payment_neogo import get_contract_hash from frostfs_testlib.steps.payment_neogo import get_contract_hash
@ -13,10 +19,11 @@ from frostfs_testlib.storage.dataclasses.frostfs_services import InnerRing, Morp
from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils import datetime_utils, wallet_utils from frostfs_testlib.utils import datetime_utils, wallet_utils
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@reporter.step("Get epochs from nodes") @reporter.step_deco("Get epochs from nodes")
def get_epochs_from_nodes(shell: Shell, cluster: Cluster) -> dict[str, int]: def get_epochs_from_nodes(shell: Shell, cluster: Cluster) -> dict[str, int]:
""" """
Get current epochs on each node. Get current epochs on each node.
@ -34,8 +41,10 @@ def get_epochs_from_nodes(shell: Shell, cluster: Cluster) -> dict[str, int]:
return epochs_by_node return epochs_by_node
@reporter.step("Ensure fresh epoch") @reporter.step_deco("Ensure fresh epoch")
def ensure_fresh_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None) -> int: def ensure_fresh_epoch(
shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None
) -> int:
# ensure new fresh epoch to avoid epoch switch during test session # ensure new fresh epoch to avoid epoch switch during test session
alive_node = alive_node if alive_node else cluster.services(StorageNode)[0] alive_node = alive_node if alive_node else cluster.services(StorageNode)[0]
current_epoch = get_epoch(shell, cluster, alive_node) current_epoch = get_epoch(shell, cluster, alive_node)
@ -45,7 +54,7 @@ def ensure_fresh_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[Stor
return epoch return epoch
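A sketch of the typical pairing in tests (fixtures assumed):

epoch = ensure_fresh_epoch(shell, cluster)   # tick once so the test starts on a fresh epoch
wait_for_epochs_align(shell, cluster)        # then wait until every node reports that epoch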
@reporter.step("Wait up to {timeout} seconds for nodes on cluster to align epochs") @reporter.step_deco("Wait up to {timeout} seconds for nodes on cluster to align epochs")
def wait_for_epochs_align(shell: Shell, cluster: Cluster, timeout=60): def wait_for_epochs_align(shell: Shell, cluster: Cluster, timeout=60):
@wait_for_success(timeout, 5, None, True) @wait_for_success(timeout, 5, None, True)
def check_epochs(): def check_epochs():
@ -55,7 +64,7 @@ def wait_for_epochs_align(shell: Shell, cluster: Cluster, timeout=60):
check_epochs() check_epochs()
@reporter.step("Get Epoch") @reporter.step_deco("Get Epoch")
def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None): def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None):
alive_node = alive_node if alive_node else cluster.services(StorageNode)[0] alive_node = alive_node if alive_node else cluster.services(StorageNode)[0]
endpoint = alive_node.get_rpc_endpoint() endpoint = alive_node.get_rpc_endpoint()
@ -68,7 +77,7 @@ def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode]
return int(epoch.stdout) return int(epoch.stdout)
@reporter.step("Tick Epoch") @reporter.step_deco("Tick Epoch")
def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None): def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None):
""" """
Tick epoch using frostfs-adm or NeoGo if frostfs-adm is not available (DevEnv) Tick epoch using frostfs-adm or NeoGo if frostfs-adm is not available (DevEnv)
@ -81,7 +90,7 @@ def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode]
alive_node = alive_node if alive_node else cluster.services(StorageNode)[0] alive_node = alive_node if alive_node else cluster.services(StorageNode)[0]
remote_shell = alive_node.host.get_shell() remote_shell = alive_node.host.get_shell()
if "force_transactions" not in alive_node.host.config.attributes: if FROSTFS_ADM_EXEC and FROSTFS_ADM_CONFIG_PATH:
# If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests) # If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests)
frostfs_adm = FrostfsAdm( frostfs_adm = FrostfsAdm(
shell=remote_shell, shell=remote_shell,

View file

@ -10,29 +10,31 @@ from urllib.parse import quote_plus
import requests import requests
from frostfs_testlib import reporter from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.cli import GenericCli
from frostfs_testlib.resources.common import SIMPLE_OBJECT_SIZE from frostfs_testlib.resources.common import SIMPLE_OBJECT_SIZE
from frostfs_testlib.s3.aws_cli_client import command_options from frostfs_testlib.s3.aws_cli_client import command_options
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.shell.local_shell import LocalShell from frostfs_testlib.shell.local_shell import LocalShell
from frostfs_testlib.steps.cli.object import get_object from frostfs_testlib.steps.cli.object import get_object
from frostfs_testlib.steps.storage_policy import get_nodes_without_object from frostfs_testlib.steps.storage_policy import get_nodes_without_object
from frostfs_testlib.storage.cluster import ClusterNode, StorageNode from frostfs_testlib.storage.cluster import StorageNode
from frostfs_testlib.testing.test_control import retry from frostfs_testlib.testing.test_control import retry
from frostfs_testlib.utils.file_utils import get_file_hash from frostfs_testlib.utils.file_utils import get_file_hash
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/") ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/")
local_shell = LocalShell() local_shell = LocalShell()
@reporter.step("Get via HTTP Gate") @reporter.step_deco("Get via HTTP Gate")
def get_via_http_gate( def get_via_http_gate(
cid: str, cid: str,
oid: str, oid: str,
node: ClusterNode, endpoint: str,
http_hostname: str,
request_path: Optional[str] = None, request_path: Optional[str] = None,
timeout: Optional[int] = 300, timeout: Optional[int] = 300,
): ):
@ -40,18 +42,19 @@ def get_via_http_gate(
This function gets the given object from the HTTP gate This function gets the given object from the HTTP gate
cid: container id to get object from cid: container id to get object from
oid: object ID oid: object ID
node: node to make request endpoint: http gate endpoint
http_hostname: http host name on the node
request_path: (optional) http request, if omitted - use default [{endpoint}/get/{cid}/{oid}] request_path: (optional) http request, if omitted - use default [{endpoint}/get/{cid}/{oid}]
""" """
# if `request_path` parameter omitted, use default # if `request_path` parameter omitted, use default
if request_path is None: if request_path is None:
request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}" request = f"{endpoint}/get/{cid}/{oid}"
else: else:
request = f"{node.http_gate.get_endpoint()}{request_path}" request = f"{endpoint}{request_path}"
resp = requests.get( resp = requests.get(
request, headers={"Host": node.storage_node.get_http_hostname()[0]}, stream=True, timeout=timeout, verify=False request, headers={"Host": http_hostname}, stream=True, timeout=timeout, verify=False
) )
if not resp.ok: if not resp.ok:
@ -72,15 +75,18 @@ def get_via_http_gate(
return file_path return file_path
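A sketch of the right-hand (endpoint/http_hostname) variant with illustrative values; the hostname is what ends up in the HTTP Host header:

from frostfs_testlib.utils.file_utils import get_file_hash

downloaded = get_via_http_gate(
    cid=cid,
    oid=oid,
    endpoint="http://s01.frostfs.devenv:8081",  # illustrative gate endpoint
    http_hostname="s01.frostfs.devenv",         # sent as the Host header
)
assert get_file_hash(downloaded) == get_file_hash(original_file)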
@reporter.step("Get via Zip HTTP Gate") @reporter.step_deco("Get via Zip HTTP Gate")
def get_via_zip_http_gate(cid: str, prefix: str, node: ClusterNode, timeout: Optional[int] = 300): def get_via_zip_http_gate(
cid: str, prefix: str, endpoint: str, http_hostname: str, timeout: Optional[int] = 300
):
""" """
This function downloads a zip archive of objects with the given prefix from the HTTP gate This function downloads a zip archive of objects with the given prefix from the HTTP gate
cid: container id to get object from cid: container id to get object from
prefix: common prefix prefix: common prefix
node: node to make request endpoint: http gate endpoint
http_hostname: http host name on the node
""" """
request = f"{node.http_gate.get_endpoint()}/zip/{cid}/{prefix}" request = f"{endpoint}/zip/{cid}/{prefix}"
resp = requests.get(request, stream=True, timeout=timeout, verify=False) resp = requests.get(request, stream=True, timeout=timeout, verify=False)
if not resp.ok: if not resp.ok:
@ -105,11 +111,12 @@ def get_via_zip_http_gate(cid: str, prefix: str, node: ClusterNode, timeout: Opt
return os.path.join(os.getcwd(), ASSETS_DIR, prefix) return os.path.join(os.getcwd(), ASSETS_DIR, prefix)
@reporter.step("Get via HTTP Gate by attribute") @reporter.step_deco("Get via HTTP Gate by attribute")
def get_via_http_gate_by_attribute( def get_via_http_gate_by_attribute(
cid: str, cid: str,
attribute: dict, attribute: dict,
node: ClusterNode, endpoint: str,
http_hostname: str,
request_path: Optional[str] = None, request_path: Optional[str] = None,
timeout: Optional[int] = 300, timeout: Optional[int] = 300,
): ):
@ -125,12 +132,12 @@ def get_via_http_gate_by_attribute(
attr_value = quote_plus(str(attribute.get(attr_name))) attr_value = quote_plus(str(attribute.get(attr_name)))
# if `request_path` parameter omitted, use default # if `request_path` parameter omitted, use default
if request_path is None: if request_path is None:
request = f"{node.http_gate.get_endpoint()}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}" request = f"{endpoint}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}"
else: else:
request = f"{node.http_gate.get_endpoint()}{request_path}" request = f"{endpoint}{request_path}"
resp = requests.get( resp = requests.get(
request, stream=True, timeout=timeout, verify=False, headers={"Host": node.storage_node.get_http_hostname()[0]} request, stream=True, timeout=timeout, verify=False, headers={"Host": http_hostname}
) )
if not resp.ok: if not resp.ok:
@ -152,7 +159,7 @@ def get_via_http_gate_by_attribute(
# TODO: pass http_hostname as a header # TODO: pass http_hostname as a header
@reporter.step("Upload via HTTP Gate") @reporter.step_deco("Upload via HTTP Gate")
def upload_via_http_gate( def upload_via_http_gate(
cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300 cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300
) -> str: ) -> str:
@ -166,7 +173,9 @@ def upload_via_http_gate(
request = f"{endpoint}/upload/{cid}" request = f"{endpoint}/upload/{cid}"
files = {"upload_file": open(path, "rb")} files = {"upload_file": open(path, "rb")}
body = {"filename": path} body = {"filename": path}
resp = requests.post(request, files=files, data=body, headers=headers, timeout=timeout, verify=False) resp = requests.post(
request, files=files, data=body, headers=headers, timeout=timeout, verify=False
)
if not resp.ok: if not resp.ok:
raise Exception( raise Exception(
@ -184,7 +193,7 @@ def upload_via_http_gate(
return resp.json().get("object_id") return resp.json().get("object_id")
@reporter.step("Check is the passed object large") @reporter.step_deco("Check is the passed object large")
def is_object_large(filepath: str) -> bool: def is_object_large(filepath: str) -> bool:
""" """
This function checks the passed file size and returns True if file_size > SIMPLE_OBJECT_SIZE This function checks the passed file size and returns True if file_size > SIMPLE_OBJECT_SIZE
@ -199,7 +208,7 @@ def is_object_large(filepath: str) -> bool:
# TODO: pass http_hostname as a header # TODO: pass http_hostname as a header
@reporter.step("Upload via HTTP Gate using Curl") @reporter.step_deco("Upload via HTTP Gate using Curl")
def upload_via_http_gate_curl( def upload_via_http_gate_curl(
cid: str, cid: str,
filepath: str, filepath: str,
@ -247,19 +256,20 @@ def upload_via_http_gate_curl(
@retry(max_attempts=3, sleep_interval=1) @retry(max_attempts=3, sleep_interval=1)
@reporter.step("Get via HTTP Gate using Curl") @reporter.step_deco("Get via HTTP Gate using Curl")
def get_via_http_curl(cid: str, oid: str, node: ClusterNode) -> str: def get_via_http_curl(cid: str, oid: str, endpoint: str, http_hostname: str) -> str:
""" """
This function gets the given object from the HTTP gate using the curl utility. This function gets the given object from the HTTP gate using the curl utility.
cid: CID to get object from cid: CID to get object from
oid: object OID oid: object OID
node: node for request endpoint: http gate endpoint
http_hostname: http host name of the node
""" """
request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}" request = f"{endpoint}/get/{cid}/{oid}"
file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}") file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}")
curl = GenericCli("curl", node.host) cmd = f'curl -k -H "Host: {http_hostname}" {request} > {file_path}'
curl(f'-k -H "Host: {node.storage_node.get_http_hostname()[0]}"', f"{request} > {file_path}", shell=local_shell) local_shell.exec(cmd)
return file_path return file_path
@ -270,31 +280,37 @@ def _attach_allure_step(request: str, status_code: int, req_type="GET"):
reporter.attach(command_attachment, f"{req_type} Request") reporter.attach(command_attachment, f"{req_type} Request")
@reporter.step("Try to get object and expect error") @reporter.step_deco("Try to get object and expect error")
def try_to_get_object_and_expect_error( def try_to_get_object_and_expect_error(
cid: str, cid: str,
oid: str, oid: str,
node: ClusterNode,
error_pattern: str, error_pattern: str,
endpoint: str,
http_hostname: str,
) -> None: ) -> None:
try: try:
get_via_http_gate(cid=cid, oid=oid, node=node) get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname)
raise AssertionError(f"Expected error on getting object with cid: {cid}") raise AssertionError(f"Expected error on getting object with cid: {cid}")
except Exception as err: except Exception as err:
match = error_pattern.casefold() in str(err).casefold() match = error_pattern.casefold() in str(err).casefold()
assert match, f"Expected {err} to match {error_pattern}" assert match, f"Expected {err} to match {error_pattern}"
@reporter.step("Verify object can be get using HTTP header attribute") @reporter.step_deco("Verify object can be get using HTTP header attribute")
def get_object_by_attr_and_verify_hashes( def get_object_by_attr_and_verify_hashes(
oid: str, oid: str,
file_name: str, file_name: str,
cid: str, cid: str,
attrs: dict, attrs: dict,
node: ClusterNode, endpoint: str,
http_hostname: str,
) -> None: ) -> None:
got_file_path_http = get_via_http_gate(cid=cid, oid=oid, node=node) got_file_path_http = get_via_http_gate(
got_file_path_http_attr = get_via_http_gate_by_attribute(cid=cid, attribute=attrs, node=node) cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname
)
got_file_path_http_attr = get_via_http_gate_by_attribute(
cid=cid, attribute=attrs, endpoint=endpoint, http_hostname=http_hostname
)
assert_hashes_are_equal(file_name, got_file_path_http, got_file_path_http_attr) assert_hashes_are_equal(file_name, got_file_path_http, got_file_path_http_attr)
@ -305,7 +321,8 @@ def verify_object_hash(
cid: str, cid: str,
shell: Shell, shell: Shell,
nodes: list[StorageNode], nodes: list[StorageNode],
request_node: ClusterNode, endpoint: str,
http_hostname: str,
object_getter=None, object_getter=None,
) -> None: ) -> None:
@ -331,7 +348,9 @@ def verify_object_hash(
shell=shell, shell=shell,
endpoint=random_node.get_rpc_endpoint(), endpoint=random_node.get_rpc_endpoint(),
) )
got_file_path_http = object_getter(cid=cid, oid=oid, node=request_node) got_file_path_http = object_getter(
cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname
)
assert_hashes_are_equal(file_name, got_file_path, got_file_path_http) assert_hashes_are_equal(file_name, got_file_path, got_file_path_http)
@ -340,14 +359,18 @@ def assert_hashes_are_equal(orig_file_name: str, got_file_1: str, got_file_2: st
msg = "Expected hashes are equal for files {f1} and {f2}" msg = "Expected hashes are equal for files {f1} and {f2}"
got_file_hash_http = get_file_hash(got_file_1) got_file_hash_http = get_file_hash(got_file_1)
assert get_file_hash(got_file_2) == got_file_hash_http, msg.format(f1=got_file_2, f2=got_file_1) assert get_file_hash(got_file_2) == got_file_hash_http, msg.format(f1=got_file_2, f2=got_file_1)
assert get_file_hash(orig_file_name) == got_file_hash_http, msg.format(f1=orig_file_name, f2=got_file_1) assert get_file_hash(orig_file_name) == got_file_hash_http, msg.format(
f1=orig_file_name, f2=got_file_1
)
def attr_into_header(attrs: dict) -> dict: def attr_into_header(attrs: dict) -> dict:
return {f"X-Attribute-{_key}": _value for _key, _value in attrs.items()} return {f"X-Attribute-{_key}": _value for _key, _value in attrs.items()}
@reporter.step("Convert each attribute (Key=Value) to the following format: -H 'X-Attribute-Key: Value'") @reporter.step_deco(
"Convert each attribute (Key=Value) to the following format: -H 'X-Attribute-Key: Value'"
)
def attr_into_str_header_curl(attrs: dict) -> list: def attr_into_str_header_curl(attrs: dict) -> list:
headers = [] headers = []
for k, v in attrs.items(): for k, v in attrs.items():
@ -356,13 +379,16 @@ def attr_into_str_header_curl(attrs: dict) -> list:
return headers return headers
@reporter.step("Try to get object via http (pass http_request and optional attributes) and expect error") @reporter.step_deco(
"Try to get object via http (pass http_request and optional attributes) and expect error"
)
def try_to_get_object_via_passed_request_and_expect_error( def try_to_get_object_via_passed_request_and_expect_error(
cid: str, cid: str,
oid: str, oid: str,
node: ClusterNode,
error_pattern: str, error_pattern: str,
endpoint: str,
http_request_path: str, http_request_path: str,
http_hostname: str,
attrs: Optional[dict] = None, attrs: Optional[dict] = None,
) -> None: ) -> None:
try: try:
@ -370,15 +396,17 @@ def try_to_get_object_via_passed_request_and_expect_error(
get_via_http_gate( get_via_http_gate(
cid=cid, cid=cid,
oid=oid, oid=oid,
node=node, endpoint=endpoint,
request_path=http_request_path, request_path=http_request_path,
http_hostname=http_hostname,
) )
else: else:
get_via_http_gate_by_attribute( get_via_http_gate_by_attribute(
cid=cid, cid=cid,
attribute=attrs, attribute=attrs,
node=node, endpoint=endpoint,
request_path=http_request_path, request_path=http_request_path,
http_hostname=http_hostname,
) )
raise AssertionError(f"Expected error on getting object with cid: {cid}") raise AssertionError(f"Expected error on getting object with cid: {cid}")
except Exception as err: except Exception as err:

View file

@ -1,19 +1,89 @@
from frostfs_testlib.shell import CommandOptions from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.testing.test_control import retry
reporter = get_reporter()
class IpHelper: class IpTablesHelper:
@staticmethod
def drop_input_traffic_to_port(node: ClusterNode, ports: list[str]) -> None:
shell = node.host.get_shell()
for port in ports:
shell.exec(f"iptables -A INPUT -p tcp --dport {port} -j DROP")
@staticmethod @staticmethod
def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[str]) -> None: def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[str]) -> None:
shell = node.host.get_shell() shell = node.host.get_shell()
for ip in block_ip: for ip in block_ip:
shell.exec(f"ip route add blackhole {ip}") shell.exec(f"iptables -A INPUT -s {ip} -j DROP")
@staticmethod
def restore_input_traffic_to_port(node: ClusterNode) -> None:
shell = node.host.get_shell()
ports = (
shell.exec("iptables -L --numeric | grep DROP | awk '{print $7}'")
.stdout.strip()
.split("\n")
)
if ports[0] == "":
return
for port in ports:
shell.exec(f"iptables -D INPUT -p tcp --dport {port.split(':')[-1]} -j DROP")
@staticmethod @staticmethod
def restore_input_traffic_to_node(node: ClusterNode) -> None: def restore_input_traffic_to_node(node: ClusterNode) -> None:
shell = node.host.get_shell() shell = node.host.get_shell()
unlock_ip = shell.exec("ip route list | grep blackhole", CommandOptions(check=False)) unlock_ip = (
if unlock_ip.return_code != 0: shell.exec("iptables -L --numeric | grep DROP | awk '{print $4}'")
.stdout.strip()
.split("\n")
)
if unlock_ip[0] == "":
return return
for ip in unlock_ip.stdout.strip().split("\n"): for ip in unlock_ip:
shell.exec(f"ip route del blackhole {ip.split(' ')[1]}") shell.exec(f"iptables -D INPUT -s {ip} -j DROP")
# TODO Move class to HOST
class IfUpDownHelper:
@reporter.step_deco("Down {interface} to {node}")
def down_interface(self, node: ClusterNode, interface: str) -> None:
shell = node.host.get_shell()
shell.exec(f"ifdown {interface}")
@reporter.step_deco("Up {interface} to {node}")
def up_interface(self, node: ClusterNode, interface: str) -> None:
shell = node.host.get_shell()
shell.exec(f"ifup {interface}")
@reporter.step_deco("Up all interface to {node}")
def up_all_interface(self, node: ClusterNode) -> None:
shell = node.host.get_shell()
interfaces = list(node.host.config.interfaces.keys())
shell.exec("ifup -av")
for name_interface in interfaces:
self.check_state_up(node, name_interface)
@reporter.step_deco("Down all interface to {node}")
def down_all_interface(self, node: ClusterNode) -> None:
shell = node.host.get_shell()
interfaces = list(node.host.config.interfaces.keys())
shell.exec("ifdown -av")
for name_interface in interfaces:
self.check_state_down(node, name_interface)
@reporter.step_deco("Check {node} to {interface}")
def check_state(self, node: ClusterNode, interface: str) -> str:
shell = node.host.get_shell()
return shell.exec(
f"ip link show {interface} | sed -z 's/.*state \(.*\) mode .*/\\1/'"
).stdout.strip()
@retry(max_attempts=5, sleep_interval=5, expected_result="UP")
def check_state_up(self, node: ClusterNode, interface: str) -> str:
return self.check_state(node=node, interface=interface)
@retry(max_attempts=5, sleep_interval=5, expected_result="DOWN")
def check_state_down(self, node: ClusterNode, interface: str) -> str:
return self.check_state(node=node, interface=interface)
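A sketch contrasting the two traffic-blocking approaches in this hunk (node is a ClusterNode fixture; the address and port are illustrative):

# Left-hand IpHelper: blackhole routes keyed by source IP.
IpHelper.drop_input_traffic_to_node(node, block_ip=["192.168.1.20"])
IpHelper.restore_input_traffic_to_node(node)

# Right-hand IpTablesHelper: iptables DROP rules, also by destination port.
IpTablesHelper.drop_input_traffic_to_port(node, ports=["8080"])
IpTablesHelper.restore_input_traffic_to_port(node)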

View file

@ -6,15 +6,21 @@ from dataclasses import dataclass
from time import sleep from time import sleep
from typing import Optional from typing import Optional
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsAdm, FrostfsCli from frostfs_testlib.cli import FrostfsAdm, FrostfsCli
from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import (
FROSTFS_ADM_CONFIG_PATH,
FROSTFS_ADM_EXEC,
FROSTFS_CLI_EXEC,
)
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.epoch import tick_epoch, wait_for_epochs_align from frostfs_testlib.steps.epoch import tick_epoch, wait_for_epochs_align
from frostfs_testlib.storage.cluster import Cluster, StorageNode from frostfs_testlib.storage.cluster import Cluster, StorageNode
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
from frostfs_testlib.utils import datetime_utils from frostfs_testlib.utils import datetime_utils
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@ -34,7 +40,7 @@ class HealthStatus:
return HealthStatus(network, health) return HealthStatus(network, health)
@reporter.step("Get Locode from random storage node") @reporter.step_deco("Get Locode from random storage node")
def get_locode_from_random_node(cluster: Cluster) -> str: def get_locode_from_random_node(cluster: Cluster) -> str:
node = random.choice(cluster.services(StorageNode)) node = random.choice(cluster.services(StorageNode))
locode = node.get_un_locode() locode = node.get_un_locode()
@ -42,7 +48,7 @@ def get_locode_from_random_node(cluster: Cluster) -> str:
return locode return locode
@reporter.step("Healthcheck for storage node {node}") @reporter.step_deco("Healthcheck for storage node {node}")
def storage_node_healthcheck(node: StorageNode) -> HealthStatus: def storage_node_healthcheck(node: StorageNode) -> HealthStatus:
""" """
The function returns storage node's health status. The function returns storage node's health status.
@ -51,27 +57,12 @@ def storage_node_healthcheck(node: StorageNode) -> HealthStatus:
Returns: Returns:
health status as HealthStatus object. health status as HealthStatus object.
""" """
command = "control healthcheck"
host = node.host output = _run_control_command_with_retries(node, command)
service_config = host.get_service_config(node.name) return HealthStatus.from_stdout(output)
wallet_path = service_config.attributes["wallet_path"]
wallet_password = service_config.attributes["wallet_password"]
control_endpoint = service_config.attributes["control_endpoint"]
shell = host.get_shell()
wallet_config_path = f"/tmp/{node.name}-config.yaml"
wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
cli_config = host.get_cli_config("frostfs-cli")
cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
result = cli.control.healthcheck(control_endpoint)
return HealthStatus.from_stdout(result.stdout)
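A sketch of the healthcheck above; the left-hand version writes a temporary wallet config on the host and runs `frostfs-cli control healthcheck` against the node's control endpoint:

status = storage_node_healthcheck(node)  # node: StorageNode fixture
# HealthStatus wraps the network and health states parsed from the command output.
print(status)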
@reporter.step("Set status for {node}") @reporter.step_deco("Set status for {node}")
def storage_node_set_status(node: StorageNode, status: str, retries: int = 0) -> None: def storage_node_set_status(node: StorageNode, status: str, retries: int = 0) -> None:
""" """
The function sets particular status for given node. The function sets particular status for given node.
@ -80,24 +71,11 @@ def storage_node_set_status(node: StorageNode, status: str, retries: int = 0) ->
status: online or offline. status: online or offline.
retries (optional, int): number of retry attempts if it didn't work from the first time retries (optional, int): number of retry attempts if it didn't work from the first time
""" """
host = node.host command = f"control set-status --status {status}"
service_config = host.get_service_config(node.name) _run_control_command_with_retries(node, command, retries)
wallet_path = service_config.attributes["wallet_path"]
wallet_password = service_config.attributes["wallet_password"]
control_endpoint = service_config.attributes["control_endpoint"]
shell = host.get_shell()
wallet_config_path = f"/tmp/{node.name}-config.yaml"
wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
cli_config = host.get_cli_config("frostfs-cli")
cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
cli.control.set_status(control_endpoint, status)
@reporter.step("Get netmap snapshot") @reporter.step_deco("Get netmap snapshot")
def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str: def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str:
""" """
The function returns string representation of netmap snapshot. The function returns string representation of netmap snapshot.
@ -117,8 +95,8 @@ def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str:
).stdout ).stdout
@reporter.step("Get shard list for {node}") @reporter.step_deco("Get shard list for {node}")
def node_shard_list(node: StorageNode, json: Optional[bool] = None) -> list[str]: def node_shard_list(node: StorageNode) -> list[str]:
""" """
The function returns list of shards for specified storage node. The function returns list of shards for specified storage node.
Args: Args:
@ -126,82 +104,41 @@ def node_shard_list(node: StorageNode, json: Optional[bool] = None) -> list[str]
Returns: Returns:
list of shards. list of shards.
""" """
host = node.host command = "control shards list"
service_config = host.get_service_config(node.name) output = _run_control_command_with_retries(node, command)
wallet_path = service_config.attributes["wallet_path"] return re.findall(r"Shard (.*):", output)
wallet_password = service_config.attributes["wallet_password"]
control_endpoint = service_config.attributes["control_endpoint"]
shell = host.get_shell()
wallet_config_path = f"/tmp/{node.name}-config.yaml"
wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
cli_config = host.get_cli_config("frostfs-cli")
cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
result = cli.shards.list(endpoint=control_endpoint, json_mode=json)
return re.findall(r"Shard (.*):", result.stdout)
@reporter.step("Shard set for {node}") @reporter.step_deco("Shard set for {node}")
def node_shard_set_mode(node: StorageNode, shard: list[str], mode: str) -> None: def node_shard_set_mode(node: StorageNode, shard: str, mode: str) -> str:
""" """
The function sets mode for specified shard. The function sets mode for specified shard.
Args: Args:
node: node on which shard mode should be set. node: node on which shard mode should be set.
""" """
host = node.host command = f"control shards set-mode --id {shard} --mode {mode}"
service_config = host.get_service_config(node.name) return _run_control_command_with_retries(node, command)
wallet_path = service_config.attributes["wallet_path"]
wallet_password = service_config.attributes["wallet_password"]
control_endpoint = service_config.attributes["control_endpoint"]
shell = host.get_shell()
wallet_config_path = f"/tmp/{node.name}-config.yaml"
wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
cli_config = host.get_cli_config("frostfs-cli")
cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
cli.shards.set_mode(endpoint=control_endpoint, mode=mode, id=shard)
@reporter.step("Drop object from {node}") @reporter.step_deco("Drop object from {node}")
def drop_object(node: StorageNode, cid: str, oid: str) -> None: def drop_object(node: StorageNode, cid: str, oid: str) -> str:
""" """
The function drops object from specified node. The function drops object from specified node.
Args: Args:
node: node from which object should be dropped. node_id str: node from which object should be dropped.
""" """
host = node.host command = f"control drop-objects -o {cid}/{oid}"
service_config = host.get_service_config(node.name) return _run_control_command_with_retries(node, command)
wallet_path = service_config.attributes["wallet_path"]
wallet_password = service_config.attributes["wallet_password"]
control_endpoint = service_config.attributes["control_endpoint"]
shell = host.get_shell()
wallet_config_path = f"/tmp/{node.name}-config.yaml"
wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
cli_config = host.get_cli_config("frostfs-cli")
cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
objects = f"{cid}/{oid}"
cli.control.drop_objects(control_endpoint, objects)
@reporter.step("Delete data from host for node {node}") @reporter.step_deco("Delete data from host for node {node}")
def delete_node_data(node: StorageNode) -> None: def delete_node_data(node: StorageNode) -> None:
node.stop_service() node.stop_service()
node.host.delete_storage_node_data(node.name) node.host.delete_storage_node_data(node.name)
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME)) time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
@reporter.step("Exclude node {node_to_exclude} from network map") @reporter.step_deco("Exclude node {node_to_exclude} from network map")
def exclude_node_from_network_map( def exclude_node_from_network_map(
node_to_exclude: StorageNode, node_to_exclude: StorageNode,
alive_node: StorageNode, alive_node: StorageNode,
@ -217,10 +154,12 @@ def exclude_node_from_network_map(
wait_for_epochs_align(shell, cluster) wait_for_epochs_align(shell, cluster)
snapshot = get_netmap_snapshot(node=alive_node, shell=shell) snapshot = get_netmap_snapshot(node=alive_node, shell=shell)
assert node_netmap_key not in snapshot, f"Expected node with key {node_netmap_key} to be absent in network map" assert (
node_netmap_key not in snapshot
), f"Expected node with key {node_netmap_key} to be absent in network map"
@reporter.step("Include node {node_to_include} into network map") @reporter.step_deco("Include node {node_to_include} into network map")
def include_node_to_network_map( def include_node_to_network_map(
node_to_include: StorageNode, node_to_include: StorageNode,
alive_node: StorageNode, alive_node: StorageNode,
@ -239,31 +178,39 @@ def include_node_to_network_map(
check_node_in_map(node_to_include, shell, alive_node) check_node_in_map(node_to_include, shell, alive_node)
@reporter.step("Check node {node} in network map") @reporter.step_deco("Check node {node} in network map")
def check_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None: def check_node_in_map(
node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None
) -> None:
alive_node = alive_node or node alive_node = alive_node or node
node_netmap_key = node.get_wallet_public_key() node_netmap_key = node.get_wallet_public_key()
logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}") logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}")
snapshot = get_netmap_snapshot(alive_node, shell) snapshot = get_netmap_snapshot(alive_node, shell)
assert node_netmap_key in snapshot, f"Expected node with key {node_netmap_key} to be in network map" assert (
node_netmap_key in snapshot
), f"Expected node with key {node_netmap_key} to be in network map"
@reporter.step("Check node {node} NOT in network map") @reporter.step_deco("Check node {node} NOT in network map")
def check_node_not_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None: def check_node_not_in_map(
node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None
) -> None:
alive_node = alive_node or node alive_node = alive_node or node
node_netmap_key = node.get_wallet_public_key() node_netmap_key = node.get_wallet_public_key()
logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}") logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}")
snapshot = get_netmap_snapshot(alive_node, shell) snapshot = get_netmap_snapshot(alive_node, shell)
assert node_netmap_key not in snapshot, f"Expected node with key {node_netmap_key} to be NOT in network map" assert (
node_netmap_key not in snapshot
), f"Expected node with key {node_netmap_key} to be NOT in network map"
@reporter.step("Wait for node {node} is ready") @reporter.step_deco("Wait for node {node} is ready")
def wait_for_node_to_be_ready(node: StorageNode) -> None: def wait_for_node_to_be_ready(node: StorageNode) -> None:
timeout, attempts = 60, 15 timeout, attempts = 30, 6
for _ in range(attempts): for _ in range(attempts):
try: try:
health_check = storage_node_healthcheck(node) health_check = storage_node_healthcheck(node)
@ -272,10 +219,12 @@ def wait_for_node_to_be_ready(node: StorageNode) -> None:
except Exception as err: except Exception as err:
logger.warning(f"Node {node} is not ready:\n{err}") logger.warning(f"Node {node} is not ready:\n{err}")
sleep(timeout) sleep(timeout)
raise AssertionError(f"Node {node} hasn't gone to the READY state after {timeout * attempts} seconds") raise AssertionError(
f"Node {node} hasn't gone to the READY state after {timeout * attempts} seconds"
)
@reporter.step("Remove nodes from network map trough cli-adm morph command") @reporter.step_deco("Remove nodes from network map trough cli-adm morph command")
def remove_nodes_from_map_morph( def remove_nodes_from_map_morph(
shell: Shell, shell: Shell,
cluster: Cluster, cluster: Cluster,
@ -306,3 +255,38 @@ def remove_nodes_from_map_morph(
config_file=FROSTFS_ADM_CONFIG_PATH, config_file=FROSTFS_ADM_CONFIG_PATH,
) )
frostfsadm.morph.remove_nodes(node_netmap_keys) frostfsadm.morph.remove_nodes(node_netmap_keys)
def _run_control_command_with_retries(node: StorageNode, command: str, retries: int = 0) -> str:
for attempt in range(1 + retries): # original attempt + specified retries
try:
return _run_control_command(node, command)
except AssertionError as err:
if attempt < retries:
logger.warning(f"Command {command} failed with error {err} and will be retried")
continue
raise AssertionError(f"Command {command} failed with error {err}") from err
def _run_control_command(node: StorageNode, command: str) -> None:
host = node.host
service_config = host.get_service_config(node.name)
wallet_path = service_config.attributes["wallet_path"]
wallet_password = service_config.attributes["wallet_password"]
control_endpoint = service_config.attributes["control_endpoint"]
shell = host.get_shell()
wallet_config_path = f"/tmp/{node.name}-config.yaml"
wallet_config = f'password: "{wallet_password}"'
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
cli_config = host.get_cli_config("frostfs-cli")
# TODO: implement cli.control
# cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
result = shell.exec(
f"{cli_config.exec_path} {command} --endpoint {control_endpoint} "
f"--wallet {wallet_path} --config {wallet_config_path}"
)
return result.stdout
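A sketch of the right-hand retry wrapper: one original attempt plus `retries` retries, re-raising the last AssertionError if every attempt fails:

# For example, list shards with up to two retries on failure:
output = _run_control_command_with_retries(node, "control shards list", retries=2)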

View file

@ -8,21 +8,21 @@ from typing import Optional
from neo3.wallet import utils as neo3_utils from neo3.wallet import utils as neo3_utils
from neo3.wallet import wallet as neo3_wallet from neo3.wallet import wallet as neo3_wallet
from frostfs_testlib import reporter
from frostfs_testlib.cli import NeoGo from frostfs_testlib.cli import NeoGo
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import NEOGO_EXECUTABLE from frostfs_testlib.resources.cli import NEOGO_EXECUTABLE
from frostfs_testlib.resources.common import FROSTFS_CONTRACT, GAS_HASH, MORPH_BLOCK_TIME from frostfs_testlib.resources.common import FROSTFS_CONTRACT, GAS_HASH, MORPH_BLOCK_TIME
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.dataclasses.frostfs_services import MorphChain from frostfs_testlib.storage.dataclasses.frostfs_services import MorphChain
from frostfs_testlib.utils import converting_utils, datetime_utils, wallet_utils from frostfs_testlib.utils import converting_utils, datetime_utils, wallet_utils
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
EMPTY_PASSWORD = "" EMPTY_PASSWORD = ""
TX_PERSIST_TIMEOUT = 15 # seconds TX_PERSIST_TIMEOUT = 15 # seconds
ASSET_POWER_SIDECHAIN = 10**12 ASSET_POWER_SIDECHAIN = 10**12
def get_nns_contract_hash(morph_chain: MorphChain) -> str: def get_nns_contract_hash(morph_chain: MorphChain) -> str:
return morph_chain.rpc_client.get_contract_state(1)["hash"] return morph_chain.rpc_client.get_contract_state(1)["hash"]
@ -39,7 +39,6 @@ def get_contract_hash(morph_chain: MorphChain, resolve_name: str, shell: Shell)
stack_data = json.loads(out.stdout.replace("\n", ""))["stack"][0]["value"] stack_data = json.loads(out.stdout.replace("\n", ""))["stack"][0]["value"]
return bytes.decode(base64.b64decode(stack_data[0]["value"])) return bytes.decode(base64.b64decode(stack_data[0]["value"]))
def transaction_accepted(morph_chain: MorphChain, tx_id: str): def transaction_accepted(morph_chain: MorphChain, tx_id: str):
""" """
This function returns True in case of accepted TX. This function returns True in case of accepted TX.
@ -63,7 +62,7 @@ def transaction_accepted(morph_chain: MorphChain, tx_id: str):
return False return False
@reporter.step("Get FrostFS Balance") @reporter.step_deco("Get FrostFS Balance")
def get_balance(shell: Shell, morph_chain: MorphChain, wallet_path: str, wallet_password: str = ""): def get_balance(shell: Shell, morph_chain: MorphChain, wallet_path: str, wallet_password: str = ""):
""" """
This function returns FrostFS balance for given wallet. This function returns FrostFS balance for given wallet.
@ -83,8 +82,7 @@ def get_balance(shell: Shell, morph_chain: MorphChain, wallet_path: str, wallet_
logger.error(f"failed to get wallet balance: {out}") logger.error(f"failed to get wallet balance: {out}")
raise out raise out
@reporter.step_deco("Transfer Gas")
@reporter.step("Transfer Gas")
def transfer_gas( def transfer_gas(
shell: Shell, shell: Shell,
amount: int, amount: int,
@ -113,10 +111,16 @@ def transfer_gas(
""" """
wallet_from_path = wallet_from_path or morph_chain.get_wallet_path() wallet_from_path = wallet_from_path or morph_chain.get_wallet_path()
wallet_from_password = ( wallet_from_password = (
wallet_from_password if wallet_from_password is not None else morph_chain.get_wallet_password() wallet_from_password
if wallet_from_password is not None
else morph_chain.get_wallet_password()
)
address_from = address_from or wallet_utils.get_last_address_from_wallet(
wallet_from_path, wallet_from_password
)
address_to = address_to or wallet_utils.get_last_address_from_wallet(
wallet_to_path, wallet_to_password
) )
address_from = address_from or wallet_utils.get_last_address_from_wallet(wallet_from_path, wallet_from_password)
address_to = address_to or wallet_utils.get_last_address_from_wallet(wallet_to_path, wallet_to_password)
neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE) neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE)
out = neogo.nep17.transfer( out = neogo.nep17.transfer(
@ -137,7 +141,7 @@ def transfer_gas(
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME)) time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
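A sketch of funding a test wallet from the morph chain's default wallet (fixtures assumed; the amount is illustrative):

transfer_gas(
    shell=shell,
    amount=1000,
    morph_chain=morph_chain,          # MorphChain service fixture
    wallet_to_path=test_wallet_path,  # recipient wallet
    wallet_to_password="",
    # sender wallet and addresses default to the morph chain's own wallet
)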
@reporter.step("Get Sidechain Balance") @reporter.step_deco("Get Sidechain Balance")
def get_sidechain_balance(morph_chain: MorphChain, address: str): def get_sidechain_balance(morph_chain: MorphChain, address: str):
resp = morph_chain.rpc_client.get_nep17_balances(address=address) resp = morph_chain.rpc_client.get_nep17_balances(address=address)
logger.info(f"Got getnep17balances response: {resp}") logger.info(f"Got getnep17balances response: {resp}")

View file

@ -1,21 +1,34 @@
import json
import logging import logging
import os import os
import re
import uuid
from datetime import datetime, timedelta from datetime import datetime, timedelta
from typing import Optional from typing import Optional
from dateutil.parser import parse from dateutil.parser import parse
from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsAuthmate
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC
from frostfs_testlib.resources.common import CREDENTIALS_CREATE_TIMEOUT
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import CommandOptions, InteractiveInput, Shell
from frostfs_testlib.steps.cli.container import search_container_by_name, search_nodes_with_container from frostfs_testlib.shell.interfaces import SshCredentials
from frostfs_testlib.steps.cli.container import (
search_container_by_name,
search_nodes_with_container,
)
from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.utils.cli_utils import _run_with_passwd
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@reporter.step("Expected all objects are presented in the bucket") @reporter.step_deco("Expected all objects are presented in the bucket")
def check_objects_in_bucket( def check_objects_in_bucket(
s3_client: S3ClientWrapper, s3_client: S3ClientWrapper,
bucket: str, bucket: str,
@ -24,25 +37,35 @@ def check_objects_in_bucket(
) -> None: ) -> None:
unexpected_objects = unexpected_objects or [] unexpected_objects = unexpected_objects or []
bucket_objects = s3_client.list_objects(bucket) bucket_objects = s3_client.list_objects(bucket)
assert len(bucket_objects) == len(expected_objects), f"Expected {len(expected_objects)} objects in the bucket" assert len(bucket_objects) == len(
expected_objects
), f"Expected {len(expected_objects)} objects in the bucket"
for bucket_object in expected_objects: for bucket_object in expected_objects:
assert bucket_object in bucket_objects, f"Expected object {bucket_object} in objects list {bucket_objects}" assert (
bucket_object in bucket_objects
), f"Expected object {bucket_object} in objects list {bucket_objects}"
for bucket_object in unexpected_objects: for bucket_object in unexpected_objects:
assert bucket_object not in bucket_objects, f"Expected object {bucket_object} not in objects list {bucket_objects}" assert (
bucket_object not in bucket_objects
), f"Expected object {bucket_object} not in objects list {bucket_objects}"
@reporter.step("Try to get object and got error") @reporter.step_deco("Try to get object and got error")
def try_to_get_objects_and_expect_error(s3_client: S3ClientWrapper, bucket: str, object_keys: list) -> None: def try_to_get_objects_and_expect_error(
s3_client: S3ClientWrapper, bucket: str, object_keys: list
) -> None:
for obj in object_keys: for obj in object_keys:
try: try:
s3_client.get_object(bucket, obj) s3_client.get_object(bucket, obj)
raise AssertionError(f"Object {obj} found in bucket {bucket}") raise AssertionError(f"Object {obj} found in bucket {bucket}")
except Exception as err: except Exception as err:
assert "The specified key does not exist" in str(err), f"Expected error in exception {err}" assert "The specified key does not exist" in str(
err
), f"Expected error in exception {err}"
@reporter.step("Set versioning status to '{status}' for bucket '{bucket}'") @reporter.step_deco("Set versioning status to '{status}' for bucket '{bucket}'")
def set_bucket_versioning(s3_client: S3ClientWrapper, bucket: str, status: VersioningStatus): def set_bucket_versioning(s3_client: S3ClientWrapper, bucket: str, status: VersioningStatus):
if status == VersioningStatus.UNDEFINED: if status == VersioningStatus.UNDEFINED:
return return
@ -57,9 +80,15 @@ def object_key_from_file_path(full_path: str) -> str:
return os.path.basename(full_path) return os.path.basename(full_path)
def assert_tags(actual_tags: list, expected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None) -> None: def assert_tags(
expected_tags = [{"Key": key, "Value": value} for key, value in expected_tags] if expected_tags else [] actual_tags: list, expected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None
unexpected_tags = [{"Key": key, "Value": value} for key, value in unexpected_tags] if unexpected_tags else [] ) -> None:
expected_tags = (
[{"Key": key, "Value": value} for key, value in expected_tags] if expected_tags else []
)
unexpected_tags = (
[{"Key": key, "Value": value} for key, value in unexpected_tags] if unexpected_tags else []
)
if expected_tags == []: if expected_tags == []:
assert not actual_tags, f"Expected there is no tags, got {actual_tags}" assert not actual_tags, f"Expected there is no tags, got {actual_tags}"
assert len(expected_tags) == len(actual_tags) assert len(expected_tags) == len(actual_tags)
@ -69,7 +98,7 @@ def assert_tags(actual_tags: list, expected_tags: Optional[list] = None, unexpec
assert tag not in actual_tags, f"Tag {tag} should not be in {actual_tags}" assert tag not in actual_tags, f"Tag {tag} should not be in {actual_tags}"
@reporter.step("Expected all tags are presented in object") @reporter.step_deco("Expected all tags are presented in object")
def check_tags_by_object( def check_tags_by_object(
s3_client: S3ClientWrapper, s3_client: S3ClientWrapper,
bucket: str, bucket: str,
@ -78,10 +107,12 @@ def check_tags_by_object(
unexpected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None,
) -> None: ) -> None:
actual_tags = s3_client.get_object_tagging(bucket, key) actual_tags = s3_client.get_object_tagging(bucket, key)
assert_tags(expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags) assert_tags(
expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags
)
@reporter.step("Expected all tags are presented in bucket") @reporter.step_deco("Expected all tags are presented in bucket")
def check_tags_by_bucket( def check_tags_by_bucket(
s3_client: S3ClientWrapper, s3_client: S3ClientWrapper,
bucket: str, bucket: str,
@ -89,7 +120,9 @@ def check_tags_by_bucket(
unexpected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None,
) -> None: ) -> None:
actual_tags = s3_client.get_bucket_tagging(bucket) actual_tags = s3_client.get_bucket_tagging(bucket)
assert_tags(expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags) assert_tags(
expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags
)
def assert_object_lock_mode( def assert_object_lock_mode(
@ -102,19 +135,25 @@ def assert_object_lock_mode(
retain_period: Optional[int] = None, retain_period: Optional[int] = None,
): ):
object_dict = s3_client.get_object(bucket, file_name, full_output=True) object_dict = s3_client.get_object(bucket, file_name, full_output=True)
assert object_dict.get("ObjectLockMode") == object_lock_mode, f"Expected Object Lock Mode is {object_lock_mode}" assert (
object_dict.get("ObjectLockMode") == object_lock_mode
), f"Expected Object Lock Mode is {object_lock_mode}"
assert ( assert (
object_dict.get("ObjectLockLegalHoldStatus") == legal_hold_status object_dict.get("ObjectLockLegalHoldStatus") == legal_hold_status
), f"Expected Object Lock Legal Hold Status is {legal_hold_status}" ), f"Expected Object Lock Legal Hold Status is {legal_hold_status}"
object_retain_date = object_dict.get("ObjectLockRetainUntilDate") object_retain_date = object_dict.get("ObjectLockRetainUntilDate")
retain_date = parse(object_retain_date) if isinstance(object_retain_date, str) else object_retain_date retain_date = (
parse(object_retain_date) if isinstance(object_retain_date, str) else object_retain_date
)
if retain_until_date: if retain_until_date:
assert retain_date.strftime("%Y-%m-%dT%H:%M:%S") == retain_until_date.strftime( assert retain_date.strftime("%Y-%m-%dT%H:%M:%S") == retain_until_date.strftime(
"%Y-%m-%dT%H:%M:%S" "%Y-%m-%dT%H:%M:%S"
), f'Expected Object Lock Retain Until Date is {str(retain_until_date.strftime("%Y-%m-%dT%H:%M:%S"))}' ), f'Expected Object Lock Retain Until Date is {str(retain_until_date.strftime("%Y-%m-%dT%H:%M:%S"))}'
elif retain_period: elif retain_period:
last_modify_date = object_dict.get("LastModified") last_modify_date = object_dict.get("LastModified")
last_modify = parse(last_modify_date) if isinstance(last_modify_date, str) else last_modify_date last_modify = (
parse(last_modify_date) if isinstance(last_modify_date, str) else last_modify_date
)
assert ( assert (
retain_date - last_modify + timedelta(seconds=1) retain_date - last_modify + timedelta(seconds=1)
).days == retain_period, f"Expected retention period is {retain_period} days" ).days == retain_period, f"Expected retention period is {retain_period} days"
@ -148,7 +187,50 @@ def assert_s3_acl(acl_grants: list, permitted_users: str):
logger.error("FULL_CONTROL is given to All Users") logger.error("FULL_CONTROL is given to All Users")
@reporter.step("Delete bucket with all objects") @reporter.step_deco("Init S3 Credentials")
def init_s3_credentials(
wallet: WalletInfo,
shell: Shell,
cluster: Cluster,
policy: Optional[dict] = None,
s3gates: Optional[list[S3Gate]] = None,
container_placement_policy: Optional[str] = None,
):
gate_public_keys = []
bucket = str(uuid.uuid4())
if not s3gates:
s3gates = [cluster.s3_gates[0]]
for s3gate in s3gates:
gate_public_keys.append(s3gate.get_wallet_public_key())
frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC)
issue_secret_output = frostfs_authmate_exec.secret.issue(
wallet=wallet.path,
peer=cluster.default_rpc_endpoint,
gate_public_key=gate_public_keys,
wallet_password=wallet.password,
container_policy=policy,
container_friendly_name=bucket,
container_placement_policy=container_placement_policy,
).stdout
aws_access_key_id = str(
re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group(
"aws_access_key_id"
)
)
aws_secret_access_key = str(
re.search(
r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output
).group("aws_secret_access_key")
)
cid = str(
re.search(r"container_id.*:\s.(?P<container_id>\w*)", issue_secret_output).group(
"container_id"
)
)
return cid, aws_access_key_id, aws_secret_access_key
@reporter.step_deco("Delete bucket with all objects")
def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str): def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str):
versioning_status = s3_client.get_bucket_versioning_status(bucket) versioning_status = s3_client.get_bucket_versioning_status(bucket)
if versioning_status == VersioningStatus.ENABLED.value: if versioning_status == VersioningStatus.ENABLED.value:
@ -173,18 +255,16 @@ def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str):
s3_client.delete_bucket(bucket) s3_client.delete_bucket(bucket)
@reporter.step("Search nodes bucket") @reporter.step_deco("Search nodes bucket")
def search_nodes_with_bucket( def search_nodes_with_bucket(
cluster: Cluster, cluster: Cluster,
bucket_name: str, bucket_name: str,
wallet: WalletInfo, wallet: str,
shell: Shell, shell: Shell,
endpoint: str, endpoint: str,
) -> list[ClusterNode]: ) -> list[ClusterNode]:
cid = None cid = search_container_by_name(wallet=wallet, name=bucket_name, shell=shell, endpoint=endpoint)
for cluster_node in cluster.cluster_nodes: nodes_list = search_nodes_with_container(
cid = search_container_by_name(name=bucket_name, node=cluster_node) wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster
if cid: )
break
nodes_list = search_nodes_with_container(wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster)
return nodes_list return nodes_list
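The recurring change in this file (and throughout the diff below) swaps the reporter API. The newer code on the left imports the reporter singleton directly and uses step as both decorator and context manager; the restored code on the right fetches it via get_reporter() and decorates with step_deco. A minimal sketch of the two styles; the function and step names here are hypothetical:

    # Left (newer) style:
    from frostfs_testlib import reporter

    @reporter.step("Upload file {file_name}")
    def upload(file_name: str) -> None:
        with reporter.step("Calculate hash"):
            ...

    # Right (restored) style:
    from frostfs_testlib.reporter import get_reporter

    reporter = get_reporter()

    @reporter.step_deco("Upload file {file_name}")
    def upload(file_name: str) -> None:
        with reporter.step("Calculate hash"):
            ...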

View file

@ -4,18 +4,20 @@ import logging
 import os
 import uuid
 from dataclasses import dataclass
+from enum import Enum
 from typing import Any, Optional

-from frostfs_testlib import reporter
 from frostfs_testlib.cli import FrostfsCli
+from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
-from frostfs_testlib.resources.common import ASSETS_DIR
+from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.testing.readable import HumanReadableEnum
 from frostfs_testlib.utils import json_utils, wallet_utils

+reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")

 UNRELATED_KEY = "unrelated key in the session"
@ -48,7 +50,7 @@ class Lifetime:
     iat: int = 0

-@reporter.step("Generate Session Token")
+@reporter.step_deco("Generate Session Token")
 def generate_session_token(
     owner_wallet: WalletInfo,
     session_wallet: WalletInfo,
@ -70,7 +72,9 @@ def generate_session_token(
     file_path = os.path.join(tokens_dir, str(uuid.uuid4()))

-    pub_key_64 = wallet_utils.get_wallet_public_key(session_wallet.path, session_wallet.password, "base64")
+    pub_key_64 = wallet_utils.get_wallet_public_key(
+        session_wallet.path, session_wallet.password, "base64"
+    )

     lifetime = lifetime or Lifetime()
@ -95,7 +99,7 @@ def generate_session_token(
     return file_path

-@reporter.step("Generate Session Token For Container")
+@reporter.step_deco("Generate Session Token For Container")
 def generate_container_session_token(
     owner_wallet: WalletInfo,
     session_wallet: WalletInfo,
@ -122,7 +126,11 @@ def generate_container_session_token(
         "container": {
             "verb": verb.value,
             "wildcard": cid is None,
-            **({"containerID": {"value": f"{json_utils.encode_for_json(cid)}"}} if cid is not None else {}),
+            **(
+                {"containerID": {"value": f"{json_utils.encode_for_json(cid)}"}}
+                if cid is not None
+                else {}
+            ),
         },
     }
@ -135,7 +143,7 @@ def generate_container_session_token(
     )

-@reporter.step("Generate Session Token For Object")
+@reporter.step_deco("Generate Session Token For Object")
 def generate_object_session_token(
     owner_wallet: WalletInfo,
     session_wallet: WalletInfo,
@ -177,7 +185,7 @@ def generate_object_session_token(
     )

-@reporter.step("Get signed token for container session")
+@reporter.step_deco("Get signed token for container session")
 def get_container_signed_token(
     owner_wallet: WalletInfo,
     user_wallet: WalletInfo,
@ -199,7 +207,7 @@ def get_container_signed_token(
     return sign_session_token(shell, session_token_file, owner_wallet)

-@reporter.step("Get signed token for object session")
+@reporter.step_deco("Get signed token for object session")
 def get_object_signed_token(
     owner_wallet: WalletInfo,
     user_wallet: WalletInfo,
@ -226,11 +234,12 @@ def get_object_signed_token(
     return sign_session_token(shell, session_token_file, owner_wallet)

-@reporter.step("Create Session Token")
+@reporter.step_deco("Create Session Token")
 def create_session_token(
     shell: Shell,
     owner: str,
-    wallet: WalletInfo,
+    wallet_path: str,
+    wallet_password: str,
     rpc_endpoint: str,
 ) -> str:
     """
@ -245,18 +254,19 @@ def create_session_token(
         The path to the generated session token file.
     """
     session_token = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
-    frostfscli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
+    frostfscli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC)
     frostfscli.session.create(
         rpc_endpoint=rpc_endpoint,
         address=owner,
+        wallet=wallet_path,
+        wallet_password=wallet_password,
         out=session_token,
-        wallet=wallet.path,
     )
     return session_token

-@reporter.step("Sign Session Token")
-def sign_session_token(shell: Shell, session_token_file: str, wallet: WalletInfo) -> str:
+@reporter.step_deco("Sign Session Token")
+def sign_session_token(shell: Shell, session_token_file: str, wlt: WalletInfo) -> str:
     """
     This function signs the session token by the given wallet.
@ -269,6 +279,10 @@ def sign_session_token(shell: Shell, session_token_file: str, wallet: WalletInfo
         The path to the signed token.
     """
     signed_token_file = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
-    frostfscli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
-    frostfscli.util.sign_session_token(session_token_file, signed_token_file)
+    frostfscli = FrostfsCli(
+        shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG
+    )
+    frostfscli.util.sign_session_token(
+        wallet=wlt.path, from_file=session_token_file, to_file=signed_token_file
+    )
     return signed_token_file
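Under the restored signatures the two helpers chain as below; the shell, wallets, owner address, and endpoint are hypothetical fixtures, not values from this diff:

    token_file = create_session_token(
        shell=shell,
        owner=owner_address,
        wallet_path=user_wallet.path,
        wallet_password=user_wallet.password,
        rpc_endpoint=rpc_endpoint,
    )
    signed_file = sign_session_token(shell, token_file, owner_wallet)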

View file

@ -3,7 +3,7 @@ from time import sleep

 import pytest

-from frostfs_testlib import reporter
+from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.resources.error_patterns import OBJECT_ALREADY_REMOVED
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.steps.cli.object import delete_object, get_object
@ -12,13 +12,16 @@ from frostfs_testlib.steps.tombstone import verify_head_tombstone
 from frostfs_testlib.storage.cluster import Cluster
 from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo

+reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")

 CLEANUP_TIMEOUT = 10

-@reporter.step("Delete Objects")
-def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell, cluster: Cluster) -> None:
+@reporter.step_deco("Delete Objects")
+def delete_objects(
+    storage_objects: list[StorageObjectInfo], shell: Shell, cluster: Cluster
+) -> None:
     """
     Deletes given storage objects.
@ -30,14 +33,14 @@ def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell, clust
     with reporter.step("Delete objects"):
         for storage_object in storage_objects:
             storage_object.tombstone = delete_object(
-                storage_object.wallet,
+                storage_object.wallet_file_path,
                 storage_object.cid,
                 storage_object.oid,
                 shell=shell,
                 endpoint=cluster.default_rpc_endpoint,
             )
             verify_head_tombstone(
-                wallet=storage_object.wallet,
+                wallet_path=storage_object.wallet_file_path,
                 cid=storage_object.cid,
                 oid_ts=storage_object.tombstone,
                 oid=storage_object.oid,
@ -52,7 +55,7 @@ def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell, clust
         for storage_object in storage_objects:
             with pytest.raises(Exception, match=OBJECT_ALREADY_REMOVED):
                 get_object(
-                    storage_object.wallet,
+                    storage_object.wallet_file_path,
                     storage_object.cid,
                     storage_object.oid,
                     shell=shell,
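A hypothetical call of the helper above; storage_objects would come from an upload fixture. The function deletes each object, verifies the tombstone header, waits out the cleanup timeout, and then asserts that repeated GETs fail with OBJECT_ALREADY_REMOVED:

    delete_objects(storage_objects, shell, cluster)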

View file

@ -6,21 +6,22 @@
 """
 import logging

-from frostfs_testlib import reporter
+from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.steps.cli.object import head_object
 from frostfs_testlib.steps.complex_object_actions import get_last_object
 from frostfs_testlib.storage.cluster import StorageNode
-from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.utils import string_utils

+reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")

-# TODO: Unused, remove or make use of
-@reporter.step("Get Object Copies")
-def get_object_copies(complexity: str, wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int:
+@reporter.step_deco("Get Object Copies")
+def get_object_copies(
+    complexity: str, wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
+) -> int:
     """
     The function performs requests to all nodes of the container and
     finds out if they store a copy of the object. The procedure is
@ -44,8 +45,10 @@ def get_object_copies(
     )

-@reporter.step("Get Simple Object Copies")
-def get_simple_object_copies(wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int:
+@reporter.step_deco("Get Simple Object Copies")
+def get_simple_object_copies(
+    wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
+) -> int:
     """
     To figure out the number of a simple object copies, only direct
     HEAD requests should be made to the every node of the container.
@ -63,7 +66,9 @@ def get_simple_object_copies(
     copies = 0
     for node in nodes:
         try:
-            response = head_object(wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True)
+            response = head_object(
+                wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True
+            )
             if response:
                 logger.info(f"Found object {oid} on node {node}")
                 copies += 1
@ -73,8 +78,10 @@ def get_simple_object_copies(
     return copies

-@reporter.step("Get Complex Object Copies")
-def get_complex_object_copies(wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int:
+@reporter.step_deco("Get Complex Object Copies")
+def get_complex_object_copies(
+    wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
+) -> int:
     """
     To figure out the number of a complex object copies, we firstly
     need to retrieve its Last object. We consider that the number of
@ -95,8 +102,10 @@ def get_complex_object_copies(
     return get_simple_object_copies(wallet, cid, last_oid, shell, nodes)

-@reporter.step("Get Nodes With Object")
-def get_nodes_with_object(cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> list[StorageNode]:
+@reporter.step_deco("Get Nodes With Object")
+def get_nodes_with_object(
+    cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
+) -> list[StorageNode]:
     """
     The function returns list of nodes which store
     the given object.
@ -111,7 +120,8 @@ def get_nodes_with_object(
     nodes_list = []
     for node in nodes:
-        wallet = WalletInfo.from_node(node)
+        wallet = node.get_wallet_path()
+        wallet_config = node.get_wallet_config_path()
         try:
             res = head_object(
                 wallet,
@ -120,6 +130,7 @@ def get_nodes_with_object(
                 shell=shell,
                 endpoint=node.get_rpc_endpoint(),
                 is_direct=True,
+                wallet_config=wallet_config,
             )
             if res is not None:
                 logger.info(f"Found object {oid} on node {node}")
@ -130,8 +141,10 @@ def get_nodes_with_object(
     return nodes_list

-@reporter.step("Get Nodes Without Object")
-def get_nodes_without_object(wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> list[StorageNode]:
+@reporter.step_deco("Get Nodes Without Object")
+def get_nodes_without_object(
+    wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
+) -> list[StorageNode]:
     """
     The function returns list of nodes which do not store
     the given object.
@ -147,7 +160,9 @@ def get_nodes_without_object(
     nodes_list = []
     for node in nodes:
         try:
-            res = head_object(wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True)
+            res = head_object(
+                wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True
+            )
             if res is None:
                 nodes_list.append(node)
         except Exception as err:
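The docstrings above describe replica counting via direct HEAD requests to every node of the container. A hedged usage sketch, assuming a container placed with a REP 2 policy and the usual cluster fixture:

    copies = get_simple_object_copies(wallet, cid, oid, shell, cluster.storage_nodes)
    assert copies == 2, f"Expected 2 replicas, got {copies}"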

View file

@ -1,24 +1,41 @@
+import json
 import logging

-from frostfs_testlib import reporter
+from neo3.wallet import wallet
+
+from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.steps.cli.object import head_object
-from frostfs_testlib.storage.dataclasses.wallet import WalletInfo

+reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")

-@reporter.step("Verify Head Tombstone")
-def verify_head_tombstone(wallet: WalletInfo, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str):
-    header = head_object(wallet, cid, oid_ts, shell=shell, endpoint=endpoint)["header"]
+@reporter.step_deco("Verify Head Tombstone")
+def verify_head_tombstone(
+    wallet_path: str, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str
+):
+    header = head_object(wallet_path, cid, oid_ts, shell=shell, endpoint=endpoint)["header"]

     s_oid = header["sessionToken"]["body"]["object"]["target"]["objects"]
     logger.info(f"Header Session OIDs is {s_oid}")
     logger.info(f"OID is {oid}")

     assert header["containerID"] == cid, "Tombstone Header CID is wrong"
-    assert header["ownerID"] == wallet.get_address_from_json(0), "Tombstone Owner ID is wrong"
+
+    with open(wallet_path, "r") as file:
+        wlt_data = json.loads(file.read())
+    wlt = wallet.Wallet.from_json(wlt_data, password="")
+    addr = wlt.accounts[0].address
+
+    assert header["ownerID"] == addr, "Tombstone Owner ID is wrong"
     assert header["objectType"] == "TOMBSTONE", "Header Type isn't Tombstone"
-    assert header["sessionToken"]["body"]["object"]["verb"] == "DELETE", "Header Session Type isn't DELETE"
-    assert header["sessionToken"]["body"]["object"]["target"]["container"] == cid, "Header Session ID is wrong"
-    assert oid in header["sessionToken"]["body"]["object"]["target"]["objects"], "Header Session OID is wrong"
+    assert (
+        header["sessionToken"]["body"]["object"]["verb"] == "DELETE"
+    ), "Header Session Type isn't DELETE"
+    assert (
+        header["sessionToken"]["body"]["object"]["target"]["container"] == cid
+    ), "Header Session ID is wrong"
+    assert (
+        oid in header["sessionToken"]["body"]["object"]["target"]["objects"]
+    ), "Header Session OID is wrong"

View file

@ -4,17 +4,20 @@ import re

 import yaml
 from yarl import URL

-from frostfs_testlib import reporter
 from frostfs_testlib.hosting import Host, Hosting
 from frostfs_testlib.hosting.config import ServiceConfig
+from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.storage import get_service_registry
 from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml
+from frostfs_testlib.storage.configuration.service_configuration import ServiceConfiguration
 from frostfs_testlib.storage.constants import ConfigAttributes
 from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode
 from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass
 from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces
 from frostfs_testlib.storage.service_registry import ServiceRegistry

+reporter = get_reporter()

 class ClusterNode:
     """
@ -71,7 +74,6 @@ class ClusterNode:
     def s3_gate(self) -> S3Gate:
         return self.service(S3Gate)

-    # TODO: Deprecated. Use config with ServiceConfigurationYml interface
     def get_config(self, config_file_path: str) -> dict:
         shell = self.host.get_shell()
@ -81,7 +83,6 @@ class ClusterNode:
         config = yaml.safe_load(config_text)
         return config

-    # TODO: Deprecated. Use config with ServiceConfigurationYml interface
     def save_config(self, new_config: dict, config_file_path: str) -> None:
         shell = self.host.get_shell()
@ -89,7 +90,7 @@ class ClusterNode:
         shell.exec(f"echo '{config_str}' | sudo tee {config_file_path}")

     def config(self, service_type: type[ServiceClass]) -> ServiceConfigurationYml:
-        return self.service(service_type).config
+        return ServiceConfiguration(self.service(service_type))

     def service(self, service_type: type[ServiceClass]) -> ServiceClass:
         """
@ -106,7 +107,7 @@ class ClusterNode:
         service_entry = self.class_registry.get_entry(service_type)
         service_name = service_entry["hosting_service_name"]

-        pattern = f"{service_name}_{self.id:02}"
+        pattern = f"{service_name}{self.id:02}"
         config = self.host.get_service_config(pattern)

         return service_type(
@ -115,24 +116,8 @@ class ClusterNode:
             self.host,
         )

-    @property
-    def services(self) -> list[NodeBase]:
-        svcs: list[NodeBase] = []
-        svcs_names_on_node = [svc.name for svc in self.host.config.services]
-        for entry in self.class_registry._class_mapping.values():
-            hosting_svc_name = entry["hosting_service_name"]
-            pattern = f"{hosting_svc_name}_{self.id:02}"
-            if pattern in svcs_names_on_node:
-                config = self.host.get_service_config(pattern)
-                svcs.append(
-                    entry["cls"](
-                        self.id,
-                        config.name,
-                        self.host,
-                    )
-                )
-
-        return svcs
+    def get_list_of_services(self) -> list[str]:
+        return [config.attributes[ConfigAttributes.SERVICE_NAME] for config in self.host.config.services]

     def get_all_interfaces(self) -> dict[str, str]:
         return self.host.config.interfaces
@ -268,13 +253,13 @@ class Cluster:
             service_name = service["hosting_service_name"]
             cls: type[NodeBase] = service["cls"]

-            pattern = f"{service_name}_\d*$"
+            pattern = f"{service_name}\d*$"
             configs = self.hosting.find_service_configs(pattern)
             found_nodes = []
             for config in configs:
                 # config.name is something like s3-gate01. Cut last digits to know service type
-                service_type = re.findall("(.*)_\d+", config.name)[0]
+                service_type = re.findall(".*\D", config.name)[0]
                 # exclude unsupported services
                 if service_type != service_name:
                     continue
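The two discovery patterns encode different service-name schemes. A quick check of both regexes against the naming each side expects; the sample names follow the comment in the code above:

    import re

    # Left (newer) scheme: an underscore separates name and node id.
    assert re.findall(r"(.*)_\d+", "frostfs-storage_01")[0] == "frostfs-storage"

    # Right (restored) scheme: digits are appended directly, and ".*\D"
    # greedily matches everything up to the last non-digit character.
    assert re.findall(r".*\D", "s3-gate01")[0] == "s3-gate"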

View file

@ -4,75 +4,54 @@ from typing import Any

 import yaml

-from frostfs_testlib import reporter
-from frostfs_testlib.shell.interfaces import CommandOptions, Shell
+from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib.shell.interfaces import CommandOptions
 from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml
+from frostfs_testlib.storage.dataclasses.node_base import ServiceClass

+reporter = get_reporter()

-def extend_dict(extend_me: dict, extend_by: dict):
-    if isinstance(extend_by, dict):
-        for k, v in extend_by.items():
-            if k in extend_me:
-                extend_dict(extend_me.get(k), v)
-            else:
-                extend_me[k] = v
-    else:
-        extend_me += extend_by

 class ServiceConfiguration(ServiceConfigurationYml):
-    def __init__(self, service_name: str, shell: Shell, config_dir: str, main_config_path: str) -> None:
-        self.service_name = service_name
-        self.shell = shell
-        self.main_config_path = main_config_path
-        self.confd_path = os.path.join(config_dir, "conf.d")
+    def __init__(self, service: "ServiceClass") -> None:
+        self.service = service
+        self.shell = self.service.host.get_shell()
+        self.confd_path = os.path.join(self.service.config_dir, "conf.d")
         self.custom_file = os.path.join(self.confd_path, "99_changes.yml")

     def _path_exists(self, path: str) -> bool:
         return not self.shell.exec(f"test -e {path}", options=CommandOptions(check=False)).return_code

-    def _get_config_files(self):
-        config_files = [self.main_config_path]
-
-        if self._path_exists(self.confd_path):
-            files = self.shell.exec(f"find {self.confd_path} -type f").stdout.strip().split()
-            # Sorting files in backwards order from latest to first one
-            config_files.extend(sorted(files, key=lambda x: -int(re.findall("^\d+", os.path.basename(x))[0])))
-
-        return config_files
-
-    def _get_configuration(self, config_files: list[str]) -> dict:
-        if not config_files:
-            return [{}]
-
-        splitter = "+++++"
-        files_str = " ".join(config_files)
-        all_content = self.shell.exec(
-            f"echo Getting config files; for file in {files_str}; do (echo {splitter}; sudo cat ${{file}}); done"
-        ).stdout
-        files_content = all_content.split("+++++")[1:]
-        files_data = [yaml.safe_load(file_content) for file_content in files_content]
-
-        mergedData = {}
-        for data in files_data:
-            extend_dict(mergedData, data)
-
-        return mergedData
-
-    def get(self, key: str) -> str | Any:
-        with reporter.step(f"Get {key} configuration value for {self.service_name}"):
-            config_files = self._get_config_files()
-            configuration = self._get_configuration(config_files)
-            result = self._find_option(key, configuration)
+    def _get_data_from_file(self, path: str) -> dict:
+        content = self.shell.exec(f"cat {path}").stdout
+        data = yaml.safe_load(content)
+        return data
+
+    def get(self, key: str) -> str:
+        with reporter.step(f"Get {key} configuration value for {self.service}"):
+            config_files = [self.service.main_config_path]
+
+            if self._path_exists(self.confd_path):
+                files = self.shell.exec(f"find {self.confd_path} -type f").stdout.strip().split()
+                # Sorting files in backwards order from latest to first one
+                config_files.extend(sorted(files, key=lambda x: -int(re.findall("^\d+", os.path.basename(x))[0])))
+
+            result = None
+            for file in files:
+                data = self._get_data_from_file(file)
+                result = self._find_option(key, data)
+                if result is not None:
+                    break

             return result

     def set(self, values: dict[str, Any]):
-        with reporter.step(f"Change configuration for {self.service_name}"):
+        with reporter.step(f"Change configuration for {self.service}"):
             if not self._path_exists(self.confd_path):
                 self.shell.exec(f"mkdir {self.confd_path}")

             if self._path_exists(self.custom_file):
-                data = self._get_configuration([self.custom_file])
+                data = self._get_data_from_file(self.custom_file)
             else:
                 data = {}
@ -84,5 +63,5 @@ class ServiceConfiguration(ServiceConfigurationYml):
             self.shell.exec(f"chmod 777 {self.custom_file}")

     def revert(self):
-        with reporter.step(f"Revert changed options for {self.service_name}"):
+        with reporter.step(f"Revert changed options for {self.service}"):
             self.shell.exec(f"rm -rf {self.custom_file}")

View file

@ -6,10 +6,8 @@ class ConfigAttributes:
     CONFIG_DIR = "service_config_dir"
     CONFIG_PATH = "config_path"
     SHARD_CONFIG_PATH = "shard_config_path"
-    LOGGER_CONFIG_PATH = "logger_config_path"
     LOCAL_WALLET_PATH = "local_wallet_path"
-    LOCAL_WALLET_CONFIG = "local_wallet_config_path"
-    REMOTE_WALLET_CONFIG = "remote_wallet_config_path"
+    LOCAL_WALLET_CONFIG = "local_config_path"
     ENDPOINT_DATA_0 = "endpoint_data0"
     ENDPOINT_DATA_1 = "endpoint_data1"
     ENDPOINT_INTERNAL = "endpoint_internal0"
@ -18,3 +16,11 @@ class ConfigAttributes:
     UN_LOCODE = "un_locode"
     HTTP_HOSTNAME = "http_hostname"
     S3_HOSTNAME = "s3_hostname"
+
+
+class _FrostfsServicesNames:
+    STORAGE = "s"
+    S3_GATE = "s3-gate"
+    HTTP_GATE = "http-gate"
+    MORPH_CHAIN = "morph-chain"
+    INNER_RING = "ir"
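These short prefixes line up with the restored f"{service_name}{self.id:02}" pattern in cluster.py above, so the hosting service names presumably look like "s01" or "s3-gate02". This composition is an inference from the pattern, not something stated in the diff:

    assert f"{_FrostfsServicesNames.STORAGE}{1:02}" == "s01"
    assert f"{_FrostfsServicesNames.S3_GATE}{2:02}" == "s3-gate02"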

View file

@ -1,17 +1,19 @@
 import copy
-from datetime import datetime
+from typing import Optional

 import frostfs_testlib.resources.optionals as optionals
-from frostfs_testlib import reporter
 from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner
 from frostfs_testlib.load.load_config import EndpointSelectionStrategy, LoadParams, LoadScenario, LoadType
 from frostfs_testlib.load.load_report import LoadReport
 from frostfs_testlib.load.load_verifiers import LoadVerifier
+from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.storage.cluster import ClusterNode
 from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode
-from frostfs_testlib.testing.parallel import parallel
+from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.testing.test_control import run_optionally

+reporter = get_reporter()

 class BackgroundLoadController:
     k6_dir: str
@ -21,16 +23,17 @@ class BackgroundLoadController:
     cluster_nodes: list[ClusterNode]
     nodes_under_load: list[ClusterNode]
     load_counter: int
+    loaders_wallet: WalletInfo
     load_summaries: dict
     endpoints: list[str]
     runner: ScenarioRunner
     started: bool
-    load_reporters: list[LoadReport]

     def __init__(
         self,
         k6_dir: str,
         load_params: LoadParams,
+        loaders_wallet: WalletInfo,
         cluster_nodes: list[ClusterNode],
         nodes_under_load: list[ClusterNode],
         runner: ScenarioRunner,
@ -41,9 +44,9 @@ class BackgroundLoadController:
         self.cluster_nodes = cluster_nodes
         self.nodes_under_load = nodes_under_load
         self.load_counter = 1
+        self.loaders_wallet = loaders_wallet
         self.runner = runner
         self.started = False
-        self.load_reporters = []
         if load_params.endpoint_selection_strategy is None:
             raise RuntimeError("endpoint_selection_strategy should not be None")
@ -59,7 +62,10 @@ class BackgroundLoadController:
                     )
                 ),
                 EndpointSelectionStrategy.FIRST: list(
-                    set(node_under_load.service(StorageNode).get_rpc_endpoint() for node_under_load in self.nodes_under_load)
+                    set(
+                        node_under_load.service(StorageNode).get_rpc_endpoint()
+                        for node_under_load in self.nodes_under_load
+                    )
                 ),
             },
             # for some reason xk6 appends http protocol on its own
@ -80,19 +86,11 @@ class BackgroundLoadController:
         return all_endpoints[load_type][endpoint_selection_strategy]

     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
-    @reporter.step("Init k6 instances")
-    def init_k6(self):
-        self.endpoints = self._get_endpoints(self.load_params.load_type, self.load_params.endpoint_selection_strategy)
-        self.runner.init_k6_instances(self.load_params, self.endpoints, self.k6_dir)
-
-    @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
-    @reporter.step("Prepare load instances")
+    @reporter.step_deco("Prepare load instances")
     def prepare(self):
+        self.endpoints = self._get_endpoints(self.load_params.load_type, self.load_params.endpoint_selection_strategy)
         self.runner.prepare(self.load_params, self.cluster_nodes, self.nodes_under_load, self.k6_dir)
-        self.init_k6()
-
-    def append_reporter(self, load_report: LoadReport):
-        self.load_reporters.append(load_report)
+        self.runner.init_k6_instances(self.load_params, self.endpoints, self.k6_dir)

     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
     def start(self):
@ -101,7 +99,7 @@ class BackgroundLoadController:
         self.started = True

     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
-    @reporter.step("Stop load")
+    @reporter.step_deco("Stop load")
     def stop(self):
         self.runner.stop()
@ -110,7 +108,7 @@ class BackgroundLoadController:
         return self.runner.is_running

     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
-    @reporter.step("Reset load")
+    @reporter.step_deco("Reset load")
     def _reset_for_consequent_load(self):
         """This method is required if we want to run multiple loads during test run.
         Raise load counter by 1 and append it to load_id
@ -120,7 +118,7 @@ class BackgroundLoadController:
         self.load_params.set_id(f"{self.load_params.load_id}_{self.load_counter}")

     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
-    @reporter.step("Startup load")
+    @reporter.step_deco("Startup load")
     def startup(self):
         self.prepare()
         self.preset()
@ -131,33 +129,19 @@ class BackgroundLoadController:
         self.runner.preset()

     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
-    @reporter.step("Stop and get results of load")
-    def teardown(self):
+    @reporter.step_deco("Stop and get results of load")
+    def teardown(self, load_report: Optional[LoadReport] = None):
         if not self.started:
             return

         self.stop()
         self.load_summaries = self._get_results()
         self.started = False
-
-        start_time = min(self._get_start_times())
-        end_time = max(self._get_end_times())
-
-        for load_report in self.load_reporters:
-            load_report.set_start_time(start_time)
-            load_report.set_end_time(end_time)
+        if load_report:
             load_report.add_summaries(self.load_summaries)

-    def _get_start_times(self) -> list[datetime]:
-        futures = parallel([k6.get_start_time for k6 in self.runner.get_k6_instances()])
-        return [future.result() for future in futures]
-
-    def _get_end_times(self) -> list[datetime]:
-        futures = parallel([k6.get_end_time for k6 in self.runner.get_k6_instances()])
-        return [future.result() for future in futures]
-
     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
-    @reporter.step("Run post-load verification")
+    @reporter.step_deco("Run post-load verification")
     def verify(self):
         try:
             load_issues = self._collect_load_issues()
@ -169,7 +153,7 @@ class BackgroundLoadController:
             self._reset_for_consequent_load()

     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
-    @reporter.step("Collect load issues")
+    @reporter.step_deco("Collect load issues")
     def _collect_load_issues(self):
         verifier = LoadVerifier(self.load_params)
         return verifier.collect_load_issues(self.load_summaries)
@ -179,7 +163,7 @@ class BackgroundLoadController:
         self.runner.wait_until_finish(soft_timeout)

     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
-    @reporter.step("Verify loaded objects")
+    @reporter.step_deco("Verify loaded objects")
     def _run_verify_scenario(self) -> list[str]:
         self.verification_params = LoadParams(
             verify_clients=self.load_params.verify_clients,
@ -187,19 +171,15 @@ class BackgroundLoadController:
             read_from=self.load_params.read_from,
             registry_file=self.load_params.registry_file,
             verify_time=self.load_params.verify_time,
-            custom_registry=self.load_params.custom_registry,
             load_type=self.load_params.load_type,
             load_id=self.load_params.load_id,
             vu_init_time=0,
             working_dir=self.load_params.working_dir,
             endpoint_selection_strategy=self.load_params.endpoint_selection_strategy,
             k6_process_allocation_strategy=self.load_params.k6_process_allocation_strategy,
-            setup_timeout=self.load_params.setup_timeout,
+            setup_timeout="1s",
         )

-        if self.verification_params.custom_registry:
-            self.verification_params.registry_file = self.load_params.custom_registry
-
         if self.verification_params.verify_time is None:
             raise RuntimeError("verify_time should not be none")

View file

@ -4,26 +4,26 @@ import time
 from typing import TypeVar

 import frostfs_testlib.resources.optionals as optionals
-from frostfs_testlib import reporter
 from frostfs_testlib.cli import FrostfsAdm, FrostfsCli
 from frostfs_testlib.cli.netmap_parser import NetmapParser
 from frostfs_testlib.healthcheck.interfaces import Healthcheck
 from frostfs_testlib.hosting.interfaces import HostStatus
 from frostfs_testlib.plugins import load_all
+from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC
-from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
+from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG, MORPH_BLOCK_TIME
 from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider
-from frostfs_testlib.steps.network import IpHelper
+from frostfs_testlib.steps.network import IfUpDownHelper, IpTablesHelper
 from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode
 from frostfs_testlib.storage.controllers.disk_controller import DiskController
 from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass
-from frostfs_testlib.storage.dataclasses.storage_object_info import NodeStatus
-from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.testing import parallel
 from frostfs_testlib.testing.test_control import retry, run_optionally, wait_for_success
 from frostfs_testlib.utils.datetime_utils import parse_time

+reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")

+if_up_down_helper = IfUpDownHelper()

 class StateManager:
@ -76,7 +76,7 @@ class ClusterStateController:
         return online_svc

     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step("Stop host of node {node}")
+    @reporter.step_deco("Stop host of node {node}")
     def stop_node_host(self, node: ClusterNode, mode: str):
         # Drop ssh connection for this node before shutdown
         provider = SshConnectionProvider()
@ -88,7 +88,7 @@ class ClusterStateController:
         self._wait_for_host_offline(node)

     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step("Shutdown whole cluster")
+    @reporter.step_deco("Shutdown whole cluster")
     def shutdown_cluster(self, mode: str, reversed_order: bool = False):
         nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes
@ -105,7 +105,7 @@ class ClusterStateController:
             self._wait_for_host_offline(node)

     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step("Start host of node {node}")
+    @reporter.step_deco("Start host of node {node}")
     def start_node_host(self, node: ClusterNode, startup_healthcheck: bool = True):
         with reporter.step(f"Start host {node.host.config.address}"):
             node.host.start_host()
@ -115,7 +115,7 @@ class ClusterStateController:
             self.wait_startup_healthcheck()

     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step("Start stopped hosts")
+    @reporter.step_deco("Start stopped hosts")
     def start_stopped_hosts(self, reversed_order: bool = False):
         if not self.stopped_nodes:
             return
@ -133,35 +133,35 @@ class ClusterStateController:
         self.wait_after_storage_startup()

     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step("Detach disk {device} at {mountpoint} on node {node}")
+    @reporter.step_deco("Detach disk {device} at {mountpoint} on node {node}")
     def detach_disk(self, node: StorageNode, device: str, mountpoint: str):
         disk_controller = self._get_disk_controller(node, device, mountpoint)
         self.detached_disks[disk_controller.id] = disk_controller
         disk_controller.detach()

     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step("Attach disk {device} at {mountpoint} on node {node}")
+    @reporter.step_deco("Attach disk {device} at {mountpoint} on node {node}")
     def attach_disk(self, node: StorageNode, device: str, mountpoint: str):
         disk_controller = self._get_disk_controller(node, device, mountpoint)
         disk_controller.attach()
         self.detached_disks.pop(disk_controller.id, None)

     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step("Restore detached disks")
+    @reporter.step_deco("Restore detached disks")
     def restore_disks(self):
         for disk_controller in self.detached_disks.values():
             disk_controller.attach()
         self.detached_disks = {}

     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step("Stop all {service_type} services")
+    @reporter.step_deco("Stop all {service_type} services")
     def stop_services_of_type(self, service_type: type[ServiceClass], mask: bool = True):
         services = self.cluster.services(service_type)
         self.stopped_services.update(services)
         parallel([service.stop_service for service in services], mask=mask)

     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step("Start all {service_type} services")
+    @reporter.step_deco("Start all {service_type} services")
     def start_services_of_type(self, service_type: type[ServiceClass]):
         services = self.cluster.services(service_type)
         parallel([service.start_service for service in services])
@ -176,24 +176,24 @@ class ClusterStateController:
         result = s3gate.get_metric("frostfs_s3_gw_pool_current_nodes")
         assert 'address="127.0.0.1' in result.stdout, "S3Gate should connect to local storage node"

-    @reporter.step("Wait for S3Gates reconnection to local storage")
+    @reporter.step_deco("Wait for S3Gates reconnection to local storage")
     def wait_s3gates(self):
         online_s3gates = self._get_online(S3Gate)
         if online_s3gates:
             parallel(self.wait_s3gate, online_s3gates)

-    @reporter.step("Wait for cluster startup healtcheck")
+    @reporter.step_deco("Wait for cluster startup healtcheck")
     def wait_startup_healthcheck(self):
         nodes = self.cluster.nodes(self._get_online(StorageNode))
         parallel(self.healthcheck.startup_healthcheck, nodes)

-    @reporter.step("Wait for storage reconnection to the system")
+    @reporter.step_deco("Wait for storage reconnection to the system")
     def wait_after_storage_startup(self):
         self.wait_startup_healthcheck()
         self.wait_s3gates()

     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step("Start all stopped services")
+    @reporter.step_deco("Start all stopped services")
     def start_all_stopped_services(self):
         stopped_storages = self._get_stopped_by_type(StorageNode)
         parallel([service.start_service for service in self.stopped_services])
@ -203,21 +203,21 @@ class ClusterStateController:
             self.wait_after_storage_startup()

     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step("Stop {service_type} service on {node}")
+    @reporter.step_deco("Stop {service_type} service on {node}")
     def stop_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass], mask: bool = True):
         service = node.service(service_type)
         service.stop_service(mask)
         self.stopped_services.add(service)

     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step("Start {service_type} service on {node}")
+    @reporter.step_deco("Start {service_type} service on {node}")
     def start_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass]):
         service = node.service(service_type)
         service.start_service()
         self.stopped_services.discard(service)

     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step("Start all stopped {service_type} services")
+    @reporter.step_deco("Start all stopped {service_type} services")
     def start_stopped_services_of_type(self, service_type: type[ServiceClass]):
         stopped_svc = self._get_stopped_by_type(service_type)
         if not stopped_svc:
@ -231,7 +231,7 @@ class ClusterStateController:

     # TODO: Deprecated
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step("Stop all storage services on cluster")
+    @reporter.step_deco("Stop all storage services on cluster")
     def stop_all_storage_services(self, reversed_order: bool = False):
         nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes

@ -240,7 +240,7 @@ class ClusterStateController:

     # TODO: Deprecated
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step("Stop all S3 gates on cluster") @reporter.step_deco("Stop all S3 gates on cluster")
def stop_all_s3_gates(self, reversed_order: bool = False): def stop_all_s3_gates(self, reversed_order: bool = False):
nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes
@ -249,42 +249,42 @@ class ClusterStateController:
# TODO: Deprecated # TODO: Deprecated
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step("Stop storage service on {node}") @reporter.step_deco("Stop storage service on {node}")
def stop_storage_service(self, node: ClusterNode, mask: bool = True): def stop_storage_service(self, node: ClusterNode, mask: bool = True):
self.stop_service_of_type(node, StorageNode, mask) self.stop_service_of_type(node, StorageNode, mask)
# TODO: Deprecated # TODO: Deprecated
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step("Start storage service on {node}") @reporter.step_deco("Start storage service on {node}")
def start_storage_service(self, node: ClusterNode): def start_storage_service(self, node: ClusterNode):
self.start_service_of_type(node, StorageNode) self.start_service_of_type(node, StorageNode)
# TODO: Deprecated # TODO: Deprecated
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step("Start stopped storage services") @reporter.step_deco("Start stopped storage services")
def start_stopped_storage_services(self): def start_stopped_storage_services(self):
self.start_stopped_services_of_type(StorageNode) self.start_stopped_services_of_type(StorageNode)
# TODO: Deprecated # TODO: Deprecated
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step("Stop s3 gate on {node}") @reporter.step_deco("Stop s3 gate on {node}")
def stop_s3_gate(self, node: ClusterNode, mask: bool = True): def stop_s3_gate(self, node: ClusterNode, mask: bool = True):
self.stop_service_of_type(node, S3Gate, mask) self.stop_service_of_type(node, S3Gate, mask)
# TODO: Deprecated # TODO: Deprecated
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step("Start s3 gate on {node}") @reporter.step_deco("Start s3 gate on {node}")
def start_s3_gate(self, node: ClusterNode): def start_s3_gate(self, node: ClusterNode):
self.start_service_of_type(node, S3Gate) self.start_service_of_type(node, S3Gate)
# TODO: Deprecated # TODO: Deprecated
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step("Start stopped S3 gates") @reporter.step_deco("Start stopped S3 gates")
def start_stopped_s3_gates(self): def start_stopped_s3_gates(self):
self.start_stopped_services_of_type(S3Gate) self.start_stopped_services_of_type(S3Gate)
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step("Suspend {process_name} service in {node}") @reporter.step_deco("Suspend {process_name} service in {node}")
def suspend_service(self, process_name: str, node: ClusterNode): def suspend_service(self, process_name: str, node: ClusterNode):
node.host.wait_success_suspend_process(process_name) node.host.wait_success_suspend_process(process_name)
if self.suspended_services.get(process_name): if self.suspended_services.get(process_name):
@ -293,47 +293,77 @@ class ClusterStateController:
self.suspended_services[process_name] = [node] self.suspended_services[process_name] = [node]
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step("Resume {process_name} service in {node}") @reporter.step_deco("Resume {process_name} service in {node}")
def resume_service(self, process_name: str, node: ClusterNode): def resume_service(self, process_name: str, node: ClusterNode):
node.host.wait_success_resume_process(process_name) node.host.wait_success_resume_process(process_name)
if self.suspended_services.get(process_name) and node in self.suspended_services[process_name]: if self.suspended_services.get(process_name) and node in self.suspended_services[process_name]:
self.suspended_services[process_name].remove(node) self.suspended_services[process_name].remove(node)
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step("Start suspend processes services") @reporter.step_deco("Start suspend processes services")
def resume_suspended_services(self): def resume_suspended_services(self):
for process_name, list_nodes in self.suspended_services.items(): for process_name, list_nodes in self.suspended_services.items():
[node.host.wait_success_resume_process(process_name) for node in list_nodes] [node.host.wait_success_resume_process(process_name) for node in list_nodes]
self.suspended_services = {} self.suspended_services = {}
@reporter.step("Drop traffic to {node}, nodes - {block_nodes}") @reporter.step_deco("Drop traffic to {node}, with ports - {ports}, nodes - {block_nodes}")
def drop_traffic( def drop_traffic(
self, self,
mode: str,
node: ClusterNode, node: ClusterNode,
wakeup_timeout: int, wakeup_timeout: int,
name_interface: str, ports: list[str] = None,
block_nodes: list[ClusterNode] = None, block_nodes: list[ClusterNode] = None,
) -> None: ) -> None:
list_ip = self._parse_interfaces(block_nodes, name_interface) allowed_modes = ["ports", "nodes"]
IpHelper.drop_input_traffic_to_node(node, list_ip) assert mode in allowed_modes
match mode:
case "ports":
IpTablesHelper.drop_input_traffic_to_port(node, ports)
case "nodes":
list_ip = self._parse_intefaces(block_nodes)
IpTablesHelper.drop_input_traffic_to_node(node, list_ip)
time.sleep(wakeup_timeout) time.sleep(wakeup_timeout)
self.dropped_traffic.append(node) self.dropped_traffic.append(node)
@reporter.step("Start traffic to {node}") @reporter.step_deco("Ping traffic")
def restore_traffic( def ping_traffic(
self, self,
node: ClusterNode, node: ClusterNode,
) -> None: nodes_list: list[ClusterNode],
IpHelper.restore_input_traffic_to_node(node=node) expect_result: int,
) -> bool:
shell = node.host.get_shell()
options = CommandOptions(check=False)
ips = self._parse_intefaces(nodes_list)
for ip in ips:
code = shell.exec(f"ping {ip} -c 1", options).return_code
if code != expect_result:
return False
return True
@reporter.step("Restore blocked nodes") @reporter.step_deco("Start traffic to {node}")
def restore_traffic(
self,
mode: str,
node: ClusterNode,
) -> None:
allowed_modes = ["ports", "nodes"]
assert mode in allowed_modes
match mode:
case "ports":
IpTablesHelper.restore_input_traffic_to_port(node=node)
case "nodes":
IpTablesHelper.restore_input_traffic_to_node(node=node)
@reporter.step_deco("Restore blocked nodes")
def restore_all_traffic(self): def restore_all_traffic(self):
if not self.dropped_traffic:
return
parallel(self._restore_traffic_to_node, self.dropped_traffic) parallel(self._restore_traffic_to_node, self.dropped_traffic)
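A hedged sketch of the newer traffic API on the left; `csc`, `nodes` and the "data" interface name are assumptions:

    csc.drop_traffic(node=nodes[0], wakeup_timeout=30, name_interface="data", block_nodes=nodes[1:])
    try:
        ...  # exercise the cluster while nodes[0] is isolated
    finally:
        csc.restore_all_traffic()  # reopens input traffic on every dropped node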
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step("Hard reboot host {node} via magic SysRq option") @reporter.step_deco("Hard reboot host {node} via magic SysRq option")
def panic_reboot_host(self, node: ClusterNode, wait_for_return: bool = True, startup_healthcheck: bool = True): def panic_reboot_host(self, node: ClusterNode, wait_for_return: bool = True, startup_healthcheck: bool = True):
shell = node.host.get_shell() shell = node.host.get_shell()
shell.exec('sudo sh -c "echo 1 > /proc/sys/kernel/sysrq"') shell.exec('sudo sh -c "echo 1 > /proc/sys/kernel/sysrq"')
@ -353,35 +383,32 @@ class ClusterStateController:
if startup_healthcheck: if startup_healthcheck:
self.wait_startup_healthcheck() self.wait_startup_healthcheck()
@reporter.step("Down {interface} to {nodes}") @reporter.step_deco("Down {interface} to {nodes}")
def down_interface(self, nodes: list[ClusterNode], interface: str): def down_interface(self, nodes: list[ClusterNode], interface: str):
for node in nodes: for node in nodes:
node.host.down_interface(interface=interface) if_up_down_helper.down_interface(node=node, interface=interface)
assert node.host.check_state(interface=interface) == "DOWN" assert if_up_down_helper.check_state(node=node, interface=interface) == "DOWN"
self.nodes_with_modified_interface.append(node) self.nodes_with_modified_interface.append(node)
@reporter.step("Up {interface} to {nodes}") @reporter.step_deco("Up {interface} to {nodes}")
def up_interface(self, nodes: list[ClusterNode], interface: str): def up_interface(self, nodes: list[ClusterNode], interface: str):
for node in nodes: for node in nodes:
node.host.up_interface(interface=interface) if_up_down_helper.up_interface(node=node, interface=interface)
assert node.host.check_state(interface=interface) == "UP" assert if_up_down_helper.check_state(node=node, interface=interface) == "UP"
if node in self.nodes_with_modified_interface: if node in self.nodes_with_modified_interface:
self.nodes_with_modified_interface.remove(node) self.nodes_with_modified_interface.remove(node)
@reporter.step("Restore interface") @reporter.step_deco("Restore interface")
def restore_interfaces(self): def restore_interfaces(self):
for node in self.nodes_with_modified_interface: for node in self.nodes_with_modified_interface:
dict_interfaces = node.host.config.interfaces.keys() if_up_down_helper.up_all_interface(node)
for name_interface in dict_interfaces:
if "mgmt" not in name_interface:
node.host.up_interface(interface=name_interface)
@reporter.step("Get node time") @reporter.step_deco("Get node time")
def get_node_date(self, node: ClusterNode) -> datetime: def get_node_date(self, node: ClusterNode) -> datetime:
shell = node.host.get_shell() shell = node.host.get_shell()
return datetime.datetime.strptime(shell.exec("hwclock -r").stdout.strip(), "%Y-%m-%d %H:%M:%S.%f%z") return datetime.datetime.strptime(shell.exec("hwclock -r").stdout.strip(), "%Y-%m-%d %H:%M:%S.%f%z")
@reporter.step("Set node time to {in_date}") @reporter.step_deco("Set node time to {in_date}")
def change_node_date(self, node: ClusterNode, in_date: datetime) -> None: def change_node_date(self, node: ClusterNode, in_date: datetime) -> None:
shell = node.host.get_shell() shell = node.host.get_shell()
shell.exec(f"date -s @{time.mktime(in_date.timetuple())}") shell.exec(f"date -s @{time.mktime(in_date.timetuple())}")
@ -390,7 +417,7 @@ class ClusterStateController:
with reporter.step(f"Verify difference between {node_time} and {in_date} is less than a minute"): with reporter.step(f"Verify difference between {node_time} and {in_date} is less than a minute"):
assert (self.get_node_date(node) - in_date) < datetime.timedelta(minutes=1) assert (self.get_node_date(node) - in_date) < datetime.timedelta(minutes=1)
@reporter.step(f"Restore time") @reporter.step_deco(f"Restore time")
def restore_node_date(self, node: ClusterNode) -> None: def restore_node_date(self, node: ClusterNode) -> None:
shell = node.host.get_shell() shell = node.host.get_shell()
now_time = datetime.datetime.now(datetime.timezone.utc) now_time = datetime.datetime.now(datetime.timezone.utc)
@ -398,14 +425,14 @@ class ClusterStateController:
shell.exec(f"date -s @{time.mktime(now_time.timetuple())}") shell.exec(f"date -s @{time.mktime(now_time.timetuple())}")
shell.exec("hwclock --systohc") shell.exec("hwclock --systohc")
@reporter.step("Change the synchronizer status to {status}") @reporter.step_deco("Change the synchronizer status to {status}")
def set_sync_date_all_nodes(self, status: str): def set_sync_date_all_nodes(self, status: str):
if status == "active": if status == "active":
parallel(self._enable_date_synchronizer, self.cluster.cluster_nodes) parallel(self._enable_date_synchronizer, self.cluster.cluster_nodes)
return return
parallel(self._disable_date_synchronizer, self.cluster.cluster_nodes) parallel(self._disable_date_synchronizer, self.cluster.cluster_nodes)
@reporter.step("Set MaintenanceModeAllowed - {status}") @reporter.step_deco("Set MaintenanceModeAllowed - {status}")
def set_maintenance_mode_allowed(self, status: str, cluster_node: ClusterNode) -> None: def set_maintenance_mode_allowed(self, status: str, cluster_node: ClusterNode) -> None:
frostfs_adm = FrostfsAdm( frostfs_adm = FrostfsAdm(
shell=cluster_node.host.get_shell(), shell=cluster_node.host.get_shell(),
@ -414,43 +441,46 @@ class ClusterStateController:
) )
frostfs_adm.morph.set_config(set_key_value=f"MaintenanceModeAllowed={status}") frostfs_adm.morph.set_config(set_key_value=f"MaintenanceModeAllowed={status}")
@reporter.step("Set node status to {status} in CSC") @reporter.step_deco("Set mode node to {status}")
def set_node_status(self, cluster_node: ClusterNode, wallet: WalletInfo, status: NodeStatus, await_tick: bool = True) -> None: def set_mode_node(self, cluster_node: ClusterNode, wallet: str, status: str, await_tick: bool = True) -> None:
rpc_endpoint = cluster_node.storage_node.get_rpc_endpoint() rpc_endpoint = cluster_node.storage_node.get_rpc_endpoint()
control_endpoint = cluster_node.service(StorageNode).get_control_endpoint() control_endpoint = cluster_node.service(StorageNode).get_control_endpoint()
frostfs_adm, frostfs_cli, frostfs_cli_remote = self._get_cli(self.shell, wallet, cluster_node) frostfs_adm, frostfs_cli, frostfs_cli_remote = self._get_cli(local_shell=self.shell, cluster_node=cluster_node)
node_netinfo = NetmapParser.netinfo(frostfs_cli.netmap.netinfo(rpc_endpoint).stdout) node_netinfo = NetmapParser.netinfo(frostfs_cli.netmap.netinfo(rpc_endpoint=rpc_endpoint, wallet=wallet).stdout)
if node_netinfo.maintenance_mode_allowed == "false": with reporter.step("If status maintenance, then check that the option is enabled"):
with reporter.step("Enable maintenance mode"): if node_netinfo.maintenance_mode_allowed == "false":
frostfs_adm.morph.set_config("MaintenanceModeAllowed=true") frostfs_adm.morph.set_config(set_key_value="MaintenanceModeAllowed=true")
with reporter.step(f"Set node status to {status} using FrostfsCli"): with reporter.step(f"Change the status to {status}"):
frostfs_cli_remote.control.set_status(control_endpoint, status.value) frostfs_cli_remote.control.set_status(endpoint=control_endpoint, status=status)
if not await_tick: if not await_tick:
return return
with reporter.step("Tick 1 epoch and await 2 block"): with reporter.step("Tick 1 epoch, and await 2 block"):
frostfs_adm.morph.force_new_epoch() frostfs_adm.morph.force_new_epoch()
time.sleep(parse_time(MORPH_BLOCK_TIME) * 2) time.sleep(parse_time(MORPH_BLOCK_TIME) * 2)
self.await_node_status(status, wallet, cluster_node) self.check_node_status(status=status, wallet=wallet, cluster_node=cluster_node)
@wait_for_success(80, 8, title="Wait for node status to become {status}") @wait_for_success(80, 8)
def await_node_status(self, status: NodeStatus, wallet: WalletInfo, cluster_node: ClusterNode): @reporter.step_deco("Check status node, status - {status}")
frostfs_cli = FrostfsCli(self.shell, FROSTFS_CLI_EXEC, wallet.config_path) def check_node_status(self, status: str, wallet: str, cluster_node: ClusterNode):
netmap = NetmapParser.snapshot_all_nodes(frostfs_cli.netmap.snapshot(cluster_node.storage_node.get_rpc_endpoint()).stdout) frostfs_cli = FrostfsCli(
shell=self.shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG
)
netmap = NetmapParser.snapshot_all_nodes(
frostfs_cli.netmap.snapshot(rpc_endpoint=cluster_node.storage_node.get_rpc_endpoint(), wallet=wallet).stdout
)
netmap = [node for node in netmap if cluster_node.host_ip == node.node] netmap = [node for node in netmap if cluster_node.host_ip == node.node]
if status == NodeStatus.OFFLINE: if status == "offline":
assert cluster_node.host_ip not in netmap, f"{cluster_node.host_ip} not in Offline" assert cluster_node.host_ip not in netmap, f"{cluster_node.host_ip} not in Offline"
else: else:
assert netmap[0].node_status == status, f"Node status should be '{status}', but was '{netmap[0].node_status}'" assert netmap[0].node_status == status.upper(), f"Node state - {netmap[0].node_status} != {status} expect"
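A sketch of the newer enum-based status flow on the left (fixture names assumed):

    from frostfs_testlib.storage.dataclasses.storage_object_info import NodeStatus

    csc.set_node_status(node, wallet, NodeStatus.MAINTENANCE)  # enables the mode, ticks an epoch
    csc.await_node_status(NodeStatus.ONLINE, wallet, node)     # polls the netmap snapshot for up to 80s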
def _get_cli( def _get_cli(self, local_shell: Shell, cluster_node: ClusterNode) -> tuple[FrostfsAdm, FrostfsCli, FrostfsCli]:
self, local_shell: Shell, local_wallet: WalletInfo, cluster_node: ClusterNode
) -> tuple[FrostfsAdm, FrostfsCli, FrostfsCli]:
# TODO Move to service config # TODO Move to service config
host = cluster_node.host host = cluster_node.host
service_config = host.get_service_config(cluster_node.storage_node.name) service_config = host.get_service_config(cluster_node.storage_node.name)
@ -462,8 +492,12 @@ class ClusterStateController:
wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
frostfs_adm = FrostfsAdm(shell=shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH) frostfs_adm = FrostfsAdm(
frostfs_cli = FrostfsCli(local_shell, FROSTFS_CLI_EXEC, local_wallet.config_path) shell=shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH
)
frostfs_cli = FrostfsCli(
shell=local_shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG
)
frostfs_cli_remote = FrostfsCli( frostfs_cli_remote = FrostfsCli(
shell=shell, shell=shell,
frostfs_cli_exec_path=FROSTFS_CLI_EXEC, frostfs_cli_exec_path=FROSTFS_CLI_EXEC,
@ -474,12 +508,12 @@ class ClusterStateController:
def _enable_date_synchronizer(self, cluster_node: ClusterNode): def _enable_date_synchronizer(self, cluster_node: ClusterNode):
shell = cluster_node.host.get_shell() shell = cluster_node.host.get_shell()
shell.exec("timedatectl set-ntp true") shell.exec("timedatectl set-ntp true")
cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "active", 15) cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "active", 5)
def _disable_date_synchronizer(self, cluster_node: ClusterNode): def _disable_date_synchronizer(self, cluster_node: ClusterNode):
shell = cluster_node.host.get_shell() shell = cluster_node.host.get_shell()
shell.exec("timedatectl set-ntp false") shell.exec("timedatectl set-ntp false")
cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "inactive", 15) cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "inactive", 5)
def _get_disk_controller(self, node: StorageNode, device: str, mountpoint: str) -> DiskController: def _get_disk_controller(self, node: StorageNode, device: str, mountpoint: str) -> DiskController:
disk_controller_id = DiskController.get_id(node, device) disk_controller_id = DiskController.get_id(node, device)
@ -491,23 +525,25 @@ class ClusterStateController:
return disk_controller return disk_controller
def _restore_traffic_to_node(self, node): def _restore_traffic_to_node(self, node):
IpHelper.restore_input_traffic_to_node(node) IpTablesHelper.restore_input_traffic_to_port(node)
IpTablesHelper.restore_input_traffic_to_node(node)
def _parse_interfaces(self, nodes: list[ClusterNode], name_interface: str): def _parse_intefaces(self, nodes: list[ClusterNode]):
interfaces = [] interfaces = []
for node in nodes: for node in nodes:
dict_interfaces = node.host.config.interfaces dict_interfaces = node.host.config.interfaces
for type, ip in dict_interfaces.items(): for type, ip in dict_interfaces.items():
if name_interface in type: if "mgmt" not in type:
interfaces.append(ip) interfaces.append(ip)
return interfaces return interfaces
@reporter.step("Ping node") @reporter.step_deco("Ping node")
def _ping_host(self, node: ClusterNode): def _ping_host(self, node: ClusterNode):
options = CommandOptions(check=False) options = CommandOptions(check=False)
return self.shell.exec(f"ping {node.host.config.address} -c 1", options).return_code return self.shell.exec(f"ping {node.host.config.address} -c 1", options).return_code
@retry(max_attempts=60, sleep_interval=10, expected_result=HostStatus.ONLINE, title="Waiting for {node} to go online") @retry(max_attempts=60, sleep_interval=5, expected_result=HostStatus.ONLINE)
@reporter.step_deco("Waiting for {node} to go online")
def _wait_for_host_online(self, node: ClusterNode): def _wait_for_host_online(self, node: ClusterNode):
try: try:
ping_result = self._ping_host(node) ping_result = self._ping_host(node)
@ -518,7 +554,8 @@ class ClusterStateController:
logger.warning(f"Host ping fails with error {err}") logger.warning(f"Host ping fails with error {err}")
return HostStatus.OFFLINE return HostStatus.OFFLINE
@retry(max_attempts=60, sleep_interval=10, expected_result=HostStatus.OFFLINE, title="Waiting for {node} to go offline") @retry(max_attempts=60, sleep_interval=5, expected_result=HostStatus.OFFLINE)
@reporter.step_deco("Waiting for {node} to go offline")
def _wait_for_host_offline(self, node: ClusterNode): def _wait_for_host_offline(self, node: ClusterNode):
try: try:
ping_result = self._ping_host(node) ping_result = self._ping_host(node)

View file

@ -79,7 +79,9 @@ class ShardsWatcher:
assert self._is_shard_present(shard_id) assert self._is_shard_present(shard_id)
shards_with_new_errors = self.get_shards_with_new_errors() shards_with_new_errors = self.get_shards_with_new_errors()
assert shard_id in shards_with_new_errors, f"Expected shard {shard_id} to have new errors, but it hasn't: {self.shards_snapshots[-1]}" assert (
shard_id in shards_with_new_errors
), f"Expected shard {shard_id} to have new errors, but it hasn't: {self.shards_snapshots[-1]}"
@wait_for_success(300, 5) @wait_for_success(300, 5)
def await_for_shards_have_no_new_errors(self): def await_for_shards_have_no_new_errors(self):
@ -108,9 +110,9 @@ class ShardsWatcher:
self.storage_node.host.get_cli_config("frostfs-cli").exec_path, self.storage_node.host.get_cli_config("frostfs-cli").exec_path,
) )
return shards_cli.set_mode( return shards_cli.set_mode(
endpoint=self.storage_node.get_control_endpoint(), self.storage_node.get_control_endpoint(),
wallet=self.storage_node.get_remote_wallet_path(), self.storage_node.get_remote_wallet_path(),
wallet_password=self.storage_node.get_wallet_password(), self.storage_node.get_wallet_password(),
mode=mode, mode=mode,
id=[shard_id], id=[shard_id],
clear_errors=clear_errors, clear_errors=clear_errors,

View file

@ -1,11 +1,13 @@
from typing import Any from typing import Any
from frostfs_testlib import reporter from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController, StateManager from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController, StateManager
from frostfs_testlib.storage.dataclasses.node_base import ServiceClass from frostfs_testlib.storage.dataclasses.node_base import ServiceClass
from frostfs_testlib.testing import parallel from frostfs_testlib.testing import parallel
reporter = get_reporter()
class ConfigStateManager(StateManager): class ConfigStateManager(StateManager):
def __init__(self, cluster_state_controller: ClusterStateController) -> None: def __init__(self, cluster_state_controller: ClusterStateController) -> None:
@ -13,7 +15,7 @@ class ConfigStateManager(StateManager):
self.services_with_changed_config: set[tuple[ClusterNode, ServiceClass]] = set() self.services_with_changed_config: set[tuple[ClusterNode, ServiceClass]] = set()
self.cluster = self.csc.cluster self.cluster = self.csc.cluster
@reporter.step("Change configuration for {service_type} on all nodes") @reporter.step_deco("Change configuration for {service_type} on all nodes")
def set_on_all_nodes(self, service_type: type[ServiceClass], values: dict[str, Any]): def set_on_all_nodes(self, service_type: type[ServiceClass], values: dict[str, Any]):
services = self.cluster.services(service_type) services = self.cluster.services(service_type)
nodes = self.cluster.nodes(services) nodes = self.cluster.nodes(services)
@ -23,7 +25,7 @@ class ConfigStateManager(StateManager):
parallel([node.config(service_type).set for node in nodes], values=values) parallel([node.config(service_type).set for node in nodes], values=values)
self.csc.start_services_of_type(service_type) self.csc.start_services_of_type(service_type)
@reporter.step("Change configuration for {service_type} on {node}") @reporter.step_deco("Change configuration for {service_type} on {node}")
def set_on_node(self, node: ClusterNode, service_type: type[ServiceClass], values: dict[str, Any]): def set_on_node(self, node: ClusterNode, service_type: type[ServiceClass], values: dict[str, Any]):
self.services_with_changed_config.add((node, service_type)) self.services_with_changed_config.add((node, service_type))
@ -31,7 +33,7 @@ class ConfigStateManager(StateManager):
node.config(service_type).set(values) node.config(service_type).set(values)
self.csc.start_service_of_type(node, service_type) self.csc.start_service_of_type(node, service_type)
@reporter.step("Revert all configuration changes") @reporter.step_deco("Revert all configuration changes")
def revert_all(self): def revert_all(self):
if not self.services_with_changed_config: if not self.services_with_changed_config:
return return
@ -42,7 +44,7 @@ class ConfigStateManager(StateManager):
self.csc.start_all_stopped_services() self.csc.start_all_stopped_services()
# TODO: parallel can't have multiple parallel_items :( # TODO: parallel can't have multiple parallel_items :(
@reporter.step("Revert all configuration {node_and_service}") @reporter.step_deco("Revert all configuration {node_and_service}")
def _revert_svc(self, node_and_service: tuple[ClusterNode, ServiceClass]): def _revert_svc(self, node_and_service: tuple[ClusterNode, ServiceClass]):
node, service_type = node_and_service node, service_type = node_and_service
self.csc.stop_service_of_type(node, service_type) self.csc.stop_service_of_type(node, service_type)
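A usage sketch for the manager above; the config key is an assumption, not from this diff:

    manager = ConfigStateManager(cluster_state_controller)
    manager.set_on_all_nodes(StorageNode, {"logger.level": "debug"})  # stop, edit, restart
    ...  # assertions against the reconfigured cluster
    manager.revert_all()  # reverts every touched service, then starts stopped ones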

View file

@ -1,8 +1,8 @@
import logging import logging
from dataclasses import dataclass from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, List, Optional, Union from typing import Any, Dict, List, Optional, Union
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.readable import HumanReadableEnum from frostfs_testlib.testing.readable import HumanReadableEnum
from frostfs_testlib.utils import wallet_utils from frostfs_testlib.utils import wallet_utils
@ -65,7 +65,11 @@ class EACLFilters:
def __str__(self): def __str__(self):
return ",".join( return ",".join(
[f"{filter.header_type.value}:" f"{filter.key}{filter.match_type.value}{filter.value}" for filter in self.filters] [
f"{filter.header_type.value}:"
f"{filter.key}{filter.match_type.value}{filter.value}"
for filter in self.filters
]
if self.filters if self.filters
else [] else []
) )
@ -80,7 +84,7 @@ class EACLPubKey:
class EACLRule: class EACLRule:
operation: Optional[EACLOperation] = None operation: Optional[EACLOperation] = None
access: Optional[EACLAccess] = None access: Optional[EACLAccess] = None
role: Optional[Union[EACLRole, WalletInfo]] = None role: Optional[Union[EACLRole, str]] = None
filters: Optional[EACLFilters] = None filters: Optional[EACLFilters] = None
def to_dict(self) -> Dict[str, Any]: def to_dict(self) -> Dict[str, Any]:
@ -92,9 +96,9 @@ class EACLRule:
} }
def __str__(self): def __str__(self):
role = "" role = (
if isinstance(self.role, EACLRole): self.role.value
role = self.role.value if isinstance(self.role, EACLRole)
if isinstance(self.role, WalletInfo): else f'pubkey:{wallet_utils.get_wallet_public_key(self.role, "")}'
role = f"pubkey:{wallet_utils.get_wallet_public_key(self.role.path, self.role.password)}" )
return f'{self.access.value} {self.operation.value} {self.filters or ""} {role}' return f'{self.access.value} {self.operation.value} {self.filters or ""} {role}'
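For reference, the __str__ above renders a rule roughly like this (enum values inferred from their names):

    rule = EACLRule(operation=EACLOperation.PUT, access=EACLAccess.DENY, role=EACLRole.OTHERS)
    str(rule)  # -> 'deny put  others'; filters render as an empty string when unset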

View file

@ -3,7 +3,6 @@ import yaml
from frostfs_testlib.blockchain import RPCClient from frostfs_testlib.blockchain import RPCClient
from frostfs_testlib.storage.constants import ConfigAttributes from frostfs_testlib.storage.constants import ConfigAttributes
from frostfs_testlib.storage.dataclasses.node_base import NodeBase from frostfs_testlib.storage.dataclasses.node_base import NodeBase
from frostfs_testlib.storage.dataclasses.shard import Shard
class InnerRing(NodeBase): class InnerRing(NodeBase):
@ -18,7 +17,11 @@ class InnerRing(NodeBase):
def service_healthcheck(self) -> bool: def service_healthcheck(self) -> bool:
health_metric = "frostfs_ir_ir_health" health_metric = "frostfs_ir_ir_health"
output = self.host.get_shell().exec(f"curl -s localhost:6662 | grep {health_metric} | sed 1,2d").stdout output = (
self.host.get_shell()
.exec(f"curl -s localhost:6662 | grep {health_metric} | sed 1,2d")
.stdout
)
return health_metric in output return health_metric in output
def get_netmap_cleaner_threshold(self) -> str: def get_netmap_cleaner_threshold(self) -> str:
@ -47,7 +50,11 @@ class S3Gate(NodeBase):
def service_healthcheck(self) -> bool: def service_healthcheck(self) -> bool:
health_metric = "frostfs_s3_gw_state_health" health_metric = "frostfs_s3_gw_state_health"
output = self.host.get_shell().exec(f"curl -s localhost:8086 | grep {health_metric} | sed 1,2d").stdout output = (
self.host.get_shell()
.exec(f"curl -s localhost:8086 | grep {health_metric} | sed 1,2d")
.stdout
)
return health_metric in output return health_metric in output
@property @property
@ -65,7 +72,11 @@ class HTTPGate(NodeBase):
def service_healthcheck(self) -> bool: def service_healthcheck(self) -> bool:
health_metric = "frostfs_http_gw_state_health" health_metric = "frostfs_http_gw_state_health"
output = self.host.get_shell().exec(f"curl -s localhost:5662 | grep {health_metric} | sed 1,2d").stdout output = (
self.host.get_shell()
.exec(f"curl -s localhost:5662 | grep {health_metric} | sed 1,2d")
.stdout
)
return health_metric in output return health_metric in output
@property @property
@ -124,27 +135,19 @@ class StorageNode(NodeBase):
def service_healthcheck(self) -> bool: def service_healthcheck(self) -> bool:
health_metric = "frostfs_node_state_health" health_metric = "frostfs_node_state_health"
output = self.host.get_shell().exec(f"curl -s localhost:6672 | grep {health_metric} | sed 1,2d").stdout output = (
self.host.get_shell()
.exec(f"curl -s localhost:6672 | grep {health_metric} | sed 1,2d")
.stdout
)
return health_metric in output return health_metric in output
# TODO: Deprecated. Use new approach with config
def get_shard_config_path(self) -> str: def get_shard_config_path(self) -> str:
return self._get_attribute(ConfigAttributes.SHARD_CONFIG_PATH) return self._get_attribute(ConfigAttributes.SHARD_CONFIG_PATH)
# TODO: Deprecated. Use new approach with config
def get_shards_config(self) -> tuple[str, dict]: def get_shards_config(self) -> tuple[str, dict]:
return self.get_config(self.get_shard_config_path()) return self.get_config(self.get_shard_config_path())
def get_shards(self) -> list[Shard]:
shards = self.config.get("storage:shard")
if not shards:
raise RuntimeError(f"Cannot get shards information for {self.name} on {self.host.config.address}")
if "default" in shards:
shards.pop("default")
return [Shard.from_object(shard) for shard in shards.values()]
def get_control_endpoint(self) -> str: def get_control_endpoint(self) -> str:
return self._get_attribute(ConfigAttributes.CONTROL_ENDPOINT) return self._get_attribute(ConfigAttributes.CONTROL_ENDPOINT)
@ -154,13 +157,10 @@ class StorageNode(NodeBase):
def get_data_directory(self) -> str: def get_data_directory(self) -> str:
return self.host.get_data_directory(self.name) return self.host.get_data_directory(self.name)
def get_storage_config(self) -> str: def get_http_hostname(self) -> str:
return self.host.get_storage_config(self.name)
def get_http_hostname(self) -> list[str]:
return self._get_attribute(ConfigAttributes.HTTP_HOSTNAME) return self._get_attribute(ConfigAttributes.HTTP_HOSTNAME)
def get_s3_hostname(self) -> list[str]: def get_s3_hostname(self) -> str:
return self._get_attribute(ConfigAttributes.S3_HOSTNAME) return self._get_attribute(ConfigAttributes.S3_HOSTNAME)
def delete_blobovnicza(self): def delete_blobovnicza(self):
@ -169,11 +169,8 @@ class StorageNode(NodeBase):
def delete_fstree(self): def delete_fstree(self):
self.host.delete_fstree(self.name) self.host.delete_fstree(self.name)
def delete_file(self, file_path: str) -> None: def delete_pilorama(self):
self.host.delete_file(file_path) self.host.delete_pilorama(self.name)
def is_file_exist(self, file_path) -> bool:
return self.host.is_file_exist(file_path)
def delete_metabase(self): def delete_metabase(self):
self.host.delete_metabase(self.name) self.host.delete_metabase(self.name)

View file

@ -1,20 +1,19 @@
from abc import abstractmethod from abc import abstractmethod
from dataclasses import dataclass from dataclasses import dataclass
from datetime import datetime, timezone
from typing import Optional, TypedDict, TypeVar from typing import Optional, TypedDict, TypeVar
import yaml import yaml
from dateutil import parser
from frostfs_testlib import reporter
from frostfs_testlib.hosting.config import ServiceConfig from frostfs_testlib.hosting.config import ServiceConfig
from frostfs_testlib.hosting.interfaces import Host from frostfs_testlib.hosting.interfaces import Host
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.shell.interfaces import CommandResult from frostfs_testlib.shell.interfaces import CommandResult
from frostfs_testlib.storage.configuration.service_configuration import ServiceConfiguration, ServiceConfigurationYml
from frostfs_testlib.storage.constants import ConfigAttributes from frostfs_testlib.storage.constants import ConfigAttributes
from frostfs_testlib.testing.readable import HumanReadableABC from frostfs_testlib.testing.readable import HumanReadableABC
from frostfs_testlib.utils import wallet_utils from frostfs_testlib.utils import wallet_utils
reporter = get_reporter()
@dataclass @dataclass
class NodeBase(HumanReadableABC): class NodeBase(HumanReadableABC):
@ -115,31 +114,12 @@ class NodeBase(HumanReadableABC):
ConfigAttributes.CONFIG_PATH, ConfigAttributes.CONFIG_PATH,
) )
def get_remote_wallet_config_path(self) -> str:
"""
Returns node config file path located on remote host
"""
return self._get_attribute(
ConfigAttributes.REMOTE_WALLET_CONFIG,
)
def get_wallet_config_path(self) -> str: def get_wallet_config_path(self) -> str:
return self._get_attribute( return self._get_attribute(
ConfigAttributes.LOCAL_WALLET_CONFIG, ConfigAttributes.LOCAL_WALLET_CONFIG,
ConfigAttributes.WALLET_CONFIG, ConfigAttributes.WALLET_CONFIG,
) )
def get_logger_config_path(self) -> str:
"""
Returns config path for logger located on remote host
"""
config_attributes = self.host.get_service_config(self.name)
return (
self._get_attribute(ConfigAttributes.LOGGER_CONFIG_PATH)
if ConfigAttributes.LOGGER_CONFIG_PATH in config_attributes.attributes
else None
)
@property @property
def config_dir(self) -> str: def config_dir(self) -> str:
return self._get_attribute(ConfigAttributes.CONFIG_DIR) return self._get_attribute(ConfigAttributes.CONFIG_DIR)
@ -148,11 +128,7 @@ class NodeBase(HumanReadableABC):
def main_config_path(self) -> str: def main_config_path(self) -> str:
return self._get_attribute(ConfigAttributes.CONFIG_PATH) return self._get_attribute(ConfigAttributes.CONFIG_PATH)
@property # TODO: Deprecated
def config(self) -> ServiceConfigurationYml:
return ServiceConfiguration(self.name, self.host.get_shell(), self.config_dir, self.main_config_path)
# TODO: Deprecated. Use config with ServiceConfigurationYml interface
def get_config(self, config_file_path: Optional[str] = None) -> tuple[str, dict]: def get_config(self, config_file_path: Optional[str] = None) -> tuple[str, dict]:
if config_file_path is None: if config_file_path is None:
config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH) config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH)
@ -165,7 +141,7 @@ class NodeBase(HumanReadableABC):
config = yaml.safe_load(config_text) config = yaml.safe_load(config_text)
return config_file_path, config return config_file_path, config
# TODO: Deprecated. Use config with ServiceConfigurationYml interface # TODO: Deprecated
def save_config(self, new_config: dict, config_file_path: Optional[str] = None) -> None: def save_config(self, new_config: dict, config_file_path: Optional[str] = None) -> None:
if config_file_path is None: if config_file_path is None:
config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH) config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH)
@ -196,15 +172,6 @@ class NodeBase(HumanReadableABC):
def _get_service_config(self) -> ServiceConfig: def _get_service_config(self) -> ServiceConfig:
return self.host.get_service_config(self.name) return self.host.get_service_config(self.name)
def get_service_uptime(self, service: str) -> datetime:
result = self.host.get_shell().exec(
f"systemctl show {service} --property ActiveEnterTimestamp | cut -d '=' -f 2"
)
start_time = parser.parse(result.stdout.strip())
current_time = datetime.now(tz=timezone.utc)
active_time = current_time - start_time
return active_time
ServiceClass = TypeVar("ServiceClass", bound=NodeBase) ServiceClass = TypeVar("ServiceClass", bound=NodeBase)

View file

@ -1,13 +0,0 @@
from dataclasses import dataclass
@dataclass
class PlacementPolicy:
name: str
value: str
def __str__(self) -> str:
return self.name
def __repr__(self) -> str:
return self.__str__()

View file

@ -1,88 +0,0 @@
from dataclasses import dataclass
from configobj import ConfigObj
SHARD_PREFIX = "FROSTFS_STORAGE_SHARD_"
BLOBSTOR_PREFIX = "_BLOBSTOR_"
@dataclass
class Blobstor:
path: str
path_type: str
def __eq__(self, other) -> bool:
if not isinstance(other, self.__class__):
raise RuntimeError(f"Only two {self.__class__.__name__} instances can be compared")
return self.path == other.path and self.path_type == other.path_type
def __hash__(self):
return hash((self.path, self.path_type))
@staticmethod
def from_config_object(section: ConfigObj, shard_id: str, blobstor_id: str):
var_prefix = f"{SHARD_PREFIX}{shard_id}{BLOBSTOR_PREFIX}{blobstor_id}"
return Blobstor(section.get(f"{var_prefix}_PATH"), section.get(f"{var_prefix}_TYPE"))
@dataclass
class Shard:
blobstor: list[Blobstor]
metabase: str
writecache: str
pilorama: str
def __eq__(self, other) -> bool:
if not isinstance(other, self.__class__):
raise RuntimeError(f"Only two {self.__class__.__name__} instances can be compared")
return (
set(self.blobstor) == set(other.blobstor)
and self.metabase == other.metabase
and self.writecache == other.writecache
and self.pilorama == other.pilorama
)
def __hash__(self):
return hash((self.metabase, self.writecache))
@staticmethod
def _get_blobstor_count_from_section(config_object: ConfigObj, shard_id: int):
pattern = f"{SHARD_PREFIX}{shard_id}{BLOBSTOR_PREFIX}"
blobstors = {key[: len(pattern) + 2] for key in config_object.keys() if pattern in key}
return len(blobstors)
@staticmethod
def from_config_object(config_object: ConfigObj, shard_id: int):
var_prefix = f"{SHARD_PREFIX}{shard_id}"
blobstor_count = Shard._get_blobstor_count_from_section(config_object, shard_id)
blobstors = [
Blobstor.from_config_object(config_object, shard_id, blobstor_id) for blobstor_id in range(blobstor_count)
]
write_cache_enabled = config_object.as_bool(f"{var_prefix}_WRITECACHE_ENABLED")
return Shard(
blobstors,
config_object.get(f"{var_prefix}_METABASE_PATH"),
config_object.get(f"{var_prefix}_WRITECACHE_PATH") if write_cache_enabled else "",
)
@staticmethod
def from_object(shard):
metabase = shard["metabase"]["path"] if "path" in shard["metabase"] else shard["metabase"]
writecache = shard["writecache"]["path"] if "path" in shard["writecache"] else shard["writecache"]
# Currently, due to an issue, we need to check if pilorama exists in keys
# TODO: make pilorama mandatory after the fix
if shard.get("pilorama"):
pilorama = shard["pilorama"]["path"] if "path" in shard["pilorama"] else shard["pilorama"]
else:
pilorama = None
return Shard(
blobstor=[Blobstor(path=blobstor["path"], path_type=blobstor["type"]) for blobstor in shard["blobstor"]],
metabase=metabase,
writecache=writecache,
pilorama=pilorama,
)
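from_object accepts either plain strings or {"path": ...} mappings for each section; a hedged example of the dict it parses (paths invented):

    shard_yml = {
        "blobstor": [{"path": "/srv/frostfs/blob0", "type": "blobovnicza"}],
        "metabase": {"path": "/srv/frostfs/meta"},
        "writecache": {"path": "/srv/frostfs/cache"},
        "pilorama": {"path": "/srv/frostfs/pilorama"},
    }
    shard = Shard.from_object(shard_yml)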

View file

@ -1,7 +1,6 @@
from dataclasses import dataclass from dataclasses import dataclass
from typing import Optional from typing import Optional
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.readable import HumanReadableEnum from frostfs_testlib.testing.readable import HumanReadableEnum
@ -20,7 +19,7 @@ class LockObjectInfo(ObjectRef):
@dataclass @dataclass
class StorageObjectInfo(ObjectRef): class StorageObjectInfo(ObjectRef):
size: Optional[int] = None size: Optional[int] = None
wallet: Optional[WalletInfo] = None wallet_file_path: Optional[str] = None
file_path: Optional[str] = None file_path: Optional[str] = None
file_hash: Optional[str] = None file_hash: Optional[str] = None
attributes: Optional[list[dict[str, str]]] = None attributes: Optional[list[dict[str, str]]] = None
@ -28,7 +27,7 @@ class StorageObjectInfo(ObjectRef):
locks: Optional[list[LockObjectInfo]] = None locks: Optional[list[LockObjectInfo]] = None
class NodeStatus(HumanReadableEnum): class ModeNode(HumanReadableEnum):
MAINTENANCE: str = "maintenance" MAINTENANCE: str = "maintenance"
ONLINE: str = "online" ONLINE: str = "online"
OFFLINE: str = "offline" OFFLINE: str = "offline"
@ -37,7 +36,7 @@ class NodeStatus(HumanReadableEnum):
@dataclass @dataclass
class NodeNetmapInfo: class NodeNetmapInfo:
node_id: str = None node_id: str = None
node_status: NodeStatus = None node_status: ModeNode = None
node_data_ips: list[str] = None node_data_ips: list[str] = None
cluster_name: str = None cluster_name: str = None
continent: str = None continent: str = None

View file

@ -1,15 +1,13 @@
import json import json
import logging import logging
import os import os
import uuid
from dataclasses import dataclass from dataclasses import dataclass
from typing import Optional from typing import Optional
import yaml from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG, DEFAULT_WALLET_PASS
from frostfs_testlib import reporter
from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG, DEFAULT_WALLET_PASS
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.cluster import NodeBase from frostfs_testlib.storage.cluster import Cluster, NodeBase
from frostfs_testlib.utils.wallet_utils import get_last_address_from_wallet, init_wallet from frostfs_testlib.utils.wallet_utils import get_last_address_from_wallet, init_wallet
logger = logging.getLogger("frostfs.testlib.utils") logger = logging.getLogger("frostfs.testlib.utils")
@ -23,13 +21,9 @@ class WalletInfo:
@staticmethod @staticmethod
def from_node(node: NodeBase): def from_node(node: NodeBase):
wallet_path = node.get_wallet_path() return WalletInfo(
wallet_password = node.get_wallet_password() node.get_wallet_path(), node.get_wallet_password(), node.get_wallet_config_path()
wallet_config_file = os.path.join(ASSETS_DIR, os.path.basename(node.get_wallet_config_path())) )
with open(wallet_config_file, "w") as file:
file.write(yaml.dump({"wallet": wallet_path, "password": wallet_password}))
return WalletInfo(wallet_path, wallet_password, wallet_config_file)
def get_address(self) -> str: def get_address(self) -> str:
""" """
@ -53,17 +47,22 @@ class WalletInfo:
""" """
with open(self.path, "r") as wallet: with open(self.path, "r") as wallet:
wallet_json = json.load(wallet) wallet_json = json.load(wallet)
assert abs(account_id) + 1 <= len(wallet_json["accounts"]), f"There is no index '{account_id}' in wallet: {wallet_json}" assert abs(account_id) + 1 <= len(
wallet_json["accounts"]
), f"There is no index '{account_id}' in wallet: {wallet_json}"
return wallet_json["accounts"][account_id]["address"] return wallet_json["accounts"][account_id]["address"]
class WalletFactory: class WalletFactory:
def __init__(self, wallets_dir: str, shell: Shell) -> None: def __init__(self, wallets_dir: str, shell: Shell, cluster: Cluster) -> None:
self.shell = shell self.shell = shell
self.wallets_dir = wallets_dir self.wallets_dir = wallets_dir
self.cluster = cluster
def create_wallet(self, file_name: str, password: Optional[str] = None) -> WalletInfo: def create_wallet(
self, file_name: Optional[str] = None, password: Optional[str] = None
) -> WalletInfo:
""" """
Creates new default wallet. Creates new default wallet.
@ -75,6 +74,8 @@ class WalletFactory:
WalletInfo object of new wallet. WalletInfo object of new wallet.
""" """
if file_name is None:
file_name = str(uuid.uuid4())
if password is None: if password is None:
password = "" password = ""
@ -84,8 +85,6 @@ class WalletFactory:
init_wallet(wallet_path, password) init_wallet(wallet_path, password)
with open(wallet_config_path, "w") as config_file: with open(wallet_config_path, "w") as config_file:
config_file.write(f'wallet: {wallet_path}\npassword: "{password}"') config_file.write(f'password: "{password}"')
reporter.attach(wallet_path, os.path.basename(wallet_path))
return WalletInfo(wallet_path, password, wallet_config_path) return WalletInfo(wallet_path, password, wallet_config_path)
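A sketch of the newer factory on the left, which takes no Cluster and requires an explicit file name (directory and shell are assumptions):

    factory = WalletFactory(wallets_dir="/tmp/wallets", shell=shell)
    wallet = factory.create_wallet(file_name="user1", password="")
    wallet.get_address()  # address of the new wallet's account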

View file

@ -1,7 +1,7 @@
import time import time
from typing import Optional from typing import Optional
from frostfs_testlib import reporter from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.shell import Shell from frostfs_testlib.shell import Shell
from frostfs_testlib.steps import epoch from frostfs_testlib.steps import epoch
@ -9,13 +9,15 @@ from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
from frostfs_testlib.utils import datetime_utils from frostfs_testlib.utils import datetime_utils
reporter = get_reporter()
# To skip adding every mandatory singleton dependency to EACH test function # To skip adding every mandatory singleton dependency to EACH test function
class ClusterTestBase: class ClusterTestBase:
shell: Shell shell: Shell
cluster: Cluster cluster: Cluster
@reporter.step("Tick {epochs_to_tick} epochs, wait {wait_block} block") @reporter.step_deco("Tick {epochs_to_tick} epochs, wait {wait_block} block")
def tick_epochs( def tick_epochs(
self, self,
epochs_to_tick: int, epochs_to_tick: int,

View file

@ -42,7 +42,7 @@ def parallel(
exceptions = [future.exception() for future in futures if future.exception()] exceptions = [future.exception() for future in futures if future.exception()]
if exceptions: if exceptions:
message = "\n".join([str(e) for e in exceptions]) message = "\n".join([str(e) for e in exceptions])
raise RuntimeError(f"The following exceptions occured during parallel run:\n{message}") raise RuntimeError(f"The following exceptions occured during parallel run:\n {message}")
return futures return futures
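Behaviour sketch for the aggregation above (the callables are placeholders):

    futures = parallel([check_node_a, check_node_b])
    # if any callable raised, parallel() gathers every exception and re-raises
    # them as a single RuntimeError whose message joins the individual ones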

View file

@ -2,13 +2,13 @@ import inspect
import logging import logging
from functools import wraps from functools import wraps
from time import sleep, time from time import sleep, time
from typing import Any from typing import Any, Callable
from _pytest.outcomes import Failed from _pytest.outcomes import Failed
from allure_commons.utils import func_parameters, represent
from pytest import fail from pytest import fail
from frostfs_testlib import reporter from frostfs_testlib import reporter
from frostfs_testlib.utils.func_utils import format_by_args
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
@ -53,7 +53,7 @@ class expect_not_raises:
return impl return impl
def retry(max_attempts: int, sleep_interval: int = 1, expected_result: Any = None, title: str = None): def retry(max_attempts: int, sleep_interval: int = 1, expected_result: Any = None):
""" """
Decorator to wait for some conditions/functions to pass successfully. Decorator to wait for some conditions/functions to pass successfully.
This is useful if you don't know exact time when something should pass successfully and do not This is useful if you don't know exact time when something should pass successfully and do not
@ -65,7 +65,8 @@ def retry(max_attempts: int, sleep_interval: int = 1, expected_result: Any = Non
assert max_attempts >= 1, "Cannot apply retry decorator with max_attempts < 1" assert max_attempts >= 1, "Cannot apply retry decorator with max_attempts < 1"
def wrapper(func): def wrapper(func):
def call(func, *a, **kw): @wraps(func)
def impl(*a, **kw):
last_exception = None last_exception = None
for _ in range(max_attempts): for _ in range(max_attempts):
try: try:
@ -86,14 +87,6 @@ def retry(max_attempts: int, sleep_interval: int = 1, expected_result: Any = Non
if last_exception is not None: if last_exception is not None:
raise last_exception raise last_exception
@wraps(func)
def impl(*a, **kw):
if title is not None:
with reporter.step(format_by_args(func, title, *a, **kw)):
return call(func, *a, **kw)
return call(func, *a, **kw)
return impl return impl
return wrapper return wrapper
@ -173,7 +166,7 @@ def wait_for_success(
@wraps(func) @wraps(func)
def impl(*a, **kw): def impl(*a, **kw):
if title is not None: if title is not None:
with reporter.step(format_by_args(func, title, *a, **kw)): with reporter.step(_format_title(func, title, *a, **kw)):
return call(func, *a, **kw) return call(func, *a, **kw)
return call(func, *a, **kw) return call(func, *a, **kw)
@ -181,3 +174,10 @@ def wait_for_success(
return impl return impl
return wrapper return wrapper
def _format_title(__func: Callable, __title: str, *a, **kw) -> str:
params = func_parameters(__func, *a, **kw)
args = list(map(lambda x: represent(x), a))
return __title.format(*args, **params)
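A usage sketch for the newer retry signature on the left, where title becomes a reporter step (ping_ok is a placeholder helper):

    @retry(max_attempts=5, sleep_interval=2, expected_result=True, title="Ping {node}")
    def node_is_reachable(node):
        return ping_ok(node)  # returns bool; retried until True or attempts run out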

View file

@ -1,7 +1,3 @@
"""
Idea of utils is to have small utility functions which are not dependent on anything.
"""
import frostfs_testlib.utils.converting_utils import frostfs_testlib.utils.converting_utils
import frostfs_testlib.utils.datetime_utils import frostfs_testlib.utils.datetime_utils
import frostfs_testlib.utils.json_utils import frostfs_testlib.utils.json_utils

View file

@ -19,9 +19,10 @@ from typing import Dict, List, TypedDict, Union
import pexpect import pexpect
from frostfs_testlib import reporter from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetmapInfo from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetmapInfo
reporter = get_reporter()
logger = logging.getLogger("NeoLogger") logger = logging.getLogger("NeoLogger")
COLOR_GREEN = "\033[92m" COLOR_GREEN = "\033[92m"
COLOR_OFF = "\033[0m" COLOR_OFF = "\033[0m"
@ -41,7 +42,7 @@ def _run_with_passwd(cmd: str) -> str:
return cmd.decode() return cmd.decode()
def _configure_aws_cli(cmd: str, key_id: str, access_key: str, region: str, out_format: str = "json") -> str: def _configure_aws_cli(cmd: str, key_id: str, access_key: str, out_format: str = "json") -> str:
child = pexpect.spawn(cmd) child = pexpect.spawn(cmd)
child.delaybeforesend = 1 child.delaybeforesend = 1
@ -52,7 +53,7 @@ def _configure_aws_cli(cmd: str, key_id: str, access_key: str, region: str, out_
child.sendline(access_key) child.sendline(access_key)
child.expect("Default region name.*") child.expect("Default region name.*")
child.sendline("region") child.sendline("")
child.expect("Default output format.*") child.expect("Default output format.*")
child.sendline(out_format) child.sendline(out_format)
@ -64,7 +65,9 @@ def _configure_aws_cli(cmd: str, key_id: str, access_key: str, region: str, out_
return cmd.decode() return cmd.decode()
def _attach_allure_log(cmd: str, output: str, return_code: int, start_time: datetime, end_time: datetime) -> None: def _attach_allure_log(
cmd: str, output: str, return_code: int, start_time: datetime, end_time: datetime
) -> None:
command_attachment = ( command_attachment = (
f"COMMAND: '{cmd}'\n" f"COMMAND: '{cmd}'\n"
f"OUTPUT:\n {output}\n" f"OUTPUT:\n {output}\n"

View file

@@ -1,12 +1,13 @@
import logging
import re

-from frostfs_testlib import reporter
+from frostfs_testlib.reporter import get_reporter

+reporter = get_reporter()
logger = logging.getLogger("NeoLogger")


-@reporter.step("Read environment.properties")
+@reporter.step_deco("Read environment.properties")
def read_env_properties(file_path: str) -> dict:
    with open(file_path, "r") as file:
        raw_content = file.read()

@@ -22,7 +23,7 @@ def read_env_properties(file_path: str) -> dict:
    return env_properties


-@reporter.step("Update data in environment.properties")
+@reporter.step_deco("Update data in environment.properties")
def save_env_properties(file_path: str, env_data: dict) -> None:
    with open(file_path, "a+") as env_file:
        for env, env_value in env_data.items():
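The environment.properties format handled by these helpers is plain `key=value` lines; a minimal sketch of the parsing they rely on (the regex is an assumption, not the library's exact pattern):

import re


def parse_properties(raw_content: str) -> dict:
    env_properties = {}
    for line in raw_content.split("\n"):
        # Expect "key=value"; skip anything else (comments, blank lines).
        match = re.match(r"^\s*([^#=\s]+)\s*=\s*(.*)$", line)
        if match:
            env_properties[match.group(1)] = match.group(2)
    return env_properties


assert parse_properties("ENV=prod\n# comment\n")["ENV"] == "prod"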

View file

@@ -3,7 +3,7 @@ from dataclasses import dataclass
from time import sleep
from typing import Optional

-from frostfs_testlib import reporter
+from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.common import SERVICE_MAX_STARTUP_TIME
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.object import neo_go_dump_keys

@@ -11,14 +11,15 @@ from frostfs_testlib.steps.node_management import storage_node_healthcheck
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
from frostfs_testlib.storage.cluster import Cluster, ClusterNode, NodeBase, StorageNode
from frostfs_testlib.storage.dataclasses.frostfs_services import MorphChain
-from frostfs_testlib.storage.dataclasses.node_base import ServiceClass
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils.datetime_utils import parse_time

+reporter = get_reporter()
logger = logging.getLogger("NeoLogger")


-@reporter.step("Check and return status of given service")
+@reporter.step_deco("Check and return status of given service")
def service_status(service: str, shell: Shell) -> str:
    return shell.exec(f"sudo systemctl is-active {service}").stdout.rstrip()

@@ -71,14 +72,14 @@ class TopCommand:
    )


-@reporter.step("Run `top` command with specified PID")
+@reporter.step_deco("Run `top` command with specified PID")
def service_status_top(service: str, shell: Shell) -> TopCommand:
    pid = service_pid(service, shell)
    output = shell.exec(f"sudo top -b -n 1 -p {pid}").stdout
    return TopCommand.from_stdout(output, pid)


-@reporter.step("Restart service n times with sleep")
+@reporter.step_deco("Restart service n times with sleep")
def multiple_restart(
    service_type: type[NodeBase],
    node: ClusterNode,

@@ -93,12 +94,13 @@ def multiple_restart(
        sleep(sleep_interval)


-@wait_for_success(60, 5, title="Wait for services become {expected_status} on node {cluster_node}")
-def check_services_status(cluster_node: ClusterNode, service_list: list[ServiceClass], expected_status: str):
+@reporter.step_deco("Get status of list of services and check expected status")
+@wait_for_success(60, 5)
+def check_services_status(service_list: list[str], expected_status: str, shell: Shell):
    cmd = ""
    for service in service_list:
-        cmd += f' sudo systemctl status {service.get_service_systemctl_name()} --lines=0 | grep "Active:";'
-    result = cluster_node.host.get_shell().exec(cmd).stdout.rstrip()
+        cmd += f' sudo systemctl status {service} --lines=0 | grep "Active:";'
+    result = shell.exec(cmd).stdout.rstrip()
    statuses = list()
    for line in result.split("\n"):
        status_substring = line.split()

@@ -109,7 +111,8 @@ def check_services_status(cluster_node: ClusterNode, service_list: list[ServiceC
    ), f"Requested status={expected_status} not found in requested services={service_list}, list of statuses={result}"
-@wait_for_success(60, 5, title="Wait for {service} become active")
+@reporter.step_deco("Wait for active status of passed service")
+@wait_for_success(60, 5)
def wait_service_in_desired_state(service: str, shell: Shell, expected_status: Optional[str] = "active"):
    real_status = service_status(service=service, shell=shell)
    assert (

@@ -117,7 +120,8 @@ def wait_service_in_desired_state(service: str, shell: Shell, expected_status: O
    ), f"Service {service}: expected status= {expected_status}, real status {real_status}"


-@wait_for_success(parse_time(SERVICE_MAX_STARTUP_TIME), 1, title="Wait for {service_type} passes healtcheck on {node}")
+@reporter.step_deco("Run healthcheck against passed service")
+@wait_for_success(parse_time(SERVICE_MAX_STARTUP_TIME), 1)
def service_type_healthcheck(
    service_type: type[NodeBase],
    node: ClusterNode,

@@ -128,25 +132,26 @@ def service_type_healthcheck(
    ), f"Healthcheck failed for {service.get_service_systemctl_name()}, IP={node.host_ip}"


-@reporter.step("Kill by process name")
+@reporter.step_deco("Kill by process name")
def kill_by_service_name(service_type: type[NodeBase], node: ClusterNode):
    service_systemctl_name = node.service(service_type).get_service_systemctl_name()
    pid = service_pid(service_systemctl_name, node.host.get_shell())
    node.host.get_shell().exec(f"sudo kill -9 {pid}")


-@reporter.step("Suspend {service}")
+@reporter.step_deco("Service {service} suspend")
def suspend_service(shell: Shell, service: str):
    shell.exec(f"sudo kill -STOP {service_pid(service, shell)}")


-@reporter.step("Resume {service}")
+@reporter.step_deco("Service {service} resume")
def resume_service(shell: Shell, service: str):
    shell.exec(f"sudo kill -CONT {service_pid(service, shell)}")


+@reporter.step_deco("Retrieve service's pid")
# retry mechanism cause when the task has been started recently '0' PID could be returned
-@wait_for_success(10, 1, title="Get {service} pid")
+@wait_for_success(10, 1)
def service_pid(service: str, shell: Shell) -> int:
    output = shell.exec(f"systemctl show --property MainPID {service}").stdout.rstrip()
    splitted = output.split("=")

@@ -155,7 +160,7 @@ def service_pid(service: str, shell: Shell) -> int:
    return PID
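`service_pid` parses `systemctl show --property MainPID <unit>` output of the form `MainPID=1234` and is wrapped in `wait_for_success` because a freshly started unit can briefly report PID 0. A sketch of the parse-and-check step (names illustrative):

def parse_main_pid(output: str) -> int:
    # "MainPID=1234" -> 1234; raise so a retry wrapper can try again on 0.
    key, _, value = output.strip().partition("=")
    pid = int(value) if key == "MainPID" and value.isdigit() else 0
    if pid == 0:
        raise RuntimeError("Service reported MainPID=0, not started yet")
    return pid


assert parse_main_pid("MainPID=1234\n") == 1234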
@reporter.step("Wrapper for neo-go dump keys command") @reporter.step_deco("Wrapper for neo-go dump keys command")
def dump_keys(shell: Shell, node: ClusterNode) -> dict: def dump_keys(shell: Shell, node: ClusterNode) -> dict:
host = node.host host = node.host
service_config = host.get_service_config(node.service(MorphChain).name) service_config = host.get_service_config(node.service(MorphChain).name)
@ -163,7 +168,7 @@ def dump_keys(shell: Shell, node: ClusterNode) -> dict:
return neo_go_dump_keys(shell=shell, wallet=wallet) return neo_go_dump_keys(shell=shell, wallet=wallet)
@reporter.step("Wait for object replication") @reporter.step_deco("Wait for object replication")
def wait_object_replication( def wait_object_replication(
cid: str, cid: str,
oid: str, oid: str,

View file

@@ -1,15 +1,17 @@
from concurrent.futures import ThreadPoolExecutor

-from frostfs_testlib import reporter
+from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.storage.dataclasses.node_base import NodeBase

+reporter = get_reporter()

class FileKeeper:
    """This class is responsible to make backup copy of modified file and restore when required (mostly after the test)"""

    files_to_restore: dict[NodeBase, list[str]] = {}

-    @reporter.step("Adding {file_to_restore} from node {node} to restore list")
+    @reporter.step_deco("Adding {file_to_restore} from node {node} to restore list")
    def add(self, node: NodeBase, file_to_restore: str):
        if node in self.files_to_restore and file_to_restore in self.files_to_restore[node]:
            # Already added

@@ -24,7 +26,7 @@ class FileKeeper:
        shell = node.host.get_shell()
        shell.exec(f"cp {file_to_restore} {file_to_restore}.bak")

-    @reporter.step("Restore files")
+    @reporter.step_deco("Restore files")
    def restore_files(self):
        nodes = self.files_to_restore.keys()
        if not nodes:

@@ -39,7 +41,7 @@ class FileKeeper:
            # Iterate through results for exception check if any
            pass

-    @reporter.step("Restore files on node {node}")
+    @reporter.step_deco("Restore files on node {node}")
    def _restore_files_on_node(self, node: NodeBase):
        shell = node.host.get_shell()
        for file_to_restore in self.files_to_restore[node]:

View file

@@ -4,9 +4,10 @@ import os
import uuid
from typing import Any, Optional

-from frostfs_testlib import reporter
+from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.common import ASSETS_DIR

+reporter = get_reporter()
logger = logging.getLogger("NeoLogger")

@@ -60,7 +61,7 @@ def generate_file_with_content(
    return file_path


-@reporter.step("Get File Hash")
+@reporter.step_deco("Get File Hash")
def get_file_hash(file_path: str, len: Optional[int] = None, offset: Optional[int] = None) -> str:
    """Generates hash for the specified file.

@@ -87,7 +88,7 @@ def get_file_hash(file_path: str, len: Optional[int] = None, offset: Optional[in
    return file_hash.hexdigest()
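`get_file_hash` supports hashing a byte range of a file; a hedged sketch of how offset/length hashing is typically done (sha256 and single-read behavior are assumptions, the helper's internals are not fully shown here):

import hashlib
from typing import Optional


def file_hash(path: str, length: Optional[int] = None, offset: Optional[int] = None) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        if offset:
            f.seek(offset)
        # Read only the requested window when a length is given.
        data = f.read(length) if length else f.read()
        digest.update(data)
    return digest.hexdigest()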
@reporter.step("Concatenation set of files to one file") @reporter.step_deco("Concatenation set of files to one file")
def concat_files(file_paths: list, resulting_file_path: Optional[str] = None) -> str: def concat_files(file_paths: list, resulting_file_path: Optional[str] = None) -> str:
"""Concatenates several files into a single file. """Concatenates several files into a single file.

View file

@@ -1,58 +0,0 @@
-import collections
-import inspect
-import sys
-from typing import Callable
-
-
-def format_by_args(__func: Callable, __title: str, *a, **kw) -> str:
-    params = _func_parameters(__func, *a, **kw)
-    args = list(map(lambda x: _represent(x), a))
-
-    return __title.format(*args, **params)
-
-
-# These 2 functions are copied from allure_commons._allure
-# Duplicate it here in order to be independent of allure and make some adjustments.
-def _represent(item):
-    if isinstance(item, str):
-        return item
-    elif isinstance(item, (bytes, bytearray)):
-        return repr(type(item))
-    else:
-        return repr(item)
-
-
-def _func_parameters(func, *args, **kwargs):
-    parameters = {}
-    arg_spec = inspect.getfullargspec(func)
-    arg_order = list(arg_spec.args)
-    args_dict = dict(zip(arg_spec.args, args))
-
-    if arg_spec.defaults:
-        kwargs_defaults_dict = dict(zip(arg_spec.args[-len(arg_spec.defaults) :], arg_spec.defaults))
-        parameters.update(kwargs_defaults_dict)
-
-    if arg_spec.varargs:
-        arg_order.append(arg_spec.varargs)
-        varargs = args[len(arg_spec.args) :]
-        parameters.update({arg_spec.varargs: varargs} if varargs else {})
-
-    if arg_spec.args and arg_spec.args[0] in ["cls", "self"]:
-        args_dict.pop(arg_spec.args[0], None)
-
-    if kwargs:
-        if sys.version_info < (3, 7):
-            # Sort alphabetically as old python versions does
-            # not preserve call order for kwargs.
-            arg_order.extend(sorted(list(kwargs.keys())))
-        else:
-            # Keep py3.7 behaviour to preserve kwargs order
-            arg_order.extend(list(kwargs.keys()))
-        parameters.update(kwargs)
-
-    parameters.update(args_dict)
-
-    items = parameters.items()
-    sorted_items = sorted(map(lambda kv: (kv[0], _represent(kv[1])), items), key=lambda x: arg_order.index(x[0]))
-
-    return collections.OrderedDict(sorted_items)
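For reference, here is what the deleted helper computed for a typical call, were the module still in place (the sample function is illustrative):

def put_object(wallet, cid, oid="latest"): ...


# _represent returns strings as-is, so the values stay unquoted:
# OrderedDict([('wallet', 'w.json'), ('cid', 'abc'), ('oid', 'latest')])
print(_func_parameters(put_object, "w.json", "abc"))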

View file

@@ -1,17 +1,20 @@
import logging
import re

-from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsAdm, FrostfsCli
-from frostfs_testlib.hosting import Host, Hosting
-from frostfs_testlib.resources.cli import FROSTFS_ADM_EXEC, FROSTFS_AUTHMATE_EXEC, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE
+from frostfs_testlib.hosting import Hosting
+from frostfs_testlib.resources.cli import (
+    FROSTFS_ADM_EXEC,
+    FROSTFS_AUTHMATE_EXEC,
+    FROSTFS_CLI_EXEC,
+    NEOGO_EXECUTABLE,
+)
+from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
from frostfs_testlib.shell import Shell
-from frostfs_testlib.testing.parallel import parallel

logger = logging.getLogger("NeoLogger")


-@reporter.step("Get local binaries versions")
def get_local_binaries_versions(shell: Shell) -> dict[str, str]:
    versions = {}

@@ -19,7 +22,7 @@ def get_local_binaries_versions(shell: Shell) -> dict[str, str]:
        out = shell.exec(f"{binary} --version").stdout
        versions[binary] = _parse_version(out)

-    frostfs_cli = FrostfsCli(shell, FROSTFS_CLI_EXEC)
+    frostfs_cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
    versions[FROSTFS_CLI_EXEC] = _parse_version(frostfs_cli.version.get().stdout)

    try:

@@ -31,85 +34,46 @@ def get_local_binaries_versions(shell: Shell) -> dict[str, str]:
    out = shell.exec("aws --version").stdout
    out_lines = out.split("\n")
    versions["AWS"] = out_lines[0] if out_lines else "Unknown"
-    logger.info(f"Local binaries version: {out_lines[0]}")

    return versions


-def parallel_binary_verions(host: Host) -> dict[str, str]:
-    versions_by_host = {}
-
-    binary_path_by_name = {}  # Maps binary name to executable path
-    for service_config in host.config.services:
-        exec_path = service_config.attributes.get("exec_path")
-        requires_check = service_config.attributes.get("requires_version_check", "true")
-        if exec_path:
-            binary_path_by_name[service_config.name] = {
-                "exec_path": exec_path,
-                "check": requires_check.lower() == "true",
-            }
-    for cli_config in host.config.clis:
-        requires_check = cli_config.attributes.get("requires_version_check", "true")
-        binary_path_by_name[cli_config.name] = {
-            "exec_path": cli_config.exec_path,
-            "check": requires_check.lower() == "true",
-        }
-
-    shell = host.get_shell()
-    versions_at_host = {}
-    for binary_name, binary in binary_path_by_name.items():
-        try:
-            binary_path = binary["exec_path"]
-            result = shell.exec(f"{binary_path} --version")
-            versions_at_host[binary_name] = {"version": _parse_version(result.stdout), "check": binary["check"]}
-        except Exception as exc:
-            logger.error(f"Cannot get version for {binary_path} because of\n{exc}")
-            versions_at_host[binary_name] = {"version": "Unknown", "check": binary["check"]}
-    versions_by_host[host.config.address] = versions_at_host
-    return versions_by_host
-
-
-@reporter.step("Get remote binaries versions")
def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]:
    versions_by_host = {}
-    future_binary_verions = parallel(parallel_binary_verions, parallel_items=hosting.hosts)
-    for future in future_binary_verions:
-        versions_by_host.update(future.result())
+    for host in hosting.hosts:
+        binary_path_by_name = {}  # Maps binary name to executable path
+        for service_config in host.config.services:
+            exec_path = service_config.attributes.get("exec_path")
+            if exec_path:
+                binary_path_by_name[service_config.name] = exec_path
+        for cli_config in host.config.clis:
+            binary_path_by_name[cli_config.name] = cli_config.exec_path
+
+        shell = host.get_shell()
+        versions_at_host = {}
+        for binary_name, binary_path in binary_path_by_name.items():
+            try:
+                result = shell.exec(f"{binary_path} --version")
+                versions_at_host[binary_name] = _parse_version(result.stdout)
+            except Exception as exc:
+                logger.error(f"Cannot get version for {binary_path} because of\n{exc}")
+                versions_at_host[binary_name] = "Unknown"
+        versions_by_host[host.config.address] = versions_at_host

    # Consolidate versions across all hosts
-    cheak_versions = {}
-    exсeptions = []
-    exception = set()
-    previous_host = None
    versions = {}
-    captured_version = None
    for host, binary_versions in versions_by_host.items():
-        for name, binary in binary_versions.items():
-            version = binary["version"]
-            if not cheak_versions.get(f"{name[:-2]}", None):
-                captured_version = cheak_versions.get(f"{name[:-2]}", {}).get(host, {}).get(captured_version)
-                cheak_versions[f"{name[:-2]}"] = {host: {version: name}}
-            else:
-                captured_version = list(cheak_versions.get(f"{name[:-2]}", {}).get(previous_host).keys())[0]
-                cheak_versions[f"{name[:-2]}"].update({host: {version: name}})
-
-            if captured_version and captured_version != version:
-                exception.add(name[:-2])
-
-            versions[name] = {"version": version, "check": binary["check"]}
-        previous_host = host
-    logger.info(
-        "Remote binaries versions:\n" + "\n".join([f"{key} ver: {value['version']}" for key, value in versions.items()])
-    )
-    if exception:
-        for i in exception:
-            for host in versions_by_host.keys():
-                for version, name in cheak_versions.get(i).get(host).items():
-                    exсeptions.append(f"Binary {name} has inconsistent version {version} on host {host}")
-        exсeptions.append("\n")
-    return versions, exсeptions
+        for name, version in binary_versions.items():
+            captured_version = versions.get(name)
+            if captured_version:
+                assert (
+                    captured_version == version
+                ), f"Binary {name} has inconsistent version on host {host}"
+            else:
+                versions[name] = version
+    return versions


def _parse_version(version_output: str) -> str:
    version = re.search(r"version[:\s]*v?(.+)", version_output, re.IGNORECASE)
-    return version.group(1).strip() if version else version_output
+    return version.group(1).strip() if version else "Unknown"
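The `_parse_version` regex accepts outputs like `Version: v0.37.0` or `frostfs-cli version 1.3.0`; a quick self-contained check of the pattern:

import re


def parse_version(version_output: str) -> str:
    version = re.search(r"version[:\s]*v?(.+)", version_output, re.IGNORECASE)
    return version.group(1).strip() if version else "Unknown"


assert parse_version("FrostFS CLI\nVersion: v0.37.0") == "0.37.0"
assert parse_version("no match here") == "Unknown"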

View file

@@ -9,16 +9,6 @@ from neo3.wallet import wallet as neo3_wallet
logger = logging.getLogger("frostfs.testlib.utils")


-def __fix_wallet_schema(wallet: dict) -> None:
-    # Temporary function to fix wallets that do not conform to the schema
-    # TODO: get rid of it once issue is solved
-    if "name" not in wallet:
-        wallet["name"] = None
-    for account in wallet["accounts"]:
-        if "extra" not in account:
-            account["extra"] = None
-
-
def init_wallet(wallet_path: str, wallet_password: str):
    """
    Create new wallet and new account.

@@ -43,15 +33,29 @@ def get_last_address_from_wallet(wallet_path: str, wallet_password: str):
    Returns:
        The address for the wallet.
    """
-    wallet = load_wallet(wallet_path, wallet_password)
+    with open(wallet_path) as wallet_file:
+        wallet = neo3_wallet.Wallet.from_json(json.load(wallet_file), password=wallet_password)
    address = wallet.accounts[-1].address
    logger.info(f"got address: {address}")
    return address


def get_wallet_public_key(wallet_path: str, wallet_password: str, format: str = "hex") -> str:
-    wallet = load_wallet(wallet_path, wallet_password)
-    public_key_hex = str(wallet.accounts[0].public_key)
+    def __fix_wallet_schema(wallet: dict) -> None:
+        # Temporary function to fix wallets that do not conform to the schema
+        # TODO: get rid of it once issue is solved
+        if "name" not in wallet:
+            wallet["name"] = None
+        for account in wallet["accounts"]:
+            if "extra" not in account:
+                account["extra"] = None
+
+    # Get public key from wallet file
+    with open(wallet_path, "r") as file:
+        wallet_content = json.load(file)
+    __fix_wallet_schema(wallet_content)
+
+    wallet_from_json = neo3_wallet.Wallet.from_json(wallet_content, password=wallet_password)
+    public_key_hex = str(wallet_from_json.accounts[0].public_key)

    # Convert public key to specified format
    if format == "hex":

@@ -65,9 +69,7 @@ def get_wallet_public_key(wallet_path: str, wallet_password: str, format: str =
    raise ValueError(f"Invalid public key format: {format}")


-def load_wallet(wallet_path: str, wallet_password: str) -> neo3_wallet.Wallet:
-    with open(wallet_path) as wallet_file:
-        wallet_content = json.load(wallet_file)
-
-    __fix_wallet_schema(wallet_content)
-
-    return neo3_wallet.Wallet.from_json(wallet_content, password=wallet_password)
+def load_wallet(path: str, passwd: str = "") -> neo3_wallet.Wallet:
+    with open(path, "r") as wallet_file:
+        wlt_data = wallet_file.read()
+    return neo3_wallet.Wallet.from_json(json.loads(wlt_data), password=passwd)
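A short usage sketch of the two wallet helpers side by side, assuming a wallet file created by `init_wallet` (file name and password below are illustrative fixtures, not values from the diff):

# Illustrative only: "wallet.json" and "pass" are made-up fixtures.
wlt = load_wallet("wallet.json", "pass")
print(wlt.accounts[-1].address)  # last account, as get_last_address_from_wallet does

print(get_wallet_public_key("wallet.json", "pass", format="hex"))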

View file

@@ -4,7 +4,13 @@ import pytest
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper
from frostfs_testlib.storage.dataclasses.acl import EACLRole
-from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode
+from frostfs_testlib.storage.dataclasses.frostfs_services import (
+    HTTPGate,
+    InnerRing,
+    MorphChain,
+    S3Gate,
+    StorageNode,
+)
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize

@@ -16,10 +22,10 @@ class TestDataclassesStr:
        [
            (Boto3ClientWrapper, "Boto3 client"),
            (AwsCliClient, "AWS CLI"),
-            (ObjectSize("simple", 1), "simple"),
-            (ObjectSize("simple", 10), "simple"),
-            (ObjectSize("complex", 5000), "complex"),
-            (ObjectSize("complex", 5555), "complex"),
+            (ObjectSize("simple", 1), "simple object size"),
+            (ObjectSize("simple", 10), "simple object size"),
+            (ObjectSize("complex", 5000), "complex object size"),
+            (ObjectSize("complex", 5555), "complex object size"),
            (StorageNode, "StorageNode"),
            (MorphChain, "MorphChain"),
            (S3Gate, "S3Gate"),

View file

@@ -15,7 +15,6 @@ class TestHosting(TestCase):
    HOST1 = {
        "address": HOST1_ADDRESS,
        "plugin_name": HOST1_PLUGIN,
-        "healthcheck_plugin_name": "basic",
        "attributes": HOST1_ATTRIBUTES,
        "clis": HOST1_CLIS,
        "services": HOST1_SERVICES,

@@ -33,7 +32,6 @@ class TestHosting(TestCase):
    HOST2 = {
        "address": HOST2_ADDRESS,
        "plugin_name": HOST2_PLUGIN,
-        "healthcheck_plugin_name": "basic",
        "attributes": HOST2_ATTRIBUTES,
        "clis": HOST2_CLIS,
        "services": HOST2_SERVICES,

@@ -54,14 +52,18 @@ class TestHosting(TestCase):
        self.assertEqual(host1.config.plugin_name, self.HOST1_PLUGIN)
        self.assertDictEqual(host1.config.attributes, self.HOST1_ATTRIBUTES)
        self.assertListEqual(host1.config.clis, [CLIConfig(**cli) for cli in self.HOST1_CLIS])
-        self.assertListEqual(host1.config.services, [ServiceConfig(**service) for service in self.HOST1_SERVICES])
+        self.assertListEqual(
+            host1.config.services, [ServiceConfig(**service) for service in self.HOST1_SERVICES]
+        )

        host2 = hosting.get_host_by_address(self.HOST2_ADDRESS)
        self.assertEqual(host2.config.address, self.HOST2_ADDRESS)
        self.assertEqual(host2.config.plugin_name, self.HOST2_PLUGIN)
        self.assertDictEqual(host2.config.attributes, self.HOST2_ATTRIBUTES)
        self.assertListEqual(host2.config.clis, [CLIConfig(**cli) for cli in self.HOST2_CLIS])
-        self.assertListEqual(host2.config.services, [ServiceConfig(**service) for service in self.HOST2_SERVICES])
+        self.assertListEqual(
+            host2.config.services, [ServiceConfig(**service) for service in self.HOST2_SERVICES]
+        )

    def test_get_host_by_service(self):
        hosting = Hosting()

@@ -102,7 +104,9 @@ class TestHosting(TestCase):
        services = hosting.find_service_configs(rf"^{self.SERVICE_NAME_PREFIX}")
        self.assertEqual(len(services), 2)
        for service in services:
-            self.assertEqual(service.name[: len(self.SERVICE_NAME_PREFIX)], self.SERVICE_NAME_PREFIX)
+            self.assertEqual(
+                service.name[: len(self.SERVICE_NAME_PREFIX)], self.SERVICE_NAME_PREFIX
+            )

        service1 = hosting.find_service_configs(self.SERVICE1["name"])
        self.assertEqual(len(service1), 1)

View file

@@ -12,7 +12,6 @@ from frostfs_testlib.load.load_config import (
    ReadFrom,
)
from frostfs_testlib.load.runners import DefaultRunner
-from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.controllers.background_load_controller import BackgroundLoadController
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode

@@ -54,25 +53,6 @@ class TestLoadConfig:
        assert repr(load_params) == expected
        assert f"{load_params}" == expected

-    def test_load_params_init_time(self):
-        load_params = LoadParams(load_type=LoadType.S3)
-        vus = 100
-
-        load_params.vu_init_time = BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME
-        # Used in time calculations
-        load_params.readers = vus
-        load_params.writers = vus
-        load_params.preallocated_readers = vus
-        load_params.preallocated_writers = vus
-
-        # Not used in time calculations
-        load_params.deleters = vus
-        load_params.preallocated_deleters = vus
-
-        expected = vus * 4 * BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME
-        actual = load_params.get_init_time()
-
-        assert actual == expected, "Incorrect time for get_init_time()"
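The removed test pins down how `get_init_time` is expected to behave: only readers, writers, and their preallocated counterparts contribute, each multiplied by the per-VU init time, while deleters are excluded. A sketch of that arithmetic (the formula is inferred from the test, not taken from the implementation):

def get_init_time(readers: int, writers: int, preallocated_readers: int,
                  preallocated_writers: int, vu_init_time: float) -> float:
    # Deleters are intentionally excluded, as the removed test asserts.
    return (readers + writers + preallocated_readers + preallocated_writers) * vu_init_time


assert get_init_time(100, 100, 100, 100, vu_init_time=0.8) == 100 * 4 * 0.8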
    def test_load_params_initially_have_all_values_none(self):
        load_params = LoadParams(load_type=LoadType.S3)
        self._check_all_values_none(load_params, ["load_type", "scenario"])

@@ -136,15 +116,11 @@ class TestLoadConfig:
    def test_argument_parsing_for_grpc_scenario(self, load_params: LoadParams):
        expected_preset_args = [
            "--size '11'",
-            "--acl 'acl'",
            "--preload_obj '13'",
            "--out 'pregen_json'",
            "--workers '7'",
            "--containers '16'",
            "--policy 'container_placement_policy'",
-            "--ignore-errors",
-            "--sleep '19'",
-            "--local",
        ]
        expected_env_vars = {
            "DURATION": 9,

@@ -155,9 +131,6 @@ class TestLoadConfig:
            "WRITERS": 7,
            "READERS": 7,
            "DELETERS": 8,
-            "READ_AGE": 8,
-            "STREAMING": 9,
-            "K6_OUT": "output",
            "PREGEN_JSON": "pregen_json",
            "PREPARE_LOCALLY": True,
        }

@@ -174,15 +147,10 @@ class TestLoadConfig:
            "--workers '7'",
            "--containers '16'",
            "--policy 'container_placement_policy'",
-            "--ignore-errors",
-            "--sleep '19'",
-            "--local",
-            "--acl 'acl'",
        ]
        expected_env_vars = {
            "DURATION": 9,
            "WRITE_OBJ_SIZE": 11,
-            "K6_OUT": "output",
            "REGISTRY_FILE": "registry_file",
            "K6_MIN_ITERATION_DURATION": "min_iteration_duration",
            "K6_SETUP_TIMEOUT": "setup_timeout",

@@ -196,9 +164,7 @@ class TestLoadConfig:
            "TIME_UNIT": "time_unit",
            "WRITE_RATE": 10,
            "READ_RATE": 9,
-            "READ_AGE": 8,
            "DELETE_RATE": 11,
-            "STREAMING": 9,
            "PREPARE_LOCALLY": True,
        }

@@ -215,22 +181,16 @@ class TestLoadConfig:
            "--workers '7'",
            "--buckets '13'",
            "--location 's3_location'",
-            "--ignore-errors",
-            "--sleep '19'",
-            "--acl 'acl'",
        ]
        expected_env_vars = {
            "DURATION": 9,
            "WRITE_OBJ_SIZE": 11,
            "REGISTRY_FILE": "registry_file",
-            "K6_OUT": "output",
            "K6_MIN_ITERATION_DURATION": "min_iteration_duration",
            "K6_SETUP_TIMEOUT": "setup_timeout",
            "WRITERS": 7,
            "READERS": 7,
            "DELETERS": 8,
-            "READ_AGE": 8,
-            "STREAMING": 9,
            "NO_VERIFY_SSL": True,
            "PREGEN_JSON": "pregen_json",
        }

@@ -238,47 +198,6 @@ class TestLoadConfig:
        self._check_preset_params(load_params, expected_preset_args)
        self._check_env_vars(load_params, expected_env_vars)

-    @pytest.mark.parametrize("load_params", [LoadScenario.S3_CAR], indirect=True)
-    def test_argument_parsing_for_s3_car_scenario_with_stringed_time(self, load_params: LoadParams):
-        load_params.load_time = "2d3h5min"
-        expected_preset_args = [
-            "--size '11'",
-            "--preload_obj '13'",
-            "--no-verify-ssl",
-            "--out 'pregen_json'",
-            "--workers '7'",
-            "--buckets '13'",
-            "--location 's3_location'",
-            "--ignore-errors",
-            "--sleep '19'",
-            "--acl 'acl'",
-        ]
-        expected_env_vars = {
-            "DURATION": 183900,
-            "WRITE_OBJ_SIZE": 11,
-            "REGISTRY_FILE": "registry_file",
-            "K6_OUT": "output",
-            "K6_MIN_ITERATION_DURATION": "min_iteration_duration",
-            "K6_SETUP_TIMEOUT": "setup_timeout",
-            "NO_VERIFY_SSL": True,
-            "MAX_WRITERS": 11,
-            "MAX_READERS": 11,
-            "MAX_DELETERS": 12,
-            "PRE_ALLOC_DELETERS": 21,
-            "PRE_ALLOC_READERS": 20,
-            "PRE_ALLOC_WRITERS": 20,
-            "PREGEN_JSON": "pregen_json",
-            "TIME_UNIT": "time_unit",
-            "WRITE_RATE": 10,
-            "READ_RATE": 9,
-            "READ_AGE": 8,
-            "STREAMING": 9,
-            "DELETE_RATE": 11,
-        }
-
-        self._check_preset_params(load_params, expected_preset_args)
-        self._check_env_vars(load_params, expected_env_vars)
-
    @pytest.mark.parametrize("load_params", [LoadScenario.S3_CAR], indirect=True)
    def test_argument_parsing_for_s3_car_scenario(self, load_params: LoadParams):
        expected_preset_args = [

@@ -289,15 +208,11 @@ class TestLoadConfig:
            "--workers '7'",
            "--buckets '13'",
            "--location 's3_location'",
-            "--ignore-errors",
-            "--sleep '19'",
-            "--acl 'acl'",
        ]
        expected_env_vars = {
            "DURATION": 9,
            "WRITE_OBJ_SIZE": 11,
            "REGISTRY_FILE": "registry_file",
-            "K6_OUT": "output",
            "K6_MIN_ITERATION_DURATION": "min_iteration_duration",
            "K6_SETUP_TIMEOUT": "setup_timeout",
            "NO_VERIFY_SSL": True,

@@ -311,8 +226,6 @@ class TestLoadConfig:
            "TIME_UNIT": "time_unit",
            "WRITE_RATE": 10,
            "READ_RATE": 9,
-            "READ_AGE": 8,
-            "STREAMING": 9,
            "DELETE_RATE": 11,
        }

@@ -321,7 +234,6 @@ class TestLoadConfig:
    @pytest.mark.parametrize("load_params", [LoadScenario.HTTP], indirect=True)
    def test_argument_parsing_for_http_scenario(self, load_params: LoadParams):
-        load_params.preset.local = False
        expected_preset_args = [
            "--no-verify-ssl",
            "--size '11'",

@@ -330,14 +242,10 @@ class TestLoadConfig:
            "--workers '7'",
            "--containers '16'",
            "--policy 'container_placement_policy'",
-            "--ignore-errors",
-            "--sleep '19'",
-            "--acl 'acl'",
        ]
        expected_env_vars = {
            "DURATION": 9,
            "WRITE_OBJ_SIZE": 11,
-            "K6_OUT": "output",
            "NO_VERIFY_SSL": True,
            "REGISTRY_FILE": "registry_file",
            "K6_MIN_ITERATION_DURATION": "min_iteration_duration",

@@ -345,8 +253,6 @@ class TestLoadConfig:
            "WRITERS": 7,
            "READERS": 7,
            "DELETERS": 8,
-            "READ_AGE": 8,
-            "STREAMING": 9,
            "PREGEN_JSON": "pregen_json",
        }

@@ -355,7 +261,6 @@ class TestLoadConfig:
    @pytest.mark.parametrize("load_params", [LoadScenario.LOCAL], indirect=True)
    def test_argument_parsing_for_local_scenario(self, load_params: LoadParams):
-        load_params.preset.local = False
        expected_preset_args = [
            "--size '11'",
            "--preload_obj '13'",

@@ -363,30 +268,26 @@ class TestLoadConfig:
            "--workers '7'",
            "--containers '16'",
            "--policy 'container_placement_policy'",
-            "--ignore-errors",
-            "--sleep '19'",
-            "--acl 'acl'",
        ]
        expected_env_vars = {
            "CONFIG_FILE": "config_file",
            "DURATION": 9,
            "WRITE_OBJ_SIZE": 11,
-            "K6_OUT": "output",
            "REGISTRY_FILE": "registry_file",
            "K6_MIN_ITERATION_DURATION": "min_iteration_duration",
            "K6_SETUP_TIMEOUT": "setup_timeout",
            "WRITERS": 7,
            "READERS": 7,
            "DELETERS": 8,
-            "READ_AGE": 8,
-            "STREAMING": 9,
            "PREGEN_JSON": "pregen_json",
        }

        self._check_preset_params(load_params, expected_preset_args)
        self._check_env_vars(load_params, expected_env_vars)

-    @pytest.mark.parametrize("load_params, load_type", [(LoadScenario.VERIFY, LoadType.S3)], indirect=True)
+    @pytest.mark.parametrize(
+        "load_params, load_type", [(LoadScenario.VERIFY, LoadType.S3)], indirect=True
+    )
    def test_argument_parsing_for_s3_verify_scenario(self, load_params: LoadParams):
        expected_env_vars = {
            "CLIENTS": 14,

@@ -398,7 +299,9 @@ class TestLoadConfig:
        self._check_env_vars(load_params, expected_env_vars)

-    @pytest.mark.parametrize("load_params, load_type", [(LoadScenario.VERIFY, LoadType.gRPC)], indirect=True)
+    @pytest.mark.parametrize(
+        "load_params, load_type", [(LoadScenario.VERIFY, LoadType.gRPC)], indirect=True
+    )
    def test_argument_parsing_for_grpc_verify_scenario(self, load_params: LoadParams):
        expected_env_vars = {
            "CLIENTS": 14,

@@ -419,21 +322,16 @@ class TestLoadConfig:
            "--workers '0'",
            "--containers '0'",
            "--policy ''",
-            "--sleep '0'",
-            "--acl ''",
        ]
        expected_env_vars = {
            "DURATION": 0,
            "WRITE_OBJ_SIZE": 0,
            "REGISTRY_FILE": "",
-            "K6_OUT": "",
            "K6_MIN_ITERATION_DURATION": "",
            "K6_SETUP_TIMEOUT": "",
            "WRITERS": 0,
            "READERS": 0,
            "DELETERS": 0,
-            "READ_AGE": 0,
-            "STREAMING": 0,
            "PREGEN_JSON": "",
            "PREPARE_LOCALLY": False,
        }

@@ -441,7 +339,9 @@ class TestLoadConfig:
        self._check_preset_params(load_params, expected_preset_args)
        self._check_env_vars(load_params, expected_env_vars)

-    @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.gRPC_CAR, True)], indirect=True)
+    @pytest.mark.parametrize(
+        "load_params, set_empty", [(LoadScenario.gRPC_CAR, True)], indirect=True
+    )
    def test_empty_argument_parsing_for_grpc_car_scenario(self, load_params: LoadParams):
        expected_preset_args = [
            "--size '0'",

@@ -450,14 +350,11 @@ class TestLoadConfig:
            "--workers '0'",
            "--containers '0'",
            "--policy ''",
-            "--sleep '0'",
-            "--acl ''",
        ]
        expected_env_vars = {
            "DURATION": 0,
            "WRITE_OBJ_SIZE": 0,
            "REGISTRY_FILE": "",
-            "K6_OUT": "",
            "K6_MIN_ITERATION_DURATION": "",
            "K6_SETUP_TIMEOUT": "",
            "MAX_WRITERS": 0,

@@ -471,8 +368,6 @@ class TestLoadConfig:
            "WRITE_RATE": 0,
            "READ_RATE": 0,
            "DELETE_RATE": 0,
-            "READ_AGE": 0,
-            "STREAMING": 0,
            "PREPARE_LOCALLY": False,
        }

@@ -488,21 +383,16 @@ class TestLoadConfig:
            "--workers '0'",
            "--buckets '0'",
            "--location ''",
-            "--sleep '0'",
-            "--acl ''",
        ]
        expected_env_vars = {
            "DURATION": 0,
            "WRITE_OBJ_SIZE": 0,
            "REGISTRY_FILE": "",
-            "K6_OUT": "",
            "K6_MIN_ITERATION_DURATION": "",
            "K6_SETUP_TIMEOUT": "",
            "WRITERS": 0,
            "READERS": 0,
            "DELETERS": 0,
-            "READ_AGE": 0,
-            "STREAMING": 0,
            "NO_VERIFY_SSL": False,
            "PREGEN_JSON": "",
        }

@@ -519,14 +409,11 @@ class TestLoadConfig:
            "--workers '0'",
            "--buckets '0'",
            "--location ''",
-            "--sleep '0'",
-            "--acl ''",
        ]
        expected_env_vars = {
            "DURATION": 0,
            "WRITE_OBJ_SIZE": 0,
            "REGISTRY_FILE": "",
-            "K6_OUT": "",
            "K6_MIN_ITERATION_DURATION": "",
            "K6_SETUP_TIMEOUT": "",
            "NO_VERIFY_SSL": False,

@@ -541,8 +428,6 @@ class TestLoadConfig:
            "WRITE_RATE": 0,
            "READ_RATE": 0,
            "DELETE_RATE": 0,
-            "READ_AGE": 0,
-            "STREAMING": 0,
        }

        self._check_preset_params(load_params, expected_preset_args)

@@ -557,22 +442,17 @@ class TestLoadConfig:
            "--workers '0'",
            "--containers '0'",
            "--policy ''",
-            "--sleep '0'",
-            "--acl ''",
        ]
        expected_env_vars = {
            "DURATION": 0,
            "WRITE_OBJ_SIZE": 0,
            "NO_VERIFY_SSL": False,
            "REGISTRY_FILE": "",
-            "K6_OUT": "",
            "K6_MIN_ITERATION_DURATION": "",
            "K6_SETUP_TIMEOUT": "",
            "WRITERS": 0,
            "READERS": 0,
            "DELETERS": 0,
-            "READ_AGE": 0,
-            "STREAMING": 0,
            "PREGEN_JSON": "",
        }

@@ -588,22 +468,17 @@ class TestLoadConfig:
            "--workers '0'",
            "--containers '0'",
            "--policy ''",
-            "--sleep '0'",
-            "--acl ''",
        ]
        expected_env_vars = {
            "CONFIG_FILE": "",
            "DURATION": 0,
            "WRITE_OBJ_SIZE": 0,
            "REGISTRY_FILE": "",
-            "K6_OUT": "",
            "K6_MIN_ITERATION_DURATION": "",
            "K6_SETUP_TIMEOUT": "",
            "WRITERS": 0,
            "READERS": 0,
            "DELETERS": 0,
-            "READ_AGE": 0,
-            "STREAMING": 0,
            "PREGEN_JSON": "",
        }

@@ -642,33 +517,12 @@ class TestLoadConfig:
        self._check_env_vars(load_params, expected_env_vars)

-    @pytest.mark.parametrize(
-        "load_params, load_type",
-        [(LoadScenario.gRPC, LoadType.gRPC)],
-        indirect=True,
-    )
-    @pytest.mark.parametrize(
-        "load_time, expected_seconds",
-        [
-            (300, 300),
-            ("2d3h45min", 186300),
-            ("1d6h", 108000),
-            ("1d", 86400),
-            ("1d1min", 86460),
-            ("2h", 7200),
-            ("2h2min", 7320),
-        ],
-    )
-    def test_convert_time_to_seconds(self, load_params: LoadParams, load_time: str | int, expected_seconds: int):
-        load_params.load_time = load_time
-        assert load_params.load_time == expected_seconds
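The removed `test_convert_time_to_seconds` cases imply a parser for duration strings like `2d3h45min`. A hedged sketch that satisfies exactly those cases (the real `LoadParams` setter may differ):

import re


def to_seconds(load_time: str | int) -> int:
    if isinstance(load_time, int):
        return load_time
    units = {"d": 86400, "h": 3600, "min": 60}
    total = 0
    for value, unit in re.findall(r"(\d+)(d|h|min)", load_time):
        total += int(value) * units[unit]
    return total


assert to_seconds("2d3h45min") == 186300
assert to_seconds("1d6h") == 108000
assert to_seconds(300) == 300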
    def _check_preset_params(self, load_params: LoadParams, expected_preset_args: list[str]):
        preset_parameters = load_params.get_preset_arguments()
        assert sorted(preset_parameters) == sorted(expected_preset_args)

    def _check_env_vars(self, load_params: LoadParams, expected_env_vars: dict[str, str]):
-        env_vars = load_params.get_k6_vars()
+        env_vars = load_params.get_env_vars()
        assert env_vars == expected_env_vars

    def _check_all_values_none(self, dataclass, skip_fields=None):