Compare commits


26 commits

Author SHA1 Message Date
9ad620121e [#372] Added decorator wait until stabilization metric values
Signed-off-by: Ilyas Niyazov <i.niyazov@yadro.com>
2025-04-16 07:42:56 +00:00
aab4d4f657 [#373] Add step to httpClient for log write
Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
2025-04-15 15:05:46 +00:00
80226ee0a8 [#371] Add IAM and STS clients to boto3-stubs
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2025-04-07 08:15:51 +00:00
d38808a1f5 [#354] Support of presigned url methods for S3
Signed-off-by: Yaroslava Lukoyanova <y.lukoyanova@yadro.com>
2025-03-31 07:15:59 +00:00
c4ab14fce8 [#370] Unify delete_object_tagging method in S3 clients
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2025-03-24 07:31:42 +00:00
c8eec11906 [#369] Set region in S3 STS client
Signed-off-by: Yaroslava Lukoyanova <y.lukoyanova@yadro.com>
2025-03-24 09:57:11 +03:00
6bbc359ec9 [#368] Fixed function check metrics
Signed-off-by: Ilyas Niyazov <i.niyazov@yadro.com>
2025-03-21 11:06:39 +00:00
8bedd9b3d6 [#367] Use full date during log
Signed-off-by: a.berezin <a.berezin@yadro.com>
2025-03-19 11:34:12 +00:00
91a2706b06 [#366] Test order depends on location
Signed-off-by: Dmitry Anurin <danurin@yadro.com>
2025-03-19 11:33:06 +00:00
dcde9e15b1 [#365] Change type hint for NetmapOperations.nodeinfo
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2025-03-18 14:56:12 +00:00
3966f65c95 [#364] Fixed hook order tests collection
Signed-off-by: Ilyas Niyazov <i.niyazov@yadro.com>
2025-03-17 16:24:36 +03:00
dfb048fe51 [#363] Add accounting for timeout inaccuracy between process and cli
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2025-03-13 08:07:25 +00:00
c2af1bba5c [#362] Add functions to change date on nodes in ClusterStateController
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2025-03-11 16:33:48 +03:00
7d2c92ebc0 [#361] Move common fixture to testlib
Signed-off-by: a.berezin <a.berezin@yadro.com>
2025-03-07 17:06:14 +03:00
0c4e601840 [#359] Override represantation method for Host
Signed-off-by: Ilyas Niyazov <i.niyazov@yadro.com>
2025-03-06 08:44:34 +00:00
f1073d214c [#360] Increased timeout for IAM policy attach/detach
Signed-off-by: Yaroslava Lukoyanova <y.lukoyanova@yadro.com>
2025-03-05 16:43:55 +03:00
b00d080982 [#357] Synchronize client and CliCommand timeouts
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2025-03-03 12:49:49 +00:00
97b9b5498a [#358] Add minor improvements for convenient work with clients
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2025-02-25 17:07:23 +03:00
e9bc36b3d3 [#355] Change CSC time methods
Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
2025-02-05 11:47:09 +00:00
87afc4b58c [#356] Added pprof endpoint and working dir to service attributes
Signed-off-by: Dmitry Anurin <danurin@yadro.com>
2025-02-05 09:47:49 +03:00
b44705eb2f [#353] Added Netmap command for CliWrapper
Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
2025-01-31 09:11:44 +00:00
ace9564243 [#352] Fix versions parsing
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2025-01-30 11:21:09 +03:00
0015ea7f93 [#350] Add ape rule for load config
Signed-off-by: a.berezin <a.berezin@yadro.com>
2025-01-23 17:47:49 +03:00
aed20e02ac [#349] Fixed hook pytest-collect-modifyitems
Signed-off-by: Ilyas Niyazov <i.niyazov@yadro.com>
2025-01-17 17:37:51 +03:00
80dd8d0b16 [#348] Fixed check of fields in S3 aws/boto3 methods related to policies
Signed-off-by: y.lukoyanova <y.lukoyanova@yadro.com>
2025-01-17 11:09:47 +03:00
daf186690b [#345] Fix curl request generation
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2025-01-16 07:45:42 +00:00
39 changed files with 1067 additions and 602 deletions

View file

@@ -28,7 +28,7 @@ dependencies = [
     "pytest==7.1.2",
     "tenacity==8.0.1",
     "boto3==1.35.30",
-    "boto3-stubs[essential]==1.35.30",
+    "boto3-stubs[s3,iam,sts]==1.35.30",
 ]
 requires-python = ">=3.10"

View file

@@ -9,7 +9,8 @@ testrail-api==1.12.0
 tenacity==8.0.1
 pytest==7.1.2
 boto3==1.35.30
-boto3-stubs[essential]==1.35.30
+boto3-stubs[s3,iam,sts]==1.35.30
+pydantic==2.10.6

 # Dev dependencies
 black==22.8.0
@@ -21,4 +22,4 @@ pylint==2.17.4
 # Packaging dependencies
 build==0.8.0
 setuptools==65.3.0
-twine==4.0.1
+twine==4.0.1

View file

@@ -1,4 +1,4 @@
 __version__ = "2.0.1"

-from .fixtures import configure_testlib, hosting, temp_directory
-from .hooks import pytest_collection_modifyitems
+from .fixtures import configure_testlib, hosting, session_start_time, temp_directory
+from .hooks import pytest_add_frostfs_marker, pytest_collection_modifyitems

View file

@@ -1,10 +1,11 @@
 from typing import Optional

 from frostfs_testlib.shell import CommandOptions, CommandResult, InteractiveInput, Shell
+from frostfs_testlib.utils.datetime_utils import parse_time


 class CliCommand:
+    TIMEOUT_INACCURACY = 10

     WALLET_SOURCE_ERROR_MSG = "Provide either wallet or wallet_config to specify wallet location"
     WALLET_PASSWD_ERROR_MSG = "Provide either wallet_password or wallet_config to specify password"
@@ -24,9 +25,7 @@ class CliCommand:
     def __init__(self, shell: Shell, cli_exec_path: str, **base_params):
         self.shell = shell
         self.cli_exec_path = cli_exec_path
-        self.__base_params = " ".join(
-            [f"--{param} {value}" for param, value in base_params.items() if value]
-        )
+        self.__base_params = " ".join([f"--{param} {value}" for param, value in base_params.items() if value])

     def _format_command(self, command: str, **params) -> str:
         param_str = []
@@ -48,9 +47,7 @@ class CliCommand:
                     val_str = str(value_item).replace("'", "\\'")
                     param_str.append(f"--{param} '{val_str}'")
             elif isinstance(value, dict):
-                param_str.append(
-                    f'--{param} \'{",".join(f"{key}={val}" for key, val in value.items())}\''
-                )
+                param_str.append(f'--{param} \'{",".join(f"{key}={val}" for key, val in value.items())}\'')
             else:
                 if "'" in str(value):
                     value_str = str(value).replace('"', '\\"')
@@ -63,12 +60,22 @@ class CliCommand:
         return f"{self.cli_exec_path} {self.__base_params} {command or ''} {param_str}"

     def _execute(self, command: Optional[str], **params) -> CommandResult:
-        return self.shell.exec(self._format_command(command, **params))
+        if timeout := params.get("timeout"):
+            timeout = parse_time(timeout) + self.TIMEOUT_INACCURACY
+
+        return self.shell.exec(
+            self._format_command(command, **params),
+            CommandOptions(timeout=timeout),
+        )

     def _execute_with_password(self, command: Optional[str], password, **params) -> CommandResult:
+        if timeout := params.get("timeout"):
+            timeout = parse_time(timeout) + self.TIMEOUT_INACCURACY
+
         return self.shell.exec(
             self._format_command(command, **params),
-            options=CommandOptions(
-                interactive_inputs=[InteractiveInput(prompt_pattern="assword", input=password)]
+            CommandOptions(
+                interactive_inputs=[InteractiveInput(prompt_pattern="assword", input=password)],
+                timeout=timeout,
             ),
         )
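
The new timeout handling is easy to check in isolation. Below is a minimal sketch of the same pattern, assuming parse_time converts human-readable durations such as "30s" into seconds (the real helper lives in frostfs_testlib.utils.datetime_utils; this stand-in is an assumption):

import re

TIMEOUT_INACCURACY = 10  # seconds of slack between the CLI timeout and the process timeout


def parse_time(value: str | int) -> int:
    # simplified stand-in for frostfs_testlib.utils.datetime_utils.parse_time
    match = re.fullmatch(r"(\d+)([smh]?)", str(value))
    number, unit = int(match.group(1)), match.group(2)
    return number * {"": 1, "s": 1, "m": 60, "h": 3600}[unit]


def shell_timeout(params: dict) -> int | None:
    # mirrors the walrus pattern in CliCommand._execute: pad the process
    # timeout so the CLI's own --timeout always fires first
    if timeout := params.get("timeout"):
        return parse_time(timeout) + TIMEOUT_INACCURACY
    return None


assert shell_timeout({"timeout": "30s"}) == 40
assert shell_timeout({}) is None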

View file

@@ -12,6 +12,7 @@ class FrostfsCliNetmap(CliCommand):
         address: Optional[str] = None,
         generate_key: bool = False,
         ttl: Optional[int] = None,
+        trace: Optional[bool] = False,
         xhdr: Optional[dict] = None,
         timeout: Optional[str] = None,
     ) -> CommandResult:
@@ -42,6 +43,7 @@ class FrostfsCliNetmap(CliCommand):
         address: Optional[str] = None,
         generate_key: bool = False,
         ttl: Optional[int] = None,
+        trace: Optional[bool] = False,
         xhdr: Optional[dict] = None,
         timeout: Optional[str] = None,
     ) -> CommandResult:
@@ -73,6 +75,7 @@ class FrostfsCliNetmap(CliCommand):
         generate_key: bool = False,
         json: bool = False,
         ttl: Optional[int] = None,
+        trace: Optional[bool] = False,
         xhdr: Optional[dict] = None,
         timeout: Optional[str] = None,
     ) -> CommandResult:
@@ -104,6 +107,7 @@ class FrostfsCliNetmap(CliCommand):
         address: Optional[str] = None,
         generate_key: bool = False,
         ttl: Optional[int] = None,
+        trace: Optional[bool] = False,
         xhdr: Optional[dict] = None,
         timeout: Optional[str] = None,
     ) -> CommandResult:

View file

@@ -1,7 +1,7 @@
 import re

 from frostfs_testlib.storage.cluster import ClusterNode
-from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeNetInfo, NodeNetmapInfo, NodeStatus
+from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeInfo, NodeNetInfo, NodeNetmapInfo, NodeStatus


 class NetmapParser:
@@ -20,8 +20,6 @@ class NetmapParser:
             "withdrawal_fee": r"Withdrawal fee: (?P<withdrawal_fee>\d+)",
             "homomorphic_hashing_disabled": r"Homomorphic hashing disabled: (?P<homomorphic_hashing_disabled>true|false)",
             "maintenance_mode_allowed": r"Maintenance mode allowed: (?P<maintenance_mode_allowed>true|false)",
-            "eigen_trust_alpha": r"EigenTrustAlpha: (?P<eigen_trust_alpha>\d+\w+$)",
-            "eigen_trust_iterations": r"EigenTrustIterations: (?P<eigen_trust_iterations>\d+)",
         }
         parse_result = {}
@@ -64,7 +62,7 @@ class NetmapParser:
         for node in netmap_nodes:
             for key, regex in regexes.items():
                 search_result = re.search(regex, node, flags=re.MULTILINE)
-                if search_result == None:
+                if search_result is None:
                     result_netmap[key] = None
                     continue
                 if key == "node_data_ips":
@@ -83,9 +81,22 @@ class NetmapParser:
         return dataclasses_netmap

     @staticmethod
-    def snapshot_one_node(output: str, cluster_node: ClusterNode) -> NodeNetmapInfo | None:
+    def snapshot_one_node(output: str, rpc_endpoint: str) -> NodeNetmapInfo | None:
         snapshot_nodes = NetmapParser.snapshot_all_nodes(output=output)
-        snapshot_node = [node for node in snapshot_nodes if node.node == cluster_node.get_interface(Interfaces.MGMT)]
-        if not snapshot_node:
-            return None
-        return snapshot_node[0]
+        for snapshot in snapshot_nodes:
+            for endpoint in snapshot.external_address:
+                if rpc_endpoint.split(":")[0] in endpoint:
+                    return snapshot
+
+    @staticmethod
+    def node_info(output: dict) -> NodeInfo:
+        data_dict = {"attributes": {}}
+
+        for key, value in output.items():
+            if key != "attributes":
+                data_dict[key] = value
+
+        for attribute in output["attributes"]:
+            data_dict["attributes"][attribute["key"]] = attribute["value"]
+
+        return NodeInfo(**data_dict)
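
The endpoint-matching rule in the rewritten snapshot_one_node is subtle enough to warrant a standalone sketch; the Snapshot dataclass below is a hypothetical stand-in for NodeNetmapInfo:

from dataclasses import dataclass


@dataclass
class Snapshot:  # hypothetical stand-in for NodeNetmapInfo
    external_address: list[str]


def match_node(snapshots: list[Snapshot], rpc_endpoint: str) -> Snapshot | None:
    # same rule as the rewritten snapshot_one_node: compare only the host
    # part of the RPC endpoint against each node's external addresses
    host = rpc_endpoint.split(":")[0]
    for snapshot in snapshots:
        for endpoint in snapshot.external_address:
            if host in endpoint:
                return snapshot
    return None


nodes = [Snapshot(["10.0.0.1:8080"]), Snapshot(["10.0.0.2:8080"])]
assert match_node(nodes, "10.0.0.2:8080") is nodes[1]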

View file

@@ -0,0 +1 @@
+from frostfs_testlib.clients.http.http_client import HttpClient

View file

@@ -15,14 +15,14 @@ LOGGING_CONFIG = {
     "handlers": {"default": {"class": "logging.StreamHandler", "formatter": "http", "stream": "ext://sys.stderr"}},
     "formatters": {
         "http": {
-            "format": "%(levelname)s [%(asctime)s] %(name)s - %(message)s",
+            "format": "%(asctime)s [%(levelname)s] %(name)s - %(message)s",
             "datefmt": "%Y-%m-%d %H:%M:%S",
         }
     },
     "loggers": {
         "httpx": {
             "handlers": ["default"],
-            "level": "DEBUG",
+            "level": "ERROR",
         },
         "httpcore": {
             "handlers": ["default"],
@@ -43,7 +43,7 @@ class HttpClient:
             response = client.request(method, url, **kwargs)

         self._attach_response(response, **kwargs)
-        logger.info(f"Response: {response.status_code} => {response.text}")
+        # logger.info(f"Response: {response.status_code} => {response.text}")

         if expected_status_code:
             assert (
@@ -131,14 +131,22 @@ class HttpClient:

         reporter.attach(report, "Requests Info")
         reporter.attach(curl_request, "CURL")
+        cls._write_log(curl_request, response_body, response.status_code)

     @classmethod
     def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str, files: dict) -> str:
-        headers = " ".join(f'-H "{name.title()}: {value}"' for name, value in headers.items())
-        data = f" -d '{data}'" if data else ""
+        excluded_headers = {"Accept-Encoding", "Connection", "User-Agent", "Content-Length"}
+        headers = " ".join(f"-H '{header.title()}: {value}'" for header, value in headers.items() if header.title() not in excluded_headers)

+        data = f" -d '{data}'" if data else ""
         for name, path in files.items():
             data += f' -F "{name}=@{path}"'

         # Option -k means no verify SSL
         return f"curl {url} -X {method} {headers}{data} -k"
+
+    @classmethod
+    def _write_log(cls, curl: str, res_body: str, res_code: int) -> None:
+        if res_body:
+            curl += f"\nResponse: {res_code}\n{res_body}"
+        logger.info(f"{curl}")
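
The header filter added to _create_curl_request can be tried on its own; a sketch assuming httpx is installed:

import httpx

EXCLUDED_HEADERS = {"Accept-Encoding", "Connection", "User-Agent", "Content-Length"}


def curl_headers(headers: httpx.Headers) -> str:
    # drop transport-level headers that curl sets on its own, mirroring
    # the filter added to HttpClient._create_curl_request
    return " ".join(
        f"-H '{header.title()}: {value}'"
        for header, value in headers.items()
        if header.title() not in EXCLUDED_HEADERS
    )


headers = httpx.Headers({"content-type": "application/json", "user-agent": "python-httpx"})
print(curl_headers(headers))  # -H 'Content-Type: application/json'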

View file

@@ -1 +1,3 @@
-from frostfs_testlib.clients.s3.interfaces import BucketContainerResolver, S3ClientWrapper, VersioningStatus
+from frostfs_testlib.clients.s3.aws_cli_client import AwsCliClient
+from frostfs_testlib.clients.s3.boto3_client import Boto3ClientWrapper
+from frostfs_testlib.clients.s3.interfaces import ACL, BucketContainerResolver, S3ClientWrapper, VersioningStatus

View file

@@ -33,12 +33,14 @@ class AwsCliClient(S3ClientWrapper):
         self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1"
     ) -> None:
         self.s3gate_endpoint = s3gate_endpoint
+        self.iam_endpoint = None
+
         self.access_key_id: str = access_key_id
         self.secret_access_key: str = secret_access_key
         self.profile = profile
-        self.local_shell = LocalShell()
         self.region = region
-        self.iam_endpoint = None
+
+        self.local_shell = LocalShell()
         try:
             _configure_aws_cli(f"aws configure --profile {profile}", access_key_id, secret_access_key, region)
             self.local_shell.exec(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS} --profile {profile}")
@@ -957,6 +959,15 @@ class AwsCliClient(S3ClientWrapper):

         return json_output

+    @reporter.step("Create presign url for the object")
+    def create_presign_url(self, method: str, bucket: str, key: str, expires_in: Optional[int] = 3600) -> str:
+        # AWS CLI does not support method definition and world only in 'get_object' state by default
+        cmd = f"aws {self.common_flags} s3 presign s3://{bucket}/{key} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}"
+        if expires_in:
+            cmd += f" --expires-in {expires_in}"
+        response = self.local_shell.exec(cmd).stdout
+        return response.strip()
+
     # IAM METHODS #
     # Some methods don't have checks because AWS is silent in some cases (delete, attach, etc.)
@@ -977,7 +988,7 @@ class AwsCliClient(S3ClientWrapper):
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
-        sleep(S3_SYNC_WAIT_TIME * 10)
+        sleep(S3_SYNC_WAIT_TIME * 14)

         return response
@@ -988,7 +999,7 @@ class AwsCliClient(S3ClientWrapper):
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
-        sleep(S3_SYNC_WAIT_TIME * 10)
+        sleep(S3_SYNC_WAIT_TIME * 14)

         return response
@@ -1120,7 +1131,7 @@ class AwsCliClient(S3ClientWrapper):
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
-        sleep(S3_SYNC_WAIT_TIME * 10)
+        sleep(S3_SYNC_WAIT_TIME * 14)

         return response
@@ -1131,7 +1142,7 @@ class AwsCliClient(S3ClientWrapper):
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
-        sleep(S3_SYNC_WAIT_TIME * 10)
+        sleep(S3_SYNC_WAIT_TIME * 14)

         return response
@@ -1227,7 +1238,7 @@ class AwsCliClient(S3ClientWrapper):
         output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)

-        assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}"
+        assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}"

         return response
@@ -1239,7 +1250,7 @@ class AwsCliClient(S3ClientWrapper):
         output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)

-        assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}"
+        assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}"

         return response
@@ -1264,7 +1275,7 @@ class AwsCliClient(S3ClientWrapper):
         output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)

-        assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}"
+        assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}"

         return response
@@ -1276,7 +1287,7 @@ class AwsCliClient(S3ClientWrapper):
         output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)

-        assert response.get("Groups"), f"Expected Groups in response:\n{response}"
+        assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}"

         return response
@@ -1288,7 +1299,7 @@ class AwsCliClient(S3ClientWrapper):
         output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)

-        assert response.get("Groups"), f"Expected Groups in response:\n{response}"
+        assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}"

         return response
@@ -1324,7 +1335,7 @@ class AwsCliClient(S3ClientWrapper):
         output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)

-        assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}"
+        assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}"

         return response
@@ -1350,7 +1361,7 @@ class AwsCliClient(S3ClientWrapper):
             cmd += f" --profile {self.profile}"
         output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
-        sleep(S3_SYNC_WAIT_TIME * 10)
+        sleep(S3_SYNC_WAIT_TIME * 14)

         return response
@@ -1365,7 +1376,7 @@ class AwsCliClient(S3ClientWrapper):
         output = self.local_shell.exec(cmd).stdout
         response = self._to_json(output)
-        sleep(S3_SYNC_WAIT_TIME * 10)
+        sleep(S3_SYNC_WAIT_TIME * 14)

         return response

View file

@@ -10,7 +10,9 @@ import boto3
 import urllib3
 from botocore.config import Config
 from botocore.exceptions import ClientError
+from mypy_boto3_iam import IAMClient
 from mypy_boto3_s3 import S3Client
+from mypy_boto3_sts import STSClient

 from frostfs_testlib import reporter
 from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict
@@ -35,25 +37,25 @@ class Boto3ClientWrapper(S3ClientWrapper):
     def __init__(
         self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1"
     ) -> None:
-        self.boto3_client: S3Client = None
         self.s3gate_endpoint: str = ""
+        self.boto3_client: S3Client = None

-        self.boto3_iam_client: S3Client = None
         self.iam_endpoint: str = ""
+        self.boto3_iam_client: IAMClient = None
+        self.boto3_sts_client: STSClient = None

-        self.boto3_sts_client: S3Client = None
-
-        self.access_key_id: str = access_key_id
-        self.secret_access_key: str = secret_access_key
+        self.access_key_id = access_key_id
+        self.secret_access_key = secret_access_key
         self.profile = profile
         self.region = region

         self.session = boto3.Session()
         self.config = Config(
+            signature_version="s3v4",
             retries={
                 "max_attempts": MAX_REQUEST_ATTEMPTS,
                 "mode": RETRY_MODE,
-            }
+            },
         )

         self.set_endpoint(s3gate_endpoint)
@@ -90,12 +92,13 @@ class Boto3ClientWrapper(S3ClientWrapper):
             endpoint_url=self.iam_endpoint,
             verify=False,
         )
-        # since the STS does not have an enpoint, IAM is used
+        # since the STS does not have an endpoint, IAM is used
         self.boto3_sts_client = self.session.client(
             service_name="sts",
             aws_access_key_id=self.access_key_id,
             aws_secret_access_key=self.secret_access_key,
             endpoint_url=iam_endpoint,
+            region_name=self.region,
             verify=False,
         )
@@ -145,6 +148,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
         params = {"Bucket": bucket}
         if object_lock_enabled_for_bucket is not None:
             params.update({"ObjectLockEnabledForBucket": object_lock_enabled_for_bucket})
+
         if acl is not None:
             params.update({"ACL": acl})
         elif grant_write or grant_read or grant_full_control:
@@ -154,6 +158,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
                 params.update({"GrantRead": grant_read})
             elif grant_full_control:
                 params.update({"GrantFullControl": grant_full_control})
+
         if location_constraint:
             params.update({"CreateBucketConfiguration": {"LocationConstraint": location_constraint}})
@@ -773,7 +778,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
         return response.get("TagSet")

     @reporter.step("Delete object tagging")
-    def delete_object_tagging(self, bucket: str, key: str) -> None:
+    def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None:
         params = self._convert_to_s3_params(locals())
         self._exec_request(
             self.boto3_client.delete_object_tagging,
@@ -816,6 +821,16 @@ class Boto3ClientWrapper(S3ClientWrapper):
     ) -> dict:
         raise NotImplementedError("Cp is not supported for boto3 client")

+    @reporter.step("Create presign url for the object")
+    def create_presign_url(self, method: str, bucket: str, key: str, expires_in: Optional[int] = 3600) -> str:
+        response = self._exec_request(
+            method=self.boto3_client.generate_presigned_url,
+            params={"ClientMethod": method, "Params": {"Bucket": bucket, "Key": key}, "ExpiresIn": expires_in},
+            endpoint=self.s3gate_endpoint,
+            profile=self.profile,
+        )
+        return response
+
     # END OBJECT METHODS #

     # IAM METHODS #
@@ -840,7 +855,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
             endpoint=self.iam_endpoint,
             profile=self.profile,
         )
-        sleep(S3_SYNC_WAIT_TIME * 10)
+        sleep(S3_SYNC_WAIT_TIME * 14)

         return response

     @reporter.step("Attaches the specified managed policy to the specified user")
@@ -852,7 +867,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
             endpoint=self.iam_endpoint,
             profile=self.profile,
         )
-        sleep(S3_SYNC_WAIT_TIME * 10)
+        sleep(S3_SYNC_WAIT_TIME * 14)

         return response

     @reporter.step("Creates a new AWS secret access key and access key ID for the specified user")
@@ -983,7 +998,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
             endpoint=self.iam_endpoint,
             profile=self.profile,
         )
-        sleep(S3_SYNC_WAIT_TIME * 10)
+        sleep(S3_SYNC_WAIT_TIME * 14)

         return response

     @reporter.step("Removes the specified managed policy from the specified user")
@@ -995,7 +1010,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
             endpoint=self.iam_endpoint,
             profile=self.profile,
         )
-        sleep(S3_SYNC_WAIT_TIME * 10)
+        sleep(S3_SYNC_WAIT_TIME * 14)

         return response

     @reporter.step("Returns a list of IAM users that are in the specified IAM group")
@@ -1091,7 +1106,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
             endpoint=self.iam_endpoint,
             profile=self.profile,
         )
-        assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}"
+        assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}"

         return response

     @reporter.step("Lists all managed policies that are attached to the specified IAM user")
@@ -1102,7 +1117,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
             endpoint=self.iam_endpoint,
             profile=self.profile,
         )
-        assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}"
+        assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}"

         return response

     @reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to")
@@ -1127,7 +1142,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
             endpoint=self.iam_endpoint,
             profile=self.profile,
         )
-        assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}"
+        assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}"

         return response

     @reporter.step("Lists the IAM groups")
@@ -1137,7 +1152,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
             endpoint=self.iam_endpoint,
             profile=self.profile,
         )
-        assert response.get("Groups"), f"Expected Groups in response:\n{response}"
+        assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}"

         return response

     @reporter.step("Lists the IAM groups that the specified IAM user belongs to")
@@ -1148,7 +1163,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
             endpoint=self.iam_endpoint,
             profile=self.profile,
         )
-        assert response.get("Groups"), f"Expected Groups in response:\n{response}"
+        assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}"

         return response

     @reporter.step("Lists all the managed policies that are available in your AWS account")
@@ -1180,7 +1195,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
             endpoint=self.iam_endpoint,
             profile=self.profile,
         )
-        assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}"
+        assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}"

         return response

     @reporter.step("Lists the IAM users")
@@ -1205,7 +1220,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
             endpoint=self.iam_endpoint,
             profile=self.profile,
         )
-        sleep(S3_SYNC_WAIT_TIME * 10)
+        sleep(S3_SYNC_WAIT_TIME * 14)

         return response

     @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user")
@@ -1220,7 +1235,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
             endpoint=self.iam_endpoint,
             profile=self.profile,
         )
-        sleep(S3_SYNC_WAIT_TIME * 10)
+        sleep(S3_SYNC_WAIT_TIME * 14)

         return response

     @reporter.step("Removes the specified user from the specified group")

View file

@@ -22,15 +22,15 @@ class VersioningStatus(HumanReadableEnum):
     SUSPENDED = "Suspended"


-ACL_COPY = [
-    "private",
-    "public-read",
-    "public-read-write",
-    "authenticated-read",
-    "aws-exec-read",
-    "bucket-owner-read",
-    "bucket-owner-full-control",
-]
+class ACL:
+    PRIVATE = "private"
+    PUBLIC_READ = "public-read"
+    PUBLIC_READ_WRITE = "public-read-write"
+    AUTHENTICATED_READ = "authenticated-read"
+    AWS_EXEC_READ = "aws-exec-read"
+    BUCKET_OWNER_READ = "bucket-owner-read"
+    BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control"
+    LOG_DELIVERY_WRITE = "log-delivery-write"


 class BucketContainerResolver(ABC):
@@ -50,6 +50,14 @@


 class S3ClientWrapper(HumanReadableABC):
+    access_key_id: str
+    secret_access_key: str
+    profile: str
+    region: str
+
+    s3gate_endpoint: str
+    iam_endpoint: str
+
     @abstractmethod
     def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str, region: str) -> None:
         pass
@@ -369,7 +377,7 @@
         """Returns the tag-set of an object."""

     @abstractmethod
-    def delete_object_tagging(self, bucket: str, key: str) -> None:
+    def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None:
         """Removes the entire tag set from the specified object."""

     @abstractmethod
@@ -417,6 +425,10 @@
     ) -> dict:
         """cp directory TODO: Add proper description"""

+    @abstractmethod
+    def create_presign_url(self, method: str, bucket: str, key: str, expires_in: Optional[int] = 3600) -> str:
+        """Creates presign URL"""
+
     # END OF OBJECT METHODS #

     # IAM METHODS #
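
The move from the ACL_COPY list to an ACL class trades iteration for attribute access. A minimal sketch of the intended call-site ergonomics, with create_bucket reduced to a hypothetical stub of its acl handling:

class ACL:  # subset of the class defined in frostfs_testlib.clients.s3.interfaces above
    PRIVATE = "private"
    PUBLIC_READ = "public-read"
    BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control"


def create_bucket(acl: str | None = None) -> dict:
    # hypothetical stub of S3ClientWrapper.create_bucket's acl handling
    params = {}
    if acl is not None:
        params["ACL"] = acl
    return params


# constants are typo-proof and autocomplete-friendly, unlike raw strings
assert create_bucket(ACL.PRIVATE) == {"ACL": "private"}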

View file

@@ -1,5 +1,6 @@
 import logging
 import os
+from datetime import datetime
 from importlib.metadata import entry_points

 import pytest
@@ -11,6 +12,12 @@ from frostfs_testlib.resources.common import ASSETS_DIR, HOSTING_CONFIG_FILE
 from frostfs_testlib.storage import get_service_registry


+@pytest.fixture(scope="session", autouse=True)
+def session_start_time():
+    start_time = datetime.utcnow()
+    return start_time
+
+
 @pytest.fixture(scope="session")
 def configure_testlib():
     reporter.get_reporter().register_handler(reporter.AllureHandler())

View file

@@ -1,8 +1,8 @@
 import pytest


-@pytest.hookimpl
-def pytest_collection_modifyitems(items: list[pytest.Item]):
+@pytest.hookimpl(specname="pytest_collection_modifyitems")
+def pytest_add_frostfs_marker(items: list[pytest.Item]):
     # All tests which reside in frostfs nodeid are granted with frostfs marker, excluding
     # nodeid = full path of the test
     # 1. plugins
@@ -11,3 +11,21 @@ def pytest_add_frostfs_marker(items: list[pytest.Item]):
         location = item.location[0]
         if "frostfs" in location and "plugin" not in location and "testlib" not in location:
             item.add_marker("frostfs")
+
+
+# pytest hook. Do not rename
+@pytest.hookimpl(trylast=True)
+def pytest_collection_modifyitems(items: list[pytest.Item]):
+    # The order of running tests corresponded to the suites
+    items.sort(key=lambda item: item.location[0])
+
+    # Change order of tests based on @pytest.mark.order(<int>) marker
+    def order(item: pytest.Item) -> int:
+        order_marker = item.get_closest_marker("order")
+        if order_marker and (len(order_marker.args) != 1 or not isinstance(order_marker.args[0], int)):
+            raise RuntimeError("Incorrect usage of pytest.mark.order")
+
+        order_value = order_marker.args[0] if order_marker else 0
+        return order_value
+
+    items.sort(key=lambda item: order(item))
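
A usage sketch for the new ordering hook: with the trylast hook above, collected items are first sorted by file path and then, within a file, by the optional integer marker (registering the order marker in pytest configuration is assumed, to avoid unknown-mark warnings):

import pytest


@pytest.mark.order(2)
def test_runs_second():
    assert True


@pytest.mark.order(1)
def test_runs_first():
    assert True


def test_runs_before_marked_ones():
    # unmarked tests default to order 0 and therefore run first
    assert True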

View file

@@ -29,6 +29,9 @@ class Host(ABC):
         self._service_config_by_name = {service_config.name: service_config for service_config in config.services}
         self._cli_config_by_name = {cli_config.name: cli_config for cli_config in config.clis}

+    def __repr__(self) -> str:
+        return self.config.address
+
     @property
     def config(self) -> HostConfig:
         """Returns config of the host.

View file

@@ -182,8 +182,10 @@ class Preset(MetaConfig):
     pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON", False)
     # Workers count for preset
     workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None, False)
-    # Acl for container/buckets
+    # TODO: Deprecated. Acl for container/buckets
     acl: Optional[str] = metadata_field(all_load_scenarios, "acl", None, False)
+    # APE rule for containers instead of deprecated ACL
+    rule: Optional[list[str]] = metadata_field(grpc_preset_scenarios, "rule", None, False, formatter=force_list)

     # ------ GRPC ------
     # Amount of containers which should be created

View file

@@ -1,5 +1,6 @@
 # Regex patterns of status codes of Container service
 CONTAINER_NOT_FOUND = "code = 3072.*message = container not found"
+SUBJECT_NOT_FOUND = "code = 1024.*message = frostfs error: chain/client.*subject not found.*"

 # Regex patterns of status codes of Object service
 MALFORMED_REQUEST = "code = 1024.*message = malformed request"

View file

@@ -141,6 +141,6 @@ class LocalShell(Shell):
                 f"RETCODE: {result.return_code}\n\n"
                 f"STDOUT:\n{result.stdout}\n"
                 f"STDERR:\n{result.stderr}\n"
-                f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}"
+                f"Start / End / Elapsed\t {start_time} / {end_time} / {elapsed_time}"
             )
             reporter.attach(command_attachment, "Command execution.txt")

View file

@@ -68,8 +68,7 @@ class SshConnectionProvider:
             try:
                 if creds.ssh_key_path:
-                    logger.info(
-                        f"Trying to connect to host {host} as {creds.ssh_login} using SSH key "
-                        f"{creds.ssh_key_path} (attempt {attempt})"
-                    )
+                    logger.info(f"Trying to connect to host {host} as {creds.ssh_login} using SSH key " f"{creds.ssh_key_path} (attempt {attempt})")
                     connection.connect(
                         hostname=host,
@@ -79,9 +78,7 @@ class SshConnectionProvider:
                         timeout=self.CONNECTION_TIMEOUT,
                     )
                 else:
-                    logger.info(
-                        f"Trying to connect to host {host} as {creds.ssh_login} using password " f"(attempt {attempt})"
-                    )
+                    logger.info(f"Trying to connect to host {host} as {creds.ssh_login} using password " f"(attempt {attempt})")
                     connection.connect(
                         hostname=host,
                         port=port,
@@ -104,9 +101,7 @@ class SshConnectionProvider:
                 connection.close()
                 can_retry = attempt + 1 < self.SSH_CONNECTION_ATTEMPTS
                 if can_retry:
-                    logger.warn(
-                        f"Can't connect to host {host}, will retry after {self.SSH_ATTEMPTS_INTERVAL}s. Error: {exc}"
-                    )
+                    logger.warn(f"Can't connect to host {host}, will retry after {self.SSH_ATTEMPTS_INTERVAL}s. Error: {exc}")
                     sleep(self.SSH_ATTEMPTS_INTERVAL)
                     continue
                 logger.exception(f"Can't connect to host {host}")
@@ -139,7 +134,7 @@ def log_command(func):
             f"RC:\n {result.return_code}\n"
             f"STDOUT:\n{textwrap.indent(result.stdout, ' ')}\n"
             f"STDERR:\n{textwrap.indent(result.stderr, ' ')}\n"
-            f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}"
+            f"Start / End / Elapsed\t {start_time} / {end_time} / {elapsed_time}"
         )

         if not options.no_log:
@@ -185,13 +180,11 @@ class SSHShell(Shell):
         private_key_passphrase: Optional[str] = None,
         port: str = "22",
         command_inspectors: Optional[list[CommandInspector]] = None,
-        custom_environment: Optional[dict] = None
+        custom_environment: Optional[dict] = None,
     ) -> None:
         super().__init__()
         self.connection_provider = SshConnectionProvider()
-        self.connection_provider.store_creds(
-            host, SshCredentials(login, password, private_key_path, private_key_passphrase)
-        )
+        self.connection_provider.store_creds(host, SshCredentials(login, password, private_key_path, private_key_passphrase))
         self.host = host
         self.port = port
@@ -220,9 +213,7 @@ class SSHShell(Shell):
             result = self._exec_non_interactive(command, options)

         if options.check and result.return_code != 0:
-            raise RuntimeError(
-                f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}\nStderr: {result.stderr}\n"
-            )
+            raise RuntimeError(f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}\nStderr: {result.stderr}\n")

         return result

     @log_command

View file

@@ -33,6 +33,7 @@ def get_via_http_gate(
     oid: str,
     node: ClusterNode,
     request_path: Optional[str] = None,
+    presigned_url: Optional[str] = None,
    timeout: Optional[int] = 300,
 ):
     """
@@ -47,6 +48,9 @@ def get_via_http_gate(
     if request_path:
         request = f"{node.http_gate.get_endpoint()}{request_path}"

+    if presigned_url:
+        request = presigned_url
+
     response = requests.get(request, stream=True, timeout=timeout, verify=False)

     if not response.ok:
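
The new presigned_url parameter pairs with the create_presign_url methods added to both S3 clients above. A sketch of the full round trip; the s3_client fixture and the bucket/key names in the comment are assumptions:

import requests


def download_via_presigned_url(presigned_url: str, timeout: int = 300) -> bytes:
    # same access path get_via_http_gate takes when presigned_url is set:
    # the URL already carries authentication, so no extra headers are needed
    response = requests.get(presigned_url, stream=True, timeout=timeout, verify=False)
    if not response.ok:
        raise AssertionError(f"Presigned GET failed: {response.status_code} {response.reason}")
    return response.content


# url = s3_client.create_presign_url("get_object", "my-bucket", "my-key", expires_in=3600)
# data = download_via_presigned_url(url)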

View file

@@ -6,7 +6,7 @@ from frostfs_testlib.testing.test_control import wait_for_success


 @reporter.step("Check metrics result")
-@wait_for_success(interval=10)
+@wait_for_success(max_wait_time=300, interval=10)
 def check_metrics_counter(
     cluster_nodes: list[ClusterNode],
     operator: str = "==",
@@ -19,7 +19,7 @@ def check_metrics_counter(
         counter_act += get_metrics_value(cluster_node, parse_from_command, **metrics_greps)
     assert eval(
         f"{counter_act} {operator} {counter_exp}"
-    ), f"Expected: {counter_exp} {operator} Actual: {counter_act} in nodes: {cluster_nodes}"
+    ), f"Actual: {counter_act} {operator} Expected: {counter_exp} in nodes: {cluster_nodes}"


 @reporter.step("Get metrics value from node: {node}")

View file

@@ -5,6 +5,7 @@ class ConfigAttributes:
     WALLET_CONFIG = "wallet_config"
     CONFIG_DIR = "service_config_dir"
     CONFIG_PATH = "config_path"
+    WORKING_DIR = "working_dir"
     SHARD_CONFIG_PATH = "shard_config_path"
     LOGGER_CONFIG_PATH = "logger_config_path"
     LOCAL_WALLET_PATH = "local_wallet_path"
@@ -15,6 +16,7 @@ class ConfigAttributes:
     ENDPOINT_DATA_0_NS = "endpoint_data0_namespace"
     ENDPOINT_INTERNAL = "endpoint_internal0"
     ENDPOINT_PROMETHEUS = "endpoint_prometheus"
+    ENDPOINT_PPROF = "endpoint_pprof"
     CONTROL_ENDPOINT = "control_endpoint"
     UN_LOCODE = "un_locode"

View file

@@ -1,7 +1,7 @@
-import datetime
 import itertools
 import logging
 import time
+from datetime import datetime, timezone
 from typing import TypeVar

 import frostfs_testlib.resources.optionals as optionals
@@ -247,23 +247,20 @@ class ClusterStateController:
         if service_type == StorageNode:
             self.wait_after_storage_startup()

-    # TODO: Deprecated
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step("Stop all storage services on cluster")
-    def stop_all_storage_services(self, reversed_order: bool = False):
-        nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes
-
-        for node in nodes:
-            self.stop_service_of_type(node, StorageNode)
+    @reporter.step("Restart {service_type} service on {node}")
+    def restart_service_of_type(self, node: ClusterNode, service_type: ServiceClass):
+        service = node.service(service_type)
+        service.restart_service()

-    # TODO: Deprecated
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step("Stop all S3 gates on cluster")
-    def stop_all_s3_gates(self, reversed_order: bool = False):
-        nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes
-
-        for node in nodes:
-            self.stop_service_of_type(node, S3Gate)
+    @reporter.step("Restart all {service_type} services")
+    def restart_services_of_type(self, service_type: type[ServiceClass]):
+        services = self.cluster.services(service_type)
+        parallel([service.restart_service for service in services])
+
+        if service_type == StorageNode:
+            self.wait_after_storage_startup()

     # TODO: Deprecated
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@@ -277,30 +274,6 @@ class ClusterStateController:
     def start_storage_service(self, node: ClusterNode):
         self.start_service_of_type(node, StorageNode)

-    # TODO: Deprecated
-    @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step("Start stopped storage services")
-    def start_stopped_storage_services(self):
-        self.start_stopped_services_of_type(StorageNode)
-
-    # TODO: Deprecated
-    @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step("Stop s3 gate on {node}")
-    def stop_s3_gate(self, node: ClusterNode, mask: bool = True):
-        self.stop_service_of_type(node, S3Gate, mask)
-
-    # TODO: Deprecated
-    @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step("Start s3 gate on {node}")
-    def start_s3_gate(self, node: ClusterNode):
-        self.start_service_of_type(node, S3Gate)
-
-    # TODO: Deprecated
-    @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
-    @reporter.step("Start stopped S3 gates")
-    def start_stopped_s3_gates(self):
-        self.start_stopped_services_of_type(S3Gate)
-
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
     @reporter.step("Suspend {process_name} service in {node}")
     def suspend_service(self, process_name: str, node: ClusterNode):
@@ -390,31 +363,33 @@ class ClusterStateController:
     @reporter.step("Get node time")
     def get_node_date(self, node: ClusterNode) -> datetime:
         shell = node.host.get_shell()
-        return datetime.datetime.strptime(shell.exec("hwclock -r").stdout.strip(), "%Y-%m-%d %H:%M:%S.%f%z")
+        return datetime.strptime(shell.exec('date +"%Y-%m-%d %H:%M:%S"').stdout.strip(), "%Y-%m-%d %H:%M:%S")

-    @reporter.step("Set node time to {in_date}")
+    @reporter.step("Set time on nodes in {in_date}")
+    def change_date_on_all_nodes(self, cluster: Cluster, in_date: datetime) -> None:
+        parallel(self.change_node_date, cluster.cluster_nodes, in_date=in_date)
+
+    @reporter.step("Set time on {node} to {in_date}")
     def change_node_date(self, node: ClusterNode, in_date: datetime) -> None:
         shell = node.host.get_shell()
-        shell.exec(f"date -s @{time.mktime(in_date.timetuple())}")
-        shell.exec("hwclock --systohc")
+        in_date_frmt = in_date.strftime("%Y-%m-%d %H:%M:%S")
+        shell.exec(f"timedatectl set-time '{in_date_frmt}'")
         node_time = self.get_node_date(node)
+
         with reporter.step(f"Verify difference between {node_time} and {in_date} is less than a minute"):
-            assert (self.get_node_date(node) - in_date) < datetime.timedelta(minutes=1)
+            assert (node_time - in_date).total_seconds() < 60

-    @reporter.step(f"Restore time")
+    @reporter.step("Restore time on nodes")
+    def restore_date_on_all_nodes(self, cluster: Cluster) -> None:
+        parallel(self.restore_node_date, cluster.cluster_nodes)
+
+    @reporter.step("Restore time on {node}")
     def restore_node_date(self, node: ClusterNode) -> None:
         shell = node.host.get_shell()
-        now_time = datetime.datetime.now(datetime.timezone.utc)
+        now_time = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S")
+
         with reporter.step(f"Set {now_time} time"):
-            shell.exec(f"date -s @{time.mktime(now_time.timetuple())}")
-            shell.exec("hwclock --systohc")
-
-    @reporter.step("Change the synchronizer status to {status}")
-    def set_sync_date_all_nodes(self, status: str):
-        if status == "active":
-            parallel(self._enable_date_synchronizer, self.cluster.cluster_nodes)
-            return
-        parallel(self._disable_date_synchronizer, self.cluster.cluster_nodes)
+            shell.exec(f"timedatectl set-time '{now_time}'")

     @reporter.step("Set MaintenanceModeAllowed - {status}")
     def set_maintenance_mode_allowed(self, status: str, cluster_node: ClusterNode) -> None:
@@ -500,16 +475,6 @@ class ClusterStateController:
         frostfs_cli_remote = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config_path)
         return frostfs_adm, frostfs_cli, frostfs_cli_remote

-    def _enable_date_synchronizer(self, cluster_node: ClusterNode):
-        shell = cluster_node.host.get_shell()
-        shell.exec("timedatectl set-ntp true")
-        cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "active", 15)
-
-    def _disable_date_synchronizer(self, cluster_node: ClusterNode):
-        shell = cluster_node.host.get_shell()
-        shell.exec("timedatectl set-ntp false")
-        cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "inactive", 15)
-
     def _get_disk_controller(self, node: StorageNode, device: str, mountpoint: str) -> DiskController:
         disk_controller_id = DiskController.get_id(node, device)
         if disk_controller_id in self.detached_disks.keys():
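
The timedatectl-based helpers replace the old date -s / hwclock pair. The command construction and the one-minute verification rule can be sketched standalone:

from datetime import datetime, timezone


def set_time_command(in_date: datetime) -> str:
    # the command change_node_date now issues instead of date -s / hwclock
    formatted = in_date.strftime("%Y-%m-%d %H:%M:%S")
    return f"timedatectl set-time '{formatted}'"


# verification rule from the diff: node time must land within a minute
node_time = datetime(2025, 3, 11, 12, 0, 30)
in_date = datetime(2025, 3, 11, 12, 0, 0)
assert (node_time - in_date).total_seconds() < 60

print(set_time_command(datetime.now(timezone.utc)))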

View file

@@ -1,3 +1,9 @@
+import time
+from functools import wraps
+from typing import Callable
+
+import pytest
+
 from frostfs_testlib.hosting import Host
 from frostfs_testlib.shell.interfaces import CommandResult
@@ -7,11 +13,11 @@ class Metrics:
         self.storage = StorageMetrics(host, metrics_endpoint)


 class StorageMetrics:
     """
     Class represents storage metrics in a cluster
     """

     def __init__(self, host: Host, metrics_endpoint: str) -> None:
         self.host = host
         self.metrics_endpoint = metrics_endpoint
@@ -29,8 +35,46 @@ class StorageMetrics:
         additional_greps = " |grep ".join([grep_command for grep_command in greps.values()])
         result = shell.exec(f"curl -s {self.metrics_endpoint} | grep {additional_greps}")
         return result

     def get_all_metrics(self) -> CommandResult:
         shell = self.host.get_shell()
         result = shell.exec(f"curl -s {self.metrics_endpoint}")
         return result
+
+
+def wait_until_metric_result_is_stable(
+    relative_deviation: float = None, absolute_deviation: int = None, max_attempts: int = 10, sleep_interval: int = 30
+):
+    """
+    A decorator function that repeatedly calls the decorated function until its result stabilizes
+    within a specified relative tolerance or until the maximum number of attempts is reached.
+
+    This decorator is useful for scenarios where a function returns a metric or value that may fluctuate
+    over time, and you want to ensure that the result has stabilized before proceeding.
+    """
+
+    def decorator(func: Callable):
+        @wraps(func)
+        def wrapper(*args, **kwargs):
+            last_result = None
+            for _ in range(max_attempts):
+                # first function call
+                first_result = func(*args, **kwargs)
+
+                # waiting before the second call
+                time.sleep(sleep_interval)
+
+                # second function call
+                last_result = func(*args, **kwargs)
+
+                # checking value stability
+                if first_result == pytest.approx(last_result, rel=relative_deviation, abs=absolute_deviation):
+                    return last_result
+
+            # if stability is not achieved, return the last value
+            if last_result is not None:
+                return last_result
+
+        return wrapper
+
+    return decorator
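
Usage sketch for the stabilization decorator; the import path is an assumption, and the metric function is a toy stand-in for a real Prometheus query:

import random

from frostfs_testlib.storage.dataclasses.metrics import wait_until_metric_result_is_stable


@wait_until_metric_result_is_stable(relative_deviation=0.05, max_attempts=5, sleep_interval=1)
def container_object_count() -> int:
    # toy metric with ~1% jitter; two consecutive reads agree within the
    # 5% relative tolerance, so the decorator returns on the first attempt
    return 1000 + random.randint(-10, 10)


count = container_object_count()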

View file

@@ -82,6 +82,9 @@ class NodeBase(HumanReadableABC):
def get_metrics_endpoint(self) -> str:
return self._get_attribute(ConfigAttributes.ENDPOINT_PROMETHEUS)
def get_pprof_endpoint(self) -> str:
return self._get_attribute(ConfigAttributes.ENDPOINT_PPROF)
def stop_service(self, mask: bool = True):
if mask:
with reporter.step(f"Mask {self.name} service on {self.host.config.address}"):
@@ -144,6 +147,13 @@ class NodeBase(HumanReadableABC):
else None
)
def get_working_dir_path(self) -> Optional[str]:
"""
Returns the working directory path on the remote host, or None if it is not configured.
"""
config_attributes = self.host.get_service_config(self.name)
return self._get_attribute(ConfigAttributes.WORKING_DIR) if ConfigAttributes.WORKING_DIR in config_attributes.attributes else None
@property
def config_dir(self) -> str:
return self._get_attribute(ConfigAttributes.CONFIG_DIR)
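Together with get_metrics_endpoint, the new getter makes per-service diagnostics scriptable. A sketch of pulling a CPU profile through get_pprof_endpoint, assuming the endpoint serves Go's standard net/http/pprof handlers (the URL path, output file, and storage_node variable are illustrative):

pprof_endpoint = storage_node.get_pprof_endpoint()  # storage_node: any NodeBase-derived service
shell = storage_node.host.get_shell()
# 10-second CPU profile, written to a temp file on the remote host.
shell.exec(f"curl -s -o /tmp/cpu.pprof 'http://{pprof_endpoint}/debug/pprof/profile?seconds=10'")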

@@ -1,6 +1,9 @@
import re
from dataclasses import dataclass
from typing import Optional
from pydantic import BaseModel, Field, field_validator
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.readable import HumanReadableEnum
@@ -75,8 +78,37 @@ class NodeNetInfo:
withdrawal_fee: str = None
homomorphic_hashing_disabled: str = None
maintenance_mode_allowed: str = None
eigen_trust_alpha: str = None
eigen_trust_iterations: str = None
class Attributes(BaseModel):
cluster_name: str = Field(alias="ClusterName")
continent: str = Field(alias="Continent")
country: str = Field(alias="Country")
country_code: str = Field(alias="CountryCode")
external_addr: list[str] = Field(alias="ExternalAddr")
location: str = Field(alias="Location")
node: str = Field(alias="Node")
subdiv: str = Field(alias="SubDiv")
subdiv_code: str = Field(alias="SubDivCode")
un_locode: str = Field(alias="UN-LOCODE")
role: str = Field(alias="role")
@field_validator("external_addr", mode="before")
@classmethod
def convert_external_addr(cls, value: str) -> list[str]:
return [f"{ip}:{port}" for ip, port in re.findall(r"/ip4/([\d\.]+)/(?:tcp|tls)/(\d+)", value)]
class NodeInfo(BaseModel):
public_key: str = Field(alias="publicKey")
addresses: list[str] = Field(alias="addresses")
state: str = Field(alias="state")
attributes: Attributes = Field(alias="attributes")
@field_validator("addresses", mode="before")
@classmethod
def convert_external_addr(cls, value: str) -> list[str]:
return [f"{ip}:{port}" for ip, port in re.findall(r"/ip4/([\d\.]+)/(?:tcp|tls)/(\d+)", ",".join(value))]
@dataclass
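A sketch of what these models accept: the validators turn multiaddr strings from `frostfs-cli netmap nodeinfo --json` output into plain ip:port pairs. The payload below is invented for illustration:

raw = {
    "publicKey": "03ab...",
    "addresses": ["/ip4/10.0.0.5/tcp/8080", "/ip4/10.0.0.5/tls/8082"],
    "state": "ONLINE",
    "attributes": {
        "ClusterName": "dev", "Continent": "Europe", "Country": "Russia",
        "CountryCode": "RU", "ExternalAddr": "/ip4/1.2.3.4/tcp/8080",
        "Location": "Moscow", "Node": "01", "SubDiv": "Moskva",
        "SubDivCode": "MOW", "UN-LOCODE": "RU MOW", "role": "alphabet",
    },
}
info = NodeInfo.model_validate(raw)  # pydantic v2 entry point
assert info.addresses == ["10.0.0.5:8080", "10.0.0.5:8082"]
assert info.attributes.external_addr == ["1.2.3.4:8080"]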

@@ -1,14 +1,14 @@
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.storage.grpc_operations import implementations, interfaces, interfaces_wrapper
class CliClientWrapper(interfaces_wrapper.GrpcClientWrapper):
def __init__(self, cli: FrostfsCli) -> None:
self.cli = cli
self.object: interfaces.ObjectInterface = implementations.ObjectOperations(self.cli)
self.container: interfaces.ContainerInterface = implementations.ContainerOperations(self.cli)
self.netmap: interfaces.NetmapInterface = implementations.NetmapOperations(self.cli)
class RpcClientWrapper(interfaces_wrapper.GrpcClientWrapper):
pass # The next series
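Client code can now reach netmap operations through the same wrapper as object and container operations. A minimal usage sketch (the FrostfsCli construction arguments and the endpoint are illustrative, not a prescribed setup):

cli = FrostfsCli(shell, frostfs_cli_exec_path="/usr/bin/frostfs-cli", config_file="/etc/frostfs/cli-config.yml")
client = CliClientWrapper(cli)
current_epoch = client.netmap.epoch(rpc_endpoint="10.0.0.5:8080")
node_info = client.netmap.nodeinfo(rpc_endpoint="10.0.0.5:8080")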

@@ -0,0 +1,4 @@
from .chunks import ChunksOperations
from .container import ContainerOperations
from .netmap import NetmapOperations
from .object import ObjectOperations

@@ -0,0 +1,171 @@
import json as module_json
from typing import List, Optional
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.cli.netmap_parser import NetmapParser
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
from frostfs_testlib.storage.dataclasses.storage_object_info import NodeInfo, NodeNetInfo, NodeNetmapInfo
from .. import interfaces
class NetmapOperations(interfaces.NetmapInterface):
def __init__(self, cli: FrostfsCli) -> None:
self.cli = cli
def epoch(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
trace: Optional[bool] = True,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> int:
"""
Get current epoch number.
"""
output = (
self.cli.netmap.epoch(
rpc_endpoint=rpc_endpoint,
wallet=wallet,
address=address,
generate_key=generate_key,
ttl=ttl,
trace=trace,
xhdr=xhdr,
timeout=timeout,
)
.stdout.split("Trace ID")[0]
.strip()
)
return int(output)
def netinfo(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
trace: Optional[bool] = True,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> NodeNetInfo:
"""
Get target network info.
"""
output = (
self.cli.netmap.netinfo(
rpc_endpoint=rpc_endpoint,
wallet=wallet,
address=address,
generate_key=generate_key,
ttl=ttl,
trace=trace,
xhdr=xhdr,
timeout=timeout,
)
.stdout.split("Trace ID")[0]
.strip()
)
return NetmapParser.netinfo(output)
def nodeinfo(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
json: bool = True,
ttl: Optional[int] = None,
trace: Optional[bool] = True,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> NodeInfo:
"""
Get target node info.
"""
output = (
self.cli.netmap.nodeinfo(
rpc_endpoint=rpc_endpoint,
wallet=wallet,
address=address,
generate_key=generate_key,
json=json,
ttl=ttl,
trace=trace,
xhdr=xhdr,
timeout=timeout,
)
.stdout.split("Trace ID")[0]
.strip()
)
return NetmapParser.node_info(module_json.loads(output))
def snapshot(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
trace: Optional[bool] = True,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> List[NodeNetmapInfo]:
"""
Get netmap snapshot.
"""
output = (
self.cli.netmap.snapshot(
rpc_endpoint=rpc_endpoint,
wallet=wallet,
address=address,
generate_key=generate_key,
ttl=ttl,
trace=trace,
xhdr=xhdr,
timeout=timeout,
)
.stdout.split("Trace ID")[0]
.strip()
)
return NetmapParser.snapshot_all_nodes(output)
def snapshot_one_node(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
trace: Optional[bool] = True,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> List[NodeNetmapInfo]:
"""
Get netmap snapshot entry for a single node.
"""
output = (
self.cli.netmap.snapshot(
rpc_endpoint=rpc_endpoint,
wallet=wallet,
address=address,
generate_key=generate_key,
ttl=ttl,
trace=trace,
xhdr=xhdr,
timeout=timeout,
)
.stdout.split("Trace ID")[0]
.strip()
)
return NetmapParser.snapshot_one_node(output, rpc_endpoint)
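All five methods share one output-cleanup idiom: with trace enabled, the CLI appends tracing details after a "Trace ID" marker, so each implementation keeps only the part before it. In isolation (sample output invented):

stdout = "42\nTrace ID: 9f86d081884c7d65"
epoch = int(stdout.split("Trace ID")[0].strip())  # -> 42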

@@ -1,424 +0,0 @@
from abc import ABC, abstractmethod
from typing import Any, List, Optional
from frostfs_testlib.shell.interfaces import CommandResult
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.constants import PlacementRule
from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo
from frostfs_testlib.utils import file_utils
class ChunksInterface(ABC):
@abstractmethod
def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]:
pass
@abstractmethod
def get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]:
pass
@abstractmethod
def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str:
pass
@abstractmethod
def get_all(
self,
rpc_endpoint: str,
cid: str,
oid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
trace: bool = False,
root: bool = False,
verify_presence_all: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> list[Chunk]:
pass
@abstractmethod
def get_parity(
self,
rpc_endpoint: str,
cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
oid: Optional[str] = None,
trace: bool = False,
root: bool = False,
verify_presence_all: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> Chunk:
pass
@abstractmethod
def get_first_data(
self,
rpc_endpoint: str,
cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
oid: Optional[str] = None,
trace: bool = False,
root: bool = False,
verify_presence_all: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> Chunk:
pass
class ObjectInterface(ABC):
def __init__(self) -> None:
self.chunks: ChunksInterface
@abstractmethod
def delete(
self,
cid: str,
oid: str,
endpoint: str,
bearer: str = "",
xhdr: Optional[dict] = None,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def get(
self,
cid: str,
oid: str,
endpoint: str,
bearer: Optional[str] = None,
write_object: Optional[str] = None,
xhdr: Optional[dict] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> file_utils.TestFile:
pass
@abstractmethod
def get_from_random_node(
self,
cid: str,
oid: str,
cluster: Cluster,
bearer: Optional[str] = None,
write_object: Optional[str] = None,
xhdr: Optional[dict] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def hash(
self,
endpoint: str,
cid: str,
oid: str,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
range: Optional[str] = None,
salt: Optional[str] = None,
ttl: Optional[int] = None,
session: Optional[str] = None,
hash_type: Optional[str] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def head(
self,
cid: str,
oid: str,
endpoint: str,
bearer: str = "",
xhdr: Optional[dict] = None,
json_output: bool = True,
is_raw: bool = False,
is_direct: bool = False,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult | Any:
pass
@abstractmethod
def lock(
self,
cid: str,
oid: str,
endpoint: str,
lifetime: Optional[int] = None,
expire_at: Optional[int] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def put(
self,
path: str,
cid: str,
endpoint: str,
bearer: Optional[str] = None,
copies_number: Optional[int] = None,
attributes: Optional[dict] = None,
xhdr: Optional[dict] = None,
expire_at: Optional[int] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def patch(
self,
cid: str,
oid: str,
endpoint: str,
ranges: Optional[list[str]] = None,
payloads: Optional[list[str]] = None,
new_attrs: Optional[str] = None,
replace_attrs: bool = False,
bearer: Optional[str] = None,
xhdr: Optional[dict] = None,
session: Optional[str] = None,
timeout: Optional[str] = None,
trace: bool = False,
) -> str:
pass
@abstractmethod
def put_to_random_node(
self,
path: str,
cid: str,
cluster: Cluster,
bearer: Optional[str] = None,
copies_number: Optional[int] = None,
attributes: Optional[dict] = None,
xhdr: Optional[dict] = None,
expire_at: Optional[int] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def range(
self,
cid: str,
oid: str,
range_cut: str,
endpoint: str,
bearer: str = "",
xhdr: Optional[dict] = None,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> tuple[file_utils.TestFile, bytes]:
pass
@abstractmethod
def search(
self,
cid: str,
endpoint: str,
bearer: str = "",
oid: Optional[str] = None,
filters: Optional[dict] = None,
expected_objects_list: Optional[list] = None,
xhdr: Optional[dict] = None,
session: Optional[str] = None,
phy: bool = False,
root: bool = False,
timeout: Optional[str] = None,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
ttl: Optional[int] = None,
) -> List:
pass
@abstractmethod
def nodes(
self,
cluster: Cluster,
cid: str,
oid: str,
alive_node: ClusterNode,
bearer: str = "",
xhdr: Optional[dict] = None,
is_direct: bool = False,
verify_presence_all: bool = False,
timeout: Optional[str] = None,
) -> List[ClusterNode]:
pass
@abstractmethod
def parts(
self,
cid: str,
oid: str,
alive_node: ClusterNode,
bearer: str = "",
xhdr: Optional[dict] = None,
is_direct: bool = False,
verify_presence_all: bool = False,
timeout: Optional[str] = None,
) -> List[str]:
pass
class ContainerInterface(ABC):
@abstractmethod
def create(
self,
endpoint: str,
nns_zone: Optional[str] = None,
nns_name: Optional[str] = None,
address: Optional[str] = None,
attributes: Optional[dict] = None,
basic_acl: Optional[str] = None,
await_mode: bool = False,
disable_timestamp: bool = False,
force: bool = False,
trace: bool = False,
name: Optional[str] = None,
nonce: Optional[str] = None,
policy: Optional[str] = None,
session: Optional[str] = None,
subnet: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> str:
"""
Create a new container and register it in the FrostFS.
It will be stored in the sidechain when the Inner Ring accepts it.
"""
raise NotImplementedError("Method create is not implemented")
@abstractmethod
def delete(
self,
endpoint: str,
cid: str,
address: Optional[str] = None,
await_mode: bool = False,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
force: bool = False,
trace: bool = False,
) -> List[str]:
"""
Delete an existing container.
Only the owner of the container has permission to remove the container.
"""
raise NotImplementedError("Method delete is not implemented")
@abstractmethod
def get(
self,
endpoint: str,
cid: str,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
await_mode: bool = False,
to: Optional[str] = None,
json_mode: bool = True,
trace: bool = False,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> List[str]:
"""Get container field info."""
raise NotImplementedError("Method get is not implemented")
@abstractmethod
def get_eacl(
self,
endpoint: str,
cid: str,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
await_mode: bool = False,
json_mode: bool = True,
trace: bool = False,
to: Optional[str] = None,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> List[str]:
"""Get extended ACL table of container."""
raise NotImplementedError("Method get-eacl is not implemented")
@abstractmethod
def list(
self,
endpoint: str,
name: Optional[str] = None,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
trace: bool = False,
owner: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
**params,
) -> List[str]:
"""List all created containers."""
raise NotImplementedError("Method list is not implemented")
@abstractmethod
def nodes(
self,
endpoint: str,
cid: str,
cluster: Cluster,
address: Optional[str] = None,
ttl: Optional[int] = None,
from_file: Optional[str] = None,
trace: bool = False,
short: Optional[bool] = True,
xhdr: Optional[dict] = None,
generate_key: Optional[bool] = None,
timeout: Optional[str] = None,
) -> List[ClusterNode]:
"""Show the nodes participating in the container in the current epoch."""
raise NotImplementedError("Method nodes is not implemented")
class GrpcClientWrapper(ABC):
def __init__(self) -> None:
self.object: ObjectInterface
self.container: ContainerInterface

@@ -0,0 +1,4 @@
from .chunks import ChunksInterface
from .container import ContainerInterface
from .netmap import NetmapInterface
from .object import ObjectInterface

@@ -0,0 +1,79 @@
from abc import ABC, abstractmethod
from typing import Optional
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo
class ChunksInterface(ABC):
@abstractmethod
def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]:
pass
@abstractmethod
def get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]:
pass
@abstractmethod
def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str:
pass
@abstractmethod
def get_all(
self,
rpc_endpoint: str,
cid: str,
oid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
trace: bool = False,
root: bool = False,
verify_presence_all: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> list[Chunk]:
pass
@abstractmethod
def get_parity(
self,
rpc_endpoint: str,
cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
oid: Optional[str] = None,
trace: bool = False,
root: bool = False,
verify_presence_all: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> Chunk:
pass
@abstractmethod
def get_first_data(
self,
rpc_endpoint: str,
cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
oid: Optional[str] = None,
trace: bool = False,
root: bool = False,
verify_presence_all: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> Chunk:
pass

@@ -0,0 +1,125 @@
from abc import ABC, abstractmethod
from typing import List, Optional
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
class ContainerInterface(ABC):
@abstractmethod
def create(
self,
endpoint: str,
nns_zone: Optional[str] = None,
nns_name: Optional[str] = None,
address: Optional[str] = None,
attributes: Optional[dict] = None,
basic_acl: Optional[str] = None,
await_mode: bool = False,
disable_timestamp: bool = False,
force: bool = False,
trace: bool = False,
name: Optional[str] = None,
nonce: Optional[str] = None,
policy: Optional[str] = None,
session: Optional[str] = None,
subnet: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> str:
"""
Create a new container and register it in the FrostFS.
It will be stored in the sidechain when the Inner Ring accepts it.
"""
raise NotImplementedError("Method create is not implemented")
@abstractmethod
def delete(
self,
endpoint: str,
cid: str,
address: Optional[str] = None,
await_mode: bool = False,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
force: bool = False,
trace: bool = False,
) -> List[str]:
"""
Delete an existing container.
Only the owner of the container has permission to remove the container.
"""
raise NotImplementedError("Method delete is not implemented")
@abstractmethod
def get(
self,
endpoint: str,
cid: str,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
await_mode: bool = False,
to: Optional[str] = None,
json_mode: bool = True,
trace: bool = False,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> List[str]:
"""Get container field info."""
raise NotImplementedError("Method get is not implemented")
@abstractmethod
def get_eacl(
self,
endpoint: str,
cid: str,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
await_mode: bool = False,
json_mode: bool = True,
trace: bool = False,
to: Optional[str] = None,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> List[str]:
"""Get extended ACL table of container."""
raise NotImplementedError("Method get-eacl is not implemented")
@abstractmethod
def list(
self,
endpoint: str,
name: Optional[str] = None,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
trace: bool = False,
owner: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
**params,
) -> List[str]:
"""List all created containers."""
raise NotImplementedError("Method list is not implemented")
@abstractmethod
def nodes(
self,
endpoint: str,
cid: str,
cluster: Cluster,
address: Optional[str] = None,
ttl: Optional[int] = None,
from_file: Optional[str] = None,
trace: bool = False,
short: Optional[bool] = True,
xhdr: Optional[dict] = None,
generate_key: Optional[bool] = None,
timeout: Optional[str] = None,
) -> List[ClusterNode]:
"""Show the nodes participating in the container in the current epoch."""
raise NotImplementedError("Method nodes is not implemented")

@@ -0,0 +1,89 @@
from abc import ABC, abstractmethod
from typing import List, Optional
from frostfs_testlib.storage.dataclasses.storage_object_info import NodeInfo, NodeNetInfo, NodeNetmapInfo
class NetmapInterface(ABC):
@abstractmethod
def epoch(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
trace: Optional[bool] = False,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> int:
"""
Get current epoch number.
"""
raise NotImplementedError("Method epoch is not implemented")
@abstractmethod
def netinfo(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> NodeNetInfo:
"""
Get target network info.
"""
raise NotImplementedError("Method netinfo is not implemented")
@abstractmethod
def nodeinfo(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> NodeInfo:
"""
Get target node info.
"""
raise NotImplementedError("Method nodeinfo is not implemented")
@abstractmethod
def snapshot(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> List[NodeNetmapInfo]:
"""
Get netmap snapshot.
"""
raise NotImplementedError("Method snapshot is not implemented")
@abstractmethod
def snapshot_one_node(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> List[NodeNetmapInfo]:
"""
Get netmap snapshot entry for a single node.
"""
raise NotImplementedError("Method snapshot_one_node is not implemented")

@@ -0,0 +1,223 @@
from abc import ABC, abstractmethod
from typing import Any, List, Optional
from frostfs_testlib.shell.interfaces import CommandResult
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.utils import file_utils
from .chunks import ChunksInterface
class ObjectInterface(ABC):
def __init__(self) -> None:
self.chunks: ChunksInterface
@abstractmethod
def delete(
self,
cid: str,
oid: str,
endpoint: str,
bearer: str = "",
xhdr: Optional[dict] = None,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def get(
self,
cid: str,
oid: str,
endpoint: str,
bearer: Optional[str] = None,
write_object: Optional[str] = None,
xhdr: Optional[dict] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> file_utils.TestFile:
pass
@abstractmethod
def get_from_random_node(
self,
cid: str,
oid: str,
cluster: Cluster,
bearer: Optional[str] = None,
write_object: Optional[str] = None,
xhdr: Optional[dict] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def hash(
self,
endpoint: str,
cid: str,
oid: str,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
range: Optional[str] = None,
salt: Optional[str] = None,
ttl: Optional[int] = None,
session: Optional[str] = None,
hash_type: Optional[str] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def head(
self,
cid: str,
oid: str,
endpoint: str,
bearer: str = "",
xhdr: Optional[dict] = None,
json_output: bool = True,
is_raw: bool = False,
is_direct: bool = False,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult | Any:
pass
@abstractmethod
def lock(
self,
cid: str,
oid: str,
endpoint: str,
lifetime: Optional[int] = None,
expire_at: Optional[int] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def put(
self,
path: str,
cid: str,
endpoint: str,
bearer: Optional[str] = None,
copies_number: Optional[int] = None,
attributes: Optional[dict] = None,
xhdr: Optional[dict] = None,
expire_at: Optional[int] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def patch(
self,
cid: str,
oid: str,
endpoint: str,
ranges: Optional[list[str]] = None,
payloads: Optional[list[str]] = None,
new_attrs: Optional[str] = None,
replace_attrs: bool = False,
bearer: Optional[str] = None,
xhdr: Optional[dict] = None,
session: Optional[str] = None,
timeout: Optional[str] = None,
trace: bool = False,
) -> str:
pass
@abstractmethod
def put_to_random_node(
self,
path: str,
cid: str,
cluster: Cluster,
bearer: Optional[str] = None,
copies_number: Optional[int] = None,
attributes: Optional[dict] = None,
xhdr: Optional[dict] = None,
expire_at: Optional[int] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def range(
self,
cid: str,
oid: str,
range_cut: str,
endpoint: str,
bearer: str = "",
xhdr: Optional[dict] = None,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> tuple[file_utils.TestFile, bytes]:
pass
@abstractmethod
def search(
self,
cid: str,
endpoint: str,
bearer: str = "",
oid: Optional[str] = None,
filters: Optional[dict] = None,
expected_objects_list: Optional[list] = None,
xhdr: Optional[dict] = None,
session: Optional[str] = None,
phy: bool = False,
root: bool = False,
timeout: Optional[str] = None,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
ttl: Optional[int] = None,
) -> List:
pass
@abstractmethod
def nodes(
self,
cluster: Cluster,
cid: str,
oid: str,
alive_node: ClusterNode,
bearer: str = "",
xhdr: Optional[dict] = None,
is_direct: bool = False,
verify_presence_all: bool = False,
timeout: Optional[str] = None,
) -> List[ClusterNode]:
pass
@abstractmethod
def parts(
self,
cid: str,
oid: str,
alive_node: ClusterNode,
bearer: str = "",
xhdr: Optional[dict] = None,
is_direct: bool = False,
verify_presence_all: bool = False,
timeout: Optional[str] = None,
) -> List[str]:
pass

@@ -0,0 +1,10 @@
from abc import ABC
from . import interfaces
class GrpcClientWrapper(ABC):
def __init__(self) -> None:
self.object: interfaces.ObjectInterface
self.container: interfaces.ContainerInterface
self.netmap: interfaces.NetmapInterface

@@ -68,7 +68,7 @@ def _attach_allure_log(cmd: str, output: str, return_code: int, start_time: date
f"COMMAND: '{cmd}'\n"
f"OUTPUT:\n {output}\n"
f"RC: {return_code}\n"
f"Start / End / Elapsed\t {start_time} / {end_time} / {end_time - start_time}"
)
with reporter.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'):
reporter.attach(command_attachment, "Command execution")

@@ -64,7 +64,7 @@ def parallel_binary_verions(host: Host) -> dict[str, str]:
try:
result = shell.exec(f"{binary_path} {binary['param']}")
version = parse_version(result.stdout) or parse_version(result.stderr) or "Unknown"
versions_at_host[binary_name] = version.strip()
except Exception as exc:
logger.error(f"Cannot get version for {binary_path} because of\n{exc}")
versions_at_host[binary_name] = "Unknown"

@@ -6,10 +6,7 @@ import pytest
from frostfs_testlib.load.load_config import EndpointSelectionStrategy, LoadParams, LoadScenario, LoadType, Preset, ReadFrom
from frostfs_testlib.load.runners import DefaultRunner
from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.controllers.background_load_controller import BackgroundLoadController
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
from frostfs_testlib.storage.dataclasses.node_base import NodeBase
@dataclass
@@ -129,6 +126,8 @@ class TestLoadConfig:
"--size '11'",
"--acl 'acl'",
"--preload_obj '13'",
"--retry '24'",
"--rule 'rule' --rule 'rule_2'",
"--out 'pregen_json'",
"--workers '7'",
"--containers '16'",
@@ -161,6 +160,8 @@ class TestLoadConfig:
expected_preset_args = [
"--size '11'",
"--preload_obj '13'",
"--retry '24'",
"--rule 'rule' --rule 'rule_2'",
"--out 'pregen_json'",
"--workers '7'",
"--containers '16'",
@@ -317,6 +318,8 @@ class TestLoadConfig:
"--no-verify-ssl",
"--size '11'",
"--preload_obj '13'",
"--retry '24'",
"--rule 'rule' --rule 'rule_2'",
"--out 'pregen_json'",
"--workers '7'",
"--containers '16'",
@@ -350,6 +353,8 @@ class TestLoadConfig:
expected_preset_args = [
"--size '11'",
"--preload_obj '13'",
"--retry '24'",
"--rule 'rule' --rule 'rule_2'",
"--out 'pregen_json'",
"--workers '7'",
"--containers '16'",
@@ -415,6 +420,26 @@ class TestLoadConfig:
self._check_preset_params(load_params, params)
@pytest.mark.parametrize(
"load_type, input, value, params",
[
(LoadType.gRPC, ["A C ", " B"], ["A C", "B"], [f"--rule 'A C' --rule 'B'"]),
(LoadType.gRPC, " A ", ["A"], ["--rule 'A'"]),
(LoadType.gRPC, " A , B ", ["A , B"], ["--rule 'A , B'"]),
(LoadType.gRPC, [" A", "B "], ["A", "B"], ["--rule 'A' --rule 'B'"]),
(LoadType.gRPC, None, None, []),
(LoadType.S3, ["A C ", " B"], ["A C", "B"], []),
(LoadType.S3, None, None, []),
],
)
def test_ape_list_parsing_formatter(self, load_type, input, value, params):
load_params = LoadParams(load_type)
load_params.preset = Preset()
load_params.preset.rule = input
assert load_params.preset.rule == value
self._check_preset_params(load_params, params)
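The expected values above imply that Preset.rule trims whitespace and promotes a bare string to a one-element list. A minimal sketch of that normalization as the test suggests it behaves (this is not the testlib's actual implementation):

def normalize_rule(value: str | list[str] | None) -> list[str] | None:
    # None stays None; a bare string becomes a one-element list; items are stripped.
    if value is None:
        return None
    if isinstance(value, str):
        value = [value]
    return [item.strip() for item in value]

assert normalize_rule(["A C ", " B"]) == ["A C", "B"]
assert normalize_rule(" A , B ") == ["A , B"]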
@pytest.mark.parametrize("load_params, load_type", [(LoadScenario.VERIFY, LoadType.S3)], indirect=True)
def test_argument_parsing_for_s3_verify_scenario(self, load_params: LoadParams):
expected_env_vars = {
@@ -444,6 +469,8 @@ class TestLoadConfig:
expected_preset_args = [
"--size '0'",
"--preload_obj '0'",
"--retry '0'",
"--rule ''",
"--out ''",
"--workers '0'",
"--containers '0'",
@@ -475,6 +502,8 @@ class TestLoadConfig:
expected_preset_args = [
"--size '0'",
"--preload_obj '0'",
"--retry '0'",
"--rule ''",
"--out ''",
"--workers '0'",
"--containers '0'",
@@ -582,6 +611,8 @@ class TestLoadConfig:
expected_preset_args = [
"--size '0'",
"--preload_obj '0'",
"--retry '0'",
"--rule ''",
"--out ''",
"--workers '0'",
"--containers '0'",
@@ -613,6 +644,8 @@ class TestLoadConfig:
expected_preset_args = [
"--size '0'",
"--preload_obj '0'",
"--retry '0'",
"--rule ''",
"--out ''",
"--workers '0'",
"--containers '0'",