Compare commits


44 commits

Author SHA1 Message Date
7d2c92ebc0 [#361] Move common fixture to testlib
Signed-off-by: a.berezin <a.berezin@yadro.com>
2025-03-07 17:06:14 +03:00
0c4e601840 [#359] Override representation method for Host
Signed-off-by: Ilyas Niyazov <i.niyazov@yadro.com>
2025-03-06 08:44:34 +00:00
f1073d214c [#360] Increased timeout for IAM policy attach/detach
Signed-off-by: Yaroslava Lukoyanova <y.lukoyanova@yadro.com>
2025-03-05 16:43:55 +03:00
b00d080982 [#357] Synchronize client and CliCommand timeouts
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2025-03-03 12:49:49 +00:00
97b9b5498a [#358] Add minor improvements for convenient work with clients
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2025-02-25 17:07:23 +03:00
e9bc36b3d3 [#355] Change CSC time methods
Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
2025-02-05 11:47:09 +00:00
87afc4b58c [#356] Added pprof endpoint and working dir to service attributes
Signed-off-by: Dmitry Anurin <danurin@yadro.com>
2025-02-05 09:47:49 +03:00
b44705eb2f [#353] Added Netmap command for CliWrapper
Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
2025-01-31 09:11:44 +00:00
ace9564243 [#352] Fix versions parsing
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2025-01-30 11:21:09 +03:00
0015ea7f93 [#350] Add ape rule for load config
Signed-off-by: a.berezin <a.berezin@yadro.com>
2025-01-23 17:47:49 +03:00
aed20e02ac [#349] Fixed hook pytest-collect-modifyitems
Signed-off-by: Ilyas Niyazov <i.niyazov@yadro.com>
2025-01-17 17:37:51 +03:00
80dd8d0b16 [#348] Fixed check of fields in S3 aws/boto3 methods related to policies
Signed-off-by: y.lukoyanova <y.lukoyanova@yadro.com>
2025-01-17 11:09:47 +03:00
daf186690b [#345] Fix curl request generation
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2025-01-16 07:45:42 +00:00
5a291c5b7f [#347] remove stderr check
Signed-off-by: m.malygina <m.malygina@yadro.com>
2025-01-14 15:26:05 +03:00
974836f1bd [#346] Added correct exception in Chunks parse
Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
2025-01-13 12:58:29 +03:00
6fe7fef44b [#344] Update ifaces
Signed-off-by: a.berezin <a.berezin@yadro.com>
2024-12-27 09:05:28 +00:00
0a3de927a2 [#343] Extend testsuites for PATCH method
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2024-12-25 15:39:17 +00:00
9e3380d519 [#336] Refine CODEOWNERS settings
Signed-off-by: Vitaliy Potyarkin <v.potyarkin@yadro.com>
2024-12-25 09:53:48 +00:00
6e951443ed [#342] Remove try-catch from delete block
Signed-off-by: a.berezin <a.berezin@yadro.com>
2024-12-24 08:17:18 +00:00
0479701258 [#341] Add test for multipart object in Test_http_object testsuite
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2024-12-18 18:03:00 +03:00
dc5a9e7bb9 [#340] Move s3 and http directories to avoid conflict with requests
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2024-12-18 12:57:24 +03:00
335eed85b1 [#338] Added parameter word_count to method get_logs
Signed-off-by: Ilyas Niyazov <i.niyazov@yadro.com>
2024-12-17 14:25:10 +00:00
cc7bd4ffc9 [#339] Added ns args for func container create
Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
2024-12-17 13:55:15 +03:00
cd15be3b7c [#334] Automation of PATCH method in S3
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2024-12-13 10:53:02 +03:00
8ff1e72499 [#337] Add rule chain error
Signed-off-by: Ekaterina Chernitsyna <e.chernitsyna@yadro.com>
2024-12-13 10:45:14 +03:00
0ebb845329 [#335] Fixed iam boto3 client
2024-12-06 10:50:34 +03:00
ee7d9df4a9 [#333] Fix files param in http client part two
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2024-12-05 16:48:23 +03:00
61353cb38c [#332] Fix files param in http client
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2024-12-05 14:26:24 +03:00
b3d05c5c28 [#326] Automation of PATCH method in GRPC
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2024-12-05 10:28:06 +00:00
8ec7e21e84 [#331] Fix type hints for service methods
Signed-off-by: a.berezin <a.berezin@yadro.com>
2024-12-03 14:55:12 +03:00
0e040d2722 [#330] Improve CURL generation and fix Boto3 logging
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2024-12-02 15:54:38 +03:00
7d6768c83f [#325] Added get nns records method to frostfs-adm
Signed-off-by: Dmitry Anurin <danurin@yadro.com>
2024-11-29 10:21:41 +00:00
3dc7a5bdb0 [#328] Change logic activating split-brain
Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
2024-11-29 08:55:08 +00:00
24e1dfef28 [#324] Extension list_objects method
2024-11-26 07:37:56 +00:00
0c9660fffc [#323] Update APE related entities
Signed-off-by: a.berezin <a.berezin@yadro.com>
2024-11-20 17:14:33 +03:00
8eaa511e5c [#322] Added classmethod decorator in Http client
Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
2024-11-18 15:07:24 +00:00
a1953684b8 [#307] added methods for testing MFA
2024-11-18 07:08:42 +00:00
451de5e07e [#320] Added shards detach function
Signed-off-by: Dmitry Anurin <danurin@yadro.com>
2024-11-14 16:22:06 +03:00
f24bfc06fd [#319] Add cached fixture feature
Signed-off-by: a.berezin <a.berezin@yadro.com>
2024-11-13 17:46:03 +03:00
47bc11835b [#318] Add tombstone expiration test
Signed-off-by: a.berezin <a.berezin@yadro.com>
2024-11-13 10:11:03 +03:00
2a90ec74ff [#317] update morph rule chain
2024-11-12 16:01:12 +03:00
95b32a036a [#316] Extend parallel exception message output
Signed-off-by: a.berezin <a.berezin@yadro.com>
2024-11-12 12:28:10 +03:00
55d8ee5da0 [#315] Add http client
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
2024-11-08 15:51:32 +03:00
ea40940514 [#313] update force_new_epoch
2024-11-05 12:37:56 +03:00
62 changed files with 1928 additions and 635 deletions


@@ -1 +1,3 @@
* @JuliaKovshova @abereziny @d.zayakin @anikeev-yadro @anurindm @ylukoyan @i.niyazov
.* @TrueCloudLab/qa-committers
.forgejo/.* @potyarkin
Makefile @potyarkin


@@ -62,7 +62,7 @@ authmate = "frostfs_testlib.credentials.authmate_s3_provider:AuthmateS3Credentia
wallet_factory = "frostfs_testlib.credentials.wallet_factory_provider:WalletFactoryProvider"
[project.entry-points."frostfs.testlib.bucket_cid_resolver"]
frostfs = "frostfs_testlib.s3.curl_bucket_resolver:CurlBucketContainerResolver"
frostfs = "frostfs_testlib.clients.s3.curl_bucket_resolver:CurlBucketContainerResolver"
[tool.isort]
profile = "black"


@@ -10,6 +10,7 @@ tenacity==8.0.1
pytest==7.1.2
boto3==1.35.30
boto3-stubs[essential]==1.35.30
pydantic==2.10.6
# Dev dependencies
black==22.8.0


@@ -1,4 +1,4 @@
__version__ = "2.0.1"
from .fixtures import configure_testlib, hosting, temp_directory
from .hooks import pytest_collection_modifyitems
from .fixtures import configure_testlib, hosting, session_start_time, temp_directory
from .hooks import pytest_add_frostfs_marker, pytest_collection_modifyitems


@@ -24,9 +24,7 @@ class CliCommand:
def __init__(self, shell: Shell, cli_exec_path: str, **base_params):
self.shell = shell
self.cli_exec_path = cli_exec_path
self.__base_params = " ".join(
[f"--{param} {value}" for param, value in base_params.items() if value]
)
self.__base_params = " ".join([f"--{param} {value}" for param, value in base_params.items() if value])
def _format_command(self, command: str, **params) -> str:
param_str = []
@@ -48,9 +46,7 @@ class CliCommand:
val_str = str(value_item).replace("'", "\\'")
param_str.append(f"--{param} '{val_str}'")
elif isinstance(value, dict):
param_str.append(
f'--{param} \'{",".join(f"{key}={val}" for key, val in value.items())}\''
)
param_str.append(f'--{param} \'{",".join(f"{key}={val}" for key, val in value.items())}\'')
else:
if "'" in str(value):
value_str = str(value).replace('"', '\\"')
@@ -63,12 +59,18 @@
return f"{self.cli_exec_path} {self.__base_params} {command or ''} {param_str}"
def _execute(self, command: Optional[str], **params) -> CommandResult:
return self.shell.exec(self._format_command(command, **params))
def _execute_with_password(self, command: Optional[str], password, **params) -> CommandResult:
timeout = int(params["timeout"].rstrip("s")) if params.get("timeout") else None
return self.shell.exec(
self._format_command(command, **params),
options=CommandOptions(
interactive_inputs=[InteractiveInput(prompt_pattern="assword", input=password)]
CommandOptions(timeout=timeout),
)
def _execute_with_password(self, command: Optional[str], password, **params) -> CommandResult:
timeout = int(params["timeout"].rstrip("s")) if params.get("timeout") else None
return self.shell.exec(
self._format_command(command, **params),
CommandOptions(
interactive_inputs=[InteractiveInput(prompt_pattern="assword", input=password)],
timeout=timeout,
),
)
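A minimal illustration (hypothetical values) of the timeout synchronization introduced in [#357]: a CLI-style timeout string such as "120s" is now parsed and forwarded to the shell layer, so the shell no longer gives up before the CLI command itself does:

params = {"timeout": "120s"}
# Mirrors the parsing added in CliCommand._execute above:
timeout = int(params["timeout"].rstrip("s")) if params.get("timeout") else None
assert timeout == 120  # seconds; forwarded as CommandOptions(timeout=120)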


@@ -122,7 +122,9 @@ class FrostfsAdmMorph(CliCommand):
**{param: param_value for param, param_value in locals().items() if param not in ["self"]},
)
def force_new_epoch(self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None) -> CommandResult:
def force_new_epoch(
self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None, delta: Optional[int] = None
) -> CommandResult:
"""Create new FrostFS epoch event in the side chain.
Args:
@@ -351,6 +353,7 @@ class FrostfsAdmMorph(CliCommand):
rule: Optional[list[str]] = None,
path: Optional[str] = None,
chain_id_hex: Optional[bool] = None,
chain_name: Optional[str] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
@@ -381,6 +384,7 @@
target_name: str,
target_type: str,
chain_id_hex: Optional[bool] = None,
chain_name: Optional[str] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
@@ -408,6 +412,7 @@
target_type: str,
target_name: Optional[str] = None,
rpc_endpoint: Optional[str] = None,
chain_name: Optional[str] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
@@ -434,6 +439,7 @@
target_name: str,
target_type: str,
all: Optional[bool] = None,
chain_name: Optional[str] = None,
chain_id_hex: Optional[bool] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
@@ -457,3 +463,26 @@
"morph ape rm-rule-chain",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def get_nns_records(
self,
name: str,
type: Optional[str] = None,
rpc_endpoint: Optional[str] = None,
alphabet_wallets: Optional[str] = None,
) -> CommandResult:
"""Returns domain record of the specified type
Args:
name: Domain name
type: Domain name service record type (A|CNAME|SOA|TXT)
rpc_endpoint: N3 RPC node endpoint
alphabet_wallets: path to alphabet wallets dir
Returns:
Command's result
"""
return self._execute(
"morph nns get-records",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
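A hedged usage sketch for the new delta parameter of force_new_epoch and the get_nns_records wrapper; the binary path, wallets directory and endpoint are placeholders, and the FrostfsAdm construction follows the usual testlib pattern:

from frostfs_testlib.cli import FrostfsAdm
from frostfs_testlib.shell.local_shell import LocalShell

frostfs_adm = FrostfsAdm(LocalShell(), "/usr/bin/frostfs-adm")

# Tick three epochs in one call via the new delta parameter.
frostfs_adm.morph.force_new_epoch(alphabet_wallets="/path/to/alphabet-wallets", delta=3)

# Read a TXT record through the new NNS wrapper.
result = frostfs_adm.morph.get_nns_records(
    name="mycontainer.container",
    type="TXT",
    rpc_endpoint="http://morph-chain.frostfs.devenv:30333",
)
print(result.stdout)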


@@ -12,6 +12,7 @@ class FrostfsCliNetmap(CliCommand):
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
trace: Optional[bool] = False,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> CommandResult:
@@ -42,6 +43,7 @@
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
trace: Optional[bool] = False,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> CommandResult:
@@ -73,6 +75,7 @@
generate_key: bool = False,
json: bool = False,
ttl: Optional[int] = None,
trace: Optional[bool] = False,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> CommandResult:
@@ -104,6 +107,7 @@
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
trace: Optional[bool] = False,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> CommandResult:


@@ -276,6 +276,54 @@ class FrostfsCliObject(CliCommand):
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def patch(
self,
rpc_endpoint: str,
cid: str,
oid: str,
range: list[str] = None,
payload: list[str] = None,
new_attrs: Optional[str] = None,
replace_attrs: bool = False,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
session: Optional[str] = None,
timeout: Optional[str] = None,
trace: bool = False,
ttl: Optional[int] = None,
wallet: Optional[str] = None,
xhdr: Optional[dict] = None,
) -> CommandResult:
"""
PATCH an object.
Args:
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>')
cid: Container ID
oid: Object ID
range: An array of ranges in which to replace data in the format [offset1:length1, offset2:length2]
payload: An array of file paths to be applied in each range
new_attrs: Attributes to be changed in the format Key1=Value1,Key2=Value2
replace_attrs: Replace all attributes completely with new ones specified in new_attrs
address: Address of wallet account
bearer: File with signed JSON or binary encoded bearer token
generate_key: Generate new private key
session: Filepath to a JSON- or binary-encoded token of the object RANGE session
timeout: Timeout for the operation
trace: Generate trace ID and print it
ttl: TTL value in request meta header (default 2)
wallet: WIF (NEP-2) string or path to the wallet or binary key
xhdr: Dict with request X-Headers
Returns:
Command's result.
"""
return self._execute(
"object patch",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def range(
self,
rpc_endpoint: str,
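A hedged sketch of calling the new object patch wrapper; the endpoint, config path, container and object IDs are placeholders:

from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.shell.local_shell import LocalShell

cli = FrostfsCli(LocalShell(), "/usr/bin/frostfs-cli", config_file="/path/to/cli-config.yaml")

result = cli.object.patch(
    rpc_endpoint="s01.frostfs.devenv:8080",
    cid="<container-id>",
    oid="<object-id>",
    range=["0:100", "200:50"],  # two byte ranges to replace
    payload=["/tmp/patch1.bin", "/tmp/patch2.bin"],  # one payload file per range
    new_attrs="Patched=true",
    replace_attrs=False,
    timeout="60s",
)
print(result.stdout)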


@@ -241,3 +241,21 @@ class FrostfsCliShards(CliCommand):
"control shards evacuation status",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def detach(self, endpoint: str, address: Optional[str] = None, id: Optional[str] = None, timeout: Optional[str] = None):
"""
Detach and close the shards
Args:
address: Address of wallet account
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
id: List of shard IDs in base58 encoding
timeout: Timeout for an operation (default 15s)
Returns:
Command's result.
"""
return self._execute(
"control shards detach",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
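A short hedged sketch of the new shards detach call, reusing the cli instance pattern above; the control endpoint and shard ID are placeholders:

cli.shards.detach(
    endpoint="s01.frostfs.devenv:8081",
    id="GNMFvMhtmMceTWsBwiVs5h",  # base58 shard ID (placeholder)
    timeout="30s",
)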


@@ -1,7 +1,7 @@
import re
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo, NodeStatus
from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeInfo, NodeNetInfo, NodeNetmapInfo, NodeStatus
class NetmapParser:
@@ -20,8 +20,6 @@
"withdrawal_fee": r"Withdrawal fee: (?P<withdrawal_fee>\d+)",
"homomorphic_hashing_disabled": r"Homomorphic hashing disabled: (?P<homomorphic_hashing_disabled>true|false)",
"maintenance_mode_allowed": r"Maintenance mode allowed: (?P<maintenance_mode_allowed>true|false)",
"eigen_trust_alpha": r"EigenTrustAlpha: (?P<eigen_trust_alpha>\d+\w+$)",
"eigen_trust_iterations": r"EigenTrustIterations: (?P<eigen_trust_iterations>\d+)",
}
parse_result = {}
@@ -64,7 +62,7 @@
for node in netmap_nodes:
for key, regex in regexes.items():
search_result = re.search(regex, node, flags=re.MULTILINE)
if search_result == None:
if search_result is None:
result_netmap[key] = None
continue
if key == "node_data_ips":
@@ -83,9 +81,22 @@
return dataclasses_netmap
@staticmethod
def snapshot_one_node(output: str, cluster_node: ClusterNode) -> NodeNetmapInfo | None:
def snapshot_one_node(output: str, rpc_endpoint: str) -> NodeNetmapInfo | None:
snapshot_nodes = NetmapParser.snapshot_all_nodes(output=output)
snapshot_node = [node for node in snapshot_nodes if node.node == cluster_node.host_ip]
if not snapshot_node:
return None
return snapshot_node[0]
for snapshot in snapshot_nodes:
for endpoint in snapshot.external_address:
if rpc_endpoint.split(":")[0] in endpoint:
return snapshot
@staticmethod
def node_info(output: dict) -> NodeNetmapInfo:
data_dict = {"attributes": {}}
for key, value in output.items():
if key != "attributes":
data_dict[key] = value
for attribute in output["attributes"]:
data_dict["attributes"][attribute["key"]] = attribute["value"]
return NodeInfo(**data_dict)
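A self-contained illustration of the flattening performed by the new node_info parser: the CLI emits attributes as a list of key/value pairs, which are folded into a plain dict before constructing NodeInfo (the field names in the sample dict are placeholders):

output = {
    "public_key": "02c1a60000000000000000000000000000000000000000000000000000000000",
    "addresses": ["/dns4/s01.frostfs.devenv/tcp/8080"],
    "attributes": [
        {"key": "Continent", "value": "Europe"},
        {"key": "UN-LOCODE", "value": "RU MOW"},
    ],
}

data_dict = {"attributes": {}}
for key, value in output.items():
    if key != "attributes":
        data_dict[key] = value
for attribute in output["attributes"]:
    data_dict["attributes"][attribute["key"]] = attribute["value"]

assert data_dict["attributes"] == {"Continent": "Europe", "UN-LOCODE": "RU MOW"}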


@@ -0,0 +1,5 @@
from frostfs_testlib.clients.http.http_client import HttpClient
from frostfs_testlib.clients.s3.aws_cli_client import AwsCliClient
from frostfs_testlib.clients.s3.boto3_client import Boto3ClientWrapper
from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper
from frostfs_testlib.clients.s3.s3_http_client import S3HttpClient


@@ -0,0 +1 @@
from frostfs_testlib.clients.http.http_client import HttpClient


@@ -0,0 +1,145 @@
import io
import json
import logging
import logging.config
from typing import Mapping, Sequence
import httpx
from frostfs_testlib import reporter
timeout = httpx.Timeout(60, read=150)
LOGGING_CONFIG = {
"disable_existing_loggers": False,
"version": 1,
"handlers": {"default": {"class": "logging.StreamHandler", "formatter": "http", "stream": "ext://sys.stderr"}},
"formatters": {
"http": {
"format": "%(levelname)s [%(asctime)s] %(name)s - %(message)s",
"datefmt": "%Y-%m-%d %H:%M:%S",
}
},
"loggers": {
"httpx": {
"handlers": ["default"],
"level": "DEBUG",
},
"httpcore": {
"handlers": ["default"],
"level": "ERROR",
},
},
}
logging.config.dictConfig(LOGGING_CONFIG)
logger = logging.getLogger("NeoLogger")
class HttpClient:
@reporter.step("Send {method} request to {url}")
def send(self, method: str, url: str, expected_status_code: int = None, **kwargs: dict) -> httpx.Response:
transport = httpx.HTTPTransport(verify=False, retries=5)
client = httpx.Client(timeout=timeout, transport=transport)
response = client.request(method, url, **kwargs)
self._attach_response(response, **kwargs)
logger.info(f"Response: {response.status_code} => {response.text}")
if expected_status_code:
assert (
response.status_code == expected_status_code
), f"Got {response.status_code} response code while {expected_status_code} expected"
return response
@classmethod
def _parse_body(cls, readable: httpx.Request | httpx.Response) -> str | None:
try:
content = readable.read()
except Exception as e:
logger.warning(f"Unable to read file: {str(e)}")
return None
if not content:
return None
request_body = None
try:
request_body = json.loads(content)
except (json.JSONDecodeError, UnicodeDecodeError) as e:
logger.warning(f"Unable to convert body to json: {str(e)}")
if request_body is not None:
return json.dumps(request_body, default=str, indent=4)
try:
request_body = content.decode()
except UnicodeDecodeError as e:
logger.warning(f"Unable to decode binary data to text using UTF-8 encoding: {str(e)}")
request_body = content if request_body is None else request_body
request_body = "<large text data>" if len(request_body) > 1000 else request_body
return request_body
@classmethod
def _parse_files(cls, files: Mapping | Sequence | None) -> dict:
filepaths = {}
if not files:
return filepaths
if isinstance(files, Sequence):
items = files
elif isinstance(files, Mapping):
items = files.items()
else:
raise TypeError(f"'files' must be either Sequence or Mapping, got: {type(files).__name__}")
for name, file in items:
if isinstance(file, io.IOBase):
filepaths[name] = file.name
elif isinstance(file, Sequence):
filepaths[name] = file[1].name
return filepaths
@classmethod
def _attach_response(cls, response: httpx.Response, **kwargs):
request = response.request
request_headers = json.dumps(dict(request.headers), default=str, indent=4)
request_body = cls._parse_body(request)
files = kwargs.get("files")
request_files = cls._parse_files(files)
response_headers = json.dumps(dict(response.headers), default=str, indent=4)
response_body = cls._parse_body(response)
report = (
f"Method: {request.method}\n\n"
+ f"URL: {request.url}\n\n"
+ f"Request Headers: {request_headers}\n\n"
+ (f"Request Body: {request_body}\n\n" if request_body else "")
+ (f"Request Files: {request_files}\n\n" if request_files else "")
+ f"Response Status Code: {response.status_code}\n\n"
+ f"Response Headers: {response_headers}\n\n"
+ (f"Response Body: {response_body}\n\n" if response_body else "")
)
curl_request = cls._create_curl_request(request.url, request.method, request.headers, request_body, request_files)
reporter.attach(report, "Requests Info")
reporter.attach(curl_request, "CURL")
@classmethod
def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str, files: dict) -> str:
excluded_headers = {"Accept-Encoding", "Connection", "User-Agent", "Content-Length"}
headers = " ".join(f"-H '{header.title()}: {value}'" for header, value in headers.items() if header.title() not in excluded_headers)
data = f" -d '{data}'" if data else ""
for name, path in files.items():
data += f' -F "{name}=@{path}"'
# Option -k disables SSL certificate verification
return f"curl {url} -X {method} {headers}{data} -k"
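A hedged usage sketch for the new HttpClient; the URL is a placeholder. When expected_status_code is given the client asserts it, and every call attaches the request/response pair plus an equivalent curl command to the report:

from frostfs_testlib.clients import HttpClient

client = HttpClient()
response = client.send("GET", "http://example.com/health", expected_status_code=200)
print(response.text)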


@@ -0,0 +1,3 @@
from frostfs_testlib.clients.s3.aws_cli_client import AwsCliClient
from frostfs_testlib.clients.s3.boto3_client import Boto3ClientWrapper
from frostfs_testlib.clients.s3.interfaces import ACL, BucketContainerResolver, S3ClientWrapper, VersioningStatus


@@ -6,8 +6,8 @@ from time import sleep
from typing import Literal, Optional, Union
from frostfs_testlib import reporter
from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict
from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME
from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict
from frostfs_testlib.shell import CommandOptions
from frostfs_testlib.shell.local_shell import LocalShell
from frostfs_testlib.utils import string_utils
@@ -33,12 +33,14 @@ class AwsCliClient(S3ClientWrapper):
self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1"
) -> None:
self.s3gate_endpoint = s3gate_endpoint
self.iam_endpoint = None
self.access_key_id: str = access_key_id
self.secret_access_key: str = secret_access_key
self.profile = profile
self.local_shell = LocalShell()
self.region = region
self.iam_endpoint = None
self.local_shell = LocalShell()
try:
_configure_aws_cli(f"aws configure --profile {profile}", access_key_id, secret_access_key, region)
self.local_shell.exec(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS} --profile {profile}")
@@ -171,7 +173,7 @@
return response.get("TagSet")
@reporter.step("Get bucket acl")
def get_bucket_acl(self, bucket: str) -> list:
def get_bucket_acl(self, bucket: str) -> dict:
if bucket.startswith("-") or " " in bucket:
bucket = f'"{bucket}"'
@@ -179,8 +181,7 @@
f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
)
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
return response.get("Grants")
return self._to_json(output)
@reporter.step("Get bucket location")
def get_bucket_location(self, bucket: str) -> dict:
@@ -196,11 +197,20 @@
return response.get("LocationConstraint")
@reporter.step("List objects S3")
def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
def list_objects(
self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None
) -> Union[dict, list[str]]:
if bucket.startswith("-") or " " in bucket:
bucket = f'"{bucket}"'
cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} "
if page_size:
cmd = cmd.replace("--no-paginate", "")
cmd += f" --page-size {page_size} "
if prefix:
cmd += f" --prefix {prefix}"
if self.profile:
cmd += f" --profile {self.profile} "
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
@@ -852,7 +862,7 @@
return response["Parts"]
@reporter.step("Complete multipart upload S3")
def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None:
def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict:
if bucket.startswith("-") or " " in bucket:
bucket = f'"{bucket}"'
@@ -969,7 +979,7 @@
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
sleep(S3_SYNC_WAIT_TIME * 10)
sleep(S3_SYNC_WAIT_TIME * 14)
return response
@@ -980,7 +990,7 @@
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
sleep(S3_SYNC_WAIT_TIME * 10)
sleep(S3_SYNC_WAIT_TIME * 14)
return response
@@ -1112,7 +1122,7 @@
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
sleep(S3_SYNC_WAIT_TIME * 10)
sleep(S3_SYNC_WAIT_TIME * 14)
return response
@@ -1123,7 +1133,7 @@
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
sleep(S3_SYNC_WAIT_TIME * 10)
sleep(S3_SYNC_WAIT_TIME * 14)
return response
@@ -1219,7 +1229,7 @@
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}"
assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}"
return response
@@ -1231,7 +1241,7 @@
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}"
assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}"
return response
@@ -1256,7 +1266,7 @@
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}"
assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}"
return response
@@ -1268,7 +1278,7 @@
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert response.get("Groups"), f"Expected Groups in response:\n{response}"
assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}"
return response
@@ -1280,7 +1290,7 @@
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert response.get("Groups"), f"Expected Groups in response:\n{response}"
assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}"
return response
@@ -1316,7 +1326,7 @@
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}"
assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}"
return response
@@ -1342,7 +1352,7 @@
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
sleep(S3_SYNC_WAIT_TIME * 10)
sleep(S3_SYNC_WAIT_TIME * 14)
return response
@@ -1357,7 +1367,7 @@
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
sleep(S3_SYNC_WAIT_TIME * 10)
sleep(S3_SYNC_WAIT_TIME * 14)
return response
@@ -1440,3 +1450,90 @@
response = self._to_json(output)
return response
# MFA METHODS
@reporter.step("Creates a new virtual MFA device")
def iam_create_virtual_mfa_device(self, virtual_mfa_device_name: str, outfile: str, bootstrap_method: str) -> tuple:
cmd = f"aws {self.common_flags} iam create-virtual-mfa-device --virtual-mfa-device-name {virtual_mfa_device_name}\
--outfile {outfile} --bootstrap-method {bootstrap_method} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
serial_number = response.get("VirtualMFADevice", {}).get("SerialNumber")
assert serial_number, f"Expected SerialNumber in response:\n{response}"
return serial_number, False
@reporter.step("Deactivates the specified MFA device and removes it from association with the user name")
def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict:
cmd = f"aws {self.common_flags} iam deactivate-mfa-device --user-name {user_name} --serial-number {serial_number} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
return response
@reporter.step("Deletes a virtual MFA device")
def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict:
cmd = f"aws {self.common_flags} iam delete-virtual-mfa-device --serial-number {serial_number} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
return response
@reporter.step("Enables the specified MFA device and associates it with the specified IAM user")
def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict:
cmd = f"aws {self.common_flags} iam enable-mfa-device --user-name {user_name} --serial-number {serial_number} --authentication-code1 {authentication_code1}\
--authentication-code2 {authentication_code2} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
return response
@reporter.step("Lists the MFA devices for an IAM user")
def iam_list_virtual_mfa_devices(self) -> dict:
cmd = f"aws {self.common_flags} iam list-virtual-mfa-devices --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert response.get("VirtualMFADevices"), f"Expected VirtualMFADevices in response:\n{response}"
return response
@reporter.step("Get session token for user")
def sts_get_session_token(
self, duration_seconds: Optional[str] = None, serial_number: Optional[str] = None, token_code: Optional[str] = None
) -> tuple:
cmd = f"aws {self.common_flags} sts get-session-token --endpoint {self.iam_endpoint}"
if duration_seconds:
cmd += f" --duration-seconds {duration_seconds}"
if serial_number:
cmd += f" --serial-number {serial_number}"
if token_code:
cmd += f" --token-code {token_code}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
access_key = response.get("Credentials", {}).get("AccessKeyId")
secret_access_key = response.get("Credentials", {}).get("SecretAccessKey")
session_token = response.get("Credentials", {}).get("SessionToken")
assert access_key, f"Expected AccessKeyId in response:\n{response}"
assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}"
assert session_token, f"Expected SessionToken in response:\n{response}"
return access_key, secret_access_key, session_token


@@ -13,8 +13,8 @@ from botocore.exceptions import ClientError
from mypy_boto3_s3 import S3Client
from frostfs_testlib import reporter
from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict
from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME
from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict
from frostfs_testlib.utils import string_utils
# TODO: Refactor this code to use shell instead of _cmd_run
@@ -35,24 +35,20 @@ class Boto3ClientWrapper(S3ClientWrapper):
def __init__(
self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1"
) -> None:
self.boto3_client: S3Client = None
self.s3gate_endpoint: str = ""
self.boto3_client: S3Client = None
self.boto3_iam_client: S3Client = None
self.iam_endpoint: str = ""
self.boto3_iam_client: S3Client = None
self.boto3_sts_client: S3Client = None
self.access_key_id: str = access_key_id
self.secret_access_key: str = secret_access_key
self.access_key_id = access_key_id
self.secret_access_key = secret_access_key
self.profile = profile
self.region = region
self.session = boto3.Session()
self.config = Config(
retries={
"max_attempts": MAX_REQUEST_ATTEMPTS,
"mode": RETRY_MODE,
}
)
self.config = Config(retries={"max_attempts": MAX_REQUEST_ATTEMPTS, "mode": RETRY_MODE})
self.set_endpoint(s3gate_endpoint)
@@ -84,9 +80,18 @@
service_name="iam",
aws_access_key_id=self.access_key_id,
aws_secret_access_key=self.secret_access_key,
region_name=self.region,
endpoint_url=self.iam_endpoint,
verify=False,
)
# Since STS does not have its own endpoint, the IAM endpoint is used
self.boto3_sts_client = self.session.client(
service_name="sts",
aws_access_key_id=self.access_key_id,
aws_secret_access_key=self.secret_access_key,
endpoint_url=iam_endpoint,
verify=False,
)
def _to_s3_param(self, param: str) -> str:
replacement_map = {
@@ -134,6 +139,7 @@
params = {"Bucket": bucket}
if object_lock_enabled_for_bucket is not None:
params.update({"ObjectLockEnabledForBucket": object_lock_enabled_for_bucket})
if acl is not None:
params.update({"ACL": acl})
elif grant_write or grant_read or grant_full_control:
@@ -143,6 +149,7 @@
params.update({"GrantRead": grant_read})
elif grant_full_control:
params.update({"GrantFullControl": grant_full_control})
if location_constraint:
params.update({"CreateBucketConfiguration": {"LocationConstraint": location_constraint}})
@@ -219,14 +226,13 @@
return response.get("TagSet")
@reporter.step("Get bucket acl")
def get_bucket_acl(self, bucket: str) -> list:
response = self._exec_request(
def get_bucket_acl(self, bucket: str) -> dict:
return self._exec_request(
self.boto3_client.get_bucket_acl,
params={"Bucket": bucket},
endpoint=self.s3gate_endpoint,
profile=self.profile,
)
return response.get("Grants")
@reporter.step("Delete bucket tagging")
def delete_bucket_tagging(self, bucket: str) -> None:
@@ -388,10 +394,17 @@
return response if full_output else obj_list
@reporter.step("List objects S3")
def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
def list_objects(
self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None
) -> Union[dict, list[str]]:
params = {"Bucket": bucket}
if page_size:
params["MaxKeys"] = page_size
if prefix:
params["Prefix"] = prefix
response = self._exec_request(
self.boto3_client.list_objects,
params={"Bucket": bucket},
params,
endpoint=self.s3gate_endpoint,
profile=self.profile,
)
@@ -687,7 +700,7 @@
return response["Parts"]
@reporter.step("Complete multipart upload S3")
def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None:
def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict:
parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts]
params = self._convert_to_s3_params(locals(), exclude=["parts"])
params["MultipartUpload"] = {"Parts": parts}
@@ -823,7 +836,7 @@
endpoint=self.iam_endpoint,
profile=self.profile,
)
sleep(S3_SYNC_WAIT_TIME * 10)
sleep(S3_SYNC_WAIT_TIME * 14)
return response
@reporter.step("Attaches the specified managed policy to the specified user")
@@ -835,7 +848,7 @@
endpoint=self.iam_endpoint,
profile=self.profile,
)
sleep(S3_SYNC_WAIT_TIME * 10)
sleep(S3_SYNC_WAIT_TIME * 14)
return response
@reporter.step("Creates a new AWS secret access key and access key ID for the specified user")
@@ -966,7 +979,7 @@
endpoint=self.iam_endpoint,
profile=self.profile,
)
sleep(S3_SYNC_WAIT_TIME * 10)
sleep(S3_SYNC_WAIT_TIME * 14)
return response
@reporter.step("Removes the specified managed policy from the specified user")
@@ -978,7 +991,7 @@
endpoint=self.iam_endpoint,
profile=self.profile,
)
sleep(S3_SYNC_WAIT_TIME * 10)
sleep(S3_SYNC_WAIT_TIME * 14)
return response
@reporter.step("Returns a list of IAM users that are in the specified IAM group")
@@ -1074,7 +1087,7 @@
endpoint=self.iam_endpoint,
profile=self.profile,
)
assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}"
assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}"
return response
@reporter.step("Lists all managed policies that are attached to the specified IAM user")
@@ -1085,7 +1098,7 @@
endpoint=self.iam_endpoint,
profile=self.profile,
)
assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}"
assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}"
return response
@reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to")
@@ -1110,7 +1123,7 @@
endpoint=self.iam_endpoint,
profile=self.profile,
)
assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}"
assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}"
return response
@reporter.step("Lists the IAM groups")
@@ -1120,7 +1133,7 @@
endpoint=self.iam_endpoint,
profile=self.profile,
)
assert response.get("Groups"), f"Expected Groups in response:\n{response}"
assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}"
return response
@reporter.step("Lists the IAM groups that the specified IAM user belongs to")
@@ -1131,7 +1144,7 @@
endpoint=self.iam_endpoint,
profile=self.profile,
)
assert response.get("Groups"), f"Expected Groups in response:\n{response}"
assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}"
return response
@reporter.step("Lists all the managed policies that are available in your AWS account")
@@ -1163,7 +1176,7 @@
endpoint=self.iam_endpoint,
profile=self.profile,
)
assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}"
assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}"
return response
@reporter.step("Lists the IAM users")
@@ -1188,7 +1201,7 @@
endpoint=self.iam_endpoint,
profile=self.profile,
)
sleep(S3_SYNC_WAIT_TIME * 10)
sleep(S3_SYNC_WAIT_TIME * 14)
return response
@reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user")
@@ -1203,7 +1216,7 @@
endpoint=self.iam_endpoint,
profile=self.profile,
)
sleep(S3_SYNC_WAIT_TIME * 10)
sleep(S3_SYNC_WAIT_TIME * 14)
return response
@reporter.step("Removes the specified user from the specified group")
@@ -1265,3 +1278,66 @@
endpoint=self.iam_endpoint,
profile=self.profile,
)
# MFA methods
@reporter.step("Creates a new virtual MFA device")
def iam_create_virtual_mfa_device(
self, virtual_mfa_device_name: str, outfile: Optional[str] = None, bootstrap_method: Optional[str] = None
) -> tuple:
response = self.boto3_iam_client.create_virtual_mfa_device(VirtualMFADeviceName=virtual_mfa_device_name)
serial_number = response.get("VirtualMFADevice", {}).get("SerialNumber")
base32StringSeed = response.get("VirtualMFADevice", {}).get("Base32StringSeed")
assert serial_number, f"Expected SerialNumber in response:\n{response}"
assert base32StringSeed, f"Expected Base32StringSeed in response:\n{response}"
return serial_number, base32StringSeed
@reporter.step("Deactivates the specified MFA device and removes it from association with the user name")
def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict:
response = self.boto3_iam_client.deactivate_mfa_device(UserName=user_name, SerialNumber=serial_number)
return response
@reporter.step("Deletes a virtual MFA device")
def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict:
response = self.boto3_iam_client.delete_virtual_mfa_device(SerialNumber=serial_number)
return response
@reporter.step("Enables the specified MFA device and associates it with the specified IAM user")
def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict:
response = self.boto3_iam_client.enable_mfa_device(
UserName=user_name,
SerialNumber=serial_number,
AuthenticationCode1=authentication_code1,
AuthenticationCode2=authentication_code2,
)
return response
@reporter.step("Lists the MFA devices for an IAM user")
def iam_list_virtual_mfa_devices(self) -> dict:
response = self.boto3_iam_client.list_virtual_mfa_devices()
assert response.get("VirtualMFADevices"), f"Expected VirtualMFADevices in response:\n{response}"
return response
@reporter.step("Get session token for user")
def sts_get_session_token(
self, duration_seconds: Optional[str] = "", serial_number: Optional[str] = "", token_code: Optional[str] = ""
) -> tuple:
response = self.boto3_sts_client.get_session_token(
DurationSeconds=duration_seconds,
SerialNumber=serial_number,
TokenCode=token_code,
)
access_key = response.get("Credentials", {}).get("AccessKeyId")
secret_access_key = response.get("Credentials", {}).get("SecretAccessKey")
session_token = response.get("Credentials", {}).get("SessionToken")
assert access_key, f"Expected AccessKeyId in response:\n{response}"
assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}"
assert session_token, f"Expected SessionToken in response:\n{response}"
return access_key, secret_access_key, session_token
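A hedged sketch of the new MFA flow with Boto3ClientWrapper; s3_client and the user name are placeholders, and pyotp is a third-party TOTP generator used here by assumption (it is not part of testlib):

import time

import pyotp  # assumption: external TOTP library, not shipped with testlib

serial_number, seed = s3_client.iam_create_virtual_mfa_device("test-mfa-device")

totp = pyotp.TOTP(seed.decode() if isinstance(seed, bytes) else seed)
code1 = totp.at(time.time())
code2 = totp.at(time.time() + 30)  # code from the next 30-second window
s3_client.iam_enable_mfa_device("test-user", serial_number, code1, code2)

access_key, secret_key, session_token = s3_client.sts_get_session_token(
    serial_number=serial_number, token_code=totp.now()
)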


@@ -1,7 +1,7 @@
import re
from frostfs_testlib.cli.generic_cli import GenericCli
from frostfs_testlib.s3.interfaces import BucketContainerResolver
from frostfs_testlib.clients.s3 import BucketContainerResolver
from frostfs_testlib.storage.cluster import ClusterNode


@@ -22,15 +22,15 @@ class VersioningStatus(HumanReadableEnum):
SUSPENDED = "Suspended"
ACL_COPY = [
"private",
"public-read",
"public-read-write",
"authenticated-read",
"aws-exec-read",
"bucket-owner-read",
"bucket-owner-full-control",
]
class ACL:
PRIVATE = "private"
PUBLIC_READ = "public-read"
PUBLIC_READ_WRITE = "public-read-write"
AUTHENTICATED_READ = "authenticated-read"
AWS_EXEC_READ = "aws-exec-read"
BUCKET_OWNER_READ = "bucket-owner-read"
BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control"
LOG_DELIVERY_WRITE = "log-delivery-write"
class BucketContainerResolver(ABC):
@@ -50,6 +50,14 @@
class S3ClientWrapper(HumanReadableABC):
access_key_id: str
secret_access_key: str
profile: str
region: str
s3gate_endpoint: str
iam_endpoint: str
@abstractmethod
def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str, region: str) -> None:
pass
@@ -128,7 +136,7 @@
"""Deletes the tags from the bucket."""
@abstractmethod
def get_bucket_acl(self, bucket: str) -> list:
def get_bucket_acl(self, bucket: str) -> dict:
"""This implementation of the GET action uses the acl subresource to return the access control list (ACL) of a bucket."""
@abstractmethod
@@ -195,7 +203,9 @@
"""
@abstractmethod
def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
def list_objects(
self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None
) -> Union[dict, list[str]]:
"""Returns some or all (up to 1,000) of the objects in a bucket with each request.
You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
A 200 OK response can contain valid or invalid XML. Make sure to design your application
@@ -334,7 +344,7 @@
"""Lists the parts that have been uploaded for a specific multipart upload."""
@abstractmethod
def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None:
def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict:
"""Completes a multipart upload by assembling previously uploaded parts."""
@abstractmethod
@@ -578,3 +588,32 @@
@abstractmethod
def iam_untag_user(self, user_name: str, tag_keys: list) -> dict:
"""Removes the specified tags from the user"""
# MFA methods
@abstractmethod
def iam_create_virtual_mfa_device(
self, virtual_mfa_device_name: str, outfile: Optional[str] = None, bootstrap_method: Optional[str] = None
) -> tuple:
"""Creates a new virtual MFA device"""
@abstractmethod
def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict:
"""Deactivates the specified MFA device and removes it from association with the user name"""
@abstractmethod
def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict:
"""Deletes a virtual MFA device"""
@abstractmethod
def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict:
"""Enables the specified MFA device and associates it with the specified IAM user"""
@abstractmethod
def iam_list_virtual_mfa_devices(self) -> dict:
"""Lists the MFA devices for an IAM user"""
@abstractmethod
def sts_get_session_token(
self, duration_seconds: Optional[str] = None, serial_number: Optional[str] = None, token_code: Optional[str] = None
) -> tuple:
"""Get session token for user"""


@@ -0,0 +1,149 @@
import hashlib
import logging
import xml.etree.ElementTree as ET
import httpx
from botocore.auth import SigV4Auth
from botocore.awsrequest import AWSRequest
from botocore.credentials import Credentials
from frostfs_testlib import reporter
from frostfs_testlib.clients import HttpClient
from frostfs_testlib.utils.file_utils import TestFile
logger = logging.getLogger("NeoLogger")
DEFAULT_TIMEOUT = 60.0
class S3HttpClient:
def __init__(
self, s3gate_endpoint: str, access_key_id: str, secret_access_key: str, profile: str = "default", region: str = "us-east-1"
) -> None:
self.http_client = HttpClient()
self.credentials = Credentials(access_key_id, secret_access_key)
self.profile = profile
self.region = region
self.iam_endpoint: str = None
self.s3gate_endpoint: str = None
self.service: str = None
self.signature: SigV4Auth = None
self.set_endpoint(s3gate_endpoint)
def _to_s3_header(self, header: str) -> dict:
replacement_map = {
"Acl": "ACL",
"_": "-",
}
result = header
if not header.startswith("x_amz"):
result = header.title()
for find, replace in replacement_map.items():
result = result.replace(find, replace)
return result
def _convert_to_s3_headers(self, scope: dict, exclude: list[str] = None):
exclude = ["self", "cls"] if not exclude else exclude + ["self", "cls"]
return {self._to_s3_header(header): value for header, value in scope.items() if header not in exclude and value is not None}
def _create_aws_request(
self, method: str, url: str, headers: dict, content: str | bytes | TestFile = None, params: dict = None
) -> AWSRequest:
data = b""
if content is not None:
if isinstance(content, TestFile):
with open(content, "rb") as io_content:
data = io_content.read()
elif isinstance(content, str):
data = bytes(content, encoding="utf-8")
elif isinstance(content, bytes):
data = content
else:
raise TypeError(f"Content expected as a string, bytes or TestFile object, got: {content}")
headers["X-Amz-Content-SHA256"] = hashlib.sha256(data).hexdigest()
aws_request = AWSRequest(method, url, headers, data, params)
self.signature.add_auth(aws_request)
return aws_request
def _exec_request(
self,
method: str,
url: str,
headers: dict,
content: str | bytes | TestFile = None,
params: dict = None,
timeout: float = DEFAULT_TIMEOUT,
) -> dict:
aws_request = self._create_aws_request(method, url, headers, content, params)
response = self.http_client.send(
aws_request.method,
aws_request.url,
headers=dict(aws_request.headers),
data=aws_request.data,
params=aws_request.params,
timeout=timeout,
)
try:
response.raise_for_status()
except httpx.HTTPStatusError:
raise httpx.HTTPStatusError(response.text, request=response.request, response=response)
root = ET.fromstring(response.read())
data = {
"LastModified": root.find(".//LastModified").text,
"ETag": root.find(".//ETag").text,
}
if response.headers.get("x-amz-version-id"):
data["VersionId"] = response.headers.get("x-amz-version-id")
return data
@reporter.step("Set endpoint S3 to {s3gate_endpoint}")
def set_endpoint(self, s3gate_endpoint: str):
if self.s3gate_endpoint == s3gate_endpoint:
return
self.s3gate_endpoint = s3gate_endpoint
self.service = "s3"
self.signature = SigV4Auth(self.credentials, self.service, self.region)
@reporter.step("Set endpoint IAM to {iam_endpoint}")
def set_iam_endpoint(self, iam_endpoint: str):
if self.iam_endpoint == iam_endpoint:
return
self.iam_endpoint = iam_endpoint
self.service = "iam"
self.signature = SigV4Auth(self.credentials, self.service, self.region)
@reporter.step("Patch object S3")
def patch_object(
self,
bucket: str,
key: str,
content: str | bytes | TestFile,
content_range: str,
version_id: str = None,
if_match: str = None,
if_unmodified_since: str = None,
x_amz_expected_bucket_owner: str = None,
timeout: float = DEFAULT_TIMEOUT,
) -> dict:
if content_range and not content_range.startswith("bytes"):
content_range = f"bytes {content_range}/*"
url = f"{self.s3gate_endpoint}/{bucket}/{key}"
headers = self._convert_to_s3_headers(locals(), exclude=["bucket", "key", "content", "version_id", "timeout"])
params = {"VersionId": version_id} if version_id is not None else None
return self._exec_request("PATCH", url, headers, content, params, timeout=timeout)
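A hedged sketch of patching an object through the new raw HTTP client with SigV4 signing; the endpoint and credentials are placeholders:

from frostfs_testlib.clients import S3HttpClient

s3_http = S3HttpClient("http://s3gate.frostfs.devenv:8080", "<access-key-id>", "<secret-access-key>")

response = s3_http.patch_object(
    bucket="my-bucket",
    key="my-object",
    content=b"0123456789",
    content_range="0-9",  # expanded to "bytes 0-9/*" before sending
)
print(response)  # dict with LastModified, ETag and, if versioned, VersionId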


@@ -1,5 +1,6 @@
import logging
import os
from datetime import datetime
from importlib.metadata import entry_points
import pytest
@@ -11,6 +12,12 @@ from frostfs_testlib.resources.common import ASSETS_DIR, HOSTING_CONFIG_FILE
from frostfs_testlib.storage import get_service_registry
@pytest.fixture(scope="session", autouse=True)
def session_start_time():
start_time = datetime.utcnow()
return start_time
@pytest.fixture(scope="session")
def configure_testlib():
reporter.get_reporter().register_handler(reporter.AllureHandler())


@@ -1,8 +1,8 @@
import pytest
@pytest.hookimpl
def pytest_collection_modifyitems(items: list[pytest.Item]):
@pytest.hookimpl(specname="pytest_collection_modifyitems")
def pytest_add_frostfs_marker(items: list[pytest.Item]):
# All tests which reside in frostfs nodeid are granted with frostfs marker, excluding
# nodeid = full path of the test
# 1. plugins
@@ -11,3 +11,18 @@ def pytest_collection_modifyitems(items: list[pytest.Item]):
location = item.location[0]
if "frostfs" in location and "plugin" not in location and "testlib" not in location:
item.add_marker("frostfs")
# pytest hook. Do not rename
@pytest.hookimpl(trylast=True)
def pytest_collection_modifyitems(items: list[pytest.Item]):
# Change order of tests based on @pytest.mark.order(<int>) marker
def order(item: pytest.Item) -> int:
order_marker = item.get_closest_marker("order")
if order_marker and (len(order_marker.args) != 1 or not isinstance(order_marker.args[0], int)):
raise RuntimeError("Incorrect usage of pytest.mark.order")
order_value = order_marker.args[0] if order_marker else 0
return order_value
items.sort(key=lambda item: order(item))
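A usage sketch for the ordering hook above: collected tests are sorted by the integer passed to pytest.mark.order, and unmarked tests keep the default value 0:

import pytest

@pytest.mark.order(-1)
def test_runs_first(): ...

def test_runs_in_between(): ...  # no marker -> order 0

@pytest.mark.order(1)
def test_runs_last(): ...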


@@ -164,6 +164,9 @@ class DockerHost(Host):
return volume_path
def send_signal_to_service(self, service_name: str, signal: str) -> None:
raise NotImplementedError("Not implemented for docker")
def delete_metabase(self, service_name: str) -> None:
raise NotImplementedError("Not implemented for docker")
@@ -247,6 +250,7 @@
unit: Optional[str] = None,
exclude_filter: Optional[str] = None,
priority: Optional[str] = None,
word_count: bool = None,
) -> str:
client = self._get_docker_client()
filtered_logs = ""


@@ -29,6 +29,9 @@ class Host(ABC):
self._service_config_by_name = {service_config.name: service_config for service_config in config.services}
self._cli_config_by_name = {cli_config.name: cli_config for cli_config in config.clis}
def __repr__(self) -> str:
return self.config.address
@property
def config(self) -> HostConfig:
"""Returns config of the host.
@@ -117,6 +120,17 @@
service_name: Name of the service to stop.
"""
@abstractmethod
def send_signal_to_service(self, service_name: str, signal: str) -> None:
"""Send signal to service with specified name using kill -<signal>
The service must be hosted on this host.
Args:
service_name: Name of the service to send the signal to.
signal: Signal name. See kill -l for all names
"""
@abstractmethod
def mask_service(self, service_name: str) -> None:
"""Prevent the service from start by any activity by masking it.
@@ -313,6 +327,7 @@
unit: Optional[str] = None,
exclude_filter: Optional[str] = None,
priority: Optional[str] = None,
word_count: bool = None,
) -> str:
"""Get logs from host filtered by regex.
@@ -323,6 +338,7 @@
unit: required unit.
priority: logs level, 0 - emergency, 7 - debug. All messages with that code and higher.
For example, if we specify the -p 2 option, journalctl will show all messages with levels 2, 1 and 0.
word_count: output type, expected values: lines, bytes, json
Returns:
Found entries as str if any found.


@@ -182,8 +182,10 @@ class Preset(MetaConfig):
pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON", False)
# Workers count for preset
workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None, False)
# Acl for container/buckets
# TODO: Deprecated. Acl for container/buckets
acl: Optional[str] = metadata_field(all_load_scenarios, "acl", None, False)
# APE rule for containers instead of deprecated ACL
rule: Optional[list[str]] = metadata_field(grpc_preset_scenarios, "rule", None, False, formatter=force_list)
# ------ GRPC ------
# Amount of containers which should be created


@@ -193,7 +193,7 @@ class RemoteProcess:
)
if "No such file or directory" in terminal.stderr:
return None
elif terminal.stderr or terminal.return_code != 0:
elif terminal.return_code != 0:
raise AssertionError(f"cat process {file} was not successful: {terminal.stderr}")
return terminal.stdout


@@ -53,3 +53,4 @@ HOSTING_CONFIG_FILE = os.getenv(
)
MORE_LOG = os.getenv("MORE_LOG", "1")
EXPIRATION_EPOCH_ATTRIBUTE = "__SYSTEM__EXPIRATION_EPOCH"


@@ -1,5 +1,6 @@
# Regex patterns of status codes of Container service
CONTAINER_NOT_FOUND = "code = 3072.*message = container not found"
SUBJECT_NOT_FOUND = "code = 1024.*message = frostfs error: chain/client.*subject not found.*"
# Regex patterns of status codes of Object service
MALFORMED_REQUEST = "code = 1024.*message = malformed request"
@@ -9,6 +10,7 @@ OBJECT_ALREADY_REMOVED = "code = 2052.*message = object already removed"
SESSION_NOT_FOUND = "code = 4096.*message = session token not found"
OUT_OF_RANGE = "code = 2053.*message = out of range"
EXPIRED_SESSION_TOKEN = "code = 4097.*message = expired session token"
ADD_CHAIN_ERROR = "code = 5120 message = apemanager access denied"
# TODO: Change to codes with message
# OBJECT_IS_LOCKED = "code = 2050.*message = object is locked"
# LOCK_NON_REGULAR_OBJECT = "code = 2051.*message = ..." will be available once 2092 is fixed
@@ -27,6 +29,10 @@ S3_BUCKET_DOES_NOT_ALLOW_ACL = "The bucket does not allow ACLs"
S3_MALFORMED_XML_REQUEST = "The XML you provided was not well-formed or did not validate against our published schema."
RULE_ACCESS_DENIED_CONTAINER = "access to container operation {operation} is denied by access policy engine: Access denied"
RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied"
# Errors from node missing reasons if request was forwarded. Commenting for now
# RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied"
RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request"
NO_RULE_FOUND_CONTAINER = "access to container operation {operation} is denied by access policy engine: NoRuleFound"
NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request: method {operation}: NoRuleFound"
# Errors from node missing reasons if request was forwarded. Commenting for now
# NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request: method {operation}: NoRuleFound"
NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request"

View file
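For context, these constants are regex patterns matched against CLI output; a minimal self-contained sketch (the import path is an assumption):

import re

from frostfs_testlib.resources.error_patterns import ADD_CHAIN_ERROR  # assumed path

stderr = "status: code = 5120 message = apemanager access denied"
assert re.search(ADD_CHAIN_ERROR, stderr), "expected an apemanager denial"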

@ -16,11 +16,10 @@ OPTIONAL_NODE_UNDER_LOAD = os.getenv("OPTIONAL_NODE_UNDER_LOAD")
OPTIONAL_FAILOVER_ENABLED = str_to_bool(os.getenv("OPTIONAL_FAILOVER_ENABLED", "true"))
# Set this to False to disable background load, i.e. a node that is supposed to be stopped will not actually be stopped.
OPTIONAL_BACKGROUND_LOAD_ENABLED = str_to_bool(
os.getenv("OPTIONAL_BACKGROUND_LOAD_ENABLED", "true")
)
OPTIONAL_BACKGROUND_LOAD_ENABLED = str_to_bool(os.getenv("OPTIONAL_BACKGROUND_LOAD_ENABLED", "true"))
# Set this to False to disable autouse fixtures (e.g. node healthcheck) during development.
OPTIONAL_AUTOUSE_FIXTURES_ENABLED = str_to_bool(
os.getenv("OPTIONAL_AUTOUSE_FIXTURES_ENABLED", "true")
)
OPTIONAL_AUTOUSE_FIXTURES_ENABLED = str_to_bool(os.getenv("OPTIONAL_AUTOUSE_FIXTURES_ENABLED", "true"))
# Use cache for fixtures with the @cached_fixture decorator
OPTIONAL_CACHE_FIXTURES = str_to_bool(os.getenv("OPTIONAL_CACHE_FIXTURES", "false"))

View file

@ -1,3 +0,0 @@
from frostfs_testlib.s3.aws_cli_client import AwsCliClient
from frostfs_testlib.s3.boto3_client import Boto3ClientWrapper
from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus

View file

@ -7,9 +7,7 @@ from typing import Optional, Union
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.plugins import load_plugin
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
from frostfs_testlib.s3.interfaces import BucketContainerResolver
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.object import put_object, put_object_to_random_node
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
@ -111,6 +109,8 @@ def create_container(
options: Optional[dict] = None,
await_mode: bool = True,
wait_for_creation: bool = True,
nns_zone: Optional[str] = None,
nns_name: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> str:
"""
@ -143,6 +143,8 @@ def create_container(
result = cli.container.create(
rpc_endpoint=endpoint,
policy=rule,
nns_name=nns_name,
nns_zone=nns_zone,
basic_acl=basic_acl,
attributes=attributes,
name=name,

View file
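A hedged sketch of the new NNS parameters in use; the positional arguments and the `rule` keyword are assumed from the surrounding helper, not shown in this diff:

cid = create_container(
    wallet,
    shell,
    endpoint,
    rule="REP 2 IN X CBF 1 SELECT 4 FROM * AS X",
    nns_name="my-container",  # hypothetical NNS name
    nns_zone="container",     # hypothetical NNS zone
)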

@ -12,6 +12,7 @@ from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC,
from frostfs_testlib.resources.common import ASSETS_DIR
from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing import wait_for_success
from frostfs_testlib.utils import json_utils
@ -752,7 +753,10 @@ def get_object_nodes(
]
object_nodes = [
cluster_node for netmap_node in netmap_nodes for cluster_node in cluster.cluster_nodes if netmap_node.node == cluster_node.host_ip
cluster_node
for netmap_node in netmap_nodes
for cluster_node in cluster.cluster_nodes
if netmap_node.node == cluster_node.get_interface(Interfaces.MGMT)
]
return object_nodes

View file

@ -12,8 +12,8 @@ import requests
from frostfs_testlib import reporter
from frostfs_testlib.cli import GenericCli
from frostfs_testlib.clients.s3.aws_cli_client import command_options
from frostfs_testlib.resources.common import ASSETS_DIR, SIMPLE_OBJECT_SIZE
from frostfs_testlib.s3.aws_cli_client import command_options
from frostfs_testlib.shell import Shell
from frostfs_testlib.shell.local_shell import LocalShell
from frostfs_testlib.steps.cli.object import get_object
@ -38,34 +38,34 @@ def get_via_http_gate(
"""
This function gets the given object from the HTTP gate.
cid: container id to get object from
oid: object ID
oid: object id / object key
node: node to send the request to
request_path: (optional) HTTP request path; if omitted, the default [{endpoint}/get/{cid}/{oid}] is used
"""
# if `request_path` parameter omitted, use default
if request_path is None:
request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}"
else:
request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}"
if request_path:
request = f"{node.http_gate.get_endpoint()}{request_path}"
resp = requests.get(request, stream=True, timeout=timeout, verify=False)
response = requests.get(request, stream=True, timeout=timeout, verify=False)
if not resp.ok:
if not response.ok:
raise Exception(
f"""Failed to get object via HTTP gate:
request: {resp.request.path_url},
response: {resp.text},
headers: {resp.headers},
status code: {resp.status_code} {resp.reason}"""
request: {response.request.path_url},
response: {response.text},
headers: {response.headers},
status code: {response.status_code} {response.reason}"""
)
logger.info(f"Request: {request}")
_attach_allure_step(request, resp.status_code)
_attach_allure_step(request, response.status_code)
test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}"))
with open(test_file, "wb") as file:
shutil.copyfileobj(resp.raw, file)
for chunk in response.iter_content(chunk_size=8192):
file.write(chunk)
return test_file
@ -117,12 +117,12 @@ def get_via_http_gate_by_attribute(
endpoint: http gate endpoint
request_path: (optional) HTTP request path; if omitted, the default [{endpoint}/get_by_attribute/{Key}/{Value}] is used
"""
attr_name = list(attribute.keys())[0]
attr_value = quote_plus(str(attribute.get(attr_name)))
# if `request_path` parameter omitted, use default
if request_path is None:
request = f"{node.http_gate.get_endpoint()}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}"
else:
request = f"{node.http_gate.get_endpoint()}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}"
if request_path:
request = f"{node.http_gate.get_endpoint()}{request_path}"
resp = requests.get(request, stream=True, timeout=timeout, verify=False)
@ -357,19 +357,9 @@ def try_to_get_object_via_passed_request_and_expect_error(
) -> None:
try:
if attrs is None:
get_via_http_gate(
cid=cid,
oid=oid,
node=node,
request_path=http_request_path,
)
get_via_http_gate(cid, oid, node, http_request_path)
else:
get_via_http_gate_by_attribute(
cid=cid,
attribute=attrs,
node=node,
request_path=http_request_path,
)
get_via_http_gate_by_attribute(cid, attrs, node, http_request_path)
raise AssertionError(f"Expected error on getting object with cid: {cid}")
except Exception as err:
match = error_pattern.casefold() in str(err).casefold()

View file
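A short usage sketch for the simplified gate helpers; `cid`, `oid` and `node` are assumed to come from the surrounding test:

# Default path: GET {endpoint}/get/{cid}/{oid}; the body is streamed to disk
# in 8192-byte chunks via response.iter_content().
test_file = get_via_http_gate(cid, oid, node)

# Positional style now used by try_to_get_object_via_passed_request_and_expect_error.
test_file = get_via_http_gate(cid, oid, node, f"/get/{cid}/{oid}")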

@ -4,16 +4,18 @@ from frostfs_testlib.storage.cluster import ClusterNode
class IpHelper:
@staticmethod
def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[str]) -> None:
def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[tuple]) -> None:
shell = node.host.get_shell()
for ip in block_ip:
shell.exec(f"ip route add blackhole {ip}")
for ip, table in block_ip:
if not table:
shell.exec(f"ip r a blackhole {ip}")
continue
shell.exec(f"ip r a blackhole {ip} table {table}")
@staticmethod
def restore_input_traffic_to_node(node: ClusterNode) -> None:
shell = node.host.get_shell()
unlock_ip = shell.exec("ip route list | grep blackhole", CommandOptions(check=False))
if unlock_ip.return_code != 0:
return
for ip in unlock_ip.stdout.strip().split("\n"):
shell.exec(f"ip route del blackhole {ip.split(' ')[1]}")
unlock_ip = shell.exec("ip r l table all | grep blackhole", CommandOptions(check=False)).stdout
for active_blackhole in unlock_ip.strip().split("\n"):
shell.exec(f"ip r d {active_blackhole}")

View file
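A sketch of the new (ip, table) contract; the addresses and the routing table name are illustrative:

# Blackhole one IP in the main routing table and another in table "100",
# then delete every blackhole route found in any table.
pairs = [("192.168.1.10", None), ("192.168.1.11", "100")]
IpHelper.drop_input_traffic_to_node(node, pairs)
IpHelper.restore_input_traffic_to_node(node)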

@ -6,8 +6,7 @@ from typing import Optional
from dateutil.parser import parse
from frostfs_testlib import reporter
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
from frostfs_testlib.s3.interfaces import BucketContainerResolver
from frostfs_testlib.clients.s3 import BucketContainerResolver, S3ClientWrapper, VersioningStatus
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.container import search_nodes_with_container
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
@ -185,3 +184,26 @@ def search_nodes_with_bucket(
break
nodes_list = search_nodes_with_container(wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster)
return nodes_list
def get_bytes_relative_to_object(value: int | str, object_size: int = None, part_size: int = None) -> int:
if isinstance(value, int):
return value
if "part" not in value and "object" not in value:
return int(value)
if object_size is not None:
value = value.replace("object", str(object_size))
if part_size is not None:
value = value.replace("part", str(part_size))
return int(eval(value))
def get_range_relative_to_object(rng: str, object_size: int = None, part_size: int = None, int_values: bool = False) -> str | int:
start, end = rng.split(":")
start = get_bytes_relative_to_object(start, object_size, part_size)
end = get_bytes_relative_to_object(end, object_size, part_size)
return (start, end) if int_values else f"bytes {start}-{end}/*"

View file
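A quick sketch of how the placeholder ranges resolve; the import path is assumed from this file's location:

from frostfs_testlib.steps.s3.s3_helper import get_range_relative_to_object  # assumed path

# "object" and "part" are substituted with the given sizes, then evaluated.
print(get_range_relative_to_object("0:part", object_size=1000, part_size=100))
# -> "bytes 0-100/*"
print(get_range_relative_to_object("part:object", object_size=1000, part_size=100, int_values=True))
# -> (100, 1000)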

@ -11,10 +11,10 @@ from frostfs_testlib.storage import get_service_registry
from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml
from frostfs_testlib.storage.constants import ConfigAttributes
from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode
from frostfs_testlib.storage.dataclasses.metrics import Metrics
from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass
from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces
from frostfs_testlib.storage.service_registry import ServiceRegistry
from frostfs_testlib.storage.dataclasses.metrics import Metrics
class ClusterNode:

View file

@ -5,6 +5,7 @@ class ConfigAttributes:
WALLET_CONFIG = "wallet_config"
CONFIG_DIR = "service_config_dir"
CONFIG_PATH = "config_path"
WORKING_DIR = "working_dir"
SHARD_CONFIG_PATH = "shard_config_path"
LOGGER_CONFIG_PATH = "logger_config_path"
LOCAL_WALLET_PATH = "local_wallet_path"
@ -15,6 +16,7 @@ class ConfigAttributes:
ENDPOINT_DATA_0_NS = "endpoint_data0_namespace"
ENDPOINT_INTERNAL = "endpoint_internal0"
ENDPOINT_PROMETHEUS = "endpoint_prometheus"
ENDPOINT_PPROF = "endpoint_pprof"
CONTROL_ENDPOINT = "control_endpoint"
UN_LOCODE = "un_locode"
@ -23,4 +25,6 @@ class PlacementRule:
DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X"
REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X"
REP_1_FOR_2_NODES_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 2 FROM * AS X"
DEFAULT_EC_PLACEMENT_RULE = "EC 3.1"
EC_1_1_FOR_2_NODES_PLACEMENT_RULE = "EC 1.1 IN X CBF 1 SELECT 2 FROM * AS X"

View file

@ -1,6 +1,7 @@
import datetime
import itertools
import logging
import time
from datetime import datetime, timezone
from typing import TypeVar
import frostfs_testlib.resources.optionals as optionals
@ -18,7 +19,7 @@ from frostfs_testlib.steps.node_management import include_node_to_network_map, r
from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode
from frostfs_testlib.storage.controllers.disk_controller import DiskController
from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass
from frostfs_testlib.storage.dataclasses.storage_object_info import NodeStatus
from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeStatus
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing import parallel
from frostfs_testlib.testing.test_control import retry, run_optionally, wait_for_success
@ -39,7 +40,7 @@ class ClusterStateController:
def __init__(self, shell: Shell, cluster: Cluster, healthcheck: Healthcheck) -> None:
self.stopped_nodes: list[ClusterNode] = []
self.detached_disks: dict[str, DiskController] = {}
self.dropped_traffic: list[ClusterNode] = []
self.dropped_traffic: set[ClusterNode] = set()
self.excluded_from_netmap: list[StorageNode] = []
self.stopped_services: set[NodeBase] = set()
self.cluster = cluster
@ -172,6 +173,15 @@ class ClusterStateController:
if service_type == StorageNode:
self.wait_after_storage_startup()
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step("Send sighup to all {service_type} services")
def sighup_services_of_type(self, service_type: type[ServiceClass]):
services = self.cluster.services(service_type)
parallel([service.send_signal_to_service for service in services], signal="SIGHUP")
if service_type == StorageNode:
self.wait_after_storage_startup()
@wait_for_success(600, 60)
def wait_s3gate(self, s3gate: S3Gate):
with reporter.step(f"Wait for {s3gate} reconnection"):
@ -206,21 +216,27 @@ class ClusterStateController:
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step("Stop {service_type} service on {node}")
def stop_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass], mask: bool = True):
def stop_service_of_type(self, node: ClusterNode, service_type: ServiceClass, mask: bool = True):
service = node.service(service_type)
service.stop_service(mask)
self.stopped_services.add(service)
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step("Send sighup to {service_type} service on {node}")
def sighup_service_of_type(self, node: ClusterNode, service_type: ServiceClass):
service = node.service(service_type)
service.send_signal_to_service("SIGHUP")
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step("Start {service_type} service on {node}")
def start_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass]):
def start_service_of_type(self, node: ClusterNode, service_type: ServiceClass):
service = node.service(service_type)
service.start_service()
self.stopped_services.discard(service)
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step("Start all stopped {service_type} services")
def start_stopped_services_of_type(self, service_type: type[ServiceClass]):
def start_stopped_services_of_type(self, service_type: ServiceClass):
stopped_svc = self._get_stopped_by_type(service_type)
if not stopped_svc:
return
@ -310,22 +326,22 @@ class ClusterStateController:
@reporter.step("Drop traffic to {node}, nodes - {block_nodes}")
def drop_traffic(self, node: ClusterNode, wakeup_timeout: int, name_interface: str, block_nodes: list[ClusterNode] = None) -> None:
list_ip = self._parse_interfaces(block_nodes, name_interface)
IpHelper.drop_input_traffic_to_node(node, list_ip)
interfaces_tables = self._parse_interfaces(block_nodes, name_interface)
IpHelper.drop_input_traffic_to_node(node, interfaces_tables)
time.sleep(wakeup_timeout)
self.dropped_traffic.append(node)
self.dropped_traffic.add(node)
@reporter.step("Start traffic to {node}")
def restore_traffic(self, node: ClusterNode) -> None:
IpHelper.restore_input_traffic_to_node(node=node)
index = self.dropped_traffic.index(node)
self.dropped_traffic.pop(index)
self.dropped_traffic.discard(node)
@reporter.step("Restore blocked nodes")
def restore_all_traffic(self):
if not self.dropped_traffic:
return
parallel(self._restore_traffic_to_node, self.dropped_traffic)
self.dropped_traffic.clear()
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step("Hard reboot host {node} via magic SysRq option")
@ -374,31 +390,23 @@ class ClusterStateController:
@reporter.step("Get node time")
def get_node_date(self, node: ClusterNode) -> datetime:
shell = node.host.get_shell()
return datetime.datetime.strptime(shell.exec("hwclock -r").stdout.strip(), "%Y-%m-%d %H:%M:%S.%f%z")
return datetime.strptime(shell.exec('date +"%Y-%m-%d %H:%M:%S"').stdout.strip(), "%Y-%m-%d %H:%M:%S")
@reporter.step("Set node time to {in_date}")
def change_node_date(self, node: ClusterNode, in_date: datetime) -> None:
shell = node.host.get_shell()
shell.exec(f"date -s @{time.mktime(in_date.timetuple())}")
shell.exec("hwclock --systohc")
in_date_frmt = in_date.strftime("%Y-%m-%d %H:%M:%S")
shell.exec(f"timedatectl set-time '{in_date_frmt}'")
node_time = self.get_node_date(node)
with reporter.step(f"Verify difference between {node_time} and {in_date} is less than a minute"):
assert (self.get_node_date(node) - in_date) < datetime.timedelta(minutes=1)
assert (node_time - in_date).total_seconds() < 60
@reporter.step(f"Restore time")
@reporter.step("Restore time")
def restore_node_date(self, node: ClusterNode) -> None:
shell = node.host.get_shell()
now_time = datetime.datetime.now(datetime.timezone.utc)
now_time = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S")
with reporter.step(f"Set {now_time} time"):
shell.exec(f"date -s @{time.mktime(now_time.timetuple())}")
shell.exec("hwclock --systohc")
@reporter.step("Change the synchronizer status to {status}")
def set_sync_date_all_nodes(self, status: str):
if status == "active":
parallel(self._enable_date_synchronizer, self.cluster.cluster_nodes)
return
parallel(self._disable_date_synchronizer, self.cluster.cluster_nodes)
shell.exec(f"timedatectl set-time '{now_time}'")
@reporter.step("Set MaintenanceModeAllowed - {status}")
def set_maintenance_mode_allowed(self, status: str, cluster_node: ClusterNode) -> None:
@ -438,9 +446,11 @@ class ClusterStateController:
if not checker_node:
checker_node = cluster_node
netmap = NetmapParser.snapshot_all_nodes(frostfs_cli.netmap.snapshot(checker_node.storage_node.get_rpc_endpoint()).stdout)
netmap = [node for node in netmap if cluster_node.host_ip == node.node]
netmap = [node for node in netmap if cluster_node.get_interface(Interfaces.MGMT) == node.node]
if status == NodeStatus.OFFLINE:
assert cluster_node.host_ip not in netmap, f"{cluster_node.host_ip} not in Offline"
assert (
cluster_node.get_interface(Interfaces.MGMT) not in netmap
), f"{cluster_node.get_interface(Interfaces.MGMT)} not in Offline"
else:
assert netmap[0].node_status == status, f"Node status should be '{status}', but was '{netmap[0].node_status}'"
@ -482,16 +492,6 @@ class ClusterStateController:
frostfs_cli_remote = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config_path)
return frostfs_adm, frostfs_cli, frostfs_cli_remote
def _enable_date_synchronizer(self, cluster_node: ClusterNode):
shell = cluster_node.host.get_shell()
shell.exec("timedatectl set-ntp true")
cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "active", 15)
def _disable_date_synchronizer(self, cluster_node: ClusterNode):
shell = cluster_node.host.get_shell()
shell.exec("timedatectl set-ntp false")
cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "inactive", 15)
def _get_disk_controller(self, node: StorageNode, device: str, mountpoint: str) -> DiskController:
disk_controller_id = DiskController.get_id(node, device)
if disk_controller_id in self.detached_disks.keys():
@ -501,17 +501,31 @@ class ClusterStateController:
return disk_controller
@reporter.step("Restore traffic {node}")
def _restore_traffic_to_node(self, node):
IpHelper.restore_input_traffic_to_node(node)
def _parse_interfaces(self, nodes: list[ClusterNode], name_interface: str):
interfaces = []
def _parse_interfaces(self, nodes: list[ClusterNode], name_interface: str) -> set[tuple]:
interfaces_and_tables = set()
for node in nodes:
dict_interfaces = node.host.config.interfaces
for type, ip in dict_interfaces.items():
if name_interface in type:
interfaces.append(ip)
return interfaces
shell = node.host.get_shell()
lines = shell.exec(f"ip r l table all | grep '{name_interface}'").stdout.splitlines()
ips = []
tables = []
for line in lines:
if "src" not in line or "table local" in line:
continue
parts = line.split()
ips.append(parts[-1])
if "table" in line:
tables.append(parts[parts.index("table") + 1])
tables.append(None)
interfaces_and_tables.update(itertools.product(ips, tables))
return interfaces_and_tables
@reporter.step("Ping node")
def _ping_host(self, node: ClusterNode):

View file

@ -14,14 +14,19 @@ class ConfigStateManager(StateManager):
self.cluster = self.csc.cluster
@reporter.step("Change configuration for {service_type} on all nodes")
def set_on_all_nodes(self, service_type: type[ServiceClass], values: dict[str, Any]):
def set_on_all_nodes(self, service_type: type[ServiceClass], values: dict[str, Any], sighup: bool = False):
services = self.cluster.services(service_type)
nodes = self.cluster.nodes(services)
self.services_with_changed_config.update([(node, service_type) for node in nodes])
self.csc.stop_services_of_type(service_type)
if not sighup:
self.csc.stop_services_of_type(service_type)
parallel([node.config(service_type).set for node in nodes], values=values)
self.csc.start_services_of_type(service_type)
if not sighup:
self.csc.start_services_of_type(service_type)
else:
self.csc.sighup_services_of_type(service_type)
@reporter.step("Change configuration for {service_type} on {node}")
def set_on_node(self, node: ClusterNode, service_type: type[ServiceClass], values: dict[str, Any]):
@ -32,18 +37,26 @@ class ConfigStateManager(StateManager):
self.csc.start_service_of_type(node, service_type)
@reporter.step("Revert all configuration changes")
def revert_all(self):
def revert_all(self, sighup: bool = False):
if not self.services_with_changed_config:
return
parallel(self._revert_svc, self.services_with_changed_config)
parallel(self._revert_svc, self.services_with_changed_config, sighup)
self.services_with_changed_config.clear()
self.csc.start_all_stopped_services()
if not sighup:
self.csc.start_all_stopped_services()
# TODO: parallel can't have multiple parallel_items :(
@reporter.step("Revert all configuration {node_and_service}")
def _revert_svc(self, node_and_service: tuple[ClusterNode, ServiceClass]):
def _revert_svc(self, node_and_service: tuple[ClusterNode, ServiceClass], sighup: bool = False):
node, service_type = node_and_service
self.csc.stop_service_of_type(node, service_type)
service = node.service(service_type)
if not sighup:
self.csc.stop_service_of_type(node, service_type)
node.config(service_type).revert()
if sighup:
service.send_signal_to_service("SIGHUP")

View file
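A hedged flow for the new sighup path; the constructor wiring and the config key are illustrative assumptions:

config_manager = ConfigStateManager(cluster_state_controller)  # assumed constructor
# Apply a change with a SIGHUP reload instead of a stop/start cycle, then revert it.
config_manager.set_on_all_nodes(StorageNode, {"logger.level": "debug"}, sighup=True)
config_manager.revert_all(sighup=True)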

@ -13,6 +13,7 @@ FROSTFS_CONTRACT_CACHE_TIMEOUT = 30
class ObjectOperations(HumanReadableEnum):
PUT = "object.put"
PATCH = "object.patch"
GET = "object.get"
HEAD = "object.head"
GET_RANGE = "object.range"
@ -26,6 +27,18 @@ class ObjectOperations(HumanReadableEnum):
return [op for op in ObjectOperations if op != ObjectOperations.WILDCARD_ALL]
class ContainerOperations(HumanReadableEnum):
PUT = "container.put"
GET = "container.get"
LIST = "container.list"
DELETE = "container.delete"
WILDCARD_ALL = "container.*"
@staticmethod
def get_all():
return [op for op in ContainerOperations if op != ContainerOperations.WILDCARD_ALL]
@dataclass
class Operations:
GET_CONTAINER = "GetContainer"
@ -39,6 +52,7 @@ class Operations:
SEARCH_OBJECT = "SearchObject"
HEAD_OBJECT = "HeadObject"
PUT_OBJECT = "PutObject"
PATCH_OBJECT = "PatchObject"
class Verb(HumanReadableEnum):
@ -124,7 +138,7 @@ class Rule:
if not operations:
self.operations = []
elif isinstance(operations, ObjectOperations):
elif isinstance(operations, (ObjectOperations, ContainerOperations)):
self.operations = [operations]
else:
self.operations = operations

View file

@ -65,6 +65,10 @@ class NodeBase(HumanReadableABC):
with reporter.step(f"Start {self.name} service on {self.host.config.address}"):
self.host.start_service(self.name)
def send_signal_to_service(self, signal: str):
with reporter.step(f"Send -{signal} signal to {self.name} service on {self.host.config.address}"):
self.host.send_signal_to_service(self.name, signal)
@abstractmethod
def service_healthcheck(self) -> bool:
"""Service healthcheck."""
@ -78,6 +82,9 @@ class NodeBase(HumanReadableABC):
def get_metrics_endpoint(self) -> str:
return self._get_attribute(ConfigAttributes.ENDPOINT_PROMETHEUS)
def get_pprof_endpoint(self) -> str:
return self._get_attribute(ConfigAttributes.ENDPOINT_PPROF)
def stop_service(self, mask: bool = True):
if mask:
with reporter.step(f"Mask {self.name} service on {self.host.config.address}"):
@ -140,6 +147,13 @@ class NodeBase(HumanReadableABC):
else None
)
def get_working_dir_path(self) -> Optional[str]:
"""
Returns the working directory path located on the remote host.
"""
config_attributes = self.host.get_service_config(self.name)
return self._get_attribute(ConfigAttributes.WORKING_DIR) if ConfigAttributes.WORKING_DIR in config_attributes.attributes else None
@property
def config_dir(self) -> str:
return self._get_attribute(ConfigAttributes.CONFIG_DIR)
@ -185,9 +199,7 @@ class NodeBase(HumanReadableABC):
if attribute_name not in config.attributes:
if default_attribute_name is None:
raise RuntimeError(
f"Service {self.name} has no {attribute_name} in config and fallback attribute isn't set either"
)
raise RuntimeError(f"Service {self.name} has no {attribute_name} in config and fallback attribute isn't set either")
return config.attributes[default_attribute_name]
@ -197,9 +209,7 @@ class NodeBase(HumanReadableABC):
return self.host.get_service_config(self.name)
def get_service_uptime(self, service: str) -> datetime:
result = self.host.get_shell().exec(
f"systemctl show {service} --property ActiveEnterTimestamp | cut -d '=' -f 2"
)
result = self.host.get_shell().exec(f"systemctl show {service} --property ActiveEnterTimestamp | cut -d '=' -f 2")
start_time = parser.parse(result.stdout.strip())
current_time = datetime.now(tz=timezone.utc)
active_time = current_time - start_time

View file
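The new attributes can be read off any service object; `storage_node` is an assumed NodeBase instance:

pprof_endpoint = storage_node.get_pprof_endpoint()  # raises if not configured
working_dir = storage_node.get_working_dir_path()   # None if not configured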

@ -1,6 +1,9 @@
import re
from dataclasses import dataclass
from typing import Optional
from pydantic import BaseModel, Field, field_validator
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.readable import HumanReadableEnum
@ -75,8 +78,37 @@ class NodeNetInfo:
withdrawal_fee: str = None
homomorphic_hashing_disabled: str = None
maintenance_mode_allowed: str = None
eigen_trust_alpha: str = None
eigen_trust_iterations: str = None
class Attributes(BaseModel):
cluster_name: str = Field(alias="ClusterName")
continent: str = Field(alias="Continent")
country: str = Field(alias="Country")
country_code: str = Field(alias="CountryCode")
external_addr: list[str] = Field(alias="ExternalAddr")
location: str = Field(alias="Location")
node: str = Field(alias="Node")
subdiv: str = Field(alias="SubDiv")
subdiv_code: str = Field(alias="SubDivCode")
un_locode: str = Field(alias="UN-LOCODE")
role: str = Field(alias="role")
@field_validator("external_addr", mode="before")
@classmethod
def convert_external_addr(cls, value: str) -> list[str]:
return [f"{ip}:{port}" for ip, port in re.findall(r"/ip4/([\d\.]+)/(?:tcp|tls)/(\d+)", value)]
class NodeInfo(BaseModel):
public_key: str = Field(alias="publicKey")
addresses: list[str] = Field(alias="addresses")
state: str = Field(alias="state")
attributes: Attributes = Field(alias="attributes")
@field_validator("addresses", mode="before")
@classmethod
def convert_external_addr(cls, value: str) -> list[str]:
return [f"{ip}:{port}" for ip, port in re.findall(r"/ip4/([\d\.]+)/(?:tcp|tls)/(\d+)", ",".join(value))]
@dataclass

View file
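A self-contained sketch of what the `convert_external_addr` validators do with multiaddr-style input (the sample value is illustrative):

import re

value = "/ip4/10.78.69.117/tcp/8080,/ip4/10.78.69.117/tls/8082"
print([f"{ip}:{port}" for ip, port in re.findall(r"/ip4/([\d\.]+)/(?:tcp|tls)/(\d+)", value)])
# -> ['10.78.69.117:8080', '10.78.69.117:8082']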

@ -1,14 +1,14 @@
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.storage.grpc_operations import interfaces
from frostfs_testlib.storage.grpc_operations.implementations import container, object
from frostfs_testlib.storage.grpc_operations import implementations, interfaces, interfaces_wrapper
class CliClientWrapper(interfaces.GrpcClientWrapper):
class CliClientWrapper(interfaces_wrapper.GrpcClientWrapper):
def __init__(self, cli: FrostfsCli) -> None:
self.cli = cli
self.object: interfaces.ObjectInterface = object.ObjectOperations(self.cli)
self.container: interfaces.ContainerInterface = container.ContainerOperations(self.cli)
self.object: interfaces.ObjectInterface = implementations.ObjectOperations(self.cli)
self.container: interfaces.ContainerInterface = implementations.ContainerOperations(self.cli)
self.netmap: interfaces.NetmapInterface = implementations.NetmapOperations(self.cli)
class RpcClientWrapper(interfaces.GrpcClientWrapper):
class RpcClientWrapper(interfaces_wrapper.GrpcClientWrapper):
pass  # To be implemented in a next series of changes

View file

@ -0,0 +1,4 @@
from .chunks import ChunksOperations
from .container import ContainerOperations
from .netmap import NetmapOperations
from .object import ObjectOperations

View file

@ -6,7 +6,7 @@ from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.controllers.shards_watcher import ShardsWatcher
from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo
from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, Interfaces, NodeNetmapInfo
from frostfs_testlib.storage.grpc_operations import interfaces
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils.cli_utils import parse_netmap_output
@ -30,7 +30,7 @@ class ChunksOperations(interfaces.ChunksInterface):
result = []
for node_info in netmap:
for cluster_node in cluster.cluster_nodes:
if node_info.node == cluster_node.host_ip:
if node_info.node == cluster_node.get_interface(Interfaces.MGMT):
result.append(cluster_node)
return result
@ -40,7 +40,7 @@ class ChunksOperations(interfaces.ChunksInterface):
for node_info in netmap:
if node_info.node_id in chunk.confirmed_nodes:
for cluster_node in cluster.cluster_nodes:
if cluster_node.host_ip == node_info.node:
if cluster_node.get_interface(Interfaces.MGMT) == node_info.node:
return (cluster_node, node_info)
@wait_for_success(300, 5, fail_testcase=None)
@ -161,5 +161,5 @@ class ChunksOperations(interfaces.ChunksInterface):
def _parse_object_nodes(self, object_nodes: str) -> list[Chunk]:
parse_result = json.loads(object_nodes)
if parse_result.get("errors"):
raise parse_result["errors"]
raise RuntimeError(", ".join(parse_result["errors"]))
return [Chunk(**chunk) for chunk in parse_result["data_objects"]]

View file

@ -5,9 +5,9 @@ from typing import List, Optional, Union
from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.clients.s3 import BucketContainerResolver
from frostfs_testlib.plugins import load_plugin
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
from frostfs_testlib.s3.interfaces import BucketContainerResolver
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.grpc_operations import interfaces
from frostfs_testlib.utils import json_utils
@ -181,20 +181,17 @@ class ContainerOperations(interfaces.ContainerInterface):
force: bool = False,
trace: bool = False,
):
try:
return self.cli.container.delete(
rpc_endpoint=endpoint,
cid=cid,
address=address,
await_mode=await_mode,
session=session,
ttl=ttl,
xhdr=xhdr,
force=force,
trace=trace,
).stdout
except RuntimeError as e:
print(f"Error request:\n{e}")
return self.cli.container.delete(
rpc_endpoint=endpoint,
cid=cid,
address=address,
await_mode=await_mode,
session=session,
ttl=ttl,
xhdr=xhdr,
force=force,
trace=trace,
).stdout
@reporter.step("Get container")
def get(

View file

@ -0,0 +1,171 @@
import json as module_json
from typing import List, Optional
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.cli.netmap_parser import NetmapParser
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo
from .. import interfaces
class NetmapOperations(interfaces.NetmapInterface):
def __init__(self, cli: FrostfsCli) -> None:
self.cli = cli
def epoch(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
trace: Optional[bool] = True,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> int:
"""
Get current epoch number.
"""
output = (
self.cli.netmap.epoch(
rpc_endpoint=rpc_endpoint,
wallet=wallet,
address=address,
generate_key=generate_key,
ttl=ttl,
trace=trace,
xhdr=xhdr,
timeout=timeout,
)
.stdout.split("Trace ID")[0]
.strip()
)
return int(output)
def netinfo(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
trace: Optional[bool] = True,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> NodeNetInfo:
"""
Get network info from the target node.
"""
output = (
self.cli.netmap.netinfo(
rpc_endpoint=rpc_endpoint,
wallet=wallet,
address=address,
generate_key=generate_key,
ttl=ttl,
trace=trace,
xhdr=xhdr,
timeout=timeout,
)
.stdout.split("Trace ID")[0]
.strip()
)
return NetmapParser.netinfo(output)
def nodeinfo(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
json: bool = True,
ttl: Optional[int] = None,
trace: Optional[bool] = True,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> NodeNetmapInfo:
"""
Get target node info.
"""
output = (
self.cli.netmap.nodeinfo(
rpc_endpoint=rpc_endpoint,
wallet=wallet,
address=address,
generate_key=generate_key,
json=json,
ttl=ttl,
trace=trace,
xhdr=xhdr,
timeout=timeout,
)
.stdout.split("Trace ID")[0]
.strip()
)
return NetmapParser.node_info(module_json.loads(output))
def snapshot(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
trace: Optional[bool] = True,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> List[NodeNetmapInfo]:
"""
Get netmap snapshot.
"""
output = (
self.cli.netmap.snapshot(
rpc_endpoint=rpc_endpoint,
wallet=wallet,
address=address,
generate_key=generate_key,
ttl=ttl,
trace=trace,
xhdr=xhdr,
timeout=timeout,
)
.stdout.split("Trace ID")[0]
.strip()
)
return NetmapParser.snapshot_all_nodes(output)
def snapshot_one_node(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
trace: Optional[bool] = True,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> List[NodeNetmapInfo]:
"""
Get netmap snapshot for a single node.
"""
output = (
self.cli.netmap.snapshot(
rpc_endpoint=rpc_endpoint,
wallet=wallet,
address=address,
generate_key=generate_key,
ttl=ttl,
trace=trace,
xhdr=xhdr,
timeout=timeout,
)
.stdout.split("Trace ID")[0]
.strip()
)
return NetmapParser.snapshot_one_node(output, rpc_endpoint)

View file
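A usage sketch; `frostfs_cli` is assumed to be a configured FrostfsCli instance and the endpoint is illustrative:

client = CliClientWrapper(frostfs_cli)
epoch = client.netmap.epoch(rpc_endpoint="s01.frostfs.devenv:8080")
snapshot = client.netmap.snapshot(rpc_endpoint="s01.frostfs.devenv:8080")
# Each call strips the trailing "Trace ID ..." block from stdout before parsing.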

@ -11,6 +11,7 @@ from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
from frostfs_testlib.resources.common import ASSETS_DIR
from frostfs_testlib.shell.interfaces import CommandResult
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces
from frostfs_testlib.storage.grpc_operations import interfaces
from frostfs_testlib.storage.grpc_operations.implementations.chunks import ChunksOperations
from frostfs_testlib.testing.test_control import wait_for_success
@ -206,6 +207,11 @@ class ObjectOperations(interfaces.ObjectInterface):
hash_type=hash_type,
timeout=timeout,
)
if range:
# Cut off the range prefix and return only the hash
return result.stdout.split(":")[1].strip()
return result.stdout
@reporter.step("Head object")
@ -407,6 +413,57 @@ class ObjectOperations(interfaces.ObjectInterface):
oid = id_str.split(":")[1]
return oid.strip()
@reporter.step("Patch object")
def patch(
self,
cid: str,
oid: str,
endpoint: str,
ranges: list[str] = None,
payloads: list[str] = None,
new_attrs: Optional[str] = None,
replace_attrs: bool = False,
bearer: str = "",
xhdr: Optional[dict] = None,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
trace: bool = False,
) -> str:
"""
PATCH an object.
Args:
cid: ID of Container where we get the Object from
oid: Object ID
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
ranges: An array of ranges in which to replace data in the format [offset1:length1, offset2:length2]
payloads: An array of file paths to be applied in each range
new_attrs: Attributes to be changed in the format "key1=value1,key2=value2"
replace_attrs: Replace all attributes completely with new ones specified in new_attrs
bearer: Path to Bearer Token file, appends to `--bearer` key
xhdr: Request X-Headers in form of Key=Value
session: Path to a JSON-encoded container session token
timeout: Timeout for the operation
trace: Generate trace ID and print it
Returns:
(str): ID of patched Object
"""
result = self.cli.object.patch(
rpc_endpoint=endpoint,
cid=cid,
oid=oid,
range=ranges,
payload=payloads,
new_attrs=new_attrs,
replace_attrs=replace_attrs,
bearer=bearer,
xhdr=xhdr,
session=session,
timeout=timeout,
trace=trace,
)
return result.stdout.split(":")[1].strip()
@reporter.step("Put object to random node")
def put_to_random_node(
self,
@ -618,7 +675,34 @@ class ObjectOperations(interfaces.ObjectInterface):
cluster_node
for netmap_node in netmap_nodes
for cluster_node in cluster.cluster_nodes
if netmap_node.node == cluster_node.host_ip
if netmap_node.node == cluster_node.get_interface(Interfaces.MGMT)
]
return object_nodes
@reporter.step("Search parts of object")
def parts(
self,
cid: str,
oid: str,
alive_node: ClusterNode,
bearer: str = "",
xhdr: Optional[dict] = None,
is_direct: bool = False,
verify_presence_all: bool = False,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> list[str]:
endpoint = alive_node.storage_node.get_rpc_endpoint()
response = self.cli.object.nodes(
rpc_endpoint=endpoint,
cid=cid,
oid=oid,
bearer=bearer,
ttl=1 if is_direct else None,
json=True,
xhdr=xhdr,
timeout=timeout,
verify_presence_all=verify_presence_all,
)
response_json = json.loads(response.stdout)
return [data_object["object_id"] for data_object in response_json["data_objects"]]

View file
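A hedged example of the new patch call; the client handle, identifiers and payload path are assumptions:

# Replace 64 bytes at offset 0 with the contents of a local file and merge in
# a new attribute; the method returns the ID of the patched object.
patched_oid = grpc_client.object.patch(
    cid=cid,
    oid=oid,
    endpoint=endpoint,
    ranges=["0:64"],
    payloads=["/tmp/patch_payload.bin"],
    new_attrs="patched=true",
)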

@ -1,392 +0,0 @@
from abc import ABC, abstractmethod
from typing import Any, List, Optional
from frostfs_testlib.shell.interfaces import CommandResult
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.constants import PlacementRule
from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo
from frostfs_testlib.utils import file_utils
class ChunksInterface(ABC):
@abstractmethod
def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]:
pass
@abstractmethod
def get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]:
pass
@abstractmethod
def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str:
pass
@abstractmethod
def get_all(
self,
rpc_endpoint: str,
cid: str,
oid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
trace: bool = False,
root: bool = False,
verify_presence_all: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> list[Chunk]:
pass
@abstractmethod
def get_parity(
self,
rpc_endpoint: str,
cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
oid: Optional[str] = None,
trace: bool = False,
root: bool = False,
verify_presence_all: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> Chunk:
pass
@abstractmethod
def get_first_data(
self,
rpc_endpoint: str,
cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
oid: Optional[str] = None,
trace: bool = False,
root: bool = False,
verify_presence_all: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> Chunk:
pass
class ObjectInterface(ABC):
def __init__(self) -> None:
self.chunks: ChunksInterface
@abstractmethod
def delete(
self,
cid: str,
oid: str,
endpoint: str,
bearer: str = "",
xhdr: Optional[dict] = None,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def get(
self,
cid: str,
oid: str,
endpoint: str,
bearer: Optional[str] = None,
write_object: Optional[str] = None,
xhdr: Optional[dict] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> file_utils.TestFile:
pass
@abstractmethod
def get_from_random_node(
self,
cid: str,
oid: str,
cluster: Cluster,
bearer: Optional[str] = None,
write_object: Optional[str] = None,
xhdr: Optional[dict] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def hash(
self,
endpoint: str,
cid: str,
oid: str,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
range: Optional[str] = None,
salt: Optional[str] = None,
ttl: Optional[int] = None,
session: Optional[str] = None,
hash_type: Optional[str] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def head(
self,
cid: str,
oid: str,
endpoint: str,
bearer: str = "",
xhdr: Optional[dict] = None,
json_output: bool = True,
is_raw: bool = False,
is_direct: bool = False,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult | Any:
pass
@abstractmethod
def lock(
self,
cid: str,
oid: str,
endpoint: str,
lifetime: Optional[int] = None,
expire_at: Optional[int] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def put(
self,
path: str,
cid: str,
endpoint: str,
bearer: Optional[str] = None,
copies_number: Optional[int] = None,
attributes: Optional[dict] = None,
xhdr: Optional[dict] = None,
expire_at: Optional[int] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def put_to_random_node(
self,
path: str,
cid: str,
cluster: Cluster,
bearer: Optional[str] = None,
copies_number: Optional[int] = None,
attributes: Optional[dict] = None,
xhdr: Optional[dict] = None,
expire_at: Optional[int] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def range(
self,
cid: str,
oid: str,
range_cut: str,
endpoint: str,
bearer: str = "",
xhdr: Optional[dict] = None,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> tuple[file_utils.TestFile, bytes]:
pass
@abstractmethod
def search(
self,
cid: str,
endpoint: str,
bearer: str = "",
oid: Optional[str] = None,
filters: Optional[dict] = None,
expected_objects_list: Optional[list] = None,
xhdr: Optional[dict] = None,
session: Optional[str] = None,
phy: bool = False,
root: bool = False,
timeout: Optional[str] = None,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
ttl: Optional[int] = None,
) -> List:
pass
@abstractmethod
def nodes(
self,
cluster: Cluster,
cid: str,
oid: str,
alive_node: ClusterNode,
bearer: str = "",
xhdr: Optional[dict] = None,
is_direct: bool = False,
verify_presence_all: bool = False,
timeout: Optional[str] = None,
) -> List[ClusterNode]:
pass
class ContainerInterface(ABC):
@abstractmethod
def create(
self,
endpoint: str,
nns_zone: Optional[str] = None,
nns_name: Optional[str] = None,
address: Optional[str] = None,
attributes: Optional[dict] = None,
basic_acl: Optional[str] = None,
await_mode: bool = False,
disable_timestamp: bool = False,
force: bool = False,
trace: bool = False,
name: Optional[str] = None,
nonce: Optional[str] = None,
policy: Optional[str] = None,
session: Optional[str] = None,
subnet: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> str:
"""
Create a new container and register it in the FrostFS.
It will be stored in the sidechain when the Inner Ring accepts it.
"""
raise NotImplementedError("No implemethed method create")
@abstractmethod
def delete(
self,
endpoint: str,
cid: str,
address: Optional[str] = None,
await_mode: bool = False,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
force: bool = False,
trace: bool = False,
) -> List[str]:
"""
Delete an existing container.
Only the owner of the container has permission to remove the container.
"""
raise NotImplementedError("No implemethed method delete")
@abstractmethod
def get(
self,
endpoint: str,
cid: str,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
await_mode: bool = False,
to: Optional[str] = None,
json_mode: bool = True,
trace: bool = False,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> List[str]:
"""Get container field info."""
raise NotImplementedError("No implemethed method get")
@abstractmethod
def get_eacl(
self,
endpoint: str,
cid: str,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
await_mode: bool = False,
json_mode: bool = True,
trace: bool = False,
to: Optional[str] = None,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> List[str]:
"""Get extended ACL table of container."""
raise NotImplementedError("No implemethed method get-eacl")
@abstractmethod
def list(
self,
endpoint: str,
name: Optional[str] = None,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
trace: bool = False,
owner: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
**params,
) -> List[str]:
"""List all created containers."""
raise NotImplementedError("No implemethed method list")
@abstractmethod
def nodes(
self,
endpoint: str,
cid: str,
cluster: Cluster,
address: Optional[str] = None,
ttl: Optional[int] = None,
from_file: Optional[str] = None,
trace: bool = False,
short: Optional[bool] = True,
xhdr: Optional[dict] = None,
generate_key: Optional[bool] = None,
timeout: Optional[str] = None,
) -> List[ClusterNode]:
"""Show the nodes participating in the container in the current epoch."""
raise NotImplementedError("No implemethed method nodes")
class GrpcClientWrapper(ABC):
def __init__(self) -> None:
self.object: ObjectInterface
self.container: ContainerInterface

View file

@ -0,0 +1,4 @@
from .chunks import ChunksInterface
from .container import ContainerInterface
from .netmap import NetmapInterface
from .object import ObjectInterface

View file

@ -0,0 +1,79 @@
from abc import ABC, abstractmethod
from typing import Optional
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo
class ChunksInterface(ABC):
@abstractmethod
def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]:
pass
@abstractmethod
def get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]:
pass
@abstractmethod
def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str:
pass
@abstractmethod
def get_all(
self,
rpc_endpoint: str,
cid: str,
oid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
trace: bool = False,
root: bool = False,
verify_presence_all: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> list[Chunk]:
pass
@abstractmethod
def get_parity(
self,
rpc_endpoint: str,
cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
oid: Optional[str] = None,
trace: bool = False,
root: bool = False,
verify_presence_all: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> Chunk:
pass
@abstractmethod
def get_first_data(
self,
rpc_endpoint: str,
cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
oid: Optional[str] = None,
trace: bool = False,
root: bool = False,
verify_presence_all: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> Chunk:
pass

View file

@ -0,0 +1,125 @@
from abc import ABC, abstractmethod
from typing import List, Optional
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
class ContainerInterface(ABC):
@abstractmethod
def create(
self,
endpoint: str,
nns_zone: Optional[str] = None,
nns_name: Optional[str] = None,
address: Optional[str] = None,
attributes: Optional[dict] = None,
basic_acl: Optional[str] = None,
await_mode: bool = False,
disable_timestamp: bool = False,
force: bool = False,
trace: bool = False,
name: Optional[str] = None,
nonce: Optional[str] = None,
policy: Optional[str] = None,
session: Optional[str] = None,
subnet: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> str:
"""
Create a new container and register it in the FrostFS.
It will be stored in the sidechain when the Inner Ring accepts it.
"""
raise NotImplementedError("No implemethed method create")
@abstractmethod
def delete(
self,
endpoint: str,
cid: str,
address: Optional[str] = None,
await_mode: bool = False,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
force: bool = False,
trace: bool = False,
) -> List[str]:
"""
Delete an existing container.
Only the owner of the container has permission to remove the container.
"""
raise NotImplementedError("No implemethed method delete")
@abstractmethod
def get(
self,
endpoint: str,
cid: str,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
await_mode: bool = False,
to: Optional[str] = None,
json_mode: bool = True,
trace: bool = False,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> List[str]:
"""Get container field info."""
raise NotImplementedError("No implemethed method get")
@abstractmethod
def get_eacl(
self,
endpoint: str,
cid: str,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
await_mode: bool = False,
json_mode: bool = True,
trace: bool = False,
to: Optional[str] = None,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> List[str]:
"""Get extended ACL table of container."""
raise NotImplementedError("No implemethed method get-eacl")
@abstractmethod
def list(
self,
endpoint: str,
name: Optional[str] = None,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
trace: bool = False,
owner: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
**params,
) -> List[str]:
"""List all created containers."""
raise NotImplementedError("No implemethed method list")
@abstractmethod
def nodes(
self,
endpoint: str,
cid: str,
cluster: Cluster,
address: Optional[str] = None,
ttl: Optional[int] = None,
from_file: Optional[str] = None,
trace: bool = False,
short: Optional[bool] = True,
xhdr: Optional[dict] = None,
generate_key: Optional[bool] = None,
timeout: Optional[str] = None,
) -> List[ClusterNode]:
"""Show the nodes participating in the container in the current epoch."""
raise NotImplementedError("No implemethed method nodes")

View file

@ -0,0 +1,89 @@
from abc import ABC, abstractmethod
from typing import List, Optional
from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo
class NetmapInterface(ABC):
@abstractmethod
def epoch(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
trace: Optional[bool] = False,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> int:
"""
Get current epoch number.
"""
raise NotImplementedError("No implemethed method epoch")
@abstractmethod
def netinfo(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> NodeNetInfo:
"""
Get network info from the target node.
"""
raise NotImplementedError("No implemethed method netinfo")
@abstractmethod
def nodeinfo(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> NodeNetmapInfo:
"""
Get target node info.
"""
raise NotImplementedError("No implemethed method nodeinfo")
@abstractmethod
def snapshot(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> List[NodeNetmapInfo]:
"""
Get netmap snapshot.
"""
raise NotImplementedError("No implemethed method snapshot")
@abstractmethod
def snapshot_one_node(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> List[NodeNetmapInfo]:
"""
Get netmap snapshot for a single node.
"""
raise NotImplementedError("No implemethed method snapshot")

View file

@ -0,0 +1,223 @@
from abc import ABC, abstractmethod
from typing import Any, List, Optional
from frostfs_testlib.shell.interfaces import CommandResult
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.utils import file_utils
from .chunks import ChunksInterface
class ObjectInterface(ABC):
def __init__(self) -> None:
self.chunks: ChunksInterface
@abstractmethod
def delete(
self,
cid: str,
oid: str,
endpoint: str,
bearer: str = "",
xhdr: Optional[dict] = None,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def get(
self,
cid: str,
oid: str,
endpoint: str,
bearer: Optional[str] = None,
write_object: Optional[str] = None,
xhdr: Optional[dict] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> file_utils.TestFile:
pass
@abstractmethod
def get_from_random_node(
self,
cid: str,
oid: str,
cluster: Cluster,
bearer: Optional[str] = None,
write_object: Optional[str] = None,
xhdr: Optional[dict] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def hash(
self,
endpoint: str,
cid: str,
oid: str,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
range: Optional[str] = None,
salt: Optional[str] = None,
ttl: Optional[int] = None,
session: Optional[str] = None,
hash_type: Optional[str] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def head(
self,
cid: str,
oid: str,
endpoint: str,
bearer: str = "",
xhdr: Optional[dict] = None,
json_output: bool = True,
is_raw: bool = False,
is_direct: bool = False,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult | Any:
pass
@abstractmethod
def lock(
self,
cid: str,
oid: str,
endpoint: str,
lifetime: Optional[int] = None,
expire_at: Optional[int] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def put(
self,
path: str,
cid: str,
endpoint: str,
bearer: Optional[str] = None,
copies_number: Optional[int] = None,
attributes: Optional[dict] = None,
xhdr: Optional[dict] = None,
expire_at: Optional[int] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def patch(
self,
cid: str,
oid: str,
endpoint: str,
ranges: Optional[list[str]] = None,
payloads: Optional[list[str]] = None,
new_attrs: Optional[str] = None,
replace_attrs: bool = False,
bearer: Optional[str] = None,
xhdr: Optional[dict] = None,
session: Optional[str] = None,
timeout: Optional[str] = None,
trace: bool = False,
) -> str:
pass
@abstractmethod
def put_to_random_node(
self,
path: str,
cid: str,
cluster: Cluster,
bearer: Optional[str] = None,
copies_number: Optional[int] = None,
attributes: Optional[dict] = None,
xhdr: Optional[dict] = None,
expire_at: Optional[int] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def range(
self,
cid: str,
oid: str,
range_cut: str,
endpoint: str,
bearer: str = "",
xhdr: Optional[dict] = None,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> tuple[file_utils.TestFile, bytes]:
pass
@abstractmethod
def search(
self,
cid: str,
endpoint: str,
bearer: str = "",
oid: Optional[str] = None,
filters: Optional[dict] = None,
expected_objects_list: Optional[list] = None,
xhdr: Optional[dict] = None,
session: Optional[str] = None,
phy: bool = False,
root: bool = False,
timeout: Optional[str] = None,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
ttl: Optional[int] = None,
) -> List:
pass
@abstractmethod
def nodes(
self,
cluster: Cluster,
cid: str,
oid: str,
alive_node: ClusterNode,
bearer: str = "",
xhdr: Optional[dict] = None,
is_direct: bool = False,
verify_presence_all: bool = False,
timeout: Optional[str] = None,
) -> List[ClusterNode]:
pass
@abstractmethod
def parts(
self,
cid: str,
oid: str,
alive_node: ClusterNode,
bearer: str = "",
xhdr: Optional[dict] = None,
is_direct: bool = False,
verify_presence_all: bool = False,
timeout: Optional[str] = None,
) -> List[str]:
pass
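
As a rough illustration, a put/get round-trip through an ObjectInterface implementation could look like the sketch below (client is a hypothetical concrete implementation; cid and endpoint are placeholders):

from frostfs_testlib.utils import file_utils

def roundtrip(client, cid: str, endpoint: str) -> file_utils.TestFile:
    # Upload a locally generated file, then read it back by the returned OID
    source = file_utils.generate_file(size=1024)
    oid = client.object.put(path=str(source), cid=cid, endpoint=endpoint)
    return client.object.get(cid=cid, oid=oid, endpoint=endpoint)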

View file

@@ -0,0 +1,10 @@
from abc import ABC
from . import interfaces
class GrpcClientWrapper(ABC):
def __init__(self) -> None:
self.object: interfaces.ObjectInterface
self.container: interfaces.ContainerInterface
self.netmap: interfaces.NetmapInterface
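
A concrete wrapper is expected to populate these attributes in its own __init__; a hypothetical sketch (the Cli* implementation classes below are invented for illustration only):

class CliGrpcClientWrapper(GrpcClientWrapper):
    def __init__(self) -> None:
        # Wire a concrete implementation of each interface group
        self.object = CliObjectInterface()
        self.container = CliContainerInterface()
        self.netmap = CliNetmapInterface()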

View file

@@ -1,4 +1,5 @@
import itertools
import traceback
from concurrent.futures import Future, ThreadPoolExecutor
from contextlib import contextmanager
from typing import Callable, Collection, Optional, Union
@@ -55,7 +56,42 @@ def parallel(
# Check for exceptions
exceptions = [future.exception() for future in futures if future.exception()]
if exceptions:
message = "\n".join([str(e) for e in exceptions])
# Prettify exceptions raised during the parallel run, together with all underlying stack traces.
# For example, if 3 RuntimeError exceptions occurred during the run, this format will give us something like:
#
# RuntimeError: The following exceptions occurred during parallel run:
# 1) Exception one text
# 2) Exception two text
# 3) Exception three text
# TRACES:
# ==== 1 ====
# Traceback (most recent call last):
# File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run
# result = self.fn(*self.args, **self.kwargs)
# File "frostfs_testcases/pytest_tests/testsuites/object/test_object_tombstone.py", line 17, in check_service
# raise RuntimeError(f"Exception one text")
# RuntimeError: Exception one text
#
# ==== 2 ====
# Traceback (most recent call last):
# File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run
# result = self.fn(*self.args, **self.kwargs)
# File "frostfs_testcases/pytest_tests/testsuites/object/test_object_tombstone.py", line 17, in check_service
# raise RuntimeError(f"Exception two text")
# RuntimeError: Exception two text
#
# ==== 3 ====
# Traceback (most recent call last):
# File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run
# result = self.fn(*self.args, **self.kwargs)
# File "frostfs_testcases/pytest_tests/testsuites/object/test_object_tombstone.py", line 17, in check_service
# raise RuntimeError(f"Exception three text")
# RuntimeError: Exception three text
short_summary = "\n".join([f"{i}) {str(e)}" for i, e in enumerate(exceptions, 1)])
stack_traces = "\n".join(
[f"==== {i} ====\n{''.join(traceback.TracebackException.from_exception(e).format())}" for i, e in enumerate(exceptions, 1)]
)
message = f"{short_summary}\nTRACES:\n{stack_traces}"
raise RuntimeError(f"The following exceptions occured during parallel run:\n{message}")
return futures
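
A quick way to see this formatting in action is to push a few deliberately failing callables through parallel (a sketch; assumes the import path below matches this module):

from frostfs_testlib.testing.parallel import parallel

def boom(name: str) -> None:
    raise RuntimeError(f"Exception {name} text")

# Raises RuntimeError with the numbered summary followed by the TRACES section
parallel(boom, ["one", "two", "three"])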

View file

@@ -1,13 +1,16 @@
import inspect
import logging
import os
from functools import wraps
from time import sleep, time
from typing import Any
import yaml
from _pytest.outcomes import Failed
from pytest import fail
from frostfs_testlib import reporter
from frostfs_testlib.resources.common import ASSETS_DIR
from frostfs_testlib.utils.func_utils import format_by_args
logger = logging.getLogger("NeoLogger")
@@ -128,6 +131,42 @@ def run_optionally(enabled: bool, mock_value: Any = True):
return deco
def cached_fixture(enabled: bool):
"""
Decorator to cache fixture results.
MUST be placed after @pytest.fixture and before @allure decorators.
Args:
enabled: if True, the result of the decorated fixture will be cached.
"""
def deco(func):
@wraps(func)
def func_impl(*a, **kw):
# TODO: *a and *kw should be hashed into a checksum and used in the cache filename to prevent loading cached results for different parameters
cache_file = os.path.join(ASSETS_DIR, f"fixture_cache_{func.__name__}.yml")
if enabled and os.path.exists(cache_file):
with open(cache_file, "r") as cache_input:
return yaml.load(cache_input, Loader=yaml.Loader)
result = func(*a, **kw)
if enabled:
with open(cache_file, "w") as cache_output:
yaml.dump(result, cache_output)
return result
# TODO: cache yielding fixtures
@wraps(func)
def gen_impl(*a, **kw):
raise NotImplementedError("Not implemented for yielding fixtures")
return gen_impl if inspect.isgeneratorfunction(func) else func_impl
return deco
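
Decorator ordering matters here: cached_fixture must sit below @pytest.fixture. A usage sketch for a regular (non-yielding) fixture, where create_user is a hypothetical expensive setup call:

import pytest

@pytest.fixture(scope="session")
@cached_fixture(enabled=True)  # result cached to ASSETS_DIR/fixture_cache_default_user.yml
def default_user():
    return create_user()  # hypothetical expensive setup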
def wait_for_success(
max_wait_time: int = 60,
interval: int = 1,

View file

@@ -80,6 +80,9 @@ def log_command_execution(cmd: str, output: Union[str, dict], params: Optional[d
if not params:
params = {}
if params.get("Body") and len(params.get("Body")) > 1000:
params["Body"] = "<large text data>"
output_params = params
try:

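The intent of the change above is to keep request logs readable: any Body value longer than 1000 characters is swapped out before logging. Roughly:

params = {"Bucket": "test-bucket", "Body": "x" * 5000}
# After the check above, the logged copy becomes:
# {"Bucket": "test-bucket", "Body": "<large text data>"}
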
View file

@@ -45,7 +45,7 @@ def ensure_directory_opener(path, flags):
# TODO: Do not add {size} to title yet, since it produces dynamic info in top level steps
# Use object_size dt in future as argument
@reporter.step("Generate file")
def generate_file(size: int) -> TestFile:
def generate_file(size: int, file_name: Optional[str] = None) -> TestFile:
"""Generates a binary file with the specified size in bytes.
Args:
@@ -54,7 +54,11 @@ def generate_file(size: int) -> TestFile:
Returns:
The path to the generated file.
"""
test_file = TestFile(os.path.join(ASSETS_DIR, string_utils.unique_name("object-")))
if file_name is None:
file_name = string_utils.unique_name("object-")
test_file = TestFile(os.path.join(ASSETS_DIR, file_name))
with open(test_file, "wb", opener=ensure_directory_opener) as file:
file.write(os.urandom(size))
logger.info(f"File with size {size} bytes has been generated: {test_file}")

View file

@@ -64,7 +64,7 @@ def parallel_binary_verions(host: Host) -> dict[str, str]:
try:
result = shell.exec(f"{binary_path} {binary['param']}")
version = parse_version(result.stdout) or parse_version(result.stderr) or "Unknown"
versions_at_host[binary_name] = version
versions_at_host[binary_name] = version.strip()
except Exception as exc:
logger.error(f"Cannot get version for {binary_path} because of\n{exc}")
versions_at_host[binary_name] = "Unknown"

View file

@@ -2,7 +2,7 @@ from typing import Any
import pytest
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper
from frostfs_testlib.clients import AwsCliClient, Boto3ClientWrapper
from frostfs_testlib.storage.dataclasses.acl import EACLRole
from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize

View file

@@ -6,10 +6,7 @@ import pytest
from frostfs_testlib.load.load_config import EndpointSelectionStrategy, LoadParams, LoadScenario, LoadType, Preset, ReadFrom
from frostfs_testlib.load.runners import DefaultRunner
from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.controllers.background_load_controller import BackgroundLoadController
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
from frostfs_testlib.storage.dataclasses.node_base import NodeBase
@dataclass
@@ -129,6 +126,8 @@ class TestLoadConfig:
"--size '11'",
"--acl 'acl'",
"--preload_obj '13'",
"--retry '24'",
"--rule 'rule' --rule 'rule_2'",
"--out 'pregen_json'",
"--workers '7'",
"--containers '16'",
@@ -161,6 +160,8 @@ class TestLoadConfig:
expected_preset_args = [
"--size '11'",
"--preload_obj '13'",
"--retry '24'",
"--rule 'rule' --rule 'rule_2'",
"--out 'pregen_json'",
"--workers '7'",
"--containers '16'",
@@ -317,6 +318,8 @@ class TestLoadConfig:
"--no-verify-ssl",
"--size '11'",
"--preload_obj '13'",
"--retry '24'",
"--rule 'rule' --rule 'rule_2'",
"--out 'pregen_json'",
"--workers '7'",
"--containers '16'",
@@ -350,6 +353,8 @@ class TestLoadConfig:
expected_preset_args = [
"--size '11'",
"--preload_obj '13'",
"--retry '24'",
"--rule 'rule' --rule 'rule_2'",
"--out 'pregen_json'",
"--workers '7'",
"--containers '16'",
@@ -415,6 +420,26 @@ class TestLoadConfig:
self._check_preset_params(load_params, params)
@pytest.mark.parametrize(
"load_type, input, value, params",
[
(LoadType.gRPC, ["A C ", " B"], ["A C", "B"], [f"--rule 'A C' --rule 'B'"]),
(LoadType.gRPC, " A ", ["A"], ["--rule 'A'"]),
(LoadType.gRPC, " A , B ", ["A , B"], ["--rule 'A , B'"]),
(LoadType.gRPC, [" A", "B "], ["A", "B"], ["--rule 'A' --rule 'B'"]),
(LoadType.gRPC, None, None, []),
(LoadType.S3, ["A C ", " B"], ["A C", "B"], []),
(LoadType.S3, None, None, []),
],
)
def test_ape_list_parsing_formatter(self, load_type, input, value, params):
load_params = LoadParams(load_type)
load_params.preset = Preset()
load_params.preset.rule = input
assert load_params.preset.rule == value
self._check_preset_params(load_params, params)
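
The parametrized cases above pin down the normalization rules for Preset.rule: a bare string is wrapped into a single-item list, each item is stripped of surrounding whitespace, and commas inside an item are preserved rather than split on. A behavior-equivalent sketch of that normalization (not the actual LoadParams implementation):

def normalize_rule(rule):
    if rule is None:
        return None
    items = [rule] if isinstance(rule, str) else rule
    return [item.strip() for item in items]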
@pytest.mark.parametrize("load_params, load_type", [(LoadScenario.VERIFY, LoadType.S3)], indirect=True)
def test_argument_parsing_for_s3_verify_scenario(self, load_params: LoadParams):
expected_env_vars = {
@@ -444,6 +469,8 @@ class TestLoadConfig:
expected_preset_args = [
"--size '0'",
"--preload_obj '0'",
"--retry '0'",
"--rule ''",
"--out ''",
"--workers '0'",
"--containers '0'",
@@ -475,6 +502,8 @@ class TestLoadConfig:
expected_preset_args = [
"--size '0'",
"--preload_obj '0'",
"--retry '0'",
"--rule ''",
"--out ''",
"--workers '0'",
"--containers '0'",
@@ -582,6 +611,8 @@ class TestLoadConfig:
expected_preset_args = [
"--size '0'",
"--preload_obj '0'",
"--retry '0'",
"--rule ''",
"--out ''",
"--workers '0'",
"--containers '0'",
@@ -613,6 +644,8 @@ class TestLoadConfig:
expected_preset_args = [
"--size '0'",
"--preload_obj '0'",
"--retry '0'",
"--rule ''",
"--out ''",
"--workers '0'",
"--containers '0'",