Compare commits


No commits in common. "master" and "master" have entirely different histories.

62 changed files with 633 additions and 1926 deletions

View file

@@ -1,3 +1 @@
.* @TrueCloudLab/qa-committers
.forgejo/.* @potyarkin
Makefile @potyarkin
* @JuliaKovshova @abereziny @d.zayakin @anikeev-yadro @anurindm @ylukoyan @i.niyazov

View file

@@ -62,7 +62,7 @@ authmate = "frostfs_testlib.credentials.authmate_s3_provider:AuthmateS3Credentia
wallet_factory = "frostfs_testlib.credentials.wallet_factory_provider:WalletFactoryProvider"
[project.entry-points."frostfs.testlib.bucket_cid_resolver"]
frostfs = "frostfs_testlib.clients.s3.curl_bucket_resolver:CurlBucketContainerResolver"
frostfs = "frostfs_testlib.s3.curl_bucket_resolver:CurlBucketContainerResolver"
[tool.isort]
profile = "black"

View file

@@ -10,7 +10,6 @@ tenacity==8.0.1
pytest==7.1.2
boto3==1.35.30
boto3-stubs[essential]==1.35.30
pydantic==2.10.6
# Dev dependencies
black==22.8.0

View file

@@ -1,4 +1,4 @@
__version__ = "2.0.1"
from .fixtures import configure_testlib, hosting, session_start_time, temp_directory
from .hooks import pytest_add_frostfs_marker, pytest_collection_modifyitems
from .fixtures import configure_testlib, hosting, temp_directory
from .hooks import pytest_collection_modifyitems

View file

@@ -24,7 +24,9 @@ class CliCommand:
def __init__(self, shell: Shell, cli_exec_path: str, **base_params):
self.shell = shell
self.cli_exec_path = cli_exec_path
self.__base_params = " ".join([f"--{param} {value}" for param, value in base_params.items() if value])
self.__base_params = " ".join(
[f"--{param} {value}" for param, value in base_params.items() if value]
)
def _format_command(self, command: str, **params) -> str:
param_str = []
@@ -46,7 +48,9 @@ class CliCommand:
val_str = str(value_item).replace("'", "\\'")
param_str.append(f"--{param} '{val_str}'")
elif isinstance(value, dict):
param_str.append(f'--{param} \'{",".join(f"{key}={val}" for key, val in value.items())}\'')
param_str.append(
f'--{param} \'{",".join(f"{key}={val}" for key, val in value.items())}\''
)
else:
if "'" in str(value):
value_str = str(value).replace('"', '\\"')
@@ -59,18 +63,12 @@ class CliCommand:
return f"{self.cli_exec_path} {self.__base_params} {command or ''} {param_str}"
def _execute(self, command: Optional[str], **params) -> CommandResult:
timeout = int(params["timeout"].rstrip("s")) if params.get("timeout") else None
return self.shell.exec(
self._format_command(command, **params),
CommandOptions(timeout=timeout),
)
return self.shell.exec(self._format_command(command, **params))
def _execute_with_password(self, command: Optional[str], password, **params) -> CommandResult:
timeout = int(params["timeout"].rstrip("s")) if params.get("timeout") else None
return self.shell.exec(
self._format_command(command, **params),
CommandOptions(
interactive_inputs=[InteractiveInput(prompt_pattern="assword", input=password)],
timeout=timeout,
options=CommandOptions(
interactive_inputs=[InteractiveInput(prompt_pattern="assword", input=password)]
),
)
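
For context, a minimal sketch (not part of the diff) of how the wrapper above renders keyword arguments into a shell invocation; the wallet/config flags and the executable name are illustrative assumptions.

# Illustrative sketch: how __base_params and a "30s" timeout string are handled.
base_params = {"wallet": "/path/wallet.json", "config": "/path/config.yml"}
base = " ".join(f"--{param} {value}" for param, value in base_params.items() if value)
timeout = int("30s".rstrip("s"))  # -> 30, forwarded as CommandOptions(timeout=30)
print(f"frostfs-cli {base} container list")
# frostfs-cli --wallet /path/wallet.json --config /path/config.yml container list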

View file

@@ -122,9 +122,7 @@ class FrostfsAdmMorph(CliCommand):
**{param: param_value for param, param_value in locals().items() if param not in ["self"]},
)
def force_new_epoch(
self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None, delta: Optional[int] = None
) -> CommandResult:
def force_new_epoch(self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None) -> CommandResult:
"""Create new FrostFS epoch event in the side chain.
Args:
@@ -353,7 +351,6 @@ class FrostfsAdmMorph(CliCommand):
rule: Optional[list[str]] = None,
path: Optional[str] = None,
chain_id_hex: Optional[bool] = None,
chain_name: Optional[str] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
@@ -384,7 +381,6 @@
target_name: str,
target_type: str,
chain_id_hex: Optional[bool] = None,
chain_name: Optional[str] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
@@ -412,7 +408,6 @@
target_type: str,
target_name: Optional[str] = None,
rpc_endpoint: Optional[str] = None,
chain_name: Optional[str] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
@@ -439,7 +434,6 @@
target_name: str,
target_type: str,
all: Optional[bool] = None,
chain_name: Optional[str] = None,
chain_id_hex: Optional[bool] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
@@ -463,26 +457,3 @@
"morph ape rm-rule-chain",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def get_nns_records(
self,
name: str,
type: Optional[str] = None,
rpc_endpoint: Optional[str] = None,
alphabet_wallets: Optional[str] = None,
) -> CommandResult:
"""Returns domain record of the specified type
Args:
name: Domain name
type: Domain name service record type (A|CNAME|SOA|TXT)
rpc_endpoint: N3 RPC node endpoint
alphabet_wallets: path to alphabet wallets dir
Returns:
Command's result
"""
return self._execute(
"morph nns get-records",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
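
A hedged usage sketch for the get_nns_records method above; the frostfs_adm object, endpoint and paths are placeholders, assuming the usual FrostfsAdm wiring.

# Hypothetical call (names, endpoint and paths are placeholders):
result = frostfs_adm.morph.get_nns_records(
    name="mycontainer.container",            # domain name to resolve
    type="TXT",                              # A|CNAME|SOA|TXT, per the docstring
    rpc_endpoint="http://morph-chain.frostfs.devenv:30333",
    alphabet_wallets="/path/to/alphabet-wallets",
)
print(result.stdout)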

View file

@@ -12,7 +12,6 @@ class FrostfsCliNetmap(CliCommand):
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
trace: Optional[bool] = False,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> CommandResult:
@@ -43,7 +42,6 @@
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
trace: Optional[bool] = False,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> CommandResult:
@@ -75,7 +73,6 @@
generate_key: bool = False,
json: bool = False,
ttl: Optional[int] = None,
trace: Optional[bool] = False,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> CommandResult:
@@ -107,7 +104,6 @@
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
trace: Optional[bool] = False,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> CommandResult:

View file

@@ -276,54 +276,6 @@ class FrostfsCliObject(CliCommand):
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def patch(
self,
rpc_endpoint: str,
cid: str,
oid: str,
range: list[str] = None,
payload: list[str] = None,
new_attrs: Optional[str] = None,
replace_attrs: bool = False,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
session: Optional[str] = None,
timeout: Optional[str] = None,
trace: bool = False,
ttl: Optional[int] = None,
wallet: Optional[str] = None,
xhdr: Optional[dict] = None,
) -> CommandResult:
"""
PATCH an object.
Args:
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>')
cid: Container ID
oid: Object ID
range: An array of ranges in which to replace data in the format [offset1:length1, offset2:length2]
payload: An array of file paths to be applied in each range
new_attrs: Attributes to be changed in the format Key1=Value1,Key2=Value2
replace_attrs: Replace all attributes completely with new ones specified in new_attrs
address: Address of wallet account
bearer: File with signed JSON or binary encoded bearer token
generate_key: Generate new private key
session: Filepath to a JSON- or binary-encoded token of the object RANGE session
timeout: Timeout for the operation
trace: Generate trace ID and print it
ttl: TTL value in request meta header (default 2)
wallet: WIF (NEP-2) string or path to the wallet or binary key
xhdr: Dict with request X-Headers
Returns:
Command's result.
"""
return self._execute(
"object patch",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
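
A hedged usage sketch for the object patch command documented above; the cli object, IDs and file paths are placeholders.

# Hypothetical call (IDs and paths are placeholders):
result = cli.object.patch(
    rpc_endpoint="s01.frostfs.devenv:8080",
    cid="<container-id>",
    oid="<object-id>",
    range=["0:64", "128:32"],                        # offset:length pairs
    payload=["patch_part1.bin", "patch_part2.bin"],  # one file per range
    new_attrs="Environment=Test",
    timeout="60s",
)
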
def range(
self,
rpc_endpoint: str,

View file

@@ -241,21 +241,3 @@ class FrostfsCliShards(CliCommand):
"control shards evacuation status",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def detach(self, endpoint: str, address: Optional[str] = None, id: Optional[str] = None, timeout: Optional[str] = None):
"""
Detach and close the shards
Args:
address: Address of wallet account
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
id: List of shard IDs in base58 encoding
timeout: Timeout for an operation (default 15s)
Returns:
Command's result.
"""
return self._execute(
"control shards detach",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
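
A hedged usage sketch for the detach method above; the frostfs_cli object, endpoint and shard ID are placeholders.

# Hypothetical call (endpoint and shard ID are placeholders):
result = frostfs_cli.shards.detach(
    endpoint="s01.frostfs.devenv:8081",  # remote node control address
    id="<base58-shard-id>",
    timeout="30s",
)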

View file

@@ -1,7 +1,7 @@
import re
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeInfo, NodeNetInfo, NodeNetmapInfo, NodeStatus
from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo, NodeStatus
class NetmapParser:
@@ -20,6 +20,8 @@ class NetmapParser:
"withdrawal_fee": r"Withdrawal fee: (?P<withdrawal_fee>\d+)",
"homomorphic_hashing_disabled": r"Homomorphic hashing disabled: (?P<homomorphic_hashing_disabled>true|false)",
"maintenance_mode_allowed": r"Maintenance mode allowed: (?P<maintenance_mode_allowed>true|false)",
"eigen_trust_alpha": r"EigenTrustAlpha: (?P<eigen_trust_alpha>\d+\w+$)",
"eigen_trust_iterations": r"EigenTrustIterations: (?P<eigen_trust_iterations>\d+)",
}
parse_result = {}
@@ -62,7 +64,7 @@ class NetmapParser:
for node in netmap_nodes:
for key, regex in regexes.items():
search_result = re.search(regex, node, flags=re.MULTILINE)
if search_result is None:
if search_result == None:
result_netmap[key] = None
continue
if key == "node_data_ips":
@@ -81,22 +83,9 @@ class NetmapParser:
return dataclasses_netmap
@staticmethod
def snapshot_one_node(output: str, rpc_endpoint: str) -> NodeNetmapInfo | None:
def snapshot_one_node(output: str, cluster_node: ClusterNode) -> NodeNetmapInfo | None:
snapshot_nodes = NetmapParser.snapshot_all_nodes(output=output)
for snapshot in snapshot_nodes:
for endpoint in snapshot.external_address:
if rpc_endpoint.split(":")[0] in endpoint:
return snapshot
@staticmethod
def node_info(output: dict) -> NodeNetmapInfo:
data_dict = {"attributes": {}}
for key, value in output.items():
if key != "attributes":
data_dict[key] = value
for attribute in output["attributes"]:
data_dict["attributes"][attribute["key"]] = attribute["value"]
return NodeInfo(**data_dict)
snapshot_node = [node for node in snapshot_nodes if node.node == cluster_node.host_ip]
if not snapshot_node:
return None
return snapshot_node[0]
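
To illustrate the regex-driven parsing above, a small self-contained sketch that applies one of the netinfo patterns to a sample line; the sample text is an assumption about the CLI output format.

import re

sample = "Maintenance mode allowed: true"  # assumed netinfo output line
pattern = r"Maintenance mode allowed: (?P<maintenance_mode_allowed>true|false)"
match = re.search(pattern, sample, flags=re.MULTILINE)
if match is not None:
    print(match.group("maintenance_mode_allowed"))  # -> true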

View file

@@ -1,5 +0,0 @@
from frostfs_testlib.clients.http.http_client import HttpClient
from frostfs_testlib.clients.s3.aws_cli_client import AwsCliClient
from frostfs_testlib.clients.s3.boto3_client import Boto3ClientWrapper
from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper
from frostfs_testlib.clients.s3.s3_http_client import S3HttpClient

View file

@@ -1 +0,0 @@
from frostfs_testlib.clients.http.http_client import HttpClient

View file

@@ -1,145 +0,0 @@
import io
import json
import logging
import logging.config
from typing import Mapping, Sequence
import httpx
from frostfs_testlib import reporter
timeout = httpx.Timeout(60, read=150)
LOGGING_CONFIG = {
"disable_existing_loggers": False,
"version": 1,
"handlers": {"default": {"class": "logging.StreamHandler", "formatter": "http", "stream": "ext://sys.stderr"}},
"formatters": {
"http": {
"format": "%(levelname)s [%(asctime)s] %(name)s - %(message)s",
"datefmt": "%Y-%m-%d %H:%M:%S",
}
},
"loggers": {
"httpx": {
"handlers": ["default"],
"level": "DEBUG",
},
"httpcore": {
"handlers": ["default"],
"level": "ERROR",
},
},
}
logging.config.dictConfig(LOGGING_CONFIG)
logger = logging.getLogger("NeoLogger")
class HttpClient:
@reporter.step("Send {method} request to {url}")
def send(self, method: str, url: str, expected_status_code: int = None, **kwargs: dict) -> httpx.Response:
transport = httpx.HTTPTransport(verify=False, retries=5)
client = httpx.Client(timeout=timeout, transport=transport)
response = client.request(method, url, **kwargs)
self._attach_response(response, **kwargs)
logger.info(f"Response: {response.status_code} => {response.text}")
if expected_status_code:
assert (
response.status_code == expected_status_code
), f"Got {response.status_code} response code while {expected_status_code} expected"
return response
@classmethod
def _parse_body(cls, readable: httpx.Request | httpx.Response) -> str | None:
try:
content = readable.read()
except Exception as e:
logger.warning(f"Unable to read file: {str(e)}")
return None
if not content:
return None
request_body = None
try:
request_body = json.loads(content)
except (json.JSONDecodeError, UnicodeDecodeError) as e:
logger.warning(f"Unable to convert body to json: {str(e)}")
if request_body is not None:
return json.dumps(request_body, default=str, indent=4)
try:
request_body = content.decode()
except UnicodeDecodeError as e:
logger.warning(f"Unable to decode binary data to text using UTF-8 encoding: {str(e)}")
request_body = content if request_body is None else request_body
request_body = "<large text data>" if len(request_body) > 1000 else request_body
return request_body
@classmethod
def _parse_files(cls, files: Mapping | Sequence | None) -> dict:
filepaths = {}
if not files:
return filepaths
if isinstance(files, Sequence):
items = files
elif isinstance(files, Mapping):
items = files.items()
else:
raise TypeError(f"'files' must be either Sequence or Mapping, got: {type(files).__name__}")
for name, file in items:
if isinstance(file, io.IOBase):
filepaths[name] = file.name
elif isinstance(file, Sequence):
filepaths[name] = file[1].name
return filepaths
@classmethod
def _attach_response(cls, response: httpx.Response, **kwargs):
request = response.request
request_headers = json.dumps(dict(request.headers), default=str, indent=4)
request_body = cls._parse_body(request)
files = kwargs.get("files")
request_files = cls._parse_files(files)
response_headers = json.dumps(dict(response.headers), default=str, indent=4)
response_body = cls._parse_body(response)
report = (
f"Method: {request.method}\n\n"
+ f"URL: {request.url}\n\n"
+ f"Request Headers: {request_headers}\n\n"
+ (f"Request Body: {request_body}\n\n" if request_body else "")
+ (f"Request Files: {request_files}\n\n" if request_files else "")
+ f"Response Status Code: {response.status_code}\n\n"
+ f"Response Headers: {response_headers}\n\n"
+ (f"Response Body: {response_body}\n\n" if response_body else "")
)
curl_request = cls._create_curl_request(request.url, request.method, request.headers, request_body, request_files)
reporter.attach(report, "Requests Info")
reporter.attach(curl_request, "CURL")
@classmethod
def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str, files: dict) -> str:
excluded_headers = {"Accept-Encoding", "Connection", "User-Agent", "Content-Length"}
headers = " ".join(f"-H '{header.title()}: {value}'" for header, value in headers.items() if header.title() not in excluded_headers)
data = f" -d '{data}'" if data else ""
for name, path in files.items():
data += f' -F "{name}=@{path}"'
# Option -k disables SSL certificate verification
return f"curl {url} -X {method} {headers}{data} -k"
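
A minimal usage sketch for the HttpClient above; the URL is a placeholder.

# Hypothetical usage (URL is a placeholder):
client = HttpClient()
response = client.send("GET", "http://s01.frostfs.devenv:8080/", expected_status_code=200)
print(response.status_code)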

View file

@@ -1,3 +0,0 @@
from frostfs_testlib.clients.s3.aws_cli_client import AwsCliClient
from frostfs_testlib.clients.s3.boto3_client import Boto3ClientWrapper
from frostfs_testlib.clients.s3.interfaces import ACL, BucketContainerResolver, S3ClientWrapper, VersioningStatus

View file

@@ -1,149 +0,0 @@
import hashlib
import logging
import xml.etree.ElementTree as ET
import httpx
from botocore.auth import SigV4Auth
from botocore.awsrequest import AWSRequest
from botocore.credentials import Credentials
from frostfs_testlib import reporter
from frostfs_testlib.clients import HttpClient
from frostfs_testlib.utils.file_utils import TestFile
logger = logging.getLogger("NeoLogger")
DEFAULT_TIMEOUT = 60.0
class S3HttpClient:
def __init__(
self, s3gate_endpoint: str, access_key_id: str, secret_access_key: str, profile: str = "default", region: str = "us-east-1"
) -> None:
self.http_client = HttpClient()
self.credentials = Credentials(access_key_id, secret_access_key)
self.profile = profile
self.region = region
self.iam_endpoint: str = None
self.s3gate_endpoint: str = None
self.service: str = None
self.signature: SigV4Auth = None
self.set_endpoint(s3gate_endpoint)
def _to_s3_header(self, header: str) -> dict:
replacement_map = {
"Acl": "ACL",
"_": "-",
}
result = header
if not header.startswith("x_amz"):
result = header.title()
for find, replace in replacement_map.items():
result = result.replace(find, replace)
return result
def _convert_to_s3_headers(self, scope: dict, exclude: list[str] = None):
exclude = ["self", "cls"] if not exclude else exclude + ["self", "cls"]
return {self._to_s3_header(header): value for header, value in scope.items() if header not in exclude and value is not None}
def _create_aws_request(
self, method: str, url: str, headers: dict, content: str | bytes | TestFile = None, params: dict = None
) -> AWSRequest:
data = b""
if content is not None:
if isinstance(content, TestFile):
with open(content, "rb") as io_content:
data = io_content.read()
elif isinstance(content, str):
data = bytes(content, encoding="utf-8")
elif isinstance(content, bytes):
data = content
else:
raise TypeError(f"Content expected as a string, bytes or TestFile object, got: {content}")
headers["X-Amz-Content-SHA256"] = hashlib.sha256(data).hexdigest()
aws_request = AWSRequest(method, url, headers, data, params)
self.signature.add_auth(aws_request)
return aws_request
def _exec_request(
self,
method: str,
url: str,
headers: dict,
content: str | bytes | TestFile = None,
params: dict = None,
timeout: float = DEFAULT_TIMEOUT,
) -> dict:
aws_request = self._create_aws_request(method, url, headers, content, params)
response = self.http_client.send(
aws_request.method,
aws_request.url,
headers=dict(aws_request.headers),
data=aws_request.data,
params=aws_request.params,
timeout=timeout,
)
try:
response.raise_for_status()
except httpx.HTTPStatusError:
raise httpx.HTTPStatusError(response.text, request=response.request, response=response)
root = ET.fromstring(response.read())
data = {
"LastModified": root.find(".//LastModified").text,
"ETag": root.find(".//ETag").text,
}
if response.headers.get("x-amz-version-id"):
data["VersionId"] = response.headers.get("x-amz-version-id")
return data
@reporter.step("Set endpoint S3 to {s3gate_endpoint}")
def set_endpoint(self, s3gate_endpoint: str):
if self.s3gate_endpoint == s3gate_endpoint:
return
self.s3gate_endpoint = s3gate_endpoint
self.service = "s3"
self.signature = SigV4Auth(self.credentials, self.service, self.region)
@reporter.step("Set endpoint IAM to {iam_endpoint}")
def set_iam_endpoint(self, iam_endpoint: str):
if self.iam_endpoint == iam_endpoint:
return
self.iam_endpoint = iam_endpoint
self.service = "iam"
self.signature = SigV4Auth(self.credentials, self.service, self.region)
@reporter.step("Patch object S3")
def patch_object(
self,
bucket: str,
key: str,
content: str | bytes | TestFile,
content_range: str,
version_id: str = None,
if_match: str = None,
if_unmodified_since: str = None,
x_amz_expected_bucket_owner: str = None,
timeout: float = DEFAULT_TIMEOUT,
) -> dict:
if content_range and not content_range.startswith("bytes"):
content_range = f"bytes {content_range}/*"
url = f"{self.s3gate_endpoint}/{bucket}/{key}"
headers = self._convert_to_s3_headers(locals(), exclude=["bucket", "key", "content", "version_id", "timeout"])
params = {"VersionId": version_id} if version_id is not None else None
return self._exec_request("PATCH", url, headers, content, params, timeout=timeout)
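
A hedged sketch of driving the PATCH call above; endpoint, credentials and object names are placeholders.

# Hypothetical usage (all values are placeholders):
s3_http = S3HttpClient(
    s3gate_endpoint="http://s3.frostfs.devenv:8084",
    access_key_id="<access-key-id>",
    secret_access_key="<secret-access-key>",
)
result = s3_http.patch_object(
    bucket="my-bucket",
    key="my-object",
    content=b"new bytes",
    content_range="0-8",  # normalized to "bytes 0-8/*" by the method
)
print(result["ETag"])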

View file

@@ -1,6 +1,5 @@
import logging
import os
from datetime import datetime
from importlib.metadata import entry_points
import pytest
@@ -12,12 +11,6 @@ from frostfs_testlib.resources.common import ASSETS_DIR, HOSTING_CONFIG_FILE
from frostfs_testlib.storage import get_service_registry
@pytest.fixture(scope="session", autouse=True)
def session_start_time():
start_time = datetime.utcnow()
return start_time
@pytest.fixture(scope="session")
def configure_testlib():
reporter.get_reporter().register_handler(reporter.AllureHandler())

View file

@@ -1,8 +1,8 @@
import pytest
@pytest.hookimpl(specname="pytest_collection_modifyitems")
def pytest_add_frostfs_marker(items: list[pytest.Item]):
@pytest.hookimpl
def pytest_collection_modifyitems(items: list[pytest.Item]):
# All tests which reside in a frostfs nodeid are granted the frostfs marker, excluding
# nodeid = full path of the test
# 1. plugins
@@ -11,18 +11,3 @@ def pytest_add_frostfs_marker(items: list[pytest.Item]):
location = item.location[0]
if "frostfs" in location and "plugin" not in location and "testlib" not in location:
item.add_marker("frostfs")
# pytest hook. Do not rename
@pytest.hookimpl(trylast=True)
def pytest_collection_modifyitems(items: list[pytest.Item]):
# Change order of tests based on @pytest.mark.order(<int>) marker
def order(item: pytest.Item) -> int:
order_marker = item.get_closest_marker("order")
if order_marker and (len(order_marker.args) != 1 or not isinstance(order_marker.args[0], int)):
raise RuntimeError("Incorrect usage of pytest.mark.order")
order_value = order_marker.args[0] if order_marker else 0
return order_value
items.sort(key=lambda item: order(item))
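
For reference, how a test would opt into the ordering hook above; the test bodies are illustrative.

import pytest

# Items are sorted ascending by the marker's int argument; unmarked tests default to 0.
@pytest.mark.order(-1)
def test_runs_early():
    assert True

@pytest.mark.order(5)
def test_runs_late():
    assert True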

View file

@@ -164,9 +164,6 @@ class DockerHost(Host):
return volume_path
def send_signal_to_service(self, service_name: str, signal: str) -> None:
raise NotImplementedError("Not implemented for docker")
def delete_metabase(self, service_name: str) -> None:
raise NotImplementedError("Not implemented for docker")
@@ -250,7 +247,6 @@
unit: Optional[str] = None,
exclude_filter: Optional[str] = None,
priority: Optional[str] = None,
word_count: bool = None,
) -> str:
client = self._get_docker_client()
filtered_logs = ""

View file

@@ -29,9 +29,6 @@ class Host(ABC):
self._service_config_by_name = {service_config.name: service_config for service_config in config.services}
self._cli_config_by_name = {cli_config.name: cli_config for cli_config in config.clis}
def __repr__(self) -> str:
return self.config.address
@property
def config(self) -> HostConfig:
"""Returns config of the host.
@@ -120,17 +117,6 @@
service_name: Name of the service to stop.
"""
@abstractmethod
def send_signal_to_service(self, service_name: str, signal: str) -> None:
"""Send signal to service with specified name using kill -<signal>
The service must be hosted on this host.
Args:
service_name: Name of the service to send the signal to.
signal: signal name. See kill -l for all names
"""
@abstractmethod
def mask_service(self, service_name: str) -> None:
"""Prevent the service from start by any activity by masking it.
@@ -327,7 +313,6 @@
unit: Optional[str] = None,
exclude_filter: Optional[str] = None,
priority: Optional[str] = None,
word_count: bool = None,
) -> str:
"""Get logs from host filtered by regex.
@@ -338,7 +323,6 @@
unit: required unit.
priority: logs level, 0 - emergency, 7 - debug. All messages with that code and higher.
For example, if we specify the -p 2 option, journalctl will show all messages with levels 2, 1 and 0.
word_count: output type, expected values: lines, bytes, json
Returns:
Found entries as str if any found.

View file

@@ -182,10 +182,8 @@ class Preset(MetaConfig):
pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON", False)
# Workers count for preset
workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None, False)
# TODO: Deprecated. Acl for container/buckets
# Acl for container/buckets
acl: Optional[str] = metadata_field(all_load_scenarios, "acl", None, False)
# APE rule for containers instead of deprecated ACL
rule: Optional[list[str]] = metadata_field(grpc_preset_scenarios, "rule", None, False, formatter=force_list)
# ------ GRPC ------
# Amount of containers which should be created

View file

@@ -193,7 +193,7 @@ class RemoteProcess:
)
if "No such file or directory" in terminal.stderr:
return None
elif terminal.return_code != 0:
elif terminal.stderr or terminal.return_code != 0:
raise AssertionError(f"cat process {file} was not successful: {terminal.stderr}")
return terminal.stdout

View file

@@ -53,4 +53,3 @@ HOSTING_CONFIG_FILE = os.getenv(
)
MORE_LOG = os.getenv("MORE_LOG", "1")
EXPIRATION_EPOCH_ATTRIBUTE = "__SYSTEM__EXPIRATION_EPOCH"

View file

@@ -1,6 +1,5 @@
# Regex patterns of status codes of Container service
CONTAINER_NOT_FOUND = "code = 3072.*message = container not found"
SUBJECT_NOT_FOUND = "code = 1024.*message = frostfs error: chain/client.*subject not found.*"
# Regex patterns of status codes of Object service
MALFORMED_REQUEST = "code = 1024.*message = malformed request"
@@ -10,7 +9,6 @@ OBJECT_ALREADY_REMOVED = "code = 2052.*message = object already removed"
SESSION_NOT_FOUND = "code = 4096.*message = session token not found"
OUT_OF_RANGE = "code = 2053.*message = out of range"
EXPIRED_SESSION_TOKEN = "code = 4097.*message = expired session token"
ADD_CHAIN_ERROR = "code = 5120 message = apemanager access denied"
# TODO: Change to codes with message
# OBJECT_IS_LOCKED = "code = 2050.*message = object is locked"
# LOCK_NON_REGULAR_OBJECT = "code = 2051.*message = ..." will be available once 2092 is fixed
@@ -29,10 +27,6 @@ S3_BUCKET_DOES_NOT_ALLOW_ACL = "The bucket does not allow ACLs"
S3_MALFORMED_XML_REQUEST = "The XML you provided was not well-formed or did not validate against our published schema."
RULE_ACCESS_DENIED_CONTAINER = "access to container operation {operation} is denied by access policy engine: Access denied"
# Errors from node missing reasons if request was forwarded. Commenting for now
# RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied"
RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request"
RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied"
NO_RULE_FOUND_CONTAINER = "access to container operation {operation} is denied by access policy engine: NoRuleFound"
# Errors from node missing reasons if request was forwarded. Commenting for now
# NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request: method {operation}: NoRuleFound"
NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request"
NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request: method {operation}: NoRuleFound"

View file

@@ -16,10 +16,11 @@ OPTIONAL_NODE_UNDER_LOAD = os.getenv("OPTIONAL_NODE_UNDER_LOAD")
OPTIONAL_FAILOVER_ENABLED = str_to_bool(os.getenv("OPTIONAL_FAILOVER_ENABLED", "true"))
# Set this to True to disable background load, i.e. a node which is supposed to be stopped will not actually be stopped.
OPTIONAL_BACKGROUND_LOAD_ENABLED = str_to_bool(os.getenv("OPTIONAL_BACKGROUND_LOAD_ENABLED", "true"))
OPTIONAL_BACKGROUND_LOAD_ENABLED = str_to_bool(
os.getenv("OPTIONAL_BACKGROUND_LOAD_ENABLED", "true")
)
# Set this to False to disable autouse fixtures like node healthcheck during development.
OPTIONAL_AUTOUSE_FIXTURES_ENABLED = str_to_bool(os.getenv("OPTIONAL_AUTOUSE_FIXTURES_ENABLED", "true"))
# Use cache for fixtures with the @cached_fixture decorator
OPTIONAL_CACHE_FIXTURES = str_to_bool(os.getenv("OPTIONAL_CACHE_FIXTURES", "false"))
OPTIONAL_AUTOUSE_FIXTURES_ENABLED = str_to_bool(
os.getenv("OPTIONAL_AUTOUSE_FIXTURES_ENABLED", "true")
)

View file

@@ -0,0 +1,3 @@
from frostfs_testlib.s3.aws_cli_client import AwsCliClient
from frostfs_testlib.s3.boto3_client import Boto3ClientWrapper
from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus

View file

@@ -6,8 +6,8 @@ from time import sleep
from typing import Literal, Optional, Union
from frostfs_testlib import reporter
from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict
from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME
from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict
from frostfs_testlib.shell import CommandOptions
from frostfs_testlib.shell.local_shell import LocalShell
from frostfs_testlib.utils import string_utils
@@ -33,14 +33,12 @@ class AwsCliClient(S3ClientWrapper):
self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1"
) -> None:
self.s3gate_endpoint = s3gate_endpoint
self.iam_endpoint = None
self.access_key_id: str = access_key_id
self.secret_access_key: str = secret_access_key
self.profile = profile
self.region = region
self.local_shell = LocalShell()
self.region = region
self.iam_endpoint = None
try:
_configure_aws_cli(f"aws configure --profile {profile}", access_key_id, secret_access_key, region)
self.local_shell.exec(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS} --profile {profile}")
@@ -173,7 +171,7 @@
return response.get("TagSet")
@reporter.step("Get bucket acl")
def get_bucket_acl(self, bucket: str) -> dict:
def get_bucket_acl(self, bucket: str) -> list:
if bucket.startswith("-") or " " in bucket:
bucket = f'"{bucket}"'
@@ -181,7 +179,8 @@
f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
)
output = self.local_shell.exec(cmd).stdout
return self._to_json(output)
response = self._to_json(output)
return response.get("Grants")
@reporter.step("Get bucket location")
def get_bucket_location(self, bucket: str) -> dict:
@@ -197,20 +196,11 @@
return response.get("LocationConstraint")
@reporter.step("List objects S3")
def list_objects(
self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None
) -> Union[dict, list[str]]:
def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
if bucket.startswith("-") or " " in bucket:
bucket = f'"{bucket}"'
cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} "
if page_size:
cmd = cmd.replace("--no-paginate", "")
cmd += f" --page-size {page_size} "
if prefix:
cmd += f" --prefix {prefix}"
if self.profile:
cmd += f" --profile {self.profile} "
cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
@@ -862,7 +852,7 @@
return response["Parts"]
@reporter.step("Complete multipart upload S3")
def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict:
def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None:
if bucket.startswith("-") or " " in bucket:
bucket = f'"{bucket}"'
@@ -979,7 +969,7 @@
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
sleep(S3_SYNC_WAIT_TIME * 14)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@@ -990,7 +980,7 @@
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
sleep(S3_SYNC_WAIT_TIME * 14)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@@ -1122,7 +1112,7 @@
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
sleep(S3_SYNC_WAIT_TIME * 14)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@@ -1133,7 +1123,7 @@
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
sleep(S3_SYNC_WAIT_TIME * 14)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@@ -1229,7 +1219,7 @@
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}"
assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}"
return response
@@ -1241,7 +1231,7 @@
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}"
assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}"
return response
@@ -1266,7 +1256,7 @@
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}"
assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}"
return response
@@ -1278,7 +1268,7 @@
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}"
assert response.get("Groups"), f"Expected Groups in response:\n{response}"
return response
@@ -1290,7 +1280,7 @@
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}"
assert response.get("Groups"), f"Expected Groups in response:\n{response}"
return response
@@ -1326,7 +1316,7 @@
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}"
assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}"
return response
@@ -1352,7 +1342,7 @@
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
sleep(S3_SYNC_WAIT_TIME * 14)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@@ -1367,7 +1357,7 @@
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
sleep(S3_SYNC_WAIT_TIME * 14)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@@ -1450,90 +1440,3 @@
response = self._to_json(output)
return response
# MFA METHODS
@reporter.step("Creates a new virtual MFA device")
def iam_create_virtual_mfa_device(self, virtual_mfa_device_name: str, outfile: str, bootstrap_method: str) -> tuple:
cmd = f"aws {self.common_flags} iam create-virtual-mfa-device --virtual-mfa-device-name {virtual_mfa_device_name}\
--outfile {outfile} --bootstrap-method {bootstrap_method} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
serial_number = response.get("VirtualMFADevice", {}).get("SerialNumber")
assert serial_number, f"Expected SerialNumber in response:\n{response}"
return serial_number, False
@reporter.step("Deactivates the specified MFA device and removes it from association with the user name")
def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict:
cmd = f"aws {self.common_flags} iam deactivate-mfa-device --user-name {user_name} --serial-number {serial_number} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
return response
@reporter.step("Deletes a virtual MFA device")
def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict:
cmd = f"aws {self.common_flags} iam delete-virtual-mfa-device --serial-number {serial_number} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
return response
@reporter.step("Enables the specified MFA device and associates it with the specified IAM user")
def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict:
cmd = f"aws {self.common_flags} iam enable-mfa-device --user-name {user_name} --serial-number {serial_number} --authentication-code1 {authentication_code1}\
--authentication-code2 {authentication_code2} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
return response
@reporter.step("Lists the MFA devices for an IAM user")
def iam_list_virtual_mfa_devices(self) -> dict:
cmd = f"aws {self.common_flags} iam list-virtual-mfa-devices --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert response.get("VirtualMFADevices"), f"Expected VirtualMFADevices in response:\n{response}"
return response
@reporter.step("Get session token for user")
def sts_get_session_token(
self, duration_seconds: Optional[str] = None, serial_number: Optional[str] = None, token_code: Optional[str] = None
) -> tuple:
cmd = f"aws {self.common_flags} sts get-session-token --endpoint {self.iam_endpoint}"
if duration_seconds:
cmd += f" --duration-seconds {duration_seconds}"
if serial_number:
cmd += f" --serial-number {serial_number}"
if token_code:
cmd += f" --token-code {token_code}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
access_key = response.get("Credentials", {}).get("AccessKeyId")
secret_access_key = response.get("Credentials", {}).get("SecretAccessKey")
session_token = response.get("Credentials", {}).get("SessionToken")
assert access_key, f"Expected AccessKeyId in response:\n{response}"
assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}"
assert session_token, f"Expected SessionToken in response:\n{response}"
return access_key, secret_access_key, session_token
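
A hedged end-to-end sketch of the MFA and STS helpers above; the client object, user name, file path and one-time codes are placeholders.

# Hypothetical MFA + session-token flow (all values are placeholders):
serial_number, _ = client.iam_create_virtual_mfa_device(
    virtual_mfa_device_name="test-mfa",
    outfile="/tmp/mfa-seed.png",
    bootstrap_method="QRCodePNG",
)
client.iam_enable_mfa_device("test-user", serial_number, "123456", "654321")
access_key, secret_key, session_token = client.sts_get_session_token(
    serial_number=serial_number,
    token_code="112233",
)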

View file

@@ -13,8 +13,8 @@ from botocore.exceptions import ClientError
from mypy_boto3_s3 import S3Client
from frostfs_testlib import reporter
from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict
from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME
from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict
from frostfs_testlib.utils import string_utils
# TODO: Refactor this code to use shell instead of _cmd_run
@@ -35,20 +35,24 @@ class Boto3ClientWrapper(S3ClientWrapper):
def __init__(
self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1"
) -> None:
self.s3gate_endpoint: str = ""
self.boto3_client: S3Client = None
self.s3gate_endpoint: str = ""
self.iam_endpoint: str = ""
self.boto3_iam_client: S3Client = None
self.boto3_sts_client: S3Client = None
self.iam_endpoint: str = ""
self.access_key_id = access_key_id
self.secret_access_key = secret_access_key
self.access_key_id: str = access_key_id
self.secret_access_key: str = secret_access_key
self.profile = profile
self.region = region
self.session = boto3.Session()
self.config = Config(retries={"max_attempts": MAX_REQUEST_ATTEMPTS, "mode": RETRY_MODE})
self.config = Config(
retries={
"max_attempts": MAX_REQUEST_ATTEMPTS,
"mode": RETRY_MODE,
}
)
self.set_endpoint(s3gate_endpoint)
@@ -80,18 +84,9 @@
service_name="iam",
aws_access_key_id=self.access_key_id,
aws_secret_access_key=self.secret_access_key,
region_name=self.region,
endpoint_url=self.iam_endpoint,
verify=False,
)
# since the STS does not have an endpoint, IAM is used
self.boto3_sts_client = self.session.client(
service_name="sts",
aws_access_key_id=self.access_key_id,
aws_secret_access_key=self.secret_access_key,
endpoint_url=iam_endpoint,
verify=False,
)
def _to_s3_param(self, param: str) -> str:
replacement_map = {
@@ -139,7 +134,6 @@
params = {"Bucket": bucket}
if object_lock_enabled_for_bucket is not None:
params.update({"ObjectLockEnabledForBucket": object_lock_enabled_for_bucket})
if acl is not None:
params.update({"ACL": acl})
elif grant_write or grant_read or grant_full_control:
@@ -149,7 +143,6 @@
params.update({"GrantRead": grant_read})
elif grant_full_control:
params.update({"GrantFullControl": grant_full_control})
if location_constraint:
params.update({"CreateBucketConfiguration": {"LocationConstraint": location_constraint}})
@@ -226,13 +219,14 @@
return response.get("TagSet")
@reporter.step("Get bucket acl")
def get_bucket_acl(self, bucket: str) -> dict:
return self._exec_request(
def get_bucket_acl(self, bucket: str) -> list:
response = self._exec_request(
self.boto3_client.get_bucket_acl,
params={"Bucket": bucket},
endpoint=self.s3gate_endpoint,
profile=self.profile,
)
return response.get("Grants")
@reporter.step("Delete bucket tagging")
def delete_bucket_tagging(self, bucket: str) -> None:
@@ -394,17 +388,10 @@
return response if full_output else obj_list
@reporter.step("List objects S3")
def list_objects(
self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None
) -> Union[dict, list[str]]:
params = {"Bucket": bucket}
if page_size:
params["MaxKeys"] = page_size
if prefix:
params["Prefix"] = prefix
def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
response = self._exec_request(
self.boto3_client.list_objects,
params,
params={"Bucket": bucket},
endpoint=self.s3gate_endpoint,
profile=self.profile,
)
@@ -700,7 +687,7 @@
return response["Parts"]
@reporter.step("Complete multipart upload S3")
def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict:
def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None:
parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts]
params = self._convert_to_s3_params(locals(), exclude=["parts"])
params["MultipartUpload"] = {"Parts": parts}
@@ -836,7 +823,7 @@
endpoint=self.iam_endpoint,
profile=self.profile,
)
sleep(S3_SYNC_WAIT_TIME * 14)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@reporter.step("Attaches the specified managed policy to the specified user")
@@ -848,7 +835,7 @@
endpoint=self.iam_endpoint,
profile=self.profile,
)
sleep(S3_SYNC_WAIT_TIME * 14)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@reporter.step("Creates a new AWS secret access key and access key ID for the specified user")
@@ -979,7 +966,7 @@
endpoint=self.iam_endpoint,
profile=self.profile,
)
sleep(S3_SYNC_WAIT_TIME * 14)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@reporter.step("Removes the specified managed policy from the specified user")
@@ -991,7 +978,7 @@
endpoint=self.iam_endpoint,
profile=self.profile,
)
sleep(S3_SYNC_WAIT_TIME * 14)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@reporter.step("Returns a list of IAM users that are in the specified IAM group")
@@ -1087,7 +1074,7 @@
endpoint=self.iam_endpoint,
profile=self.profile,
)
assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}"
assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}"
return response
@reporter.step("Lists all managed policies that are attached to the specified IAM user")
@@ -1098,7 +1085,7 @@
endpoint=self.iam_endpoint,
profile=self.profile,
)
assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}"
assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}"
return response
@reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to")
@@ -1123,7 +1110,7 @@
endpoint=self.iam_endpoint,
profile=self.profile,
)
assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}"
assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}"
return response
@reporter.step("Lists the IAM groups")
@@ -1133,7 +1120,7 @@
endpoint=self.iam_endpoint,
profile=self.profile,
)
assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}"
assert response.get("Groups"), f"Expected Groups in response:\n{response}"
return response
@reporter.step("Lists the IAM groups that the specified IAM user belongs to")
@@ -1144,7 +1131,7 @@
endpoint=self.iam_endpoint,
profile=self.profile,
)
assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}"
assert response.get("Groups"), f"Expected Groups in response:\n{response}"
return response
@reporter.step("Lists all the managed policies that are available in your AWS account")
@@ -1176,7 +1163,7 @@
endpoint=self.iam_endpoint,
profile=self.profile,
)
assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}"
assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}"
return response
@reporter.step("Lists the IAM users")
@@ -1201,7 +1188,7 @@
endpoint=self.iam_endpoint,
profile=self.profile,
)
sleep(S3_SYNC_WAIT_TIME * 14)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user")
@@ -1216,7 +1203,7 @@
endpoint=self.iam_endpoint,
profile=self.profile,
)
sleep(S3_SYNC_WAIT_TIME * 14)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@reporter.step("Removes the specified user from the specified group")
@@ -1278,66 +1265,3 @@
endpoint=self.iam_endpoint,
profile=self.profile,
)
# MFA methods
@reporter.step("Creates a new virtual MFA device")
def iam_create_virtual_mfa_device(
self, virtual_mfa_device_name: str, outfile: Optional[str] = None, bootstrap_method: Optional[str] = None
) -> tuple:
response = self.boto3_iam_client.create_virtual_mfa_device(VirtualMFADeviceName=virtual_mfa_device_name)
serial_number = response.get("VirtualMFADevice", {}).get("SerialNumber")
base32StringSeed = response.get("VirtualMFADevice", {}).get("Base32StringSeed")
assert serial_number, f"Expected SerialNumber in response:\n{response}"
assert base32StringSeed, f"Expected Base32StringSeed in response:\n{response}"
return serial_number, base32StringSeed
@reporter.step("Deactivates the specified MFA device and removes it from association with the user name")
def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict:
response = self.boto3_iam_client.deactivate_mfa_device(UserName=user_name, SerialNumber=serial_number)
return response
@reporter.step("Deletes a virtual MFA device")
def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict:
response = self.boto3_iam_client.delete_virtual_mfa_device(SerialNumber=serial_number)
return response
@reporter.step("Enables the specified MFA device and associates it with the specified IAM user")
def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict:
response = self.boto3_iam_client.enable_mfa_device(
UserName=user_name,
SerialNumber=serial_number,
AuthenticationCode1=authentication_code1,
AuthenticationCode2=authentication_code2,
)
return response
@reporter.step("Lists the MFA devices for an IAM user")
def iam_list_virtual_mfa_devices(self) -> dict:
response = self.boto3_iam_client.list_virtual_mfa_devices()
assert response.get("VirtualMFADevices"), f"Expected VirtualMFADevices in response:\n{response}"
return response
@reporter.step("Get session token for user")
def sts_get_session_token(
self, duration_seconds: Optional[str] = "", serial_number: Optional[str] = "", token_code: Optional[str] = ""
) -> tuple:
response = self.boto3_sts_client.get_session_token(
DurationSeconds=duration_seconds,
SerialNumber=serial_number,
TokenCode=token_code,
)
access_key = response.get("Credentials", {}).get("AccessKeyId")
secret_access_key = response.get("Credentials", {}).get("SecretAccessKey")
session_token = response.get("Credentials", {}).get("SessionToken")
assert access_key, f"Expected AccessKeyId in response:\n{response}"
assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}"
assert session_token, f"Expected SessionToken in response:\n{response}"
return access_key, secret_access_key, session_token

View file

@@ -1,7 +1,7 @@
import re
from frostfs_testlib.cli.generic_cli import GenericCli
from frostfs_testlib.clients.s3 import BucketContainerResolver
from frostfs_testlib.s3.interfaces import BucketContainerResolver
from frostfs_testlib.storage.cluster import ClusterNode

View file

@@ -22,15 +22,15 @@ class VersioningStatus(HumanReadableEnum):
SUSPENDED = "Suspended"
class ACL:
PRIVATE = "private"
PUBLIC_READ = "public-read"
PUBLIC_READ_WRITE = "public-read-write"
AUTHENTICATED_READ = "authenticated-read"
AWS_EXEC_READ = "aws-exec-read"
BUCKET_OWNER_READ = "bucket-owner-read"
BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control"
LOG_DELIVERY_WRITE = "log-delivery-write"
ACL_COPY = [
"private",
"public-read",
"public-read-write",
"authenticated-read",
"aws-exec-read",
"bucket-owner-read",
"bucket-owner-full-control",
]
class BucketContainerResolver(ABC):
@@ -50,14 +50,6 @@
class S3ClientWrapper(HumanReadableABC):
access_key_id: str
secret_access_key: str
profile: str
region: str
s3gate_endpoint: str
iam_endpoint: str
@abstractmethod
def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str, region: str) -> None:
pass
@@ -136,7 +128,7 @@
"""Deletes the tags from the bucket."""
@abstractmethod
def get_bucket_acl(self, bucket: str) -> dict:
def get_bucket_acl(self, bucket: str) -> list:
"""This implementation of the GET action uses the acl subresource to return the access control list (ACL) of a bucket."""
@abstractmethod
@@ -203,9 +195,7 @@
"""
@abstractmethod
def list_objects(
self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None
) -> Union[dict, list[str]]:
def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
"""Returns some or all (up to 1,000) of the objects in a bucket with each request.
You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
A 200 OK response can contain valid or invalid XML. Make sure to design your application
@@ -344,7 +334,7 @@
"""Lists the parts that have been uploaded for a specific multipart upload."""
@abstractmethod
def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict:
def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None:
"""Completes a multipart upload by assembling previously uploaded parts."""
@abstractmethod
@@ -588,32 +578,3 @@
@abstractmethod
def iam_untag_user(self, user_name: str, tag_keys: list) -> dict:
"""Removes the specified tags from the user"""
# MFA methods
@abstractmethod
def iam_create_virtual_mfa_device(
self, virtual_mfa_device_name: str, outfile: Optional[str] = None, bootstrap_method: Optional[str] = None
) -> tuple:
"""Creates a new virtual MFA device"""
@abstractmethod
def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict:
"""Deactivates the specified MFA device and removes it from association with the user name"""
@abstractmethod
def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict:
"""Deletes a virtual MFA device"""
@abstractmethod
def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict:
"""Enables the specified MFA device and associates it with the specified IAM user"""
@abstractmethod
def iam_list_virtual_mfa_devices(self) -> dict:
"""Lists the MFA devices for an IAM user"""
@abstractmethod
def sts_get_session_token(
self, duration_seconds: Optional[str] = None, serial_number: Optional[str] = None, token_code: Optional[str] = None
) -> tuple:
"""Get session token for user"""

View file

@@ -7,7 +7,9 @@ from typing import Optional, Union
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.plugins import load_plugin
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
from frostfs_testlib.s3.interfaces import BucketContainerResolver
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.object import put_object, put_object_to_random_node
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
@@ -109,8 +111,6 @@
options: Optional[dict] = None,
await_mode: bool = True,
wait_for_creation: bool = True,
nns_zone: Optional[str] = None,
nns_name: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> str:
"""
@@ -143,8 +143,6 @@
result = cli.container.create(
rpc_endpoint=endpoint,
policy=rule,
nns_name=nns_name,
nns_zone=nns_zone,
basic_acl=basic_acl,
attributes=attributes,
name=name,

View file

@@ -12,7 +12,6 @@ from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC,
from frostfs_testlib.resources.common import ASSETS_DIR
from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing import wait_for_success
from frostfs_testlib.utils import json_utils
@@ -753,10 +752,7 @@ def get_object_nodes(
]
object_nodes = [
cluster_node
for netmap_node in netmap_nodes
for cluster_node in cluster.cluster_nodes
if netmap_node.node == cluster_node.get_interface(Interfaces.MGMT)
cluster_node for netmap_node in netmap_nodes for cluster_node in cluster.cluster_nodes if netmap_node.node == cluster_node.host_ip
]
return object_nodes

View file

@@ -12,8 +12,8 @@ import requests
from frostfs_testlib import reporter
from frostfs_testlib.cli import GenericCli
from frostfs_testlib.clients.s3.aws_cli_client import command_options
from frostfs_testlib.resources.common import ASSETS_DIR, SIMPLE_OBJECT_SIZE
from frostfs_testlib.s3.aws_cli_client import command_options
from frostfs_testlib.shell import Shell
from frostfs_testlib.shell.local_shell import LocalShell
from frostfs_testlib.steps.cli.object import get_object
@@ -38,34 +38,34 @@ def get_via_http_gate(
"""
This function gets the given object from the HTTP gate
cid: container id to get object from
oid: object id / object key
oid: object ID
node: node to make request
request_path: (optional) http request, if omitted - use default [{endpoint}/get/{cid}/{oid}]
"""
request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}"
if request_path:
# if `request_path` parameter omitted, use default
if request_path is None:
request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}"
else:
request = f"{node.http_gate.get_endpoint()}{request_path}"
response = requests.get(request, stream=True, timeout=timeout, verify=False)
resp = requests.get(request, stream=True, timeout=timeout, verify=False)
if not response.ok:
if not resp.ok:
raise Exception(
f"""Failed to get object via HTTP gate:
request: {response.request.path_url},
response: {response.text},
headers: {response.headers},
status code: {response.status_code} {response.reason}"""
request: {resp.request.path_url},
response: {resp.text},
headers: {resp.headers},
status code: {resp.status_code} {resp.reason}"""
)
logger.info(f"Request: {request}")
_attach_allure_step(request, response.status_code)
_attach_allure_step(request, resp.status_code)
test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}"))
with open(test_file, "wb") as file:
for chunk in response.iter_content(chunk_size=8192):
file.write(chunk)
shutil.copyfileobj(resp.raw, file)
return test_file
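Both variants stream the object body to disk; a minimal sketch of the same request outside the helper, assuming a reachable HTTP gate (the endpoint and IDs below are placeholders):

import requests

endpoint = "http://hgw.frostfs.devenv"      # assumed gate endpoint
cid, oid = "<container-id>", "<object-id>"  # placeholders
resp = requests.get(f"{endpoint}/get/{cid}/{oid}", stream=True, timeout=300, verify=False)
resp.raise_for_status()
with open(f"{cid}_{oid}", "wb") as file:
    for chunk in resp.iter_content(chunk_size=8192):  # chunked streaming, as in the older variant
        file.write(chunk)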
@ -117,12 +117,12 @@ def get_via_http_gate_by_attribute(
endpoint: http gate endpoint
request_path: (optional) http request path; if omitted, use default [{endpoint}/get_by_attribute/{Key}/{Value}]
"""
attr_name = list(attribute.keys())[0]
attr_value = quote_plus(str(attribute.get(attr_name)))
request = f"{node.http_gate.get_endpoint()}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}"
if request_path:
# if `request_path` parameter omitted, use default
if request_path is None:
request = f"{node.http_gate.get_endpoint()}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}"
else:
request = f"{node.http_gate.get_endpoint()}{request_path}"
resp = requests.get(request, stream=True, timeout=timeout, verify=False)
@ -357,9 +357,19 @@ def try_to_get_object_via_passed_request_and_expect_error(
) -> None:
try:
if attrs is None:
get_via_http_gate(cid, oid, node, http_request_path)
get_via_http_gate(
cid=cid,
oid=oid,
node=node,
request_path=http_request_path,
)
else:
get_via_http_gate_by_attribute(cid, attrs, node, http_request_path)
get_via_http_gate_by_attribute(
cid=cid,
attribute=attrs,
node=node,
request_path=http_request_path,
)
raise AssertionError(f"Expected error on getting object with cid: {cid}")
except Exception as err:
match = error_pattern.casefold() in str(err).casefold()

View file

@ -4,18 +4,16 @@ from frostfs_testlib.storage.cluster import ClusterNode
class IpHelper:
@staticmethod
def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[tuple]) -> None:
def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[str]) -> None:
shell = node.host.get_shell()
for ip, table in block_ip:
if not table:
shell.exec(f"ip r a blackhole {ip}")
continue
shell.exec(f"ip r a blackhole {ip} table {table}")
for ip in block_ip:
shell.exec(f"ip route add blackhole {ip}")
@staticmethod
def restore_input_traffic_to_node(node: ClusterNode) -> None:
shell = node.host.get_shell()
unlock_ip = shell.exec("ip r l table all | grep blackhole", CommandOptions(check=False)).stdout
for active_blackhole in unlock_ip.strip().split("\n"):
shell.exec(f"ip r d {active_blackhole}")
unlock_ip = shell.exec("ip route list | grep blackhole", CommandOptions(check=False))
if unlock_ip.return_code != 0:
return
for ip in unlock_ip.stdout.strip().split("\n"):
shell.exec(f"ip route del blackhole {ip.split(' ')[1]}")

View file

@ -6,7 +6,8 @@ from typing import Optional
from dateutil.parser import parse
from frostfs_testlib import reporter
from frostfs_testlib.clients.s3 import BucketContainerResolver, S3ClientWrapper, VersioningStatus
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
from frostfs_testlib.s3.interfaces import BucketContainerResolver
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.container import search_nodes_with_container
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
@ -184,26 +185,3 @@ def search_nodes_with_bucket(
break
nodes_list = search_nodes_with_container(wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster)
return nodes_list
def get_bytes_relative_to_object(value: int | str, object_size: int = None, part_size: int = None) -> int:
if isinstance(value, int):
return value
if "part" not in value and "object" not in value:
return int(value)
if object_size is not None:
value = value.replace("object", str(object_size))
if part_size is not None:
value = value.replace("part", str(part_size))
return int(eval(value))
def get_range_relative_to_object(rng: str, object_size: int = None, part_size: int = None, int_values: bool = False) -> str | int:
start, end = rng.split(":")
start = get_bytes_relative_to_object(start, object_size, part_size)
end = get_bytes_relative_to_object(end, object_size, part_size)
return (start, end) if int_values else f"bytes {start}-{end}/*"
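A few worked examples of how the helpers removed above resolve symbolic ranges (the sizes are illustrative):

get_bytes_relative_to_object("object-100", object_size=1000)
# "object" -> "1000", then int(eval("1000-100")) == 900
get_range_relative_to_object("object-100:object", object_size=1000)
# -> "bytes 900-1000/*"
get_range_relative_to_object("0:part", part_size=512, int_values=True)
# -> (0, 512)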

View file

@ -11,10 +11,10 @@ from frostfs_testlib.storage import get_service_registry
from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml
from frostfs_testlib.storage.constants import ConfigAttributes
from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode
from frostfs_testlib.storage.dataclasses.metrics import Metrics
from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass
from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces
from frostfs_testlib.storage.service_registry import ServiceRegistry
from frostfs_testlib.storage.dataclasses.metrics import Metrics
class ClusterNode:

View file

@ -5,7 +5,6 @@ class ConfigAttributes:
WALLET_CONFIG = "wallet_config"
CONFIG_DIR = "service_config_dir"
CONFIG_PATH = "config_path"
WORKING_DIR = "working_dir"
SHARD_CONFIG_PATH = "shard_config_path"
LOGGER_CONFIG_PATH = "logger_config_path"
LOCAL_WALLET_PATH = "local_wallet_path"
@ -16,7 +15,6 @@ class ConfigAttributes:
ENDPOINT_DATA_0_NS = "endpoint_data0_namespace"
ENDPOINT_INTERNAL = "endpoint_internal0"
ENDPOINT_PROMETHEUS = "endpoint_prometheus"
ENDPOINT_PPROF = "endpoint_pprof"
CONTROL_ENDPOINT = "control_endpoint"
UN_LOCODE = "un_locode"
@ -25,6 +23,4 @@ class PlacementRule:
DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X"
REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X"
REP_1_FOR_2_NODES_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 2 FROM * AS X"
DEFAULT_EC_PLACEMENT_RULE = "EC 3.1"
EC_1_1_FOR_2_NODES_PLACEMENT_RULE = "EC 1.1 IN X CBF 1 SELECT 2 FROM * AS X"

View file

@ -1,7 +1,6 @@
import itertools
import datetime
import logging
import time
from datetime import datetime, timezone
from typing import TypeVar
import frostfs_testlib.resources.optionals as optionals
@ -19,7 +18,7 @@ from frostfs_testlib.steps.node_management import include_node_to_network_map, r
from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode
from frostfs_testlib.storage.controllers.disk_controller import DiskController
from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass
from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeStatus
from frostfs_testlib.storage.dataclasses.storage_object_info import NodeStatus
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing import parallel
from frostfs_testlib.testing.test_control import retry, run_optionally, wait_for_success
@ -40,7 +39,7 @@ class ClusterStateController:
def __init__(self, shell: Shell, cluster: Cluster, healthcheck: Healthcheck) -> None:
self.stopped_nodes: list[ClusterNode] = []
self.detached_disks: dict[str, DiskController] = {}
self.dropped_traffic: set[ClusterNode] = set()
self.dropped_traffic: list[ClusterNode] = []
self.excluded_from_netmap: list[StorageNode] = []
self.stopped_services: set[NodeBase] = set()
self.cluster = cluster
@ -173,15 +172,6 @@ class ClusterStateController:
if service_type == StorageNode:
self.wait_after_storage_startup()
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step("Send sighup to all {service_type} services")
def sighup_services_of_type(self, service_type: type[ServiceClass]):
services = self.cluster.services(service_type)
parallel([service.send_signal_to_service for service in services], signal="SIGHUP")
if service_type == StorageNode:
self.wait_after_storage_startup()
@wait_for_success(600, 60)
def wait_s3gate(self, s3gate: S3Gate):
with reporter.step(f"Wait for {s3gate} reconnection"):
@ -216,27 +206,21 @@ class ClusterStateController:
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step("Stop {service_type} service on {node}")
def stop_service_of_type(self, node: ClusterNode, service_type: ServiceClass, mask: bool = True):
def stop_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass], mask: bool = True):
service = node.service(service_type)
service.stop_service(mask)
self.stopped_services.add(service)
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step("Send sighup to {service_type} service on {node}")
def sighup_service_of_type(self, node: ClusterNode, service_type: ServiceClass):
service = node.service(service_type)
service.send_signal_to_service("SIGHUP")
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step("Start {service_type} service on {node}")
def start_service_of_type(self, node: ClusterNode, service_type: ServiceClass):
def start_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass]):
service = node.service(service_type)
service.start_service()
self.stopped_services.discard(service)
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step("Start all stopped {service_type} services")
def start_stopped_services_of_type(self, service_type: ServiceClass):
def start_stopped_services_of_type(self, service_type: type[ServiceClass]):
stopped_svc = self._get_stopped_by_type(service_type)
if not stopped_svc:
return
@ -326,22 +310,22 @@ class ClusterStateController:
@reporter.step("Drop traffic to {node}, nodes - {block_nodes}")
def drop_traffic(self, node: ClusterNode, wakeup_timeout: int, name_interface: str, block_nodes: list[ClusterNode] = None) -> None:
interfaces_tables = self._parse_interfaces(block_nodes, name_interface)
IpHelper.drop_input_traffic_to_node(node, interfaces_tables)
list_ip = self._parse_interfaces(block_nodes, name_interface)
IpHelper.drop_input_traffic_to_node(node, list_ip)
time.sleep(wakeup_timeout)
self.dropped_traffic.add(node)
self.dropped_traffic.append(node)
@reporter.step("Start traffic to {node}")
def restore_traffic(self, node: ClusterNode) -> None:
IpHelper.restore_input_traffic_to_node(node=node)
self.dropped_traffic.discard(node)
index = self.dropped_traffic.index(node)
self.dropped_traffic.pop(index)
@reporter.step("Restore blocked nodes")
def restore_all_traffic(self):
if not self.dropped_traffic:
return
parallel(self._restore_traffic_to_node, self.dropped_traffic)
self.dropped_traffic.clear()
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step("Hard reboot host {node} via magic SysRq option")
@ -390,23 +374,31 @@ class ClusterStateController:
@reporter.step("Get node time")
def get_node_date(self, node: ClusterNode) -> datetime:
shell = node.host.get_shell()
return datetime.strptime(shell.exec('date +"%Y-%m-%d %H:%M:%S"').stdout.strip(), "%Y-%m-%d %H:%M:%S")
return datetime.datetime.strptime(shell.exec("hwclock -r").stdout.strip(), "%Y-%m-%d %H:%M:%S.%f%z")
@reporter.step("Set node time to {in_date}")
def change_node_date(self, node: ClusterNode, in_date: datetime) -> None:
shell = node.host.get_shell()
in_date_frmt = in_date.strftime("%Y-%m-%d %H:%M:%S")
shell.exec(f"timedatectl set-time '{in_date_frmt}'")
shell.exec(f"date -s @{time.mktime(in_date.timetuple())}")
shell.exec("hwclock --systohc")
node_time = self.get_node_date(node)
with reporter.step(f"Verify difference between {node_time} and {in_date} is less than a minute"):
assert (node_time - in_date).total_seconds() < 60
assert (self.get_node_date(node) - in_date) < datetime.timedelta(minutes=1)
@reporter.step("Restore time")
@reporter.step(f"Restore time")
def restore_node_date(self, node: ClusterNode) -> None:
shell = node.host.get_shell()
now_time = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S")
now_time = datetime.datetime.now(datetime.timezone.utc)
with reporter.step(f"Set {now_time} time"):
shell.exec(f"timedatectl set-time '{now_time}'")
shell.exec(f"date -s @{time.mktime(now_time.timetuple())}")
shell.exec("hwclock --systohc")
@reporter.step("Change the synchronizer status to {status}")
def set_sync_date_all_nodes(self, status: str):
if status == "active":
parallel(self._enable_date_synchronizer, self.cluster.cluster_nodes)
return
parallel(self._disable_date_synchronizer, self.cluster.cluster_nodes)
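The replacement path above sets the clock through epoch seconds instead of timedatectl; a tiny sketch of that conversion (the datetime value is illustrative):

import datetime
import time

in_date = datetime.datetime(2024, 1, 1, 12, 0, 0)
epoch_seconds = time.mktime(in_date.timetuple())  # local-time epoch seconds
# equivalent shell step: date -s @<epoch_seconds>, then hwclock --systohc to persist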
@reporter.step("Set MaintenanceModeAllowed - {status}")
def set_maintenance_mode_allowed(self, status: str, cluster_node: ClusterNode) -> None:
@ -446,11 +438,9 @@ class ClusterStateController:
if not checker_node:
checker_node = cluster_node
netmap = NetmapParser.snapshot_all_nodes(frostfs_cli.netmap.snapshot(checker_node.storage_node.get_rpc_endpoint()).stdout)
netmap = [node for node in netmap if cluster_node.get_interface(Interfaces.MGMT) == node.node]
netmap = [node for node in netmap if cluster_node.host_ip == node.node]
if status == NodeStatus.OFFLINE:
assert (
cluster_node.get_interface(Interfaces.MGMT) not in netmap
), f"{cluster_node.get_interface(Interfaces.MGMT)} not in Offline"
assert cluster_node.host_ip not in netmap, f"{cluster_node.host_ip} not in Offline"
else:
assert netmap[0].node_status == status, f"Node status should be '{status}', but was '{netmap[0].node_status}'"
@ -492,6 +482,16 @@ class ClusterStateController:
frostfs_cli_remote = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config_path)
return frostfs_adm, frostfs_cli, frostfs_cli_remote
def _enable_date_synchronizer(self, cluster_node: ClusterNode):
shell = cluster_node.host.get_shell()
shell.exec("timedatectl set-ntp true")
cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "active", 15)
def _disable_date_synchronizer(self, cluster_node: ClusterNode):
shell = cluster_node.host.get_shell()
shell.exec("timedatectl set-ntp false")
cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "inactive", 15)
def _get_disk_controller(self, node: StorageNode, device: str, mountpoint: str) -> DiskController:
disk_controller_id = DiskController.get_id(node, device)
if disk_controller_id in self.detached_disks.keys():
@ -501,31 +501,17 @@ class ClusterStateController:
return disk_controller
@reporter.step("Restore traffic {node}")
def _restore_traffic_to_node(self, node):
IpHelper.restore_input_traffic_to_node(node)
def _parse_interfaces(self, nodes: list[ClusterNode], name_interface: str) -> list[tuple]:
interfaces_and_tables = set()
def _parse_interfaces(self, nodes: list[ClusterNode], name_interface: str):
interfaces = []
for node in nodes:
shell = node.host.get_shell()
lines = shell.exec(f"ip r l table all | grep '{name_interface}'").stdout.splitlines()
ips = []
tables = []
for line in lines:
if "src" not in line or "table local" in line:
continue
parts = line.split()
ips.append(parts[-1])
if "table" in line:
tables.append(parts[parts.index("table") + 1])
tables.append(None)
[interfaces_and_tables.add((ip, table)) for ip, table in itertools.product(ips, tables)]
return interfaces_and_tables
dict_interfaces = node.host.config.interfaces
for type, ip in dict_interfaces.items():
if name_interface in type:
interfaces.append(ip)
return interfaces
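The newer _parse_interfaces resolves addresses from static host configuration instead of live routing tables; a sketch of that lookup, assuming host.config.interfaces maps interface names to IPs (the sample dict is illustrative):

interfaces = {"data0": "10.78.0.10", "mgmt0": "192.168.0.10"}  # assumed shape of host.config.interfaces
name_interface = "data"
blocked = [ip for iface, ip in interfaces.items() if name_interface in iface]
# -> ["10.78.0.10"]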
@reporter.step("Ping node")
def _ping_host(self, node: ClusterNode):

View file

@ -14,19 +14,14 @@ class ConfigStateManager(StateManager):
self.cluster = self.csc.cluster
@reporter.step("Change configuration for {service_type} on all nodes")
def set_on_all_nodes(self, service_type: type[ServiceClass], values: dict[str, Any], sighup: bool = False):
def set_on_all_nodes(self, service_type: type[ServiceClass], values: dict[str, Any]):
services = self.cluster.services(service_type)
nodes = self.cluster.nodes(services)
self.services_with_changed_config.update([(node, service_type) for node in nodes])
if not sighup:
self.csc.stop_services_of_type(service_type)
self.csc.stop_services_of_type(service_type)
parallel([node.config(service_type).set for node in nodes], values=values)
if not sighup:
self.csc.start_services_of_type(service_type)
else:
self.csc.sighup_services_of_type(service_type)
self.csc.start_services_of_type(service_type)
@reporter.step("Change configuration for {service_type} on {node}")
def set_on_node(self, node: ClusterNode, service_type: type[ServiceClass], values: dict[str, Any]):
@ -37,26 +32,18 @@ class ConfigStateManager(StateManager):
self.csc.start_service_of_type(node, service_type)
@reporter.step("Revert all configuration changes")
def revert_all(self, sighup: bool = False):
def revert_all(self):
if not self.services_with_changed_config:
return
parallel(self._revert_svc, self.services_with_changed_config, sighup)
parallel(self._revert_svc, self.services_with_changed_config)
self.services_with_changed_config.clear()
if not sighup:
self.csc.start_all_stopped_services()
self.csc.start_all_stopped_services()
# TODO: parallel can't have multiple parallel_items :(
@reporter.step("Revert all configuration {node_and_service}")
def _revert_svc(self, node_and_service: tuple[ClusterNode, ServiceClass], sighup: bool = False):
def _revert_svc(self, node_and_service: tuple[ClusterNode, ServiceClass]):
node, service_type = node_and_service
service = node.service(service_type)
if not sighup:
self.csc.stop_service_of_type(node, service_type)
self.csc.stop_service_of_type(node, service_type)
node.config(service_type).revert()
if sighup:
service.send_signal_to_service("SIGHUP")
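A hedged usage sketch of the retained restart-based flow, assuming the manager is constructed around a ClusterStateController as in the surrounding class (the config key is illustrative):

manager = ConfigStateManager(csc)  # assumed constructor taking the controller
manager.set_on_all_nodes(StorageNode, {"logger.level": "debug"})  # stop, reconfigure, start
# ... run checks ...
manager.revert_all()  # revert configs and start any services still stopped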

View file

@ -13,7 +13,6 @@ FROSTFS_CONTRACT_CACHE_TIMEOUT = 30
class ObjectOperations(HumanReadableEnum):
PUT = "object.put"
PATCH = "object.patch"
GET = "object.get"
HEAD = "object.head"
GET_RANGE = "object.range"
@ -27,18 +26,6 @@ class ObjectOperations(HumanReadableEnum):
return [op for op in ObjectOperations if op != ObjectOperations.WILDCARD_ALL]
class ContainerOperations(HumanReadableEnum):
PUT = "container.put"
GET = "container.get"
LIST = "container.list"
DELETE = "container.delete"
WILDCARD_ALL = "container.*"
@staticmethod
def get_all():
return [op for op in ObjectOperations if op != ObjectOperations.WILDCARD_ALL]
@dataclass
class Operations:
GET_CONTAINER = "GetContainer"
@ -52,7 +39,6 @@ class Operations:
SEARCH_OBJECT = "SearchObject"
HEAD_OBJECT = "HeadObject"
PUT_OBJECT = "PutObject"
PATCH_OBJECT = "PatchObject"
class Verb(HumanReadableEnum):
@ -138,7 +124,7 @@ class Rule:
if not operations:
self.operations = []
elif isinstance(operations, (ObjectOperations, ContainerOperations)):
elif isinstance(operations, ObjectOperations):
self.operations = [operations]
else:
self.operations = operations

View file

@ -65,10 +65,6 @@ class NodeBase(HumanReadableABC):
with reporter.step(f"Start {self.name} service on {self.host.config.address}"):
self.host.start_service(self.name)
def send_signal_to_service(self, signal: str):
with reporter.step(f"Send -{signal} signal to {self.name} service on {self.host.config.address}"):
self.host.send_signal_to_service(self.name, signal)
@abstractmethod
def service_healthcheck(self) -> bool:
"""Service healthcheck."""
@ -82,9 +78,6 @@ class NodeBase(HumanReadableABC):
def get_metrics_endpoint(self) -> str:
return self._get_attribute(ConfigAttributes.ENDPOINT_PROMETHEUS)
def get_pprof_endpoint(self) -> str:
return self._get_attribute(ConfigAttributes.ENDPOINT_PPROF)
def stop_service(self, mask: bool = True):
if mask:
with reporter.step(f"Mask {self.name} service on {self.host.config.address}"):
@ -147,13 +140,6 @@ class NodeBase(HumanReadableABC):
else None
)
def get_working_dir_path(self) -> Optional[str]:
"""
Returns working directory path located on remote host
"""
config_attributes = self.host.get_service_config(self.name)
return self._get_attribute(ConfigAttributes.WORKING_DIR) if ConfigAttributes.WORKING_DIR in config_attributes.attributes else None
@property
def config_dir(self) -> str:
return self._get_attribute(ConfigAttributes.CONFIG_DIR)
@ -199,7 +185,9 @@ class NodeBase(HumanReadableABC):
if attribute_name not in config.attributes:
if default_attribute_name is None:
raise RuntimeError(f"Service {self.name} has no {attribute_name} in config and fallback attribute isn't set either")
raise RuntimeError(
f"Service {self.name} has no {attribute_name} in config and fallback attribute isn't set either"
)
return config.attributes[default_attribute_name]
@ -209,7 +197,9 @@ class NodeBase(HumanReadableABC):
return self.host.get_service_config(self.name)
def get_service_uptime(self, service: str) -> datetime:
result = self.host.get_shell().exec(f"systemctl show {service} --property ActiveEnterTimestamp | cut -d '=' -f 2")
result = self.host.get_shell().exec(
f"systemctl show {service} --property ActiveEnterTimestamp | cut -d '=' -f 2"
)
start_time = parser.parse(result.stdout.strip())
current_time = datetime.now(tz=timezone.utc)
active_time = current_time - start_time

View file

@ -1,9 +1,6 @@
import re
from dataclasses import dataclass
from typing import Optional
from pydantic import BaseModel, Field, field_validator
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.readable import HumanReadableEnum
@ -78,37 +75,8 @@ class NodeNetInfo:
withdrawal_fee: str = None
homomorphic_hashing_disabled: str = None
maintenance_mode_allowed: str = None
class Attributes(BaseModel):
cluster_name: str = Field(alias="ClusterName")
continent: str = Field(alias="Continent")
country: str = Field(alias="Country")
country_code: str = Field(alias="CountryCode")
external_addr: list[str] = Field(alias="ExternalAddr")
location: str = Field(alias="Location")
node: str = Field(alias="Node")
subdiv: str = Field(alias="SubDiv")
subdiv_code: str = Field(alias="SubDivCode")
un_locode: str = Field(alias="UN-LOCODE")
role: str = Field(alias="role")
@field_validator("external_addr", mode="before")
@classmethod
def convert_external_addr(cls, value: str) -> list[str]:
return [f"{ip}:{port}" for ip, port in re.findall(r"/ip4/([\d\.]+)/(?:tcp|tls)/(\d+)", value)]
class NodeInfo(BaseModel):
public_key: str = Field(alias="publicKey")
addresses: list[str] = Field(alias="addresses")
state: str = Field(alias="state")
attributes: Attributes = Field(alias="attributes")
@field_validator("addresses", mode="before")
@classmethod
def convert_external_addr(cls, value: str) -> list[str]:
return [f"{ip}:{port}" for ip, port in re.findall(r"/ip4/([\d\.]+)/(?:tcp|tls)/(\d+)", ",".join(value))]
eigen_trust_alpha: str = None
eigen_trust_iterations: str = None
@dataclass
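The removed validators normalize multiaddr strings into host:port pairs; the same conversion as a standalone snippet (the sample address is illustrative):

import re

value = "/ip4/10.78.69.115/tcp/8080,/ip4/10.78.69.115/tls/8082"
pairs = [f"{ip}:{port}" for ip, port in re.findall(r"/ip4/([\d\.]+)/(?:tcp|tls)/(\d+)", value)]
# -> ["10.78.69.115:8080", "10.78.69.115:8082"]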

View file

@ -1,14 +1,14 @@
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.storage.grpc_operations import implementations, interfaces, interfaces_wrapper
from frostfs_testlib.storage.grpc_operations import interfaces
from frostfs_testlib.storage.grpc_operations.implementations import container, object
class CliClientWrapper(interfaces_wrapper.GrpcClientWrapper):
class CliClientWrapper(interfaces.GrpcClientWrapper):
def __init__(self, cli: FrostfsCli) -> None:
self.cli = cli
self.object: interfaces.ObjectInterface = implementations.ObjectOperations(self.cli)
self.container: interfaces.ContainerInterface = implementations.ContainerOperations(self.cli)
self.netmap: interfaces.NetmapInterface = implementations.NetmapOperations(self.cli)
self.object: interfaces.ObjectInterface = object.ObjectOperations(self.cli)
self.container: interfaces.ContainerInterface = container.ContainerOperations(self.cli)
class RpcClientWrapper(interfaces_wrapper.GrpcClientWrapper):
class RpcClientWrapper(interfaces.GrpcClientWrapper):
pass # The next series

View file

@ -1,4 +0,0 @@
from .chunks import ChunksOperations
from .container import ContainerOperations
from .netmap import NetmapOperations
from .object import ObjectOperations

View file

@ -6,7 +6,7 @@ from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.controllers.shards_watcher import ShardsWatcher
from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, Interfaces, NodeNetmapInfo
from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo
from frostfs_testlib.storage.grpc_operations import interfaces
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils.cli_utils import parse_netmap_output
@ -30,7 +30,7 @@ class ChunksOperations(interfaces.ChunksInterface):
result = []
for node_info in netmap:
for cluster_node in cluster.cluster_nodes:
if node_info.node == cluster_node.get_interface(Interfaces.MGMT):
if node_info.node == cluster_node.host_ip:
result.append(cluster_node)
return result
@ -40,7 +40,7 @@ class ChunksOperations(interfaces.ChunksInterface):
for node_info in netmap:
if node_info.node_id in chunk.confirmed_nodes:
for cluster_node in cluster.cluster_nodes:
if cluster_node.get_interface(Interfaces.MGMT) == node_info.node:
if cluster_node.host_ip == node_info.node:
return (cluster_node, node_info)
@wait_for_success(300, 5, fail_testcase=None)
@ -161,5 +161,5 @@ class ChunksOperations(interfaces.ChunksInterface):
def _parse_object_nodes(self, object_nodes: str) -> list[Chunk]:
parse_result = json.loads(object_nodes)
if parse_result.get("errors"):
raise RuntimeError(", ".join(parse_result["errors"]))
raise parse_result["errors"]
return [Chunk(**chunk) for chunk in parse_result["data_objects"]]

View file

@ -5,9 +5,9 @@ from typing import List, Optional, Union
from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.clients.s3 import BucketContainerResolver
from frostfs_testlib.plugins import load_plugin
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
from frostfs_testlib.s3.interfaces import BucketContainerResolver
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.grpc_operations import interfaces
from frostfs_testlib.utils import json_utils
@ -181,17 +181,20 @@ class ContainerOperations(interfaces.ContainerInterface):
force: bool = False,
trace: bool = False,
):
return self.cli.container.delete(
rpc_endpoint=endpoint,
cid=cid,
address=address,
await_mode=await_mode,
session=session,
ttl=ttl,
xhdr=xhdr,
force=force,
trace=trace,
).stdout
try:
return self.cli.container.delete(
rpc_endpoint=endpoint,
cid=cid,
address=address,
await_mode=await_mode,
session=session,
ttl=ttl,
xhdr=xhdr,
force=force,
trace=trace,
).stdout
except RuntimeError as e:
print(f"Error request:\n{e}")
@reporter.step("Get container")
def get(

View file

@ -1,171 +0,0 @@
import json as module_json
from typing import List, Optional
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.cli.netmap_parser import NetmapParser
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo
from .. import interfaces
class NetmapOperations(interfaces.NetmapInterface):
def __init__(self, cli: FrostfsCli) -> None:
self.cli = cli
def epoch(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
trace: Optional[bool] = True,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> int:
"""
Get current epoch number.
"""
output = (
self.cli.netmap.epoch(
rpc_endpoint=rpc_endpoint,
wallet=wallet,
address=address,
generate_key=generate_key,
ttl=ttl,
trace=trace,
xhdr=xhdr,
timeout=timeout,
)
.stdout.split("Trace ID")[0]
.strip()
)
return int(output)
def netinfo(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
trace: Optional[bool] = True,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> NodeNetInfo:
"""
Get network info from the target node.
"""
output = (
self.cli.netmap.netinfo(
rpc_endpoint=rpc_endpoint,
wallet=wallet,
address=address,
generate_key=generate_key,
ttl=ttl,
trace=trace,
xhdr=xhdr,
timeout=timeout,
)
.stdout.split("Trace ID")[0]
.strip()
)
return NetmapParser.netinfo(output)
def nodeinfo(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
json: bool = True,
ttl: Optional[int] = None,
trace: Optional[bool] = True,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> NodeNetmapInfo:
"""
Get target node info.
"""
output = (
self.cli.netmap.nodeinfo(
rpc_endpoint=rpc_endpoint,
wallet=wallet,
address=address,
generate_key=generate_key,
json=json,
ttl=ttl,
trace=trace,
xhdr=xhdr,
timeout=timeout,
)
.stdout.split("Trace ID")[0]
.strip()
)
return NetmapParser.node_info(module_json.loads(output))
def snapshot(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
trace: Optional[bool] = True,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> List[NodeNetmapInfo]:
"""
Get netmap snapshot.
"""
output = (
self.cli.netmap.snapshot(
rpc_endpoint=rpc_endpoint,
wallet=wallet,
address=address,
generate_key=generate_key,
ttl=ttl,
trace=trace,
xhdr=xhdr,
timeout=timeout,
)
.stdout.split("Trace ID")[0]
.strip()
)
return NetmapParser.snapshot_all_nodes(output)
def snapshot_one_node(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
trace: Optional[bool] = True,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> List[NodeNetmapInfo]:
"""
Get netmap info for a single target node.
"""
output = (
self.cli.netmap.snapshot(
rpc_endpoint=rpc_endpoint,
wallet=wallet,
address=address,
generate_key=generate_key,
ttl=ttl,
trace=trace,
xhdr=xhdr,
timeout=timeout,
)
.stdout.split("Trace ID")[0]
.strip()
)
return NetmapParser.snapshot_one_node(output, rpc_endpoint)
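Every wrapper in this removed implementation strips the trace suffix from CLI output the same way; a tiny worked example of the pattern (the sample output is an assumption about the CLI format):

stdout = "42\nTrace ID: 8b2c0d1f\n"
epoch = int(stdout.split("Trace ID")[0].strip())  # -> 42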

View file

@ -11,7 +11,6 @@ from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
from frostfs_testlib.resources.common import ASSETS_DIR
from frostfs_testlib.shell.interfaces import CommandResult
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces
from frostfs_testlib.storage.grpc_operations import interfaces
from frostfs_testlib.storage.grpc_operations.implementations.chunks import ChunksOperations
from frostfs_testlib.testing.test_control import wait_for_success
@ -207,11 +206,6 @@ class ObjectOperations(interfaces.ObjectInterface):
hash_type=hash_type,
timeout=timeout,
)
if range:
# Cut off the range and return only hash
return result.stdout.split(":")[1].strip()
return result.stdout
@reporter.step("Head object")
@ -413,57 +407,6 @@ class ObjectOperations(interfaces.ObjectInterface):
oid = id_str.split(":")[1]
return oid.strip()
@reporter.step("Patch object")
def patch(
self,
cid: str,
oid: str,
endpoint: str,
ranges: list[str] = None,
payloads: list[str] = None,
new_attrs: Optional[str] = None,
replace_attrs: bool = False,
bearer: str = "",
xhdr: Optional[dict] = None,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
trace: bool = False,
) -> str:
"""
PATCH an object.
Args:
cid: ID of Container where we get the Object from
oid: Object ID
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
ranges: An array of ranges in which to replace data in the format [offset1:length1, offset2:length2]
payloads: An array of file paths to be applied in each range
new_attrs: Attributes to be changed in the format "key1=value1,key2=value2"
replace_attrs: Replace all attributes completely with new ones specified in new_attrs
bearer: Path to Bearer Token file, appends to `--bearer` key
xhdr: Request X-Headers in form of Key=Value
session: Path to a JSON-encoded container session token
timeout: Timeout for the operation
trace: Generate trace ID and print it
Returns:
(str): ID of patched Object
"""
result = self.cli.object.patch(
rpc_endpoint=endpoint,
cid=cid,
oid=oid,
range=ranges,
payload=payloads,
new_attrs=new_attrs,
replace_attrs=replace_attrs,
bearer=bearer,
xhdr=xhdr,
session=session,
timeout=timeout,
trace=trace,
)
return result.stdout.split(":")[1].strip()
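A hedged usage sketch of the patch call removed above (IDs, endpoint, and paths are placeholders):

new_oid = grpc_client.object.patch(   # assumed client wrapper instance
    cid="<container-id>",
    oid="<object-id>",
    endpoint=endpoint,
    ranges=["0:64", "128:32"],        # offset:length pairs, per the docstring
    payloads=["/tmp/patch1.bin", "/tmp/patch2.bin"],  # one payload file per range
    new_attrs="key1=value1",
    replace_attrs=False,
)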
@reporter.step("Put object to random node")
def put_to_random_node(
self,
@ -675,34 +618,7 @@ class ObjectOperations(interfaces.ObjectInterface):
cluster_node
for netmap_node in netmap_nodes
for cluster_node in cluster.cluster_nodes
if netmap_node.node == cluster_node.get_interface(Interfaces.MGMT)
if netmap_node.node == cluster_node.host_ip
]
return object_nodes
@reporter.step("Search parts of object")
def parts(
self,
cid: str,
oid: str,
alive_node: ClusterNode,
bearer: str = "",
xhdr: Optional[dict] = None,
is_direct: bool = False,
verify_presence_all: bool = False,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> list[str]:
endpoint = alive_node.storage_node.get_rpc_endpoint()
response = self.cli.object.nodes(
rpc_endpoint=endpoint,
cid=cid,
oid=oid,
bearer=bearer,
ttl=1 if is_direct else None,
json=True,
xhdr=xhdr,
timeout=timeout,
verify_presence_all=verify_presence_all,
)
response_json = json.loads(response.stdout)
return [data_object["object_id"] for data_object in response_json["data_objects"]]

View file

@ -0,0 +1,392 @@
from abc import ABC, abstractmethod
from typing import Any, List, Optional
from frostfs_testlib.shell.interfaces import CommandResult
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.constants import PlacementRule
from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo
from frostfs_testlib.utils import file_utils
class ChunksInterface(ABC):
@abstractmethod
def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]:
pass
@abstractmethod
def get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]:
pass
@abstractmethod
def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str:
pass
@abstractmethod
def get_all(
self,
rpc_endpoint: str,
cid: str,
oid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
trace: bool = False,
root: bool = False,
verify_presence_all: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> list[Chunk]:
pass
@abstractmethod
def get_parity(
self,
rpc_endpoint: str,
cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
oid: Optional[str] = None,
trace: bool = False,
root: bool = False,
verify_presence_all: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> Chunk:
pass
@abstractmethod
def get_first_data(
self,
rpc_endpoint: str,
cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
oid: Optional[str] = None,
trace: bool = False,
root: bool = False,
verify_presence_all: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> Chunk:
pass
class ObjectInterface(ABC):
def __init__(self) -> None:
self.chunks: ChunksInterface
@abstractmethod
def delete(
self,
cid: str,
oid: str,
endpoint: str,
bearer: str = "",
xhdr: Optional[dict] = None,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def get(
self,
cid: str,
oid: str,
endpoint: str,
bearer: Optional[str] = None,
write_object: Optional[str] = None,
xhdr: Optional[dict] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> file_utils.TestFile:
pass
@abstractmethod
def get_from_random_node(
self,
cid: str,
oid: str,
cluster: Cluster,
bearer: Optional[str] = None,
write_object: Optional[str] = None,
xhdr: Optional[dict] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def hash(
self,
endpoint: str,
cid: str,
oid: str,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
range: Optional[str] = None,
salt: Optional[str] = None,
ttl: Optional[int] = None,
session: Optional[str] = None,
hash_type: Optional[str] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def head(
self,
cid: str,
oid: str,
endpoint: str,
bearer: str = "",
xhdr: Optional[dict] = None,
json_output: bool = True,
is_raw: bool = False,
is_direct: bool = False,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult | Any:
pass
@abstractmethod
def lock(
self,
cid: str,
oid: str,
endpoint: str,
lifetime: Optional[int] = None,
expire_at: Optional[int] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def put(
self,
path: str,
cid: str,
endpoint: str,
bearer: Optional[str] = None,
copies_number: Optional[int] = None,
attributes: Optional[dict] = None,
xhdr: Optional[dict] = None,
expire_at: Optional[int] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def put_to_random_node(
self,
path: str,
cid: str,
cluster: Cluster,
bearer: Optional[str] = None,
copies_number: Optional[int] = None,
attributes: Optional[dict] = None,
xhdr: Optional[dict] = None,
expire_at: Optional[int] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def range(
self,
cid: str,
oid: str,
range_cut: str,
endpoint: str,
bearer: str = "",
xhdr: Optional[dict] = None,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> tuple[file_utils.TestFile, bytes]:
pass
@abstractmethod
def search(
self,
cid: str,
endpoint: str,
bearer: str = "",
oid: Optional[str] = None,
filters: Optional[dict] = None,
expected_objects_list: Optional[list] = None,
xhdr: Optional[dict] = None,
session: Optional[str] = None,
phy: bool = False,
root: bool = False,
timeout: Optional[str] = None,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
ttl: Optional[int] = None,
) -> List:
pass
@abstractmethod
def nodes(
self,
cluster: Cluster,
cid: str,
oid: str,
alive_node: ClusterNode,
bearer: str = "",
xhdr: Optional[dict] = None,
is_direct: bool = False,
verify_presence_all: bool = False,
timeout: Optional[str] = None,
) -> List[ClusterNode]:
pass
class ContainerInterface(ABC):
@abstractmethod
def create(
self,
endpoint: str,
nns_zone: Optional[str] = None,
nns_name: Optional[str] = None,
address: Optional[str] = None,
attributes: Optional[dict] = None,
basic_acl: Optional[str] = None,
await_mode: bool = False,
disable_timestamp: bool = False,
force: bool = False,
trace: bool = False,
name: Optional[str] = None,
nonce: Optional[str] = None,
policy: Optional[str] = None,
session: Optional[str] = None,
subnet: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> str:
"""
Create a new container and register it in FrostFS.
It will be stored in the sidechain when the Inner Ring accepts it.
"""
raise NotImplementedError("No implemethed method create")
@abstractmethod
def delete(
self,
endpoint: str,
cid: str,
address: Optional[str] = None,
await_mode: bool = False,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
force: bool = False,
trace: bool = False,
) -> List[str]:
"""
Delete an existing container.
Only the owner of the container has permission to remove the container.
"""
raise NotImplementedError("No implemethed method delete")
@abstractmethod
def get(
self,
endpoint: str,
cid: str,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
await_mode: bool = False,
to: Optional[str] = None,
json_mode: bool = True,
trace: bool = False,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> List[str]:
"""Get container field info."""
raise NotImplementedError("No implemethed method get")
@abstractmethod
def get_eacl(
self,
endpoint: str,
cid: str,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
await_mode: bool = False,
json_mode: bool = True,
trace: bool = False,
to: Optional[str] = None,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> List[str]:
"""Get extended ACL table of container."""
raise NotImplementedError("No implemethed method get-eacl")
@abstractmethod
def list(
self,
endpoint: str,
name: Optional[str] = None,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
trace: bool = False,
owner: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
**params,
) -> List[str]:
"""List all created containers."""
raise NotImplementedError("No implemethed method list")
@abstractmethod
def nodes(
self,
endpoint: str,
cid: str,
cluster: Cluster,
address: Optional[str] = None,
ttl: Optional[int] = None,
from_file: Optional[str] = None,
trace: bool = False,
short: Optional[bool] = True,
xhdr: Optional[dict] = None,
generate_key: Optional[bool] = None,
timeout: Optional[str] = None,
) -> List[ClusterNode]:
"""Show the nodes participating in the container in the current epoch."""
raise NotImplementedError("No implemethed method nodes")
class GrpcClientWrapper(ABC):
def __init__(self) -> None:
self.object: ObjectInterface
self.container: ContainerInterface

View file

@ -1,4 +0,0 @@
from .chunks import ChunksInterface
from .container import ContainerInterface
from .netmap import NetmapInterface
from .object import ObjectInterface

View file

@ -1,79 +0,0 @@
from abc import ABC, abstractmethod
from typing import Optional
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo
class ChunksInterface(ABC):
@abstractmethod
def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]:
pass
@abstractmethod
def get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]:
pass
@abstractmethod
def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str:
pass
@abstractmethod
def get_all(
self,
rpc_endpoint: str,
cid: str,
oid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
trace: bool = False,
root: bool = False,
verify_presence_all: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> list[Chunk]:
pass
@abstractmethod
def get_parity(
self,
rpc_endpoint: str,
cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
oid: Optional[str] = None,
trace: bool = False,
root: bool = False,
verify_presence_all: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> Chunk:
pass
@abstractmethod
def get_first_data(
self,
rpc_endpoint: str,
cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
oid: Optional[str] = None,
trace: bool = False,
root: bool = False,
verify_presence_all: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> Chunk:
pass

View file

@ -1,125 +0,0 @@
from abc import ABC, abstractmethod
from typing import List, Optional
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
class ContainerInterface(ABC):
@abstractmethod
def create(
self,
endpoint: str,
nns_zone: Optional[str] = None,
nns_name: Optional[str] = None,
address: Optional[str] = None,
attributes: Optional[dict] = None,
basic_acl: Optional[str] = None,
await_mode: bool = False,
disable_timestamp: bool = False,
force: bool = False,
trace: bool = False,
name: Optional[str] = None,
nonce: Optional[str] = None,
policy: Optional[str] = None,
session: Optional[str] = None,
subnet: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> str:
"""
Create a new container and register it in FrostFS.
It will be stored in the sidechain when the Inner Ring accepts it.
"""
raise NotImplementedError("No implemethed method create")
@abstractmethod
def delete(
self,
endpoint: str,
cid: str,
address: Optional[str] = None,
await_mode: bool = False,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
force: bool = False,
trace: bool = False,
) -> List[str]:
"""
Delete an existing container.
Only the owner of the container has permission to remove the container.
"""
raise NotImplementedError("No implemethed method delete")
@abstractmethod
def get(
self,
endpoint: str,
cid: str,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
await_mode: bool = False,
to: Optional[str] = None,
json_mode: bool = True,
trace: bool = False,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> List[str]:
"""Get container field info."""
raise NotImplementedError("No implemethed method get")
@abstractmethod
def get_eacl(
self,
endpoint: str,
cid: str,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
await_mode: bool = False,
json_mode: bool = True,
trace: bool = False,
to: Optional[str] = None,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> List[str]:
"""Get extended ACL table of container."""
raise NotImplementedError("No implemethed method get-eacl")
@abstractmethod
def list(
self,
endpoint: str,
name: Optional[str] = None,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
trace: bool = False,
owner: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
**params,
) -> List[str]:
"""List all created containers."""
raise NotImplementedError("No implemethed method list")
@abstractmethod
def nodes(
self,
endpoint: str,
cid: str,
cluster: Cluster,
address: Optional[str] = None,
ttl: Optional[int] = None,
from_file: Optional[str] = None,
trace: bool = False,
short: Optional[bool] = True,
xhdr: Optional[dict] = None,
generate_key: Optional[bool] = None,
timeout: Optional[str] = None,
) -> List[ClusterNode]:
"""Show the nodes participating in the container in the current epoch."""
raise NotImplementedError("No implemethed method nodes")

View file

@ -1,89 +0,0 @@
from abc import ABC, abstractmethod
from typing import List, Optional
from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo
class NetmapInterface(ABC):
@abstractmethod
def epoch(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
trace: Optional[bool] = False,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> int:
"""
Get current epoch number.
"""
raise NotImplementedError("No implemethed method epoch")
@abstractmethod
def netinfo(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> NodeNetInfo:
"""
Get network info from the target node.
"""
raise NotImplementedError("No implemethed method netinfo")
@abstractmethod
def nodeinfo(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> NodeNetmapInfo:
"""
Get target node info.
"""
raise NotImplementedError("No implemethed method nodeinfo")
@abstractmethod
def snapshot(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> List[NodeNetmapInfo]:
"""
Get netmap snapshot.
"""
raise NotImplementedError("No implemethed method snapshot")
@abstractmethod
def snapshot_one_node(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> List[NodeNetmapInfo]:
"""
Get netmap info for a single target node.
"""
raise NotImplementedError("No implemethed method snapshot")

View file

@ -1,223 +0,0 @@
from abc import ABC, abstractmethod
from typing import Any, List, Optional
from frostfs_testlib.shell.interfaces import CommandResult
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.utils import file_utils
from .chunks import ChunksInterface
class ObjectInterface(ABC):
def __init__(self) -> None:
self.chunks: ChunksInterface
@abstractmethod
def delete(
self,
cid: str,
oid: str,
endpoint: str,
bearer: str = "",
xhdr: Optional[dict] = None,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def get(
self,
cid: str,
oid: str,
endpoint: str,
bearer: Optional[str] = None,
write_object: Optional[str] = None,
xhdr: Optional[dict] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> file_utils.TestFile:
pass
@abstractmethod
def get_from_random_node(
self,
cid: str,
oid: str,
cluster: Cluster,
bearer: Optional[str] = None,
write_object: Optional[str] = None,
xhdr: Optional[dict] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def hash(
self,
endpoint: str,
cid: str,
oid: str,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
range: Optional[str] = None,
salt: Optional[str] = None,
ttl: Optional[int] = None,
session: Optional[str] = None,
hash_type: Optional[str] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def head(
self,
cid: str,
oid: str,
endpoint: str,
bearer: str = "",
xhdr: Optional[dict] = None,
json_output: bool = True,
is_raw: bool = False,
is_direct: bool = False,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult | Any:
pass
@abstractmethod
def lock(
self,
cid: str,
oid: str,
endpoint: str,
lifetime: Optional[int] = None,
expire_at: Optional[int] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
session: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def put(
self,
path: str,
cid: str,
endpoint: str,
bearer: Optional[str] = None,
copies_number: Optional[int] = None,
attributes: Optional[dict] = None,
xhdr: Optional[dict] = None,
expire_at: Optional[int] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def patch(
self,
cid: str,
oid: str,
endpoint: str,
ranges: Optional[list[str]] = None,
payloads: Optional[list[str]] = None,
new_attrs: Optional[str] = None,
replace_attrs: bool = False,
bearer: Optional[str] = None,
xhdr: Optional[dict] = None,
session: Optional[str] = None,
timeout: Optional[str] = None,
trace: bool = False,
) -> str:
pass
@abstractmethod
def put_to_random_node(
self,
path: str,
cid: str,
cluster: Cluster,
bearer: Optional[str] = None,
copies_number: Optional[int] = None,
attributes: Optional[dict] = None,
xhdr: Optional[dict] = None,
expire_at: Optional[int] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> str:
pass
@abstractmethod
def range(
self,
cid: str,
oid: str,
range_cut: str,
endpoint: str,
bearer: str = "",
xhdr: Optional[dict] = None,
session: Optional[str] = None,
timeout: Optional[str] = None,
) -> tuple[file_utils.TestFile, bytes]:
pass
@abstractmethod
def search(
self,
cid: str,
endpoint: str,
bearer: str = "",
oid: Optional[str] = None,
filters: Optional[dict] = None,
expected_objects_list: Optional[list] = None,
xhdr: Optional[dict] = None,
session: Optional[str] = None,
phy: bool = False,
root: bool = False,
timeout: Optional[str] = None,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
ttl: Optional[int] = None,
) -> List:
pass
@abstractmethod
def nodes(
self,
cluster: Cluster,
cid: str,
oid: str,
alive_node: ClusterNode,
bearer: str = "",
xhdr: Optional[dict] = None,
is_direct: bool = False,
verify_presence_all: bool = False,
timeout: Optional[str] = None,
) -> List[ClusterNode]:
pass
@abstractmethod
def parts(
self,
cid: str,
oid: str,
alive_node: ClusterNode,
bearer: str = "",
xhdr: Optional[dict] = None,
is_direct: bool = False,
verify_presence_all: bool = False,
timeout: Optional[str] = None,
) -> List[str]:
pass

View file

@ -1,10 +0,0 @@
from abc import ABC
from . import interfaces
class GrpcClientWrapper(ABC):
def __init__(self) -> None:
self.object: interfaces.ObjectInterface
self.container: interfaces.ContainerInterface
self.netmap: interfaces.NetmapInterface

View file

@ -1,5 +1,4 @@
import itertools
import traceback
from concurrent.futures import Future, ThreadPoolExecutor
from contextlib import contextmanager
from typing import Callable, Collection, Optional, Union
@ -56,42 +55,7 @@ def parallel(
# Check for exceptions
exceptions = [future.exception() for future in futures if future.exception()]
if exceptions:
# Prettify exception in parallel with all underlying stack traces
# For example, we had 3 RuntimeError exceptions during parallel. This format will give us something like
#
# RuntimeError: The following exceptions occurred during parallel run:
# 1) Exception one text
# 2) Exception two text
# 3) Exception three text
# TRACES:
# ==== 1 ====
# Traceback (most recent call last):
# File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run
# result = self.fn(*self.args, **self.kwargs)
# File "frostfs_testcases/pytest_tests/testsuites/object/test_object_tombstone.py", line 17, in check_service
# raise RuntimeError(f"Exception one text")
# RuntimeError: Exception one text
#
# ==== 2 ====
# Traceback (most recent call last):
# File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run
# result = self.fn(*self.args, **self.kwargs)
# File "frostfs_testcases/pytest_tests/testsuites/object/test_object_tombstone.py", line 17, in check_service
# raise RuntimeError(f"Exception two text")
# RuntimeError: Exception two text
#
# ==== 3 ====
# Traceback (most recent call last):
# File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run
# result = self.fn(*self.args, **self.kwargs)
# File "frostfs_testcases/pytest_tests/testsuites/object/test_object_tombstone.py", line 17, in check_service
# raise RuntimeError(f"Exception three text")
# RuntimeError: Exception three text
short_summary = "\n".join([f"{i}) {str(e)}" for i, e in enumerate(exceptions, 1)])
stack_traces = "\n".join(
[f"==== {i} ====\n{''.join(traceback.TracebackException.from_exception(e).format())}" for i, e in enumerate(exceptions, 1)]
)
message = f"{short_summary}\nTRACES:\n{stack_traces}"
message = "\n".join([str(e) for e in exceptions])
raise RuntimeError(f"The following exceptions occured during parallel run:\n{message}")
return futures
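The removed prettifier leans on the standard library's traceback module; a minimal self-contained sketch of the same technique:

import traceback

def summarize(exceptions: list[Exception]) -> str:
    short = "\n".join(f"{i}) {e}" for i, e in enumerate(exceptions, 1))
    traces = "\n".join(
        f"==== {i} ====\n{''.join(traceback.TracebackException.from_exception(e).format())}"
        for i, e in enumerate(exceptions, 1)
    )
    return f"{short}\nTRACES:\n{traces}"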

View file

@ -1,16 +1,13 @@
import inspect
import logging
import os
from functools import wraps
from time import sleep, time
from typing import Any
import yaml
from _pytest.outcomes import Failed
from pytest import fail
from frostfs_testlib import reporter
from frostfs_testlib.resources.common import ASSETS_DIR
from frostfs_testlib.utils.func_utils import format_by_args
logger = logging.getLogger("NeoLogger")
@ -131,42 +128,6 @@ def run_optionally(enabled: bool, mock_value: Any = True):
return deco
def cached_fixture(enabled: bool):
"""
Decorator to cache fixtures.
MUST be placed after @pytest.fixture and before @allure decorators.
Args:
enabled: if true, decorated func will be cached.
"""
def deco(func):
@wraps(func)
def func_impl(*a, **kw):
# TODO: *a and *kw should be parsed to some kind of hashsum and used in filename to prevent cache load from different parameters
cache_file = os.path.join(ASSETS_DIR, f"fixture_cache_{func.__name__}.yml")
if enabled and os.path.exists(cache_file):
with open(cache_file, "r") as cache_input:
return yaml.load(cache_input, Loader=yaml.Loader)
result = func(*a, **kw)
if enabled:
with open(cache_file, "w") as cache_output:
yaml.dump(result, cache_output)
return result
# TODO: cache yielding fixtures
@wraps(func)
def gen_impl(*a, **kw):
raise NotImplementedError("Not implemented for yielding fixtures")
return gen_impl if inspect.isgeneratorfunction(func) else func_impl
return deco
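
Since cached_fixture is being removed here, a hedged usage sketch of what it supported may help reviewers: the decorator sits below @pytest.fixture in the stack, and the returned value must be YAML-serializable because it round-trips through yaml.dump/yaml.load (yielding fixtures raise NotImplementedError). The fixture name and setup call below are hypothetical:

import pytest

@pytest.fixture(scope="session")
@cached_fixture(enabled=True)
def expensive_credentials():
    # Placeholder for a slow, deterministic setup step; the result is cached
    # to ASSETS_DIR/fixture_cache_expensive_credentials.yml on first run.
    return create_credentials()  # hypothetical helper, not part of the library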
def wait_for_success(
max_wait_time: int = 60,
interval: int = 1,

View file

@ -80,9 +80,6 @@ def log_command_execution(cmd: str, output: Union[str, dict], params: Optional[d
if not params:
params = {}
if params.get("Body") and len(params.get("Body")) > 1000:
params["Body"] = "<large text data>"
output_params = params
try:
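
The removed guard kept request logs readable by masking oversized S3 bodies. A minimal standalone sketch of the same idea (threshold and placeholder text copied from the lines above; the function name is ours, not the library's):

def mask_large_body(params: dict, limit: int = 1000) -> dict:
    # Replace an oversized Body value with a placeholder before logging,
    # without mutating the caller's dict.
    if params.get("Body") and len(params["Body"]) > limit:
        params = {**params, "Body": "<large text data>"}
    return params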

View file

@ -45,7 +45,7 @@ def ensure_directory_opener(path, flags):
# TODO: Do not add {size} to the title yet, since it produces dynamic info in top-level steps.
# Use the object_size dt as an argument in the future.
@reporter.step("Generate file")
def generate_file(size: int, file_name: Optional[str] = None) -> TestFile:
def generate_file(size: int) -> TestFile:
"""Generates a binary file with the specified size in bytes.
Args:
@ -54,11 +54,7 @@ def generate_file(size: int, file_name: Optional[str] = None) -> TestFile:
Returns:
The path to the generated file.
"""
if file_name is None:
file_name = string_utils.unique_name("object-")
test_file = TestFile(os.path.join(ASSETS_DIR, file_name))
test_file = TestFile(os.path.join(ASSETS_DIR, string_utils.unique_name("object-")))
with open(test_file, "wb", opener=ensure_directory_opener) as file:
file.write(os.urandom(size))
logger.info(f"File with size {size} bytes has been generated: {test_file}")

View file

@ -64,7 +64,7 @@ def parallel_binary_verions(host: Host) -> dict[str, str]:
try:
result = shell.exec(f"{binary_path} {binary['param']}")
version = parse_version(result.stdout) or parse_version(result.stderr) or "Unknown"
versions_at_host[binary_name] = version.strip()
versions_at_host[binary_name] = version
except Exception as exc:
logger.error(f"Cannot get version for {binary_path} because of\n{exc}")
versions_at_host[binary_name] = "Unknown"

View file

@ -2,7 +2,7 @@ from typing import Any
import pytest
from frostfs_testlib.clients import AwsCliClient, Boto3ClientWrapper
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper
from frostfs_testlib.storage.dataclasses.acl import EACLRole
from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize

View file

@ -6,7 +6,10 @@ import pytest
from frostfs_testlib.load.load_config import EndpointSelectionStrategy, LoadParams, LoadScenario, LoadType, Preset, ReadFrom
from frostfs_testlib.load.runners import DefaultRunner
from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.controllers.background_load_controller import BackgroundLoadController
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
from frostfs_testlib.storage.dataclasses.node_base import NodeBase
@dataclass
@ -126,8 +129,6 @@ class TestLoadConfig:
"--size '11'",
"--acl 'acl'",
"--preload_obj '13'",
"--retry '24'",
"--rule 'rule' --rule 'rule_2'",
"--out 'pregen_json'",
"--workers '7'",
"--containers '16'",
@ -160,8 +161,6 @@ class TestLoadConfig:
expected_preset_args = [
"--size '11'",
"--preload_obj '13'",
"--retry '24'",
"--rule 'rule' --rule 'rule_2'",
"--out 'pregen_json'",
"--workers '7'",
"--containers '16'",
@ -318,8 +317,6 @@ class TestLoadConfig:
"--no-verify-ssl",
"--size '11'",
"--preload_obj '13'",
"--retry '24'",
"--rule 'rule' --rule 'rule_2'",
"--out 'pregen_json'",
"--workers '7'",
"--containers '16'",
@ -353,8 +350,6 @@ class TestLoadConfig:
expected_preset_args = [
"--size '11'",
"--preload_obj '13'",
"--retry '24'",
"--rule 'rule' --rule 'rule_2'",
"--out 'pregen_json'",
"--workers '7'",
"--containers '16'",
@ -420,26 +415,6 @@ class TestLoadConfig:
self._check_preset_params(load_params, params)
@pytest.mark.parametrize(
"load_type, input, value, params",
[
(LoadType.gRPC, ["A C ", " B"], ["A C", "B"], [f"--rule 'A C' --rule 'B'"]),
(LoadType.gRPC, " A ", ["A"], ["--rule 'A'"]),
(LoadType.gRPC, " A , B ", ["A , B"], ["--rule 'A , B'"]),
(LoadType.gRPC, [" A", "B "], ["A", "B"], ["--rule 'A' --rule 'B'"]),
(LoadType.gRPC, None, None, []),
(LoadType.S3, ["A C ", " B"], ["A C", "B"], []),
(LoadType.S3, None, None, []),
],
)
def test_ape_list_parsing_formatter(self, load_type, input, value, params):
load_params = LoadParams(load_type)
load_params.preset = Preset()
load_params.preset.rule = input
assert load_params.preset.rule == value
self._check_preset_params(load_params, params)
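
The deleted test above pins down the rule-list behaviour being removed: string or list inputs are whitespace-stripped on assignment, gRPC presets render them as repeated --rule flags, and S3 presets omit the flag entirely. A minimal standalone sketch of that formatting step (function name and shape are illustrative, not the library's API):

def format_rules(rules) -> list[str]:
    # Accept a single string or a list of strings; None yields no flags.
    if rules is None:
        return []
    if isinstance(rules, str):
        rules = [rules]
    cleaned = [r.strip() for r in rules]
    return [" ".join(f"--rule '{r}'" for r in cleaned)] if cleaned else []

# format_rules(["A C ", " B"]) -> ["--rule 'A C' --rule 'B'"]
# format_rules(" A , B ")      -> ["--rule 'A , B'"]  (no comma splitting)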
@pytest.mark.parametrize("load_params, load_type", [(LoadScenario.VERIFY, LoadType.S3)], indirect=True)
def test_argument_parsing_for_s3_verify_scenario(self, load_params: LoadParams):
expected_env_vars = {
@ -469,8 +444,6 @@ class TestLoadConfig:
expected_preset_args = [
"--size '0'",
"--preload_obj '0'",
"--retry '0'",
"--rule ''",
"--out ''",
"--workers '0'",
"--containers '0'",
@ -502,8 +475,6 @@ class TestLoadConfig:
expected_preset_args = [
"--size '0'",
"--preload_obj '0'",
"--retry '0'",
"--rule ''",
"--out ''",
"--workers '0'",
"--containers '0'",
@ -611,8 +582,6 @@ class TestLoadConfig:
expected_preset_args = [
"--size '0'",
"--preload_obj '0'",
"--retry '0'",
"--rule ''",
"--out ''",
"--workers '0'",
"--containers '0'",
@ -644,8 +613,6 @@ class TestLoadConfig:
expected_preset_args = [
"--size '0'",
"--preload_obj '0'",
"--retry '0'",
"--rule ''",
"--out ''",
"--workers '0'",
"--containers '0'",