Move shared code to testlib

Signed-off-by: Andrey Berezin <a.berezin@yadro.com>
Andrey Berezin 2023-05-14 13:43:59 +03:00
parent d97a02d1d3
commit 997e768e92
69 changed files with 9213 additions and 64 deletions

View file

@@ -116,7 +116,7 @@ contributors".
To sign your work, just add a line like this at the end of your commit message:
```
-Signed-off-by: Samii Sakisaka <samii@nspcc.ru>
+Signed-off-by: Andrey Berezin <a.berezin@yadro.com>
```
This can easily be done with the `--signoff` option to `git commit`.

View file

@@ -4,10 +4,10 @@ build-backend = "setuptools.build_meta"
[project]
name = "frostfs-testlib"
-version = "1.3.1"
+version = "2.0.0"
description = "Building blocks and utilities to facilitate development of automated tests for FrostFS system"
readme = "README.md"
-authors = [{ name = "NSPCC", email = "info@nspcc.ru" }]
+authors = [{ name = "Yadro", email = "info@yadro.com" }]
license = { text = "GNU General Public License v3 (GPLv3)" }
classifiers = [
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
@@ -25,6 +25,10 @@ dependencies = [
"requests>=2.28.0",
"docstring_parser>=0.15",
"testrail-api>=1.12.0",
+"pytest==7.1.2",
+"tenacity==8.0.1",
+"boto3==1.16.33",
+"boto3-stubs[essential]==1.16.33",
]
requires-python = ">=3.10"
@@ -50,7 +54,7 @@ line-length = 100
target-version = ["py310"]
[tool.bumpver]
-current_version = "1.3.1"
+current_version = "2.0.0"
version_pattern = "MAJOR.MINOR.PATCH"
commit_message = "Bump version {old_version} -> {new_version}"
commit = true

View file

@@ -7,6 +7,10 @@ pexpect==4.8.0
requests==2.28.1
docstring_parser==0.15
testrail-api==1.12.0
+tenacity==8.0.1
+pytest==7.1.2
+boto3==1.16.33
+boto3-stubs[essential]==1.16.33
# Dev dependencies
black==22.8.0

View file

@@ -1 +1 @@
-__version__ = "1.3.1"
+__version__ = "2.0.0"

View file

@@ -66,7 +66,7 @@ class TestExporter(ABC):
test_case_in_tms = self.search_test_case_id(test_case.id)
steps = [{"content": value, "expected": " "} for key, value in test_case.steps.items()]
-if test_case:
+if test_case_in_tms:
self.update_test_case(test_case, test_case_in_tms)
else:
self.create_test_case(test_case)

View file

@@ -38,6 +38,7 @@ class TestrailExporter(TestExporter):
self.tr_id_field = tr_id_field
self.tr_description_fields = tr_description_fields
self.tr_steps_field = tr_steps_field
+self.test_case_id_field_name = ""  # TODO: Add me
def fill_suite_cache(self) -> None:
"""

View file

@@ -9,14 +9,17 @@ from frostfs_testlib.shell import Shell
class FrostfsAdm:
-morph: Optional[FrostfsAdmMorph] = None
-subnet: Optional[FrostfsAdmMorphSubnet] = None
-storage_config: Optional[FrostfsAdmStorageConfig] = None
-version: Optional[FrostfsAdmVersion] = None
+morph: FrostfsAdmMorph
+subnet: FrostfsAdmMorphSubnet
+storage_config: FrostfsAdmStorageConfig
+version: FrostfsAdmVersion
+config: FrostfsAdmConfig
def __init__(self, shell: Shell, frostfs_adm_exec_path: str, config_file: Optional[str] = None):
self.config = FrostfsAdmConfig(shell, frostfs_adm_exec_path, config=config_file)
self.morph = FrostfsAdmMorph(shell, frostfs_adm_exec_path, config=config_file)
self.subnet = FrostfsAdmMorphSubnet(shell, frostfs_adm_exec_path, config=config_file)
-self.storage_config = FrostfsAdmStorageConfig(shell, frostfs_adm_exec_path, config=config_file)
+self.storage_config = FrostfsAdmStorageConfig(
+shell, frostfs_adm_exec_path, config=config_file
+)
self.version = FrostfsAdmVersion(shell, frostfs_adm_exec_path, config=config_file)

View file

@@ -14,16 +14,16 @@ from frostfs_testlib.shell import Shell
class FrostfsCli:
-accounting: Optional[FrostfsCliAccounting] = None
-acl: Optional[FrostfsCliACL] = None
-container: Optional[FrostfsCliContainer] = None
-netmap: Optional[FrostfsCliNetmap] = None
-object: Optional[FrostfsCliObject] = None
-session: Optional[FrostfsCliSession] = None
-shards: Optional[FrostfsCliShards] = None
-storagegroup: Optional[FrostfsCliStorageGroup] = None
-util: Optional[FrostfsCliUtil] = None
-version: Optional[FrostfsCliVersion] = None
+accounting: FrostfsCliAccounting
+acl: FrostfsCliACL
+container: FrostfsCliContainer
+netmap: FrostfsCliNetmap
+object: FrostfsCliObject
+session: FrostfsCliSession
+shards: FrostfsCliShards
+storagegroup: FrostfsCliStorageGroup
+util: FrostfsCliUtil
+version: FrostfsCliVersion
def __init__(self, shell: Shell, frostfs_cli_exec_path: str, config_file: Optional[str] = None):
self.accounting = FrostfsCliAccounting(shell, frostfs_cli_exec_path, config=config_file)

View file

@@ -12,14 +12,14 @@ from frostfs_testlib.shell import Shell
class NeoGo:
-candidate: Optional[NeoGoCandidate] = None
-contract: Optional[NeoGoContract] = None
-db: Optional[NeoGoDb] = None
-nep17: Optional[NeoGoNep17] = None
-node: Optional[NeoGoNode] = None
-query: Optional[NeoGoQuery] = None
-version: Optional[NeoGoVersion] = None
-wallet: Optional[NeoGoWallet] = None
+candidate: NeoGoCandidate
+contract: NeoGoContract
+db: NeoGoDb
+nep17: NeoGoNep17
+node: NeoGoNode
+query: NeoGoQuery
+version: NeoGoVersion
+wallet: NeoGoWallet
def __init__(
self,

View file

@@ -0,0 +1,207 @@
import frostfs_testlib.resources.optionals as optionals
from frostfs_testlib.load.k6 import K6
from frostfs_testlib.load.load_config import (
EndpointSelectionStrategy,
K6ProcessAllocationStrategy,
LoadParams,
LoadScenario,
LoadType,
)
from frostfs_testlib.load.load_steps import init_s3_client, prepare_k6_instances
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.load_params import (
K6_TEARDOWN_PERIOD,
LOAD_NODE_SSH_PASSWORD,
LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE,
LOAD_NODE_SSH_PRIVATE_KEY_PATH,
LOAD_NODE_SSH_USER,
LOAD_NODES,
)
from frostfs_testlib.shell.interfaces import SshCredentials
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.test_control import run_optionally
reporter = get_reporter()
class BackgroundLoadController:
k6_instances: list[K6]
k6_dir: str
load_params: LoadParams
load_nodes: list[str]
verification_params: LoadParams
nodes_under_load: list[ClusterNode]
ssh_credentials: SshCredentials
loaders_wallet: WalletInfo
endpoints: list[str]
def __init__(
self,
k6_dir: str,
load_params: LoadParams,
loaders_wallet: WalletInfo,
nodes_under_load: list[ClusterNode],
) -> None:
self.k6_dir = k6_dir
self.load_params = load_params
self.nodes_under_load = nodes_under_load
self.load_nodes = LOAD_NODES
self.loaders_wallet = loaders_wallet
if load_params.endpoint_selection_strategy is None:
raise RuntimeError("endpoint_selection_strategy should not be None")
self.endpoints = self._get_endpoints(
load_params.load_type, load_params.endpoint_selection_strategy
)
self.verification_params = LoadParams(
clients=load_params.readers,
scenario=LoadScenario.VERIFY,
registry_file=load_params.registry_file,
verify_time=load_params.verify_time,
load_type=load_params.load_type,
load_id=load_params.load_id,
working_dir=load_params.working_dir,
endpoint_selection_strategy=load_params.endpoint_selection_strategy,
k6_process_allocation_strategy=load_params.k6_process_allocation_strategy,
)
self.ssh_credentials = SshCredentials(
LOAD_NODE_SSH_USER,
LOAD_NODE_SSH_PASSWORD,
LOAD_NODE_SSH_PRIVATE_KEY_PATH,
LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE,
)
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED, [])
def _get_endpoints(
self, load_type: LoadType, endpoint_selection_strategy: EndpointSelectionStrategy
):
all_endpoints = {
LoadType.gRPC: {
EndpointSelectionStrategy.ALL: list(
set(
endpoint
for node_under_load in self.nodes_under_load
for endpoint in node_under_load.service(StorageNode).get_all_rpc_endpoint()
)
),
EndpointSelectionStrategy.FIRST: list(
set(
node_under_load.service(StorageNode).get_rpc_endpoint()
for node_under_load in self.nodes_under_load
)
),
},
# for some reason xk6 appends http protocol on its own
LoadType.S3: {
EndpointSelectionStrategy.ALL: list(
set(
endpoint.replace("http://", "")
for node_under_load in self.nodes_under_load
for endpoint in node_under_load.service(S3Gate).get_all_endpoints()
)
),
EndpointSelectionStrategy.FIRST: list(
set(
node_under_load.service(S3Gate).get_endpoint().replace("http://", "")
for node_under_load in self.nodes_under_load
)
),
},
}
return all_endpoints[load_type][endpoint_selection_strategy]
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@reporter.step_deco("Prepare background load instances")
def prepare(self):
if self.load_params.load_type == LoadType.S3:
init_s3_client(
self.load_nodes,
self.load_params,
self.k6_dir,
self.ssh_credentials,
self.nodes_under_load,
self.loaders_wallet,
)
self._prepare(self.load_params)
def _prepare(self, load_params: LoadParams):
self.k6_instances = prepare_k6_instances(
load_nodes=LOAD_NODES,
ssh_credentials=self.ssh_credentials,
k6_dir=self.k6_dir,
load_params=load_params,
endpoints=self.endpoints,
loaders_wallet=self.loaders_wallet,
)
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@reporter.step_deco("Start background load")
def start(self):
if self.load_params.preset is None:
raise RuntimeError("Preset should not be none at the moment of start")
with reporter.step(
f"Start background load on nodes {self.nodes_under_load}: "
f"writers = {self.load_params.writers}, "
f"obj_size = {self.load_params.object_size}, "
f"load_time = {self.load_params.load_time}, "
f"prepare_json = {self.load_params.preset.pregen_json}, "
f"endpoints = {self.endpoints}"
):
for k6_load_instance in self.k6_instances:
k6_load_instance.start()
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@reporter.step_deco("Stop background load")
def stop(self):
for k6_load_instance in self.k6_instances:
k6_load_instance.stop()
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED, True)
def is_running(self):
for k6_load_instance in self.k6_instances:
if not k6_load_instance.is_running:
return False
return True
def wait_until_finish(self):
if self.load_params.load_time is None:
raise RuntimeError("LoadTime should not be none")
for k6_instance in self.k6_instances:
k6_instance.wait_until_finished(self.load_params.load_time + int(K6_TEARDOWN_PERIOD))
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
def verify(self):
if self.verification_params.verify_time is None:
raise RuntimeError("verify_time should not be none")
self._prepare(self.verification_params)
with reporter.step("Run verify background load data"):
for k6_verify_instance in self.k6_instances:
k6_verify_instance.start()
k6_verify_instance.wait_until_finished(self.verification_params.verify_time)
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@reporter.step_deco("K6 run results")
def get_results(self) -> dict:
results = {}
for k6_instance in self.k6_instances:
if k6_instance.load_params.k6_process_allocation_strategy is None:
raise RuntimeError("k6_process_allocation_strategy should not be none")
result = k6_instance.get_results()
keys_map = {
K6ProcessAllocationStrategy.PER_LOAD_NODE: k6_instance.load_node,
K6ProcessAllocationStrategy.PER_ENDPOINT: k6_instance.endpoints[0],
}
key = keys_map[k6_instance.load_params.k6_process_allocation_strategy]
results[key] = result
return results

View file

@@ -0,0 +1,130 @@
import time
import allure
import frostfs_testlib.resources.optionals as optionals
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.shell import CommandOptions, Shell
from frostfs_testlib.steps import epoch
from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode
from frostfs_testlib.storage.controllers.disk_controller import DiskController
from frostfs_testlib.testing.test_control import run_optionally, wait_for_success
from frostfs_testlib.utils.failover_utils import (
wait_all_storage_nodes_returned,
wait_for_host_offline,
wait_for_host_online,
wait_for_node_online,
)
reporter = get_reporter()
class ClusterStateController:
def __init__(self, shell: Shell, cluster: Cluster) -> None:
self.stopped_nodes: list[ClusterNode] = []
self.detached_disks: dict[str, DiskController] = {}
self.stopped_storage_nodes: list[StorageNode] = []
self.cluster = cluster
self.shell = shell
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Stop host of node {node}")
def stop_node_host(self, node: ClusterNode, mode: str):
with allure.step(f"Stop host {node.host.config.address}"):
node.host.stop_host(mode=mode)
wait_for_host_offline(self.shell, node.storage_node)
self.stopped_nodes.append(node)
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Start host of node {node}")
def start_node_host(self, node: ClusterNode):
with allure.step(f"Start host {node.host.config.address}"):
node.host.start_host()
wait_for_host_online(self.shell, node.storage_node)
wait_for_node_online(node.storage_node)
self.stopped_nodes.remove(node)
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Start stopped hosts")
def start_stopped_hosts(self):
for node in self.stopped_nodes:
node.host.start_host()
self.stopped_nodes = []
wait_all_storage_nodes_returned(self.shell, self.cluster)
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Detach disk {device} at {mountpoint} on node {node}")
def detach_disk(self, node: StorageNode, device: str, mountpoint: str):
disk_controller = self._get_disk_controller(node, device, mountpoint)
self.detached_disks[disk_controller.id] = disk_controller
disk_controller.detach()
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Attach disk {device} at {mountpoint} on node {node}")
def attach_disk(self, node: StorageNode, device: str, mountpoint: str):
disk_controller = self._get_disk_controller(node, device, mountpoint)
disk_controller.attach()
self.detached_disks.pop(disk_controller.id, None)
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Restore detached disks")
def restore_disks(self):
for disk_controller in self.detached_disks.values():
disk_controller.attach()
self.detached_disks = {}
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Stop storage service on {node}")
def stop_storage_service(self, node: ClusterNode):
node.storage_node.stop_service()
self.stopped_storage_nodes.append(node.storage_node)
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Start storage service on {node}")
def start_storage_service(self, node: ClusterNode):
node.storage_node.start_service()
self.stopped_storage_nodes.remove(node.storage_node)
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Start stopped storage services")
def start_stopped_storage_services(self):
for node in self.stopped_storage_nodes:
node.start_service()
self.stopped_storage_nodes = []
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Hard reboot host {node} via magic SysRq option")
def panic_reboot_host(self, node: ClusterNode):
shell = node.host.get_shell()
shell.exec('sudo sh -c "echo 1 > /proc/sys/kernel/sysrq"')
options = CommandOptions(close_stdin=True, timeout=1, check=False)
shell.exec('sudo sh -c "echo b > /proc/sysrq-trigger"', options)
# Let things settle: a short wait here prevents SSH from getting stuck during the panic
time.sleep(10)
wait_for_host_online(self.shell, node.storage_node)
wait_for_node_online(node.storage_node)
@reporter.step_deco("Wait up to {timeout} seconds for nodes on cluster to align epochs")
def wait_for_epochs_align(self, timeout=60):
@wait_for_success(timeout, 5, None, True)
def check_epochs():
epochs_by_node = epoch.get_epochs_from_nodes(self.shell, self.cluster)
assert (
len(set(epochs_by_node.values())) == 1
), f"unaligned epochs found: {epochs_by_node}"
check_epochs()
def _get_disk_controller(
self, node: StorageNode, device: str, mountpoint: str
) -> DiskController:
disk_controller_id = DiskController.get_id(node, device)
if disk_controller_id in self.detached_disks.keys():
disk_controller = self.detached_disks[disk_controller_id]
else:
disk_controller = DiskController(node, device, mountpoint)
return disk_controller
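
A short usage sketch of the controller above. The `shell` and `cluster` objects are assumed to come from the test environment, and the way a `ClusterNode` is obtained depends on the `Cluster` API (the accessor below is hypothetical).

```
# Hypothetical usage sketch; `shell` and `cluster` come from the test setup.
controller = ClusterStateController(shell, cluster)

node = cluster.cluster_nodes[0]  # assumed accessor, for illustration only

controller.stop_storage_service(node)        # node is tracked as stopped
controller.start_stopped_storage_services()  # restart everything tracked

controller.stop_node_host(node, mode="hard")  # mode value is an assumption
controller.start_node_host(node)              # power on, wait until online
controller.wait_for_epochs_align(timeout=120)
```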

View file

@@ -0,0 +1,237 @@
import json
import logging
import os
from dataclasses import dataclass, fields
from time import sleep
from typing import Any
from frostfs_testlib.load.load_config import (
K6ProcessAllocationStrategy,
LoadParams,
LoadScenario,
LoadType,
)
from frostfs_testlib.processes.remote_process import RemoteProcess
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
EXIT_RESULT_CODE = 0
logger = logging.getLogger("NeoLogger")
reporter = get_reporter()
@dataclass
class LoadResults:
data_sent: float = 0.0
data_received: float = 0.0
read_ops: float = 0.0
write_ops: float = 0.0
total_ops: float = 0.0
class K6:
_k6_process: RemoteProcess
_k6_stop_attempts: int = 5
_k6_stop_check_interval: int = 15
def __init__(
self,
load_params: LoadParams,
endpoints: list[str],
k6_dir: str,
shell: Shell,
load_node: str,
wallet: WalletInfo,
):
if load_params.scenario is None:
raise RuntimeError("Scenario should not be none")
self.load_params: LoadParams = load_params
self.endpoints = endpoints
self.load_node: str = load_node
self.shell: Shell = shell
self.wallet = wallet
self.scenario: LoadScenario = load_params.scenario
self.summary_json: str = os.path.join(
self.load_params.working_dir,
f"{self.load_params.load_id}_{self.scenario.value}_summary.json",
)
self._k6_dir: str = k6_dir
@property
def process_dir(self) -> str:
return self._k6_process.process_dir
@reporter.step_deco("Preset containers and objects")
def preset(self) -> str:
preset_grpc = f"{self._k6_dir}/scenarios/preset/preset_grpc.py"
preset_s3 = f"{self._k6_dir}/scenarios/preset/preset_s3.py"
preset_map = {
LoadType.gRPC: preset_grpc,
LoadType.S3: preset_s3,
LoadType.HTTP: preset_grpc,
}
base_args = {
preset_grpc: [
preset_grpc,
f"--endpoint {self.endpoints[0]}",
f"--wallet {self.wallet.path} ",
f"--config {self.wallet.config_path} ",
],
preset_s3: [
preset_s3,
f"--endpoint {self.endpoints[0]}",
],
}
preset_scenario = preset_map[self.load_params.load_type]
command_args = base_args[preset_scenario].copy()
command_args += [
f"--{field.metadata['preset_argument']} '{getattr(self.load_params, field.name)}'"
for field in fields(self.load_params)
if field.metadata
and self.scenario in field.metadata["applicable_scenarios"]
and field.metadata["preset_argument"]
and getattr(self.load_params, field.name) is not None
]
if self.load_params.preset:
command_args += [
f"--{field.metadata['preset_argument']} '{getattr(self.load_params.preset, field.name)}'"
for field in fields(self.load_params.preset)
if field.metadata
and self.scenario in field.metadata["applicable_scenarios"]
and field.metadata["preset_argument"]
and getattr(self.load_params.preset, field.name) is not None
]
command = " ".join(command_args)
result = self.shell.exec(command)
assert (
result.return_code == EXIT_RESULT_CODE
), f"Return code of preset is not zero: {result.stdout}"
return result.stdout.strip("\n")
@reporter.step_deco("Generate K6 command")
def _generate_env_variables(self) -> str:
env_vars = {
field.metadata["env_variable"]: getattr(self.load_params, field.name)
for field in fields(self.load_params)
if field.metadata
and self.scenario in field.metadata["applicable_scenarios"]
and field.metadata["env_variable"]
and getattr(self.load_params, field.name) is not None
}
if self.load_params.preset:
env_vars.update(
{
field.metadata["env_variable"]: getattr(self.load_params.preset, field.name)
for field in fields(self.load_params.preset)
if field.metadata
and self.scenario in field.metadata["applicable_scenarios"]
and field.metadata["env_variable"]
and getattr(self.load_params.preset, field.name) is not None
}
)
env_vars[f"{self.load_params.load_type.value.upper()}_ENDPOINTS"] = ",".join(self.endpoints)
env_vars["SUMMARY_JSON"] = self.summary_json
reporter.attach(
"\n".join(f"{param}: {value}" for param, value in env_vars.items()), "K6 ENV variables"
)
return " ".join(
[f"-e {param}='{value}'" for param, value in env_vars.items() if value is not None]
)
@reporter.step_deco("Start K6 on initiator")
def start(self) -> None:
command = (
f"{self._k6_dir}/k6 run {self._generate_env_variables()} "
f"{self._k6_dir}/scenarios/{self.scenario.value}.js"
)
self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir)
@reporter.step_deco("Wait until K6 is finished")
def wait_until_finished(self, timeout: int = 0, k6_should_be_running: bool = False) -> None:
wait_interval = 10
if self._k6_process is None:
raise AssertionError("No k6 instances were executed")
if k6_should_be_running:
assert self._k6_process.running(), "k6 should be running."
while timeout >= 0:
if not self._k6_process.running():
return
logger.info(f"K6 is running. Waiting {wait_interval} seconds...")
if timeout > 0:
sleep(wait_interval)
timeout -= wait_interval
self._stop()
raise TimeoutError(f"Expected K6 to finish in {timeout} sec.")
def get_results(self) -> Any:
with reporter.step(f"K6 results from {self.load_node}"):
self.__log_output()
if not self.summary_json:
return None
summary_text = self.shell.exec(f"cat {self.summary_json}").stdout
summary_json = json.loads(summary_text)
allure_filenames = {
K6ProcessAllocationStrategy.PER_LOAD_NODE: f"{self.load_node}_{self.scenario.value}_summary.json",
K6ProcessAllocationStrategy.PER_ENDPOINT: f"{self.load_node}_{self.scenario.value}_{self.endpoints[0]}_summary.json",
}
allure_filename = allure_filenames[self.load_params.k6_process_allocation_strategy]
reporter.attach(summary_text, allure_filename)
return summary_json
@reporter.step_deco("Assert K6 should be finished")
def _k6_should_be_finished(self) -> None:
k6_rc = self._k6_process.rc()
assert k6_rc == 0, f"K6 unexpectedly finished with RC {k6_rc}"
@reporter.step_deco("Terminate K6 on initiator")
def stop(self) -> None:
if not self.is_running:
self.get_results()
raise AssertionError("K6 unexpectedly finished")
self._stop()
k6_rc = self._k6_process.rc()
assert k6_rc == EXIT_RESULT_CODE, f"Return code of K6 job should be 0, but was {k6_rc}"
@property
def is_running(self) -> bool:
if self._k6_process:
return self._k6_process.running()
return False
@reporter.step_deco("Try to stop K6 with SIGTERM")
def _stop(self) -> None:
self._k6_process.stop()
with reporter.step("Wait until process end"):
for _ in range(self._k6_stop_attempts):
if not self._k6_process.running():
break
sleep(self._k6_stop_check_interval)
else:
raise AssertionError("Can not stop K6 process within timeout")
def _kill(self) -> None:
self._k6_process.kill()
def __log_output(self) -> None:
reporter.attach(self._k6_process.stdout(full=True), "K6 stdout")
reporter.attach(self._k6_process.stderr(full=True), "K6 stderr")
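
A sketch of the k6 lifecycle this class implements. All the constructor arguments (`load_params`, `endpoints`, `k6_dir`, `shell`, `load_node`, `wallet`) are assumed to come from the calling code.

```
# Hypothetical lifecycle sketch; all names below come from the caller.
k6 = K6(load_params, endpoints, k6_dir, shell, load_node, wallet)
if load_params.preset:
    k6.preset()  # create containers/buckets and pregenerate objects
k6.start()       # launches `k6 run ...` as a RemoteProcess on the load node
k6.wait_until_finished(timeout=(load_params.load_time or 0))
summary = k6.get_results()  # parsed SUMMARY_JSON, attached to the report
```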

View file

@@ -0,0 +1,211 @@
import os
from dataclasses import dataclass, field
from enum import Enum
from typing import Optional
class LoadType(Enum):
gRPC = "grpc"
S3 = "s3"
HTTP = "http"
class LoadScenario(Enum):
gRPC = "grpc"
gRPC_CAR = "grpc_car"
S3 = "s3"
S3_CAR = "s3_car"
HTTP = "http"
VERIFY = "verify"
all_load_scenarios = [
LoadScenario.gRPC,
LoadScenario.S3,
LoadScenario.HTTP,
LoadScenario.S3_CAR,
LoadScenario.gRPC_CAR,
]
all_scenarios = all_load_scenarios.copy() + [LoadScenario.VERIFY]
constant_vus_scenarios = [LoadScenario.gRPC, LoadScenario.S3, LoadScenario.HTTP]
constant_arrival_rate_scenarios = [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR]
grpc_preset_scenarios = [LoadScenario.gRPC, LoadScenario.HTTP, LoadScenario.gRPC_CAR]
s3_preset_scenarios = [LoadScenario.S3, LoadScenario.S3_CAR]
def metadata_field(
applicable_scenarios: list[LoadScenario],
preset_param: Optional[str] = None,
scenario_variable: Optional[str] = None,
distributed: Optional[bool] = False,
):
return field(
default=None,
metadata={
"applicable_scenarios": applicable_scenarios,
"preset_argument": preset_param,
"env_variable": scenario_variable,
"distributed": distributed,
},
)
class NodesSelectionStrategy(Enum):
# Select ONE random node from cluster nodes.
RANDOM_SINGLE = "RANDOM_SINGLE"
# Select all nodes.
ALL = "ALL"
# Select all nodes except the node under test (useful for failover). This is the default.
ALL_EXCEPT_UNDER_TEST = "ALL_EXCEPT_UNDER_TEST"
# Select ONE random node except the node under test (useful for failover).
RANDOM_SINGLE_EXCEPT_UNDER_TEST = "RANDOM_SINGLE_EXCEPT_UNDER_TEST"
class EndpointSelectionStrategy(Enum):
"""Enum which defines which endpoint to select from each storage node"""
# Select All endpoints.
ALL = "ALL"
# Select first endpoint from node
FIRST = "FIRST"
class K6ProcessAllocationStrategy(Enum):
"""Enum which defines how K6 processes should be allocated"""
# Each load node will get one k6 process with all endpoints (Default)
PER_LOAD_NODE = "PER_LOAD_NODE"
# Each endpoint will get its own k6 process regardless of the number of load nodes.
# If there are not enough load nodes, some nodes may run multiple k6 processes
PER_ENDPOINT = "PER_ENDPOINT"
@dataclass
class Preset:
# ------ COMMON ------
# Amount of objects which should be created
objects_count: Optional[int] = metadata_field(all_load_scenarios, "preload_obj", None)
# Preset json. Filled automatically.
pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON")
# Workers count for preset
workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None)
# ------ GRPC ------
# Amount of containers which should be created
containers_count: Optional[int] = metadata_field(grpc_preset_scenarios, "containers", None)
# Container placement policy for containers for gRPC
container_placement_policy: Optional[str] = metadata_field(
grpc_preset_scenarios, "policy", None
)
# ------ S3 ------
# Amount of buckets which should be created
buckets_count: Optional[int] = metadata_field(s3_preset_scenarios, "buckets", None)
# S3 region (AKA placement policy for S3 buckets)
s3_location: Optional[str] = metadata_field(s3_preset_scenarios, "location", None)
@dataclass
class LoadParams:
# ------- CONTROL PARAMS -------
# Load type can be gRPC, HTTP, S3.
load_type: LoadType
# Load scenario from k6 scenarios
scenario: Optional[LoadScenario] = None
# Strategy to select nodes under load. See NodesSelectionStrategy class for more details.
# default is ALL_EXCEPT_UNDER_TEST
nodes_selection_strategy: Optional[NodesSelectionStrategy] = None
# Strategy which defines which endpoint to select from each storage node
endpoint_selection_strategy: Optional[EndpointSelectionStrategy] = None
# Strategy which defines how K6 processes should be allocated
k6_process_allocation_strategy: Optional[K6ProcessAllocationStrategy] = None
# Set to true in order to verify uploaded objects after K6 load finish. Default is True.
verify: Optional[bool] = None
# Identifier for the load run, used to distinguish runs. Filled automatically.
load_id: Optional[str] = None
# Working directory
working_dir: Optional[str] = None
# Preset for the k6 run
preset: Optional[Preset] = None
# ------- COMMON SCENARIO PARAMS -------
# Load time is the maximum duration for k6 to generate load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value.
load_time: Optional[int] = metadata_field(all_load_scenarios, None, "DURATION")
# Object size in KB for load and preset.
object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE")
# Output registry K6 file. Filled automatically.
registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE")
# Specifies the minimum duration of every single execution (i.e. iteration).
# Any iterations that are shorter than this value will cause that VU to
# sleep for the remainder of the time until the specified minimum duration is reached.
min_iteration_duration: Optional[str] = metadata_field(
all_load_scenarios, None, "K6_MIN_ITERATION_DURATION"
)
# ------- CONSTANT VUS SCENARIO PARAMS -------
# Amount of Writers VU.
writers: Optional[int] = metadata_field(constant_vus_scenarios, None, "WRITERS", True)
# Amount of Readers VU.
readers: Optional[int] = metadata_field(constant_vus_scenarios, None, "READERS", True)
# Amount of Deleters VU.
deleters: Optional[int] = metadata_field(constant_vus_scenarios, None, "DELETERS", True)
# ------- CONSTANT ARRIVAL RATE SCENARIO PARAMS -------
# Number of iterations to start during each timeUnit period for write.
write_rate: Optional[int] = metadata_field(
constant_arrival_rate_scenarios, None, "WRITE_RATE", True
)
# Number of iterations to start during each timeUnit period for read.
read_rate: Optional[int] = metadata_field(
constant_arrival_rate_scenarios, None, "READ_RATE", True
)
# Number of iterations to start during each timeUnit period for delete.
delete_rate: Optional[int] = metadata_field(
constant_arrival_rate_scenarios, None, "DELETE_RATE", True
)
# Amount of preAllocatedVUs for write operations.
preallocated_writers: Optional[int] = metadata_field(
constant_arrival_rate_scenarios, None, "PRE_ALLOC_WRITERS", True
)
# Amount of maxVUs for write operations.
max_writers: Optional[int] = metadata_field(
constant_arrival_rate_scenarios, None, "MAX_WRITERS", True
)
# Amount of preAllocatedVUs for read operations.
preallocated_readers: Optional[int] = metadata_field(
constant_arrival_rate_scenarios, None, "PRE_ALLOC_READERS", True
)
# Amount of maxVUs for read operations.
max_readers: Optional[int] = metadata_field(
constant_arrival_rate_scenarios, None, "MAX_READERS", True
)
# Amount of preAllocatedVUs for delete operations.
preallocated_deleters: Optional[int] = metadata_field(
constant_arrival_rate_scenarios, None, "PRE_ALLOC_DELETERS", True
)
# Amount of maxVUs for delete operations.
max_deleters: Optional[int] = metadata_field(
constant_arrival_rate_scenarios, None, "MAX_DELETERS", True
)
# Period of time to apply the rate value.
time_unit: Optional[str] = metadata_field(constant_arrival_rate_scenarios, None, "TIME_UNIT")
# ------- VERIFY SCENARIO PARAMS -------
# Maximum verification time for k6 to verify objects. Default is BACKGROUND_LOAD_MAX_VERIFY_TIME (3600).
verify_time: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "TIME_LIMIT")
# Amount of Verification VU.
clients: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "CLIENTS")
def set_id(self, load_id):
self.load_id = load_id
self.registry_file = os.path.join(self.working_dir, f"{load_id}_registry.bolt")
if self.preset:
self.preset.pregen_json = os.path.join(self.working_dir, f"{load_id}_prepare.json")
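
The dataclass metadata declared via `metadata_field` is what both the preset-argument builder and the env-variable builder in `K6` iterate over. A self-contained sketch of that filtering logic (mirroring `K6._generate_env_variables`; the parameter values are made up):

```
# Sketch of how metadata_field drives env-variable generation.
from dataclasses import fields

params = LoadParams(
    load_type=LoadType.gRPC,
    scenario=LoadScenario.gRPC,
    writers=8,
    load_time=120,
    working_dir="/tmp/load",
)
params.set_id("run_1")

env_vars = {
    f.metadata["env_variable"]: getattr(params, f.name)
    for f in fields(params)
    if f.metadata  # plain fields (e.g. load_type) carry no metadata
    and params.scenario in f.metadata["applicable_scenarios"]
    and f.metadata["env_variable"]
    and getattr(params, f.name) is not None
}
print(env_vars)
# {'DURATION': 120, 'REGISTRY_FILE': '/tmp/load/run_1_registry.bolt', 'WRITERS': 8}
```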

View file

@@ -0,0 +1,162 @@
from abc import ABC
from typing import Any
from frostfs_testlib.load.load_config import LoadScenario
class MetricsBase(ABC):
_WRITE_SUCCESS = ""
_WRITE_ERRORS = ""
_WRITE_THROUGHPUT = "data_sent"
_READ_SUCCESS = ""
_READ_ERRORS = ""
_READ_THROUGHPUT = "data_received"
_DELETE_SUCCESS = ""
_DELETE_ERRORS = ""
def __init__(self, summary) -> None:
self.summary = summary
self.metrics = summary["metrics"]
@property
def write_total_iterations(self) -> int:
return self._get_metric(self._WRITE_SUCCESS) + self._get_metric(self._WRITE_ERRORS)
@property
def write_success_iterations(self) -> int:
return self._get_metric(self._WRITE_SUCCESS)
@property
def write_rate(self) -> float:
return self._get_metric_rate(self._WRITE_SUCCESS)
@property
def write_failed_iterations(self) -> int:
return self._get_metric(self._WRITE_ERRORS)
@property
def write_throughput(self) -> float:
return self._get_metric_rate(self._WRITE_THROUGHPUT)
@property
def read_total_iterations(self) -> int:
return self._get_metric(self._READ_SUCCESS) + self._get_metric(self._READ_ERRORS)
@property
def read_success_iterations(self) -> int:
return self._get_metric(self._READ_SUCCESS)
@property
def read_rate(self) -> float:
return self._get_metric_rate(self._READ_SUCCESS)
@property
def read_failed_iterations(self) -> int:
return self._get_metric(self._READ_ERRORS)
@property
def read_throughput(self) -> float:
return self._get_metric_rate(self._READ_THROUGHPUT)
@property
def delete_total_iterations(self) -> int:
return self._get_metric(self._DELETE_SUCCESS) + self._get_metric(self._DELETE_ERRORS)
@property
def delete_success_iterations(self) -> int:
return self._get_metric(self._DELETE_SUCCESS)
@property
def delete_failed_iterations(self) -> int:
return self._get_metric(self._DELETE_ERRORS)
@property
def delete_rate(self) -> float:
return self._get_metric_rate(self._DELETE_SUCCESS)
def _get_metric(self, metric: str) -> int:
metrics_method_map = {"counter": self._get_counter_metric, "gauge": self._get_gauge_metric}
if metric not in self.metrics:
return 0
metric = self.metrics[metric]
metric_type = metric["type"]
if metric_type not in metrics_method_map:
raise Exception(
f"Unsupported metric type: {metric_type}, supported: {metrics_method_map.keys()}"
)
return metrics_method_map[metric_type](metric)
def _get_metric_rate(self, metric: str) -> float:
metrics_method_map = {"counter": self._get_counter_metric_rate}
if metric not in self.metrics:
return 0
metric = self.metrics[metric]
metric_type = metric["type"]
if metric_type not in metrics_method_map:
raise Exception(
f"Unsupported rate metric type: {metric_type}, supported: {metrics_method_map.keys()}"
)
return metrics_method_map[metric_type](metric)
def _get_counter_metric_rate(self, metric: dict) -> float:
return metric["values"]["rate"]
def _get_counter_metric(self, metric: dict) -> int:
return metric["values"]["count"]
def _get_gauge_metric(self, metric: dict) -> float:
return metric["values"]["value"]
class GrpcMetrics(MetricsBase):
_WRITE_SUCCESS = "frostfs_obj_put_total"
_WRITE_ERRORS = "frostfs_obj_put_fails"
_READ_SUCCESS = "frostfs_obj_get_total"
_READ_ERRORS = "frostfs_obj_get_fails"
_DELETE_SUCCESS = "frostfs_obj_delete_total"
_DELETE_ERRORS = "frostfs_obj_delete_fails"
class S3Metrics(MetricsBase):
_WRITE_SUCCESS = "aws_obj_put_total"
_WRITE_ERRORS = "aws_obj_put_fails"
_READ_SUCCESS = "aws_obj_get_total"
_READ_ERRORS = "aws_obj_get_fails"
_DELETE_SUCCESS = "aws_obj_delete_total"
_DELETE_ERRORS = "aws_obj_delete_fails"
class VerifyMetrics(MetricsBase):
_WRITE_SUCCESS = "N/A"
_WRITE_ERRORS = "N/A"
_READ_SUCCESS = "verified_obj"
_READ_ERRORS = "invalid_obj"
_DELETE_SUCCESS = "N/A"
_DELETE_ERRORS = "N/A"
def get_metrics_object(load_scenario: LoadScenario, summary: dict[str, Any]) -> MetricsBase:
class_map = {
LoadScenario.gRPC: GrpcMetrics,
LoadScenario.gRPC_CAR: GrpcMetrics,
LoadScenario.HTTP: GrpcMetrics,
LoadScenario.S3: S3Metrics,
LoadScenario.S3_CAR: S3Metrics,
LoadScenario.VERIFY: VerifyMetrics,
}
return class_map[load_scenario](summary)
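
A small hand-made summary shows how these classes read k6 output. The metric names and shape follow the classes above; the numbers themselves are invented for the example.

```
# Toy k6 summary, truncated to three counters.
summary = {
    "metrics": {
        "frostfs_obj_put_total": {"type": "counter", "values": {"count": 1000, "rate": 16.6}},
        "frostfs_obj_put_fails": {"type": "counter", "values": {"count": 2, "rate": 0.03}},
        "data_sent": {"type": "counter", "values": {"count": 1048576, "rate": 17476.0}},
    }
}

metrics = get_metrics_object(LoadScenario.gRPC, summary)
assert metrics.write_total_iterations == 1002   # successes + errors
assert metrics.write_failed_iterations == 2
assert metrics.write_rate == 16.6               # rate of the success counter
assert metrics.write_throughput == 17476.0      # rate of data_sent, bytes/sec
assert metrics.read_total_iterations == 0       # absent metrics default to 0
```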

View file

@@ -0,0 +1,265 @@
from datetime import datetime
from typing import Optional, Tuple
import yaml
from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario
from frostfs_testlib.load.load_metrics import get_metrics_object
class LoadReport:
def __init__(self, load_test) -> None:
self.load_test = load_test
self.load_summaries: Optional[dict] = None
self.load_params: Optional[LoadParams] = None
self.start_time: Optional[datetime] = None
self.end_time: Optional[datetime] = None
def set_start_time(self):
self.start_time = datetime.utcnow()
def set_end_time(self):
self.end_time = datetime.utcnow()
def set_summaries(self, load_summaries: dict):
self.load_summaries = load_summaries
def set_load_params(self, load_params: LoadParams):
self.load_params = load_params
def get_report_html(self):
report_sections = [
[self.load_test, self._get_load_params_section_html],
[self.load_summaries, self._get_totals_section_html],
[self.end_time, self._get_test_time_html],
]
html = ""
for section in report_sections:
if section[0] is not None:
html += section[1]()
return html
def _get_load_params_section_html(self) -> str:
params: str = yaml.safe_dump(self.load_test, sort_keys=False)
params = params.replace("\n", "<br>")
section_html = f"""<h3>Scenario params</h3>
<pre>{params}</pre>
<hr>"""
return section_html
def _get_test_time_html(self) -> str:
html = f"""<h3>Scenario duration in UTC time (from agent)</h3>
{self.start_time} - {self.end_time}<br>
<hr>
"""
return html
def _calc_unit(self, value: float, skip_units: int = 0) -> Tuple[float, str]:
units = ["B", "KB", "MB", "GB", "TB"]
for unit in units[skip_units:]:
if value < 1024:
return value, unit
value = value / 1024.0
return value, unit
def _seconds_to_formatted_duration(self, seconds: int) -> str:
"""Converts N number of seconds to formatted output ignoring zeroes.
Examples:
186399 -> "2d3h46m39s"
86399 -> "23h59m59s"
86399 -> "23h59m59s"
3605 -> "1h5s"
123 -> "2m3s"
"""
units = {"d": 86400, "h": 3600, "m": 60, "s": 1}
parts = []
remaining = seconds
for divisor in units.values():
part = remaining // divisor
remaining -= divisor * part
parts.append(part)
return "".join([f"{val}{unit}" for unit, val in zip(units, parts) if val > 0])
def _row(self, caption: str, value: str) -> str:
return f"<tr><th>{caption}</th><td>{value}</td></tr>"
def _get_model_string(self):
if self.load_params.min_iteration_duration is not None:
return f"min_iteration_duration={self.load_params.min_iteration_duration}"
model_map = {
LoadScenario.gRPC: "closed model",
LoadScenario.S3: "closed model",
LoadScenario.HTTP: "closed model",
LoadScenario.gRPC_CAR: "open model",
LoadScenario.S3_CAR: "open model",
}
return model_map[self.load_params.scenario]
def _get_operations_sub_section_html(
self,
operation_type: str,
total_operations: int,
requested_rate_str: str,
vus_str: str,
total_rate: float,
throughput: float,
errors: dict[str, int],
):
throughput_html = ""
if throughput > 0:
throughput, unit = self._calc_unit(throughput)
throughput_html = self._row("Throughput", f"{throughput:.2f} {unit}/sec")
per_node_errors_html = ""
total_errors = 0
if errors:
total_errors: int = 0
for node_key, errors in errors.items():
total_errors += errors
if (
self.load_params.k6_process_allocation_strategy
== K6ProcessAllocationStrategy.PER_ENDPOINT
):
per_node_errors_html += self._row(f"At {node_key}", errors)
object_size, object_size_unit = self._calc_unit(self.load_params.object_size, 1)
duration = self._seconds_to_formatted_duration(self.load_params.load_time)
model = self._get_model_string()
# write 8KB 15h49m 50op/sec 50th open model/closed model/min_iteration duration=1s - 1.636MB/s 199.57451/s
short_summary = f"{operation_type} {object_size}{object_size_unit} {duration} {requested_rate_str} {vus_str} {model} - {throughput:.2f}{unit} {total_rate:.2f}/s"
html = f"""
<table border="1" cellpadding="5px"><tbody>
<th colspan="2" bgcolor="gainsboro">{short_summary}</th>
<th colspan="2" bgcolor="gainsboro">Metrics</th>
{self._row("Total operations", total_operations)}
{self._row("OP/sec", f"{total_rate:.2f}")}
{throughput_html}
<th colspan="2" bgcolor="gainsboro">Errors</th>
{per_node_errors_html}
{self._row("Total", f"{total_errors} ({total_errors/total_operations*100.0:.2f}%)")}
</tbody></table><br><hr>
"""
return html
def _get_totals_section_html(self):
html = "<h3>Load Results</h3>"
write_operations = 0
write_op_sec = 0
write_throughput = 0
write_errors = {}
requested_write_rate = self.load_params.write_rate
requested_write_rate_str = f"{requested_write_rate}op/sec" if requested_write_rate else ""
read_operations = 0
read_op_sec = 0
read_throughput = 0
read_errors = {}
requested_read_rate = self.load_params.read_rate
requested_read_rate_str = f"{requested_read_rate}op/sec" if requested_read_rate else ""
delete_operations = 0
delete_op_sec = 0
delete_errors = {}
requested_delete_rate = self.load_params.delete_rate
requested_delete_rate_str = (
f"{requested_delete_rate}op/sec" if requested_delete_rate else ""
)
if self.load_params.scenario in [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR]:
delete_vus = max(
self.load_params.preallocated_deleters or 0, self.load_params.max_deleters or 0
)
write_vus = max(
self.load_params.preallocated_writers or 0, self.load_params.max_writers or 0
)
read_vus = max(
self.load_params.preallocated_readers or 0, self.load_params.max_readers or 0
)
else:
write_vus = self.load_params.writers
read_vus = self.load_params.readers
delete_vus = self.load_params.deleters
write_vus_str = f"{write_vus}th"
read_vus_str = f"{read_vus}th"
delete_vus_str = f"{delete_vus}th"
write_section_required = False
read_section_required = False
delete_section_required = False
for node_key, load_summary in self.load_summaries.items():
metrics = get_metrics_object(self.load_params.scenario, load_summary)
write_operations += metrics.write_total_iterations
if write_operations:
write_section_required = True
write_op_sec += metrics.write_rate
write_throughput += metrics.write_throughput
if metrics.write_failed_iterations:
write_errors[node_key] = metrics.write_failed_iterations
read_operations += metrics.read_total_iterations
if read_operations:
read_section_required = True
read_op_sec += metrics.read_rate
read_throughput += metrics.read_throughput
if metrics.read_failed_iterations:
read_errors[node_key] = metrics.read_failed_iterations
delete_operations += metrics.delete_total_iterations
if delete_operations:
delete_section_required = True
delete_op_sec += metrics.delete_rate
if metrics.delete_failed_iterations:
delete_errors[node_key] = metrics.delete_failed_iterations
if write_section_required:
html += self._get_operations_sub_section_html(
"Write",
write_operations,
requested_write_rate_str,
write_vus_str,
write_op_sec,
write_throughput,
write_errors,
)
if read_section_required:
html += self._get_operations_sub_section_html(
"Read",
read_operations,
requested_read_rate_str,
read_vus_str,
read_op_sec,
read_throughput,
read_errors,
)
if delete_section_required:
html += self._get_operations_sub_section_html(
"Delete",
delete_operations,
requested_delete_rate_str,
delete_vus_str,
delete_op_sec,
0,
delete_errors,
)
return html

View file

@@ -0,0 +1,184 @@
import copy
import itertools
import math
import re
from dataclasses import fields
from frostfs_testlib.cli import FrostfsAuthmate
from frostfs_testlib.load.k6 import K6
from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC
from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_VUS_COUNT_DIVISOR
from frostfs_testlib.shell import CommandOptions, SSHShell
from frostfs_testlib.shell.interfaces import InteractiveInput, SshCredentials
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
reporter = get_reporter()
STOPPED_HOSTS = []
@reporter.step_deco("Init s3 client on load nodes")
def init_s3_client(
load_nodes: list[str],
load_params: LoadParams,
k6_directory: str,
ssh_credentials: SshCredentials,
nodes_under_load: list[ClusterNode],
wallet: WalletInfo,
):
storage_node = nodes_under_load[0].service(StorageNode)
s3_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in nodes_under_load]
grpc_peer = storage_node.get_rpc_endpoint()
for load_node in load_nodes:
ssh_client = _get_ssh_client(ssh_credentials, load_node)
frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(ssh_client, FROSTFS_AUTHMATE_EXEC)
issue_secret_output = frostfs_authmate_exec.secret.issue(
wallet=wallet.path,
peer=grpc_peer,
bearer_rules=f"{k6_directory}/scenarios/files/rules.json",
gate_public_key=s3_public_keys,
container_placement_policy=load_params.preset.container_placement_policy,
container_policy=f"{k6_directory}/scenarios/files/policy.json",
wallet_password=wallet.password,
).stdout
aws_access_key_id = str(
re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group(
"aws_access_key_id"
)
)
aws_secret_access_key = str(
re.search(
r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output
).group("aws_secret_access_key")
)
# prompt_pattern doesn't work at the moment
configure_input = [
InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id),
InteractiveInput(
prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key
),
InteractiveInput(prompt_pattern=r".*", input=""),
InteractiveInput(prompt_pattern=r".*", input=""),
]
ssh_client.exec("aws configure", CommandOptions(interactive_inputs=configure_input))
@reporter.step_deco("Prepare K6 instances and objects")
def prepare_k6_instances(
load_nodes: list[str],
ssh_credentials: SshCredentials,
k6_dir: str,
load_params: LoadParams,
endpoints: list[str],
loaders_wallet: WalletInfo,
) -> list[K6]:
k6_load_objects: list[K6] = []
nodes = itertools.cycle(load_nodes)
k6_distribution_count = {
K6ProcessAllocationStrategy.PER_LOAD_NODE: len(load_nodes),
K6ProcessAllocationStrategy.PER_ENDPOINT: len(endpoints),
}
endpoints_generators = {
K6ProcessAllocationStrategy.PER_LOAD_NODE: itertools.cycle([endpoints]),
K6ProcessAllocationStrategy.PER_ENDPOINT: itertools.cycle(
[[endpoint] for endpoint in endpoints]
),
}
k6_processes_count = k6_distribution_count[load_params.k6_process_allocation_strategy]
endpoints_gen = endpoints_generators[load_params.k6_process_allocation_strategy]
distributed_load_params_list = _get_distributed_load_params_list(
load_params, k6_processes_count
)
for distributed_load_params in distributed_load_params_list:
load_node = next(nodes)
ssh_client = _get_ssh_client(ssh_credentials, load_node)
k6_load_object = K6(
distributed_load_params,
next(endpoints_gen),
k6_dir,
ssh_client,
load_node,
loaders_wallet,
)
k6_load_objects.append(k6_load_object)
if load_params.preset:
k6_load_object.preset()
return k6_load_objects
def _get_ssh_client(ssh_credentials: SshCredentials, load_node: str):
ssh_client = SSHShell(
host=load_node,
login=ssh_credentials.ssh_login,
password=ssh_credentials.ssh_password,
private_key_path=ssh_credentials.ssh_key_path,
private_key_passphrase=ssh_credentials.ssh_key_passphrase,
)
return ssh_client
def _get_distributed_load_params_list(
original_load_params: LoadParams, workers_count: int
) -> list[LoadParams]:
divisor = int(BACKGROUND_LOAD_VUS_COUNT_DIVISOR)
distributed_load_params: list[LoadParams] = []
for i in range(workers_count):
load_params = copy.deepcopy(original_load_params)
# Append #i here in case multiple k6 processes go to the same load node
load_params.set_id(f"{load_params.load_id}_{i}")
distributed_load_params.append(load_params)
load_fields = fields(original_load_params)
for field in load_fields:
if (
field.metadata
and original_load_params.scenario in field.metadata["applicable_scenarios"]
and field.metadata["distributed"]
and getattr(original_load_params, field.name) is not None
):
original_value = getattr(original_load_params, field.name)
distribution = _get_distribution(math.ceil(original_value / divisor), workers_count)
for i in range(workers_count):
setattr(distributed_load_params[i], field.name, distribution[i])
return distributed_load_params
def _get_distribution(clients_count: int, workers_count: int) -> list[int]:
"""
This function will distribute evenly as possible X clients to Y workers.
For example if we have 150 readers (clients) and we want to spread it over 4 load nodes (workers)
this will return [38, 38, 37, 37].
Args:
clients_count: amount of things needs to be distributed.
workers_count: amount of workers.
Returns:
list of distribution.
"""
if workers_count < 1:
raise Exception("Workers cannot be less than 1")
# Guaranteed number of clients on each worker
clients_per_worker = clients_count // workers_count
# Remaining clients left to distribute
remainder = clients_count - clients_per_worker * workers_count
distribution = [
clients_per_worker + 1 if i < remainder else clients_per_worker
for i in range(workers_count)
]
return distribution
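
A worked instance of the distribution math from the docstring, as a standalone check:

```
# 150 clients over 4 workers: 150 // 4 = 37 each, remainder 2,
# so the first two workers take one extra client.
clients_count, workers_count = 150, 4
clients_per_worker = clients_count // workers_count            # 37
remainder = clients_count - clients_per_worker * workers_count  # 2
distribution = [
    clients_per_worker + 1 if i < remainder else clients_per_worker
    for i in range(workers_count)
]
assert distribution == [38, 38, 37, 37]
assert sum(distribution) == clients_count
```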

View file

@@ -0,0 +1,36 @@
import logging
from frostfs_testlib.load.load_config import LoadParams, LoadScenario
from frostfs_testlib.load.load_metrics import get_metrics_object
logger = logging.getLogger("NeoLogger")
class LoadVerifier:
def __init__(self, load_params: LoadParams) -> None:
self.load_params = load_params
def verify_summaries(self, load_summary, verification_summary) -> None:
if not verification_summary or not load_summary:
logger.info("Can't check load results due to missing summary")
return
load_metrics = get_metrics_object(self.load_params.scenario, load_summary)
writers = self.load_params.writers or 0
objects_count = load_metrics.write_success_iterations
fails_count = load_metrics.write_failed_iterations
if writers > 0:
assert objects_count > 0, "Total put objects should be greater than 0"
assert fails_count == 0, f"There were {fails_count} failed put objects operations"
if verification_summary:
verify_metrics = get_metrics_object(LoadScenario.VERIFY, verification_summary)
verified_objects = verify_metrics.read_success_iterations
invalid_objects = verify_metrics.read_failed_iterations
assert invalid_objects == 0, f"There were {invalid_objects} verification fails"
# Due to interruptions, the number of verified objects may be less than the number written, by up to the writers count
assert (
abs(objects_count - verified_objects) <= writers
), f"Difference between verified and written objects is too big. Written: {objects_count}, Verified: {verified_objects}, Writers: {writers}."

View file

@@ -0,0 +1,197 @@
from __future__ import annotations
import os
import uuid
from typing import Optional
from tenacity import retry
from tenacity.stop import stop_after_attempt
from tenacity.wait import wait_fixed
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.shell import Shell
from frostfs_testlib.shell.interfaces import CommandOptions
reporter = get_reporter()
class RemoteProcess:
def __init__(self, cmd: str, process_dir: str, shell: Shell):
self.process_dir = process_dir
self.cmd = cmd
self.stdout_last_line_number = 0
self.stderr_last_line_number = 0
self.pid: Optional[str] = None
self.proc_rc: Optional[int] = None
self.saved_stdout: Optional[str] = None
self.saved_stderr: Optional[str] = None
self.shell = shell
@classmethod
@reporter.step_deco("Create remote process")
def create(cls, command: str, shell: Shell, working_dir: str = "/tmp") -> RemoteProcess:
"""
Create a process on a remote host.
Creates a directory for the process with the following files:
command.sh: script to execute
pid: contains process id
rc: contains script return code
stderr: contains script errors
stdout: contains script output
Args:
shell: Shell instance
command: command to be run on a remote host
working_dir: working directory for the process
Returns:
RemoteProcess instance for further examination
"""
remote_process = cls(
cmd=command, process_dir=os.path.join(working_dir, f"proc_{uuid.uuid4()}"), shell=shell
)
remote_process._create_process_dir()
remote_process._generate_command_script(command)
remote_process._start_process()
remote_process.pid = remote_process._get_pid()
return remote_process
@reporter.step_deco("Get process stdout")
def stdout(self, full: bool = False) -> str:
"""
Method to get process stdout, either fresh info or full.
Args:
full: returns full stdout that we have to this moment
Returns:
Fresh stdout. Only new lines (tracked via stdout_last_line_number) are returned.
If the process is finished (proc_rc is not None), saved stdout is returned.
"""
if self.saved_stdout is not None:
cur_stdout = self.saved_stdout
else:
terminal = self.shell.exec(f"cat {self.process_dir}/stdout")
if self.proc_rc is not None:
self.saved_stdout = terminal.stdout
cur_stdout = terminal.stdout
if full:
return cur_stdout
whole_stdout = cur_stdout.split("\n")
if len(whole_stdout) > self.stdout_last_line_number:
resulted_stdout = "\n".join(whole_stdout[self.stdout_last_line_number :])
self.stdout_last_line_number = len(whole_stdout)
return resulted_stdout
return ""
@reporter.step_deco("Get process stderr")
def stderr(self, full: bool = False) -> str:
"""
Method to get process stderr, either fresh info or full.
Args:
full: returns full stderr that we have to this moment
Returns:
Fresh stderr. Only new lines (tracked via stderr_last_line_number) are returned.
If the process is finished (proc_rc is not None), saved stderr is returned.
"""
if self.saved_stderr is not None:
cur_stderr = self.saved_stderr
else:
terminal = self.shell.exec(f"cat {self.process_dir}/stderr")
if self.proc_rc is not None:
self.saved_stderr = terminal.stdout
cur_stderr = terminal.stdout
if full:
return cur_stderr
whole_stderr = cur_stderr.split("\n")
if len(whole_stderr) > self.stderr_last_line_number:
resulted_stderr = "\n".join(whole_stderr[self.stderr_last_line_number :])
self.stderr_last_line_number = len(whole_stderr)
return resulted_stderr
return ""
@reporter.step_deco("Get process rc")
def rc(self) -> Optional[int]:
if self.proc_rc is not None:
return self.proc_rc
terminal = self.shell.exec(f"cat {self.process_dir}/rc", CommandOptions(check=False))
if "No such file or directory" in terminal.stderr:
return None
elif terminal.stderr or terminal.return_code != 0:
raise AssertionError(f"cat process rc was not successful: {terminal.stderr}")
self.proc_rc = int(terminal.stdout)
return self.proc_rc
@reporter.step_deco("Check if process is running")
def running(self) -> bool:
return self.rc() is None
@reporter.step_deco("Send signal to process")
def send_signal(self, signal: int) -> None:
kill_res = self.shell.exec(f"kill -{signal} {self.pid}", CommandOptions(check=False))
if "No such process" in kill_res.stderr:
return
if kill_res.return_code:
raise AssertionError(
f"Signal {signal} not sent. Return code of kill: {kill_res.return_code}"
)
@reporter.step_deco("Stop process")
def stop(self) -> None:
self.send_signal(15)
@reporter.step_deco("Kill process")
def kill(self) -> None:
self.send_signal(9)
@reporter.step_deco("Clear process directory")
def clear(self) -> None:
if self.process_dir == "/":
raise AssertionError(f"Invalid path to delete: {self.process_dir}")
self.shell.exec(f"rm -rf {self.process_dir}")
@reporter.step_deco("Start remote process")
def _start_process(self) -> None:
self.shell.exec(
f"nohup {self.process_dir}/command.sh </dev/null "
f">{self.process_dir}/stdout "
f"2>{self.process_dir}/stderr &"
)
@reporter.step_deco("Create process directory")
def _create_process_dir(self) -> None:
self.shell.exec(f"mkdir {self.process_dir}")
self.shell.exec(f"chmod 777 {self.process_dir}")
terminal = self.shell.exec(f"realpath {self.process_dir}")
self.process_dir = terminal.stdout.strip()
@reporter.step_deco("Get pid")
@retry(wait=wait_fixed(10), stop=stop_after_attempt(5), reraise=True)
def _get_pid(self) -> str:
terminal = self.shell.exec(f"cat {self.process_dir}/pid")
assert terminal.stdout, f"invalid pid: {terminal.stdout}"
return terminal.stdout.strip()
@reporter.step_deco("Generate command script")
def _generate_command_script(self, command: str) -> None:
command = command.replace("\\", "\\\\").replace('"', '\\"')
script = (
f"#!/bin/bash\n"
f"cd {self.process_dir}\n"
f"{command} &\n"
f"pid=\$!\n"
f"cd {self.process_dir}\n"
f"echo \$pid > {self.process_dir}/pid\n"
f"wait \$pid\n"
f"echo $? > {self.process_dir}/rc"
)
self.shell.exec(f'echo "{script}" > {self.process_dir}/command.sh')
self.shell.exec(f"cat {self.process_dir}/command.sh")
self.shell.exec(f"chmod +x {self.process_dir}/command.sh")

View file

@@ -1,7 +1,7 @@
import os
from contextlib import AbstractContextManager
from textwrap import shorten
-from typing import Any
+from typing import Any, Callable
import allure
from allure import attachment_type
@@ -16,6 +16,9 @@ class AllureHandler(ReporterHandler):
name = shorten(name, width=70, placeholder="...")
return allure.step(name)
+def step_decorator(self, name: str) -> Callable:
+return allure.step(name)
def attach(self, body: Any, file_name: str) -> None:
attachment_name, extension = os.path.splitext(file_name)
attachment_type = self._resolve_attachment_type(extension)

View file

@@ -1,6 +1,6 @@
from abc import ABC, abstractmethod
from contextlib import AbstractContextManager
-from typing import Any
+from typing import Any, Callable
class ReporterHandler(ABC):
@@ -17,6 +17,17 @@ class ReporterHandler(ABC):
Step context.
"""
+@abstractmethod
+def step_decorator(self, name: str) -> Callable:
+"""A step decorator from reporter.
+Args:
+name: Name of the step.
+Returns:
+A decorator for the step.
+"""
@abstractmethod
def attach(self, content: Any, file_name: str) -> None:
"""Attach specified content with given file name to the test report.

View file

@ -1,6 +1,7 @@
from contextlib import AbstractContextManager, contextmanager
from functools import wraps
from types import TracebackType
from typing import Any, Optional
from typing import Any, Callable, Optional
from frostfs_testlib.plugins import load_plugin
from frostfs_testlib.reporter.interfaces import ReporterHandler
@ -45,6 +46,32 @@ class Reporter:
handler_class = load_plugin("frostfs.testlib.reporter", handler_config["plugin_name"])
self.register_handler(handler_class())
def step_deco(self, name: str) -> Callable:
"""Register a new step in test execution in a decorator fashion.
To note: the actual decoration with handlers is happening during target function call time.
Args:
name: Name of the step.
Returns:
Decorated function.
"""
def deco(func):
@wraps(func)
def wrapper(*a, **kw):
resulting_func = func
for handler in self.handlers:
decorator = handler.step_decorator(name)
resulting_func = decorator(resulting_func)
return resulting_func(*a, **kw)
return wrapper
return deco
def step(self, name: str) -> AbstractContextManager:
"""Register a new step in test execution.

View file

@ -0,0 +1,12 @@
# Paths to CLI executables on machine that runs tests
import os
NEOGO_EXECUTABLE = os.getenv("NEOGO_EXECUTABLE", "neo-go")
FROSTFS_CLI_EXEC = os.getenv("FROSTFS_CLI_EXEC", "frostfs-cli")
FROSTFS_AUTHMATE_EXEC = os.getenv("FROSTFS_AUTHMATE_EXEC", "frostfs-s3-authmate")
FROSTFS_ADM_EXEC = os.getenv("FROSTFS_ADM_EXEC", "frostfs-adm")
# Config for frostfs-adm utility. Optional if tests are running against devenv
FROSTFS_ADM_CONFIG_PATH = os.getenv("FROSTFS_ADM_CONFIG_PATH")
CLI_DEFAULT_TIMEOUT = os.getenv("CLI_DEFAULT_TIMEOUT", None)
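Because every value is read through `os.getenv` at import time, pointing tests at custom binaries only requires the variables to be exported before this module is first imported; a sketch with hypothetical paths:
```
import os

# Hypothetical paths; must be set before frostfs_testlib imports this module.
os.environ["FROSTFS_CLI_EXEC"] = "/usr/local/bin/frostfs-cli"
os.environ["FROSTFS_ADM_CONFIG_PATH"] = "/etc/frostfs/adm.yml"
```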

View file

@ -1,37 +1,46 @@
# ACLs with final flag
PUBLIC_ACL_F = "1FBFBFFF"
PRIVATE_ACL_F = "1C8C8CCC"
READONLY_ACL_F = "1FBF8CFF"
import os
# ACLs without final flag set
PUBLIC_ACL = "0FBFBFFF"
INACCESSIBLE_ACL = "40000000"
STICKY_BIT_PUB_ACL = "3FFFFFFF"
import yaml
EACL_PUBLIC_READ_WRITE = "eacl-public-read-write"
CONTAINER_WAIT_INTERVAL = "1m"
# Regex patterns of status codes of Container service
CONTAINER_NOT_FOUND = "code = 3072.*message = container not found"
SIMPLE_OBJECT_SIZE = os.getenv("SIMPLE_OBJECT_SIZE", "1000")
COMPLEX_OBJECT_CHUNKS_COUNT = os.getenv("COMPLEX_OBJECT_CHUNKS_COUNT", "3")
COMPLEX_OBJECT_TAIL_SIZE = os.getenv("COMPLEX_OBJECT_TAIL_SIZE", "1000")
SERVICE_MAX_STARTUP_TIME = os.getenv("SERVICE_MAX_STARTUP_TIME", "5m")
# Regex patterns of status codes of Object service
MALFORMED_REQUEST = "code = 1024.*message = malformed request"
OBJECT_ACCESS_DENIED = "code = 2048.*message = access to object operation denied"
OBJECT_NOT_FOUND = "code = 2049.*message = object not found"
OBJECT_ALREADY_REMOVED = "code = 2052.*message = object already removed"
SESSION_NOT_FOUND = "code = 4096.*message = session token not found"
OUT_OF_RANGE = "code = 2053.*message = out of range"
EXPIRED_SESSION_TOKEN = "code = 4097.*message = expired session token"
# TODO: Due to https://github.com/nspcc-dev/neofs-node/issues/2092 we have to check only codes until fixed
# OBJECT_IS_LOCKED = "code = 2050.*message = object is locked"
# LOCK_NON_REGULAR_OBJECT = "code = 2051.*message = ..." will be available once 2092 is fixed
OBJECT_IS_LOCKED = "code = 2050"
LOCK_NON_REGULAR_OBJECT = "code = 2051"
MORPH_TIMEOUT = os.getenv("MORPH_TIMEOUT", "8s")
MORPH_BLOCK_TIME = os.getenv("MORPH_BLOCK_TIME", "1s")
FROSTFS_CONTRACT_CACHE_TIMEOUT = os.getenv("FROSTFS_CONTRACT_CACHE_TIMEOUT", "30s")
LIFETIME_REQUIRED = "either expiration epoch of a lifetime is required"
LOCK_OBJECT_REMOVAL = "lock object removal"
LOCK_OBJECT_EXPIRATION = "lock object expiration: {expiration_epoch}; current: {current_epoch}"
INVALID_RANGE_ZERO_LENGTH = "invalid '{range}' range: zero length"
INVALID_RANGE_OVERFLOW = "invalid '{range}' range: uint64 overflow"
INVALID_OFFSET_SPECIFIER = "invalid '{range}' range offset specifier"
INVALID_LENGTH_SPECIFIER = "invalid '{range}' range length specifier"
# Time interval that allows a GC pass on storage node (this includes GC sleep interval
# of 1min plus 15 seconds for GC pass itself)
STORAGE_GC_TIME = os.getenv("STORAGE_GC_TIME", "75s")
GAS_HASH = os.getenv("GAS_HASH", "0xd2a4cff31913016155e38e474a2c06d08be276cf")
FROSTFS_CONTRACT = os.getenv("FROSTFS_IR_CONTRACTS_FROSTFS")
ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir")
# Password of wallet owned by user on behalf of whom we are running tests
# Default wallet password is empty
DEFAULT_WALLET_PASS = os.getenv("WALLET_PASS", "")
# Artificial delay that we add after object deletion and container creation.
# The delay is needed because sometimes, immediately after deletion, the object still
# appears to exist (probably because the tombstone object takes some time to replicate).
# TODO: remove this wait
S3_SYNC_WAIT_TIME = 5
# Generate wallet config
# TODO: we should move all info about wallet configs to fixtures
DEFAULT_WALLET_CONFIG = os.path.join(os.getcwd(), "wallet_config.yml")
with open(DEFAULT_WALLET_CONFIG, "w") as file:
yaml.dump({"password": DEFAULT_WALLET_PASS}, file)
# Number of attempts that S3 clients will attempt per each request (1 means single attempt
# without any retries)
MAX_REQUEST_ATTEMPTS = 1
RETRY_MODE = "standard"
CREDENTIALS_CREATE_TIMEOUT = "1m"

View file

@ -0,0 +1,28 @@
# Regex patterns of status codes of Container service
CONTAINER_NOT_FOUND = "code = 3072.*message = container not found"
# Regex patterns of status codes of Object service
MALFORMED_REQUEST = "code = 1024.*message = malformed request"
OBJECT_ACCESS_DENIED = "code = 2048.*message = access to object operation denied"
OBJECT_NOT_FOUND = "code = 2049.*message = object not found"
OBJECT_ALREADY_REMOVED = "code = 2052.*message = object already removed"
SESSION_NOT_FOUND = "code = 4096.*message = session token not found"
OUT_OF_RANGE = "code = 2053.*message = out of range"
EXPIRED_SESSION_TOKEN = "code = 4097.*message = expired session token"
# TODO: Change to codes with message
# OBJECT_IS_LOCKED = "code = 2050.*message = object is locked"
# LOCK_NON_REGULAR_OBJECT = "code = 2051.*message = ..." will be available once 2092 is fixed
OBJECT_IS_LOCKED = "code = 2050"
LOCK_NON_REGULAR_OBJECT = "code = 2051"
LIFETIME_REQUIRED = "either expiration epoch of a lifetime is required"
LOCK_OBJECT_REMOVAL = "lock object removal"
LOCK_OBJECT_EXPIRATION = "lock object expiration: {expiration_epoch}; current: {current_epoch}"
INVALID_RANGE_ZERO_LENGTH = "invalid '{range}' range: zero length"
INVALID_RANGE_OVERFLOW = "invalid '{range}' range: uint64 overflow"
INVALID_OFFSET_SPECIFIER = "invalid '{range}' range offset specifier"
INVALID_LENGTH_SPECIFIER = "invalid '{range}' range length specifier"
S3_MALFORMED_XML_REQUEST = (
"The XML you provided was not well-formed or did not validate against our published schema."
)
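These constants are regex fragments rather than complete messages, so they are intended for `re.search` against raw error output; a minimal sketch (the module path and error string are assumptions for illustration):
```
import re

# Module path assumed from this commit's layout.
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND

error_output = "status: code = 2049 message = object not found"  # illustrative
assert re.search(OBJECT_NOT_FOUND, error_output)
```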

View file

@ -0,0 +1,30 @@
import os
# Background load node parameters
LOAD_NODES = os.getenv("LOAD_NODES", "").split()
# Must hardcode for now
LOAD_NODE_SSH_USER = os.getenv("LOAD_NODE_SSH_USER", "service")
LOAD_NODE_SSH_PASSWORD = os.getenv("LOAD_NODE_SSH_PASSWORD")
LOAD_NODE_SSH_PRIVATE_KEY_PATH = os.getenv("LOAD_NODE_SSH_PRIVATE_KEY_PATH")
LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE = os.getenv("LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE")
BACKGROUND_WRITERS_COUNT = os.getenv("BACKGROUND_WRITERS_COUNT", 4)
BACKGROUND_READERS_COUNT = os.getenv("BACKGROUND_READERS_COUNT", 4)
BACKGROUND_DELETERS_COUNT = os.getenv("BACKGROUND_DELETERS_COUNT", 0)
BACKGROUND_LOAD_DEFAULT_TIME = os.getenv("BACKGROUND_LOAD_DEFAULT_TIME", 600)
BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE = os.getenv("BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE", 32)
# This will decrease load params for some weak environments
BACKGROUND_LOAD_VUS_COUNT_DIVISOR = os.getenv("BACKGROUND_LOAD_VUS_COUNT_DIVISOR", 1)
# Wait for 1 hour for the xk6 verify scenario by default (in practice this means "unlimited" time)
BACKGROUND_LOAD_MAX_VERIFY_TIME = os.getenv("BACKGROUND_LOAD_VERIFY_MAX_TIME", 3600)
BACKGROUND_LOAD_CONTAINER_PLACEMENT_POLICY = os.getenv(
"BACKGROUND_LOAD_CONTAINER_PLACEMENT_POLICY", "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
)
BACKGROUND_LOAD_S3_LOCATION = os.getenv("BACKGROUND_LOAD_S3_LOCATION", "node-off")
PRESET_CONTAINERS_COUNT = os.getenv("CONTAINERS_COUNT", "40")
# TODO: At least one object is required due to a bug in xk6 (buckets with no objects produce millions of exceptions in read)
PRESET_OBJECTS_COUNT = os.getenv("OBJ_COUNT", "10")
K6_DIRECTORY = os.getenv("K6_DIRECTORY", "/etc/k6")
K6_TEARDOWN_PERIOD = os.getenv("K6_TEARDOWN_PERIOD", "30")
LOAD_CONFIG_YAML_PATH = os.getenv("LOAD_CONFIG_YAML_PATH", "load_config_yaml_file.yml")
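Note that several of these defaults are ints while values taken from the environment arrive as strings, so consumers have to coerce; a sketch of how the divisor is presumably meant to be applied (the clamp to one VU is an assumption):
```
# Hypothetical scaling of VU counts for weak environments.
writers = max(1, int(BACKGROUND_WRITERS_COUNT) // int(BACKGROUND_LOAD_VUS_COUNT_DIVISOR))
readers = max(1, int(BACKGROUND_READERS_COUNT) // int(BACKGROUND_LOAD_VUS_COUNT_DIVISOR))
```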

View file

@ -0,0 +1,26 @@
import os
def str_to_bool(input: str) -> bool:
return input in ["true", "True", "1"]
# Optional params that can be overridden so code does not have to be commented out or modified during local development. Use with caution.
# Node under test. Set this to occupy exact node.
OPTIONAL_NODE_UNDER_TEST = os.getenv("OPTIONAL_NODE_UNDER_TEST")
# Node under load. Set this to target load on exact node.
OPTIONAL_NODE_UNDER_LOAD = os.getenv("OPTIONAL_NODE_UNDER_LOAD")
# Set this to False to disable failover commands, i.e. a node that is supposed to be stopped will not actually be stopped.
OPTIONAL_FAILOVER_ENABLED = str_to_bool(os.getenv("OPTIONAL_FAILOVER_ENABLED", "true"))
# Set this to False to disable background load, i.e. background load will not actually be started.
OPTIONAL_BACKGROUND_LOAD_ENABLED = str_to_bool(
os.getenv("OPTIONAL_BACKGROUND_LOAD_ENABLED", "true")
)
# Set this to False to disable autouse fixtures (e.g. node healthcheck) during development.
OPTIONAL_AUTOUSE_FIXTURES_ENABLED = str_to_bool(
os.getenv("OPTIONAL_AUTOUSE_FIXTURES_ENABLED", "true")
)
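A sketch of how such a switch is typically consumed (the fixture below is illustrative, not part of this commit):
```
import pytest

# Module path assumed from this commit's layout.
from frostfs_testlib.resources.optionals import OPTIONAL_AUTOUSE_FIXTURES_ENABLED

@pytest.fixture(autouse=OPTIONAL_AUTOUSE_FIXTURES_ENABLED)
def node_healthcheck():
    # Not auto-applied when OPTIONAL_AUTOUSE_FIXTURES_ENABLED=false is set.
    yield
```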

View file

@ -0,0 +1,11 @@
# ACLs with final flag
PUBLIC_ACL_F = "1FBFBFFF"
PRIVATE_ACL_F = "1C8C8CCC"
READONLY_ACL_F = "1FBF8CFF"
# ACLs without final flag set
PUBLIC_ACL = "0FBFBFFF"
INACCESSIBLE_ACL = "40000000"
STICKY_BIT_PUB_ACL = "3FFFFFFF"
EACL_PUBLIC_READ_WRITE = "eacl-public-read-write"

View file

@ -0,0 +1,3 @@
from frostfs_testlib.s3.aws_cli_client import AwsCliClient
from frostfs_testlib.s3.boto3_client import Boto3ClientWrapper
from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus

View file

@ -0,0 +1,754 @@
import json
import logging
import os
import uuid
from datetime import datetime
from time import sleep
from typing import Literal, Optional, Union
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.common import (
ASSETS_DIR,
MAX_REQUEST_ATTEMPTS,
RETRY_MODE,
S3_SYNC_WAIT_TIME,
)
from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict
# TODO: Refactor this code to use shell instead of _cmd_run
from frostfs_testlib.utils.cli_utils import _cmd_run, _configure_aws_cli
reporter = get_reporter()
logger = logging.getLogger("NeoLogger")
LONG_TIMEOUT = 240
class AwsCliClient(S3ClientWrapper):
# Flags that we use for all S3 commands: disable SSL verification (as we use self-signed
# certificate in devenv) and disable automatic pagination in CLI output
common_flags = "--no-verify-ssl --no-paginate"
s3gate_endpoint: str
@reporter.step_deco("Configure S3 client (aws cli)")
def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None:
self.s3gate_endpoint = s3gate_endpoint
try:
_configure_aws_cli("aws configure", access_key_id, secret_access_key)
_cmd_run(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS}")
_cmd_run(f"aws configure set retry_mode {RETRY_MODE}")
except Exception as err:
raise RuntimeError("Error while configuring AwsCliClient") from err
@reporter.step_deco("Create bucket S3")
def create_bucket(
self,
bucket: Optional[str] = None,
object_lock_enabled_for_bucket: Optional[bool] = None,
acl: Optional[str] = None,
grant_write: Optional[str] = None,
grant_read: Optional[str] = None,
grant_full_control: Optional[str] = None,
location_constraint: Optional[str] = None,
) -> str:
if bucket is None:
bucket = str(uuid.uuid4())
if object_lock_enabled_for_bucket is None:
object_lock = ""
elif object_lock_enabled_for_bucket:
object_lock = " --object-lock-enabled-for-bucket"
else:
object_lock = " --no-object-lock-enabled-for-bucket"
cmd = (
f"aws {self.common_flags} s3api create-bucket --bucket {bucket} "
f"{object_lock} --endpoint {self.s3gate_endpoint}"
)
if acl:
cmd += f" --acl {acl}"
if grant_full_control:
cmd += f" --grant-full-control {grant_full_control}"
if grant_write:
cmd += f" --grant-write {grant_write}"
if grant_read:
cmd += f" --grant-read {grant_read}"
if location_constraint:
cmd += f" --create-bucket-configuration LocationConstraint={location_constraint}"
_cmd_run(cmd)
sleep(S3_SYNC_WAIT_TIME)
return bucket
@reporter.step_deco("List buckets S3")
def list_buckets(self) -> list[str]:
cmd = f"aws {self.common_flags} s3api list-buckets --endpoint {self.s3gate_endpoint}"
output = _cmd_run(cmd)
buckets_json = self._to_json(output)
return [bucket["Name"] for bucket in buckets_json["Buckets"]]
@reporter.step_deco("Delete bucket S3")
def delete_bucket(self, bucket: str) -> None:
cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint}"
_cmd_run(cmd, LONG_TIMEOUT)
sleep(S3_SYNC_WAIT_TIME)
@reporter.step_deco("Head bucket S3")
def head_bucket(self, bucket: str) -> None:
cmd = f"aws {self.common_flags} s3api head-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint}"
_cmd_run(cmd)
@reporter.step_deco("Put bucket versioning status")
def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None:
cmd = (
f"aws {self.common_flags} s3api put-bucket-versioning --bucket {bucket} "
f"--versioning-configuration Status={status.value} "
f"--endpoint {self.s3gate_endpoint}"
)
_cmd_run(cmd)
@reporter.step_deco("Get bucket versioning status")
def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]:
cmd = (
f"aws {self.common_flags} s3api get-bucket-versioning --bucket {bucket} "
f"--endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
response = self._to_json(output)
return response.get("Status")
@reporter.step_deco("Put bucket tagging")
def put_bucket_tagging(self, bucket: str, tags: list) -> None:
tags_json = {
"TagSet": [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
}
cmd = (
f"aws {self.common_flags} s3api put-bucket-tagging --bucket {bucket} "
f"--tagging '{json.dumps(tags_json)}' --endpoint {self.s3gate_endpoint}"
)
_cmd_run(cmd)
@reporter.step_deco("Get bucket tagging")
def get_bucket_tagging(self, bucket: str) -> list:
cmd = (
f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} "
f"--endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
response = self._to_json(output)
return response.get("TagSet")
@reporter.step_deco("Get bucket acl")
def get_bucket_acl(self, bucket: str) -> list:
cmd = (
f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} "
f"--endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
response = self._to_json(output)
return response.get("Grants")
@reporter.step_deco("Get bucket location")
def get_bucket_location(self, bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api get-bucket-location --bucket {bucket} "
f"--endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
response = self._to_json(output)
return response.get("LocationConstraint")
@reporter.step_deco("List objects S3")
def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
cmd = (
f"aws {self.common_flags} s3api list-objects --bucket {bucket} "
f"--endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
response = self._to_json(output)
obj_list = [obj["Key"] for obj in response.get("Contents", [])]
logger.info(f"Found s3 objects: {obj_list}")
return response if full_output else obj_list
@reporter.step_deco("List objects S3 v2")
def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
cmd = (
f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} "
f"--endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
response = self._to_json(output)
obj_list = [obj["Key"] for obj in response.get("Contents", [])]
logger.info(f"Found s3 objects: {obj_list}")
return response if full_output else obj_list
@reporter.step_deco("List objects versions S3")
def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict:
cmd = (
f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} "
f"--endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
response = self._to_json(output)
return response if full_output else response.get("Versions", [])
@reporter.step_deco("List objects delete markers S3")
def list_delete_markers(self, bucket: str, full_output: bool = False) -> list:
cmd = (
f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} "
f"--endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
response = self._to_json(output)
return response if full_output else response.get("DeleteMarkers", [])
@reporter.step_deco("Copy object S3")
def copy_object(
self,
source_bucket: str,
source_key: str,
bucket: Optional[str] = None,
key: Optional[str] = None,
acl: Optional[str] = None,
metadata_directive: Optional[Literal["COPY", "REPLACE"]] = None,
metadata: Optional[dict] = None,
tagging_directive: Optional[Literal["COPY", "REPLACE"]] = None,
tagging: Optional[str] = None,
) -> str:
if bucket is None:
bucket = source_bucket
if key is None:
key = os.path.join(os.getcwd(), str(uuid.uuid4()))
copy_source = f"{source_bucket}/{source_key}"
cmd = (
f"aws {self.common_flags} s3api copy-object --copy-source {copy_source} "
f"--bucket {bucket} --key {key} --endpoint {self.s3gate_endpoint}"
)
if acl:
cmd += f" --acl {acl}"
if metadata_directive:
cmd += f" --metadata-directive {metadata_directive}"
if metadata:
cmd += " --metadata "
for meta_key, value in metadata.items():
cmd += f" {meta_key}={value}"
if tagging_directive:
cmd += f" --tagging-directive {tagging_directive}"
if tagging:
cmd += f" --tagging {tagging}"
_cmd_run(cmd, LONG_TIMEOUT)
return key
@reporter.step_deco("Put object S3")
def put_object(
self,
bucket: str,
filepath: str,
key: Optional[str] = None,
metadata: Optional[dict] = None,
tagging: Optional[str] = None,
acl: Optional[str] = None,
object_lock_mode: Optional[str] = None,
object_lock_retain_until_date: Optional[datetime] = None,
object_lock_legal_hold_status: Optional[str] = None,
grant_full_control: Optional[str] = None,
grant_read: Optional[str] = None,
) -> str:
if key is None:
key = os.path.basename(filepath)
cmd = (
f"aws {self.common_flags} s3api put-object --bucket {bucket} --key {key} "
f"--body {filepath} --endpoint {self.s3gate_endpoint}"
)
if metadata:
cmd += " --metadata"
for meta_key, value in metadata.items():
cmd += f" {meta_key}={value}"
if tagging:
cmd += f" --tagging '{tagging}'"
if acl:
cmd += f" --acl {acl}"
if object_lock_mode:
cmd += f" --object-lock-mode {object_lock_mode}"
if object_lock_retain_until_date:
cmd += f' --object-lock-retain-until-date "{object_lock_retain_until_date}"'
if object_lock_legal_hold_status:
cmd += f" --object-lock-legal-hold-status {object_lock_legal_hold_status}"
if grant_full_control:
cmd += f" --grant-full-control '{grant_full_control}'"
if grant_read:
cmd += f" --grant-read {grant_read}"
output = _cmd_run(cmd, LONG_TIMEOUT)
response = self._to_json(output)
return response.get("VersionId")
@reporter.step_deco("Head object S3")
def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict:
version = f" --version-id {version_id}" if version_id else ""
cmd = (
f"aws {self.common_flags} s3api head-object --bucket {bucket} --key {key} "
f"{version} --endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
response = self._to_json(output)
return response
@reporter.step_deco("Get object S3")
def get_object(
self,
bucket: str,
key: str,
version_id: Optional[str] = None,
object_range: Optional[tuple[int, int]] = None,
full_output: bool = False,
) -> Union[dict, str]:
file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
version = f" --version-id {version_id}" if version_id else ""
cmd = (
f"aws {self.common_flags} s3api get-object --bucket {bucket} --key {key} "
f"{version} {file_path} --endpoint {self.s3gate_endpoint}"
)
if object_range:
cmd += f" --range bytes={object_range[0]}-{object_range[1]}"
output = _cmd_run(cmd)
response = self._to_json(output)
return response if full_output else file_path
@reporter.step_deco("Get object ACL")
def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list:
version = f" --version-id {version_id}" if version_id else ""
cmd = (
f"aws {self.common_flags} s3api get-object-acl --bucket {bucket} --key {key} "
f"{version} --endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
response = self._to_json(output)
return response.get("Grants")
@reporter.step_deco("Put object ACL")
def put_object_acl(
self,
bucket: str,
key: str,
acl: Optional[str] = None,
grant_write: Optional[str] = None,
grant_read: Optional[str] = None,
) -> list:
cmd = (
f"aws {self.common_flags} s3api put-object-acl --bucket {bucket} --key {key} "
f" --endpoint {self.s3gate_endpoint}"
)
if acl:
cmd += f" --acl {acl}"
if grant_write:
cmd += f" --grant-write {grant_write}"
if grant_read:
cmd += f" --grant-read {grant_read}"
output = _cmd_run(cmd)
response = self._to_json(output)
return response.get("Grants")
@reporter.step_deco("Put bucket ACL")
def put_bucket_acl(
self,
bucket: str,
acl: Optional[str] = None,
grant_write: Optional[str] = None,
grant_read: Optional[str] = None,
) -> None:
cmd = (
f"aws {self.common_flags} s3api put-bucket-acl --bucket {bucket} "
f" --endpoint {self.s3gate_endpoint}"
)
if acl:
cmd += f" --acl {acl}"
if grant_write:
cmd += f" --grant-write {grant_write}"
if grant_read:
cmd += f" --grant-read {grant_read}"
_cmd_run(cmd)
@reporter.step_deco("Delete objects S3")
def delete_objects(self, bucket: str, keys: list[str]) -> dict:
file_path = os.path.join(os.getcwd(), ASSETS_DIR, "delete.json")
delete_structure = json.dumps(_make_objs_dict(keys))
with open(file_path, "w") as out_file:
out_file.write(delete_structure)
logger.info(f"Input file for delete-objects: {delete_structure}")
cmd = (
f"aws {self.common_flags} s3api delete-objects --bucket {bucket} "
f"--delete file://{file_path} --endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd, LONG_TIMEOUT)
response = self._to_json(output)
sleep(S3_SYNC_WAIT_TIME)
return response
@reporter.step_deco("Delete object S3")
def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict:
version = f" --version-id {version_id}" if version_id else ""
cmd = (
f"aws {self.common_flags} s3api delete-object --bucket {bucket} "
f"--key {key} {version} --endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd, LONG_TIMEOUT)
sleep(S3_SYNC_WAIT_TIME)
return self._to_json(output)
@reporter.step_deco("Delete object versions S3")
def delete_object_versions(self, bucket: str, object_versions: list) -> dict:
# Build deletion list in S3 format
delete_list = {
"Objects": [
{
"Key": object_version["Key"],
"VersionId": object_version["VersionId"],
}
for object_version in object_versions
]
}
file_path = os.path.join(os.getcwd(), ASSETS_DIR, "delete.json")
delete_structure = json.dumps(delete_list)
with open(file_path, "w") as out_file:
out_file.write(delete_structure)
logger.info(f"Input file for delete-objects: {delete_structure}")
cmd = (
f"aws {self.common_flags} s3api delete-objects --bucket {bucket} "
f"--delete file://{file_path} --endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd, LONG_TIMEOUT)
sleep(S3_SYNC_WAIT_TIME)
return self._to_json(output)
@reporter.step_deco("Delete object versions S3 without delete markers")
def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None:
# Delete objects without creating delete markers
for object_version in object_versions:
self.delete_object(
bucket=bucket, key=object_version["Key"], version_id=object_version["VersionId"]
)
@reporter.step_deco("Get object attributes")
def get_object_attributes(
self,
bucket: str,
key: str,
attributes: list[str],
version_id: str = "",
max_parts: int = 0,
part_number: int = 0,
full_output: bool = True,
) -> dict:
attrs = ",".join(attributes)
version = f" --version-id {version_id}" if version_id else ""
parts = f"--max-parts {max_parts}" if max_parts else ""
part_number_str = f"--part-number-marker {part_number}" if part_number else ""
cmd = (
f"aws {self.common_flags} s3api get-object-attributes --bucket {bucket} "
f"--key {key} {version} {parts} {part_number_str} --object-attributes {attrs} "
f"--endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
response = self._to_json(output)
for attr in attributes:
assert attr in response, f"Expected attribute {attr} in {response}"
if full_output:
return response
else:
return response.get(attributes[0])
@reporter.step_deco("Get bucket policy")
def get_bucket_policy(self, bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} "
f"--endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
response = self._to_json(output)
return response.get("Policy")
@reporter.step_deco("Put bucket policy")
def put_bucket_policy(self, bucket: str, policy: dict) -> None:
# Leaving it as it was in the test repo. Double json.dumps is used to escape the resulting string
# Example:
# policy = {"a": 1}
# json.dumps(policy) => {"a": 1}
# json.dumps(json.dumps(policy)) => "{\"a\": 1}"
# TODO: update this
dumped_policy = json.dumps(json.dumps(policy))
cmd = (
f"aws {self.common_flags} s3api put-bucket-policy --bucket {bucket} "
f"--policy {dumped_policy} --endpoint {self.s3gate_endpoint}"
)
_cmd_run(cmd)
@reporter.step_deco("Get bucket cors")
def get_bucket_cors(self, bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} "
f"--endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
response = self._to_json(output)
return response.get("CORSRules")
@reporter.step_deco("Put bucket cors")
def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None:
cmd = (
f"aws {self.common_flags} s3api put-bucket-cors --bucket {bucket} "
f"--cors-configuration '{json.dumps(cors_configuration)}' --endpoint {self.s3gate_endpoint}"
)
_cmd_run(cmd)
@reporter.step_deco("Delete bucket cors")
def delete_bucket_cors(self, bucket: str) -> None:
cmd = (
f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} "
f"--endpoint {self.s3gate_endpoint}"
)
_cmd_run(cmd)
@reporter.step_deco("Delete bucket tagging")
def delete_bucket_tagging(self, bucket: str) -> None:
cmd = (
f"aws {self.common_flags} s3api delete-bucket-tagging --bucket {bucket} "
f"--endpoint {self.s3gate_endpoint}"
)
_cmd_run(cmd)
@reporter.step_deco("Put object retention")
def put_object_retention(
self,
bucket: str,
key: str,
retention: dict,
version_id: Optional[str] = None,
bypass_governance_retention: Optional[bool] = None,
) -> None:
version = f" --version-id {version_id}" if version_id else ""
cmd = (
f"aws {self.common_flags} s3api put-object-retention --bucket {bucket} --key {key} "
f"{version} --retention '{json.dumps(retention, indent=4, sort_keys=True, default=str)}' --endpoint {self.s3gate_endpoint}"
)
if bypass_governance_retention is not None:
cmd += " --bypass-governance-retention"
_cmd_run(cmd)
@reporter.step_deco("Put object legal hold")
def put_object_legal_hold(
self,
bucket: str,
key: str,
legal_hold_status: Literal["ON", "OFF"],
version_id: Optional[str] = None,
) -> None:
version = f" --version-id {version_id}" if version_id else ""
legal_hold = json.dumps({"Status": legal_hold_status})
cmd = (
f"aws {self.common_flags} s3api put-object-legal-hold --bucket {bucket} --key {key} "
f"{version} --legal-hold '{legal_hold}' --endpoint {self.s3gate_endpoint}"
)
_cmd_run(cmd)
@reporter.step_deco("Put object tagging")
def put_object_tagging(self, bucket: str, key: str, tags: list) -> None:
tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
tagging = {"TagSet": tags}
cmd = (
f"aws {self.common_flags} s3api put-object-tagging --bucket {bucket} --key {key} "
f"--tagging '{json.dumps(tagging)}' --endpoint {self.s3gate_endpoint}"
)
_cmd_run(cmd)
@reporter.step_deco("Get object tagging")
def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list:
version = f" --version-id {version_id}" if version_id else ""
cmd = (
f"aws {self.common_flags} s3api get-object-tagging --bucket {bucket} --key {key} "
f"{version} --endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
response = self._to_json(output)
return response.get("TagSet")
@reporter.step_deco("Delete object tagging")
def delete_object_tagging(self, bucket: str, key: str) -> None:
cmd = (
f"aws {self.common_flags} s3api delete-object-tagging --bucket {bucket} "
f"--key {key} --endpoint {self.s3gate_endpoint}"
)
_cmd_run(cmd)
@reporter.step_deco("Sync directory S3")
def sync(
self,
bucket: str,
dir_path: str,
acl: Optional[str] = None,
metadata: Optional[dict] = None,
) -> dict:
cmd = (
f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} "
f"--endpoint-url {self.s3gate_endpoint}"
)
if metadata:
cmd += " --metadata"
for key, value in metadata.items():
cmd += f" {key}={value}"
if acl:
cmd += f" --acl {acl}"
output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output)
@reporter.step_deco("CP directory S3")
def cp(
self,
bucket: str,
dir_path: str,
acl: Optional[str] = None,
metadata: Optional[dict] = None,
) -> dict:
cmd = (
f"aws {self.common_flags} s3 cp {dir_path} s3://{bucket} "
f"--endpoint-url {self.s3gate_endpoint} --recursive"
)
if metadata:
cmd += " --metadata"
for key, value in metadata.items():
cmd += f" {key}={value}"
if acl:
cmd += f" --acl {acl}"
output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output)
@reporter.step_deco("Create multipart upload S3")
def create_multipart_upload(self, bucket: str, key: str) -> str:
cmd = (
f"aws {self.common_flags} s3api create-multipart-upload --bucket {bucket} "
f"--key {key} --endpoint-url {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
response = self._to_json(output)
assert response.get("UploadId"), f"Expected UploadId in response:\n{response}"
return response["UploadId"]
@reporter.step_deco("List multipart uploads S3")
def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]:
cmd = (
f"aws {self.common_flags} s3api list-multipart-uploads --bucket {bucket} "
f"--endpoint-url {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
response = self._to_json(output)
return response.get("Uploads")
@reporter.step_deco("Abort multipart upload S3")
def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None:
cmd = (
f"aws {self.common_flags} s3api abort-multipart-upload --bucket {bucket} "
f"--key {key} --upload-id {upload_id} --endpoint-url {self.s3gate_endpoint}"
)
_cmd_run(cmd)
@reporter.step_deco("Upload part S3")
def upload_part(
self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str
) -> str:
cmd = (
f"aws {self.common_flags} s3api upload-part --bucket {bucket} --key {key} "
f"--upload-id {upload_id} --part-number {part_num} --body {filepath} "
f"--endpoint-url {self.s3gate_endpoint}"
)
output = _cmd_run(cmd, LONG_TIMEOUT)
response = self._to_json(output)
assert response.get("ETag"), f"Expected ETag in response:\n{response}"
return response["ETag"]
@reporter.step_deco("Upload copy part S3")
def upload_part_copy(
self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str
) -> str:
cmd = (
f"aws {self.common_flags} s3api upload-part-copy --bucket {bucket} --key {key} "
f"--upload-id {upload_id} --part-number {part_num} --copy-source {copy_source} "
f"--endpoint-url {self.s3gate_endpoint}"
)
output = _cmd_run(cmd, LONG_TIMEOUT)
response = self._to_json(output)
assert response.get("CopyPartResult", []).get(
"ETag"
), f"Expected ETag in response:\n{response}"
return response["CopyPartResult"]["ETag"]
@reporter.step_deco("List parts S3")
def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]:
cmd = (
f"aws {self.common_flags} s3api list-parts --bucket {bucket} --key {key} "
f"--upload-id {upload_id} --endpoint-url {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
response = self._to_json(output)
assert response.get("Parts"), f"Expected Parts in response:\n{response}"
return response["Parts"]
@reporter.step_deco("Complete multipart upload S3")
def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None:
file_path = os.path.join(os.getcwd(), ASSETS_DIR, "parts.json")
parts_dict = {"Parts": [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts]}
with open(file_path, "w") as out_file:
out_file.write(json.dumps(parts_dict))
logger.info(f"Input file for complete-multipart-upload: {json.dumps(parts_dict)}")
cmd = (
f"aws {self.common_flags} s3api complete-multipart-upload --bucket {bucket} "
f"--key {key} --upload-id {upload_id} --multipart-upload file://{file_path} "
f"--endpoint-url {self.s3gate_endpoint}"
)
_cmd_run(cmd)
@reporter.step_deco("Put object lock configuration")
def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict:
cmd = (
f"aws {self.common_flags} s3api put-object-lock-configuration --bucket {bucket} "
f"--object-lock-configuration '{json.dumps(configuration)}' --endpoint-url {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
return self._to_json(output)
@reporter.step_deco("Get object lock configuration")
def get_object_lock_configuration(self, bucket: str):
cmd = (
f"aws {self.common_flags} s3api get-object-lock-configuration --bucket {bucket} "
f"--endpoint-url {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
response = self._to_json(output)
return response.get("ObjectLockConfiguration")
@staticmethod
def _to_json(output: str) -> dict:
json_output = {}
if "{" not in output and "}" not in output:
logger.warning(f"Could not parse json from output {output}")
return json_output
json_output = json.loads(output[output.index("{") :])
return json_output
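A minimal usage sketch of the wrapper (endpoint and credentials are placeholders):
```
from frostfs_testlib.s3 import AwsCliClient, VersioningStatus

client = AwsCliClient(
    access_key_id="<ACCESS_KEY_ID>",          # placeholder
    secret_access_key="<SECRET_ACCESS_KEY>",  # placeholder
    s3gate_endpoint="https://s3.frostfs.devenv:8080",  # placeholder
)
bucket = client.create_bucket()
client.put_bucket_versioning(bucket, VersioningStatus.ENABLED)
assert bucket in client.list_buckets()
```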

View file

@ -0,0 +1,661 @@
import json
import logging
import os
import uuid
from datetime import datetime
from functools import wraps
from time import sleep
from typing import Literal, Optional, Union
import boto3
import urllib3
from botocore.config import Config
from botocore.exceptions import ClientError
from mypy_boto3_s3 import S3Client
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.common import (
ASSETS_DIR,
MAX_REQUEST_ATTEMPTS,
RETRY_MODE,
S3_SYNC_WAIT_TIME,
)
from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict
from frostfs_testlib.utils.cli_utils import log_command_execution
reporter = get_reporter()
logger = logging.getLogger("NeoLogger")
# Disable warnings about the self-signed certificate that the
# boto library produces on requests to S3-gate in dev-env
urllib3.disable_warnings()
def report_error(func):
@wraps(func)
def deco(*a, **kw):
try:
return func(*a, **kw)
except ClientError as err:
log_command_execution("Result", str(err))
raise
return deco
class Boto3ClientWrapper(S3ClientWrapper):
@reporter.step_deco("Configure S3 client (boto3)")
@report_error
def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None:
session = boto3.Session()
config = Config(
retries={
"max_attempts": MAX_REQUEST_ATTEMPTS,
"mode": RETRY_MODE,
}
)
self.boto3_client: S3Client = session.client(
service_name="s3",
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key,
config=config,
endpoint_url=s3gate_endpoint,
verify=False,
)
def _to_s3_param(self, param: str):
replacement_map = {
"Acl": "ACL",
"Cors": "CORS",
"_": "",
}
result = param.title()
for find, replace in replacement_map.items():
result = result.replace(find, replace)
return result
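The helper maps the wrapper's snake_case keyword names onto the CamelCase parameter names boto3 expects; a few illustrative mappings:
```
# _to_s3_param("bucket")             -> "Bucket"
# _to_s3_param("version_id")         -> "VersionId"
# _to_s3_param("acl")                -> "ACL"
# _to_s3_param("grant_full_control") -> "GrantFullControl"
```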
# BUCKET METHODS #
@reporter.step_deco("Create bucket S3")
@report_error
def create_bucket(
self,
bucket: Optional[str] = None,
object_lock_enabled_for_bucket: Optional[bool] = None,
acl: Optional[str] = None,
grant_write: Optional[str] = None,
grant_read: Optional[str] = None,
grant_full_control: Optional[str] = None,
location_constraint: Optional[str] = None,
) -> str:
if bucket is None:
bucket = str(uuid.uuid4())
params = {"Bucket": bucket}
if object_lock_enabled_for_bucket is not None:
params.update({"ObjectLockEnabledForBucket": object_lock_enabled_for_bucket})
if acl is not None:
params.update({"ACL": acl})
elif grant_write or grant_read or grant_full_control:
if grant_write:
params.update({"GrantWrite": grant_write})
if grant_read:
params.update({"GrantRead": grant_read})
if grant_full_control:
params.update({"GrantFullControl": grant_full_control})
if location_constraint:
params.update(
{"CreateBucketConfiguration": {"LocationConstraint": location_constraint}}
)
s3_bucket = self.boto3_client.create_bucket(**params)
log_command_execution(f"Created S3 bucket {bucket}", s3_bucket)
sleep(S3_SYNC_WAIT_TIME)
return bucket
@reporter.step_deco("List buckets S3")
@report_error
def list_buckets(self) -> list[str]:
found_buckets = []
response = self.boto3_client.list_buckets()
log_command_execution("S3 List buckets result", response)
for bucket in response["Buckets"]:
found_buckets.append(bucket["Name"])
return found_buckets
@reporter.step_deco("Delete bucket S3")
@report_error
def delete_bucket(self, bucket: str) -> None:
response = self.boto3_client.delete_bucket(Bucket=bucket)
log_command_execution("S3 Delete bucket result", response)
sleep(S3_SYNC_WAIT_TIME)
@reporter.step_deco("Head bucket S3")
@report_error
def head_bucket(self, bucket: str) -> None:
response = self.boto3_client.head_bucket(Bucket=bucket)
log_command_execution("S3 Head bucket result", response)
@reporter.step_deco("Put bucket versioning status")
@report_error
def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None:
response = self.boto3_client.put_bucket_versioning(
Bucket=bucket, VersioningConfiguration={"Status": status.value}
)
log_command_execution("S3 Set bucket versioning to", response)
@reporter.step_deco("Get bucket versioning status")
@report_error
def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]:
response = self.boto3_client.get_bucket_versioning(Bucket=bucket)
status = response.get("Status")
log_command_execution("S3 Got bucket versioning status", response)
return status
@reporter.step_deco("Put bucket tagging")
@report_error
def put_bucket_tagging(self, bucket: str, tags: list) -> None:
tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
tagging = {"TagSet": tags}
response = self.boto3_client.put_bucket_tagging(Bucket=bucket, Tagging=tagging)
log_command_execution("S3 Put bucket tagging", response)
@reporter.step_deco("Get bucket tagging")
@report_error
def get_bucket_tagging(self, bucket: str) -> list:
response = self.boto3_client.get_bucket_tagging(Bucket=bucket)
log_command_execution("S3 Get bucket tagging", response)
return response.get("TagSet")
@reporter.step_deco("Get bucket acl")
@report_error
def get_bucket_acl(self, bucket: str) -> list:
response = self.boto3_client.get_bucket_acl(Bucket=bucket)
log_command_execution("S3 Get bucket acl", response)
return response.get("Grants")
@reporter.step_deco("Delete bucket tagging")
@report_error
def delete_bucket_tagging(self, bucket: str) -> None:
response = self.boto3_client.delete_bucket_tagging(Bucket=bucket)
log_command_execution("S3 Delete bucket tagging", response)
@reporter.step_deco("Put bucket ACL")
@report_error
def put_bucket_acl(
self,
bucket: str,
acl: Optional[str] = None,
grant_write: Optional[str] = None,
grant_read: Optional[str] = None,
) -> None:
params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self"] and value is not None
}
response = self.boto3_client.put_bucket_acl(**params)
log_command_execution("S3 ACL bucket result", response)
@reporter.step_deco("Put object lock configuration")
@report_error
def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict:
response = self.boto3_client.put_object_lock_configuration(
Bucket=bucket, ObjectLockConfiguration=configuration
)
log_command_execution("S3 put_object_lock_configuration result", response)
return response
@reporter.step_deco("Get object lock configuration")
@report_error
def get_object_lock_configuration(self, bucket: str) -> dict:
response = self.boto3_client.get_object_lock_configuration(Bucket=bucket)
log_command_execution("S3 get_object_lock_configuration result", response)
return response.get("ObjectLockConfiguration")
@reporter.step_deco("Get bucket policy")
@report_error
def get_bucket_policy(self, bucket: str) -> str:
response = self.boto3_client.get_bucket_policy(Bucket=bucket)
log_command_execution("S3 get_bucket_policy result", response)
return response.get("Policy")
@reporter.step_deco("Put bucket policy")
@report_error
def put_bucket_policy(self, bucket: str, policy: dict) -> None:
response = self.boto3_client.put_bucket_policy(Bucket=bucket, Policy=json.dumps(policy))
log_command_execution("S3 put_bucket_policy result", response)
return response
@reporter.step_deco("Get bucket cors")
@report_error
def get_bucket_cors(self, bucket: str) -> dict:
response = self.boto3_client.get_bucket_cors(Bucket=bucket)
log_command_execution("S3 get_bucket_cors result", response)
return response.get("CORSRules")
@reporter.step_deco("Get bucket location")
@report_error
def get_bucket_location(self, bucket: str) -> str:
response = self.boto3_client.get_bucket_location(Bucket=bucket)
log_command_execution("S3 get_bucket_location result", response)
return response.get("LocationConstraint")
@reporter.step_deco("Put bucket cors")
@report_error
def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None:
response = self.boto3_client.put_bucket_cors(
Bucket=bucket, CORSConfiguration=cors_configuration
)
log_command_execution("S3 put_bucket_cors result", response)
return response
@reporter.step_deco("Delete bucket cors")
@report_error
def delete_bucket_cors(self, bucket: str) -> None:
response = self.boto3_client.delete_bucket_cors(Bucket=bucket)
log_command_execution("S3 delete_bucket_cors result", response)
# END OF BUCKET METHODS #
# OBJECT METHODS #
@reporter.step_deco("List objects S3 v2")
@report_error
def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
response = self.boto3_client.list_objects_v2(Bucket=bucket)
log_command_execution("S3 v2 List objects result", response)
obj_list = [obj["Key"] for obj in response.get("Contents", [])]
logger.info(f"Found s3 objects: {obj_list}")
return response if full_output else obj_list
@reporter.step_deco("List objects S3")
@report_error
def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
response = self.boto3_client.list_objects(Bucket=bucket)
log_command_execution("S3 List objects result", response)
obj_list = [obj["Key"] for obj in response.get("Contents", [])]
logger.info(f"Found s3 objects: {obj_list}")
return response if full_output else obj_list
@reporter.step_deco("List objects versions S3")
@report_error
def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict:
response = self.boto3_client.list_object_versions(Bucket=bucket)
log_command_execution("S3 List objects versions result", response)
return response if full_output else response.get("Versions", [])
@reporter.step_deco("List objects delete markers S3")
@report_error
def list_delete_markers(self, bucket: str, full_output: bool = False) -> list:
response = self.boto3_client.list_object_versions(Bucket=bucket)
log_command_execution("S3 List objects delete markers result", response)
return response if full_output else response.get("DeleteMarkers", [])
@reporter.step_deco("Put object S3")
@report_error
def put_object(
self,
bucket: str,
filepath: str,
key: Optional[str] = None,
metadata: Optional[dict] = None,
tagging: Optional[str] = None,
acl: Optional[str] = None,
object_lock_mode: Optional[str] = None,
object_lock_retain_until_date: Optional[datetime] = None,
object_lock_legal_hold_status: Optional[str] = None,
grant_full_control: Optional[str] = None,
grant_read: Optional[str] = None,
) -> str:
if key is None:
key = os.path.basename(filepath)
with open(filepath, "rb") as put_file:
body = put_file.read()
params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self", "filepath", "put_file"] and value is not None
}
response = self.boto3_client.put_object(**params)
log_command_execution("S3 Put object result", response)
return response.get("VersionId")
@reporter.step_deco("Head object S3")
@report_error
def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict:
params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self"] and value is not None
}
response = self.boto3_client.head_object(**params)
log_command_execution("S3 Head object result", response)
return response
@reporter.step_deco("Delete object S3")
@report_error
def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict:
params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self"] and value is not None
}
response = self.boto3_client.delete_object(**params)
log_command_execution("S3 Delete object result", response)
sleep(S3_SYNC_WAIT_TIME)
return response
@reporter.step_deco("Delete objects S3")
@report_error
def delete_objects(self, bucket: str, keys: list[str]) -> dict:
response = self.boto3_client.delete_objects(Bucket=bucket, Delete=_make_objs_dict(keys))
log_command_execution("S3 Delete objects result", response)
assert (
"Errors" not in response
), f'The following objects have not been deleted: {[err_info["Key"] for err_info in response["Errors"]]}.\nError Messages: {[err_info["Message"] for err_info in response["Errors"]]}'
sleep(S3_SYNC_WAIT_TIME)
return response
@reporter.step_deco("Delete object versions S3")
@report_error
def delete_object_versions(self, bucket: str, object_versions: list) -> dict:
# Build deletion list in S3 format
delete_list = {
"Objects": [
{
"Key": object_version["Key"],
"VersionId": object_version["VersionId"],
}
for object_version in object_versions
]
}
response = self.boto3_client.delete_objects(Bucket=bucket, Delete=delete_list)
log_command_execution("S3 Delete objects result", response)
return response
@reporter.step_deco("Delete object versions S3 without delete markers")
@report_error
def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None:
# Delete objects without creating delete markers
for object_version in object_versions:
response = self.boto3_client.delete_object(
Bucket=bucket, Key=object_version["Key"], VersionId=object_version["VersionId"]
)
log_command_execution("S3 Delete object result", response)
@reporter.step_deco("Put object ACL")
@report_error
def put_object_acl(
self,
bucket: str,
key: str,
acl: Optional[str] = None,
grant_write: Optional[str] = None,
grant_read: Optional[str] = None,
) -> list:
# pytest.skip("Method put_object_acl is not supported by boto3 client")
raise NotImplementedError("Unsupported for boto3 client")
@reporter.step_deco("Get object ACL")
@report_error
def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list:
params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self"] and value is not None
}
response = self.boto3_client.get_object_acl(**params)
log_command_execution("S3 ACL objects result", response)
return response.get("Grants")
@reporter.step_deco("Copy object S3")
@report_error
def copy_object(
self,
source_bucket: str,
source_key: str,
bucket: Optional[str] = None,
key: Optional[str] = None,
acl: Optional[str] = None,
metadata_directive: Optional[Literal["COPY", "REPLACE"]] = None,
metadata: Optional[dict] = None,
tagging_directive: Optional[Literal["COPY", "REPLACE"]] = None,
tagging: Optional[str] = None,
) -> str:
if bucket is None:
bucket = source_bucket
if key is None:
key = os.path.join(os.getcwd(), str(uuid.uuid4()))
copy_source = f"{source_bucket}/{source_key}"
params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self", "source_bucket", "source_key"] and value is not None
}
response = self.boto3_client.copy_object(**params)
log_command_execution("S3 Copy objects result", response)
return key
@reporter.step_deco("Get object S3")
@report_error
def get_object(
self,
bucket: str,
key: str,
version_id: Optional[str] = None,
object_range: Optional[tuple[int, int]] = None,
full_output: bool = False,
) -> Union[dict, str]:
filename = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
range_str = None
if object_range:
range_str = f"bytes={object_range[0]}-{object_range[1]}"
params = {
self._to_s3_param(param): value
for param, value in {**locals(), **{"Range": range_str}}.items()
if param not in ["self", "object_range", "full_output", "range_str", "filename"]
and value is not None
}
response = self.boto3_client.get_object(**params)
log_command_execution("S3 Get objects result", response)
with open(f"{filename}", "wb") as get_file:
chunk = response["Body"].read(1024)
while chunk:
get_file.write(chunk)
chunk = response["Body"].read(1024)
return response if full_output else filename
@reporter.step_deco("Create multipart upload S3")
@report_error
def create_multipart_upload(self, bucket: str, key: str) -> str:
response = self.boto3_client.create_multipart_upload(Bucket=bucket, Key=key)
log_command_execution("S3 Created multipart upload", response)
assert response.get("UploadId"), f"Expected UploadId in response:\n{response}"
return response["UploadId"]
@reporter.step_deco("List multipart uploads S3")
@report_error
def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]:
response = self.boto3_client.list_multipart_uploads(Bucket=bucket)
log_command_execution("S3 List multipart upload", response)
return response.get("Uploads")
@reporter.step_deco("Abort multipart upload S3")
@report_error
def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None:
response = self.boto3_client.abort_multipart_upload(
Bucket=bucket, Key=key, UploadId=upload_id
)
log_command_execution("S3 Abort multipart upload", response)
@reporter.step_deco("Upload part S3")
@report_error
def upload_part(
self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str
) -> str:
with open(filepath, "rb") as put_file:
body = put_file.read()
response = self.boto3_client.upload_part(
UploadId=upload_id,
Bucket=bucket,
Key=key,
PartNumber=part_num,
Body=body,
)
log_command_execution("S3 Upload part", response)
assert response.get("ETag"), f"Expected ETag in response:\n{response}"
return response["ETag"]
@reporter.step_deco("Upload copy part S3")
@report_error
def upload_part_copy(
self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str
) -> str:
response = self.boto3_client.upload_part_copy(
UploadId=upload_id,
Bucket=bucket,
Key=key,
PartNumber=part_num,
CopySource=copy_source,
)
log_command_execution("S3 Upload copy part", response)
assert response.get("CopyPartResult", []).get(
"ETag"
), f"Expected ETag in response:\n{response}"
return response["CopyPartResult"]["ETag"]
@reporter.step_deco("List parts S3")
@report_error
def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]:
response = self.boto3_client.list_parts(UploadId=upload_id, Bucket=bucket, Key=key)
log_command_execution("S3 List part", response)
assert response.get("Parts"), f"Expected Parts in response:\n{response}"
return response["Parts"]
@reporter.step_deco("Complete multipart upload S3")
@report_error
def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None:
parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts]
response = self.boto3_client.complete_multipart_upload(
Bucket=bucket, Key=key, UploadId=upload_id, MultipartUpload={"Parts": parts}
)
log_command_execution("S3 Complete multipart upload", response)
@reporter.step_deco("Put object retention")
@report_error
def put_object_retention(
self,
bucket: str,
key: str,
retention: dict,
version_id: Optional[str] = None,
bypass_governance_retention: Optional[bool] = None,
) -> None:
params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self"] and value is not None
}
response = self.boto3_client.put_object_retention(**params)
log_command_execution("S3 Put object retention ", response)
@reporter.step_deco("Put object legal hold")
@report_error
def put_object_legal_hold(
self,
bucket: str,
key: str,
legal_hold_status: Literal["ON", "OFF"],
version_id: Optional[str] = None,
) -> None:
legal_hold = {"Status": legal_hold_status}
params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self", "legal_hold_status"] and value is not None
}
response = self.boto3_client.put_object_legal_hold(**params)
log_command_execution("S3 Put object legal hold ", response)
@reporter.step_deco("Put object tagging")
@report_error
def put_object_tagging(self, bucket: str, key: str, tags: list) -> None:
tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
tagging = {"TagSet": tags}
response = self.boto3_client.put_object_tagging(Bucket=bucket, Key=key, Tagging=tagging)
log_command_execution("S3 Put object tagging", response)
@reporter.step_deco("Get object tagging")
@report_error
def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list:
params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self"] and value is not None
}
response = self.boto3_client.get_object_tagging(**params)
log_command_execution("S3 Get object tagging", response)
return response.get("TagSet")
@reporter.step_deco("Delete object tagging")
@report_error
def delete_object_tagging(self, bucket: str, key: str) -> None:
response = self.boto3_client.delete_object_tagging(Bucket=bucket, Key=key)
log_command_execution("S3 Delete object tagging", response)
@reporter.step_deco("Get object attributes")
@report_error
def get_object_attributes(
self,
bucket: str,
key: str,
attributes: list[str],
version_id: Optional[str] = None,
max_parts: Optional[int] = None,
part_number: Optional[int] = None,
full_output: bool = True,
) -> dict:
logger.warning("Method get_object_attributes is not supported by boto3 client")
return {}
@reporter.step_deco("Sync directory S3")
@report_error
def sync(
self,
bucket: str,
dir_path: str,
acl: Optional[str] = None,
metadata: Optional[dict] = None,
) -> dict:
raise NotImplementedError("Sync is not supported for boto3 client")
@reporter.step_deco("CP directory S3")
@report_error
def cp(
self,
bucket: str,
dir_path: str,
acl: Optional[str] = None,
metadata: Optional[dict] = None,
) -> dict:
raise NotImplementedError("Cp is not supported for boto3 client")
# END OBJECT METHODS #

View file

@ -0,0 +1,378 @@
from abc import ABC, abstractmethod
from datetime import datetime
from enum import Enum
from typing import Literal, Optional, Union
def _make_objs_dict(key_names):
return {"Objects": [{"Key": key} for key in key_names]}
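For reference, the structure produced matches the `Delete` payload of the S3 DeleteObjects API:
```
# _make_objs_dict(["a.txt", "b.txt"]) ->
# {"Objects": [{"Key": "a.txt"}, {"Key": "b.txt"}]}
```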
class VersioningStatus(Enum):
ENABLED = "Enabled"
SUSPENDED = "Suspended"
ACL_COPY = [
"private",
"public-read",
"public-read-write",
"authenticated-read",
"aws-exec-read",
"bucket-owner-read",
"bucket-owner-full-control",
]
class S3ClientWrapper(ABC):
@abstractmethod
def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None:
pass
@abstractmethod
def create_bucket(
self,
bucket: Optional[str] = None,
object_lock_enabled_for_bucket: Optional[bool] = None,
acl: Optional[str] = None,
grant_write: Optional[str] = None,
grant_read: Optional[str] = None,
grant_full_control: Optional[str] = None,
location_constraint: Optional[str] = None,
) -> str:
"""Create a bucket."""
# BUCKET METHODS #
@abstractmethod
def list_buckets(self) -> list[str]:
"""List buckets."""
@abstractmethod
def delete_bucket(self, bucket: str) -> None:
"""Delete bucket"""
@abstractmethod
def head_bucket(self, bucket: str) -> None:
"""This action is useful to determine if a bucket exists and you have permission to access it.
The action returns a 200 OK if the bucket exists and you have permission to access it.
If the bucket does not exist or you do not have permission to access it, the HEAD request
returns a generic 400 Bad Request, 403 Forbidden or 404 Not Found code.
A message body is not included, so you cannot determine the exception beyond these error codes.
"""
@abstractmethod
def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None:
"""Sets the versioning state of an existing bucket.
You can set the versioning state with one of the following values:
Enabled - Enables versioning for the objects in the bucket. All objects added to the bucket receive a unique version ID.
Suspended - Disables versioning for the objects in the bucket. All objects added to the bucket receive the version ID null.
If the versioning state has never been set on a bucket, it has no versioning state
"""
@abstractmethod
def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]:
"""Returns the versioning state of a bucket.
To retrieve the versioning state of a bucket, you must be the bucket owner.
"""
@abstractmethod
def put_bucket_tagging(self, bucket: str, tags: list) -> None:
"""Sets the tags for a bucket."""
@abstractmethod
def get_bucket_tagging(self, bucket: str) -> list:
"""Returns the tag set associated with the Outposts bucket."""
@abstractmethod
def delete_bucket_tagging(self, bucket: str) -> None:
"""Deletes the tags from the bucket."""
@abstractmethod
def get_bucket_acl(self, bucket: str) -> list:
"""This implementation of the GET action uses the acl subresource to return the access control list (ACL) of a bucket."""
@abstractmethod
def put_bucket_acl(
self,
bucket: str,
acl: Optional[str] = None,
grant_write: Optional[str] = None,
grant_read: Optional[str] = None,
) -> list:
"""Sets the permissions on an existing bucket using access control lists (ACL)."""
@abstractmethod
def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict:
"""Places an Object Lock configuration on the specified bucket.
The rule specified in the Object Lock configuration will be applied by
default to every new object placed in the specified bucket."""
@abstractmethod
def get_object_lock_configuration(self, bucket: str) -> dict:
"""Gets the Object Lock configuration for a bucket.
The rule specified in the Object Lock configuration will be applied by
default to every new object placed in the specified bucket."""
@abstractmethod
def get_bucket_policy(self, bucket: str) -> str:
"""Returns the policy of a specified bucket."""
@abstractmethod
def put_bucket_policy(self, bucket: str, policy: dict) -> None:
"""Applies S3 bucket policy to an S3 bucket."""
@abstractmethod
def get_bucket_cors(self, bucket: str) -> dict:
"""Returns the Cross-Origin Resource Sharing (CORS) configuration information set for the bucket."""
@abstractmethod
def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None:
"""Sets the cors configuration for your bucket. If the configuration exists, S3 replaces it."""
@abstractmethod
def delete_bucket_cors(self, bucket: str) -> None:
"""Deletes the cors configuration information set for the bucket."""
@abstractmethod
def get_bucket_location(self, bucket: str) -> str:
"""Returns the LocationConstraint the bucket resides in. You can set the it
using the LocationConstraint request parameter in a CreateBucket request."""
# END OF BUCKET METHODS #
# OBJECT METHODS #
@abstractmethod
def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
"""Returns some or all (up to 1,000) of the objects in a bucket with each request.
You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
A 200 OK response can contain valid or invalid XML. Make sure to design your application
to parse the contents of the response and handle it appropriately.
"""
@abstractmethod
def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
"""Returns some or all (up to 1,000) of the objects in a bucket with each request.
You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
A 200 OK response can contain valid or invalid XML. Make sure to design your application
to parse the contents of the response and handle it appropriately.
"""
@abstractmethod
def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict:
"""Returns metadata about all versions of the objects in a bucket."""
@abstractmethod
def list_delete_markers(self, bucket: str, full_output: bool = False) -> dict:
"""Returns metadata about all delete markers of the objects in a bucket."""
@abstractmethod
def put_object(
self,
bucket: str,
filepath: str,
key: Optional[str] = None,
metadata: Optional[dict] = None,
tagging: Optional[str] = None,
acl: Optional[str] = None,
object_lock_mode: Optional[str] = None,
object_lock_retain_until_date: Optional[datetime] = None,
object_lock_legal_hold_status: Optional[str] = None,
grant_full_control: Optional[str] = None,
grant_read: Optional[str] = None,
) -> str:
"""Adds an object to a bucket."""
@abstractmethod
def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict:
"""The HEAD action retrieves metadata from an object without returning the object itself.
This action is useful if you're only interested in an object's metadata."""
@abstractmethod
def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict:
"""Removes the null version (if there is one) of an object and inserts a delete marker,
which becomes the latest version of the object. If there isn't a null version,
S3 does not remove any objects but will still respond that the command was successful."""
@abstractmethod
def delete_objects(self, bucket: str, keys: list[str]) -> dict:
"""This action enables you to delete multiple objects from a bucket
using a single HTTP request. If you know the object keys that
you want to delete, then this action provides a suitable alternative
to sending individual delete requests, reducing per-request overhead.
The request contains a list of up to 1000 keys that you want to delete."""
@abstractmethod
def delete_object_versions(self, bucket: str, object_versions: list) -> dict:
"""Delete object versions"""
@abstractmethod
def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None:
"""Delete object versions without delete markers"""
@abstractmethod
def put_object_acl(
self,
bucket: str,
key: str,
acl: Optional[str] = None,
grant_write: Optional[str] = None,
grant_read: Optional[str] = None,
) -> list:
"""Uses the acl subresource to set the access control
list (ACL) permissions for a new or existing object in an S3 bucket."""
@abstractmethod
def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list:
"""Returns the access control list (ACL) of an object."""
@abstractmethod
def copy_object(
self,
source_bucket: str,
source_key: str,
bucket: Optional[str] = None,
key: Optional[str] = None,
acl: Optional[str] = None,
metadata_directive: Optional[Literal["COPY", "REPLACE"]] = None,
metadata: Optional[dict] = None,
tagging_directive: Optional[Literal["COPY", "REPLACE"]] = None,
tagging: Optional[str] = None,
) -> str:
"""Creates a copy of an object"""
@abstractmethod
def get_object(
self,
bucket: str,
key: str,
version_id: Optional[str] = None,
object_range: Optional[tuple[int, int]] = None,
full_output: bool = False,
) -> Union[dict, str]:
"""Retrieves objects from S3."""
@abstractmethod
def create_multipart_upload(self, bucket: str, key: str) -> str:
"""This action initiates a multipart upload and returns an upload ID.
This upload ID is used to associate all of the parts in the specific multipart upload.
You specify this upload ID in each of your subsequent upload part requests (see UploadPart).
You also include this upload ID in the final request to either complete or abort the multipart upload request."""
@abstractmethod
def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]:
"""This action lists in-progress multipart uploads.
An in-progress multipart upload is a multipart upload that has been initiated
using the Initiate Multipart Upload request, but has not yet been completed or aborted.
This action returns at most 1,000 multipart uploads in the response."""
@abstractmethod
def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None:
"""This action aborts a multipart upload. After a multipart upload is aborted,
no additional parts can be uploaded using that upload ID.
The storage consumed by any previously uploaded parts will be freed.
However, if any part uploads are currently in progress, those part
uploads might or might not succeed. As a result, it might be necessary to
abort a given multipart upload multiple times in order to completely free all storage consumed by all parts."""
@abstractmethod
def upload_part(
self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str
) -> str:
"""Uploads a part in a multipart upload."""
@abstractmethod
def upload_part_copy(
self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str
) -> str:
"""Uploads a part by copying data from an existing object as data source."""
@abstractmethod
def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]:
"""Lists the parts that have been uploaded for a specific multipart upload."""
@abstractmethod
def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None:
"""Completes a multipart upload by assembling previously uploaded parts."""
@abstractmethod
def put_object_retention(
self,
bucket: str,
key: str,
retention: dict,
version_id: Optional[str] = None,
bypass_governance_retention: Optional[bool] = None,
) -> None:
"""Places an Object Retention configuration on an object."""
@abstractmethod
def put_object_legal_hold(
self,
bucket: str,
key: str,
legal_hold_status: Literal["ON", "OFF"],
version_id: Optional[str] = None,
) -> None:
"""Applies a legal hold configuration to the specified object."""
@abstractmethod
def put_object_tagging(self, bucket: str, key: str, tags: list) -> None:
"""Sets the tag-set for an object."""
@abstractmethod
def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list:
"""Returns the tag-set of an object."""
@abstractmethod
def delete_object_tagging(self, bucket: str, key: str) -> None:
"""Removes the entire tag set from the specified object."""
@abstractmethod
def get_object_attributes(
self,
bucket: str,
key: str,
attributes: list[str],
version_id: Optional[str] = None,
max_parts: Optional[int] = None,
part_number: Optional[int] = None,
full_output: bool = True,
) -> dict:
"""Retrieves all the metadata from an object without returning the object itself."""
@abstractmethod
def sync(
self,
bucket: str,
dir_path: str,
acl: Optional[str] = None,
metadata: Optional[dict] = None,
) -> dict:
"""sync directory TODO: Add proper description"""
@abstractmethod
def cp(
self,
bucket: str,
dir_path: str,
acl: Optional[str] = None,
metadata: Optional[dict] = None,
) -> dict:
"""cp directory TODO: Add proper description"""
# END OF OBJECT METHODS #
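# Usage sketch (not part of the interface): tests can stay client-agnostic by
# coding against the wrapper, so either a boto3-based or an aws-cli-based
# client can be swapped in. Names below are illustrative only.
def _example_versioned_put(s3_client: S3ClientWrapper, filepath: str) -> str:
    # create a bucket with a generated name and enable versioning on it
    bucket = s3_client.create_bucket()
    s3_client.put_bucket_versioning(bucket, VersioningStatus.ENABLED)
    # upload the file; the client returns the stored object's id/version
    return s3_client.put_object(bucket, filepath)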

View file

@@ -60,6 +60,23 @@ class CommandOptions:
self.timeout = Options.get_default_shell_timeout()
@dataclass
class SshCredentials:
"""Represents ssh credentials.
Attributes:
ssh_login: ssh login.
ssh_password: ssh password as plain text (insecure, for local setups only).
ssh_key_path: path to an ssh key file.
ssh_key_passphrase: passphrase to ssh key file.
"""
ssh_login: str
ssh_password: Optional[str] = None
ssh_key_path: Optional[str] = None
ssh_key_passphrase: Optional[str] = None
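# Usage sketch: key-based auth is the expected path; the plain-text password
# field is meant for local setups only (all values below are placeholders).
#
# creds = SshCredentials(
#     ssh_login="service",
#     ssh_key_path="/home/service/.ssh/id_ed25519",
#     ssh_key_passphrase="passphrase",
# )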
@dataclass
class CommandResult:
"""Represents a result of a command executed via shell.

View file

@@ -29,7 +29,7 @@ reporter = get_reporter()
class HostIsNotAvailable(Exception):
"""Raised when host is not reachable via SSH connection."""
def __init__(self, host: str = None):
def __init__(self, host: Optional[str] = None):
msg = f"Host {host} is not available"
super().__init__(msg)

View file

View file

@@ -0,0 +1,191 @@
import base64
import json
import logging
import os
import uuid
from time import sleep
from typing import List, Optional, Union
import base58
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG
from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.dataclasses.acl import (
EACL_LIFETIME,
FROSTFS_CONTRACT_CACHE_TIMEOUT,
EACLPubKey,
EACLRole,
EACLRule,
)
from frostfs_testlib.utils import wallet_utils
reporter = get_reporter()
logger = logging.getLogger("NeoLogger")
@reporter.step_deco("Get extended ACL")
def get_eacl(wallet_path: str, cid: str, shell: Shell, endpoint: str) -> Optional[str]:
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
try:
result = cli.container.get_eacl(wallet=wallet_path, rpc_endpoint=endpoint, cid=cid)
except RuntimeError as exc:
logger.info("Extended ACL table is not set for this container")
logger.info(f"Got exception while getting eacl: {exc}")
return None
if "extended ACL table is not set for this container" in result.stdout:
return None
return result.stdout
@reporter.step_deco("Set extended ACL")
def set_eacl(
wallet_path: str,
cid: str,
eacl_table_path: str,
shell: Shell,
endpoint: str,
session_token: Optional[str] = None,
) -> None:
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
cli.container.set_eacl(
wallet=wallet_path,
rpc_endpoint=endpoint,
cid=cid,
table=eacl_table_path,
await_mode=True,
session=session_token,
)
def _encode_cid_for_eacl(cid: str) -> str:
cid_base58 = base58.b58decode(cid)
return base64.b64encode(cid_base58).decode("utf-8")
def create_eacl(cid: str, rules_list: List[EACLRule], shell: Shell) -> str:
table_file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"eacl_table_{str(uuid.uuid4())}.json")
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
cli.acl.extended_create(cid=cid, out=table_file_path, rule=rules_list)
with open(table_file_path, "r") as file:
table_data = file.read()
logger.info(f"Generated eACL:\n{table_data}")
return table_file_path
def form_bearertoken_file(
wif: str,
cid: str,
eacl_rule_list: List[Union[EACLRule, EACLPubKey]],
shell: Shell,
endpoint: str,
sign: Optional[bool] = True,
) -> str:
"""
This function fetches eACL for given <cid> on behalf of <wif>,
then extends it with filters taken from <eacl_rules>, signs
with bearer token and writes to file
"""
enc_cid = _encode_cid_for_eacl(cid) if cid else None
file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
eacl = get_eacl(wif, cid, shell, endpoint)
json_eacl = dict()
if eacl:
eacl = eacl.replace("eACL: ", "").split("Signature")[0]
json_eacl = json.loads(eacl)
logger.info(json_eacl)
eacl_result = {
"body": {
"eaclTable": {"containerID": {"value": enc_cid} if cid else enc_cid, "records": []},
"lifetime": {"exp": EACL_LIFETIME, "nbf": "1", "iat": "0"},
}
}
assert eacl_rule_list, "Got empty eacl_rule_list"
for rule in eacl_rule_list:
op_data = {
"operation": rule.operation.value.upper(),
"action": rule.access.value.upper(),
"filters": rule.filters or [],
"targets": [],
}
if isinstance(rule.role, EACLRole):
op_data["targets"] = [{"role": rule.role.value.upper()}]
elif isinstance(rule.role, EACLPubKey):
op_data["targets"] = [{"keys": rule.role.keys}]
eacl_result["body"]["eaclTable"]["records"].append(op_data)
# Add records from current eACL
if "records" in json_eacl.keys():
for record in json_eacl["records"]:
eacl_result["body"]["eaclTable"]["records"].append(record)
with open(file_path, "w", encoding="utf-8") as eacl_file:
json.dump(eacl_result, eacl_file, ensure_ascii=False, indent=4)
logger.info(f"Got these extended ACL records: {eacl_result}")
if sign:
sign_bearer(
shell=shell,
wallet_path=wif,
eacl_rules_file_from=file_path,
eacl_rules_file_to=file_path,
json=True,
)
return file_path
def eacl_rules(access: str, verbs: list, user: str) -> list[str]:
"""
This function creates a list of eACL rules.
Args:
access (str): identifies whether the listed operations
are allowed or denied
verbs (list): a list of operations to set rules for
user (str): a group of users (user/others) or a wallet of
a certain user for whom rules are set
Returns:
(list): a list of eACL rules
"""
if user not in ("others", "user"):
pubkey = wallet_utils.get_wallet_public_key(user, wallet_password="")
user = f"pubkey:{pubkey}"
rules = []
for verb in verbs:
rule = f"{access} {verb} {user}"
rules.append(rule)
return rules
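# For example, eacl_rules("deny", ["get", "put"], "others") returns
# ["deny get others", "deny put others"]; passing a wallet path instead of a
# group name resolves it to a "pubkey:<hex key>" target first.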
def sign_bearer(
shell: Shell, wallet_path: str, eacl_rules_file_from: str, eacl_rules_file_to: str, json: bool
) -> None:
frostfscli = FrostfsCli(
shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG
)
frostfscli.util.sign_bearer_token(
wallet=wallet_path, from_file=eacl_rules_file_from, to_file=eacl_rules_file_to, json=json
)
@reporter.step_deco("Wait for eACL cache expired")
def wait_for_cache_expired():
    sleep(FROSTFS_CONTRACT_CACHE_TIMEOUT)
@reporter.step_deco("Return bearer token in base64 to caller")
def bearer_token_base64_from_file(
bearer_path: str,
) -> str:
with open(bearer_path, "rb") as file:
signed = file.read()
return base64.b64encode(signed).decode("utf-8")
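# End-to-end usage sketch (assumptions: `wallet`, `cid`, `shell` and `endpoint`
# come from test fixtures; EACLRule/EACLAccess/EACLOperation field and member
# names follow frostfs_testlib.storage.dataclasses.acl):
#
# rules = [EACLRule(operation=op, access=EACLAccess.DENY, role=EACLRole.OTHERS)
#          for op in (EACLOperation.GET, EACLOperation.PUT)]
# token_path = form_bearertoken_file(wallet, cid, rules, shell, endpoint)
# token_b64 = bearer_token_base64_from_file(token_path)  # e.g. for HTTP/S3 gates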

View file

@@ -0,0 +1,359 @@
import json
import logging
from dataclasses import dataclass
from time import sleep
from typing import Optional, Union
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.object import put_object, put_object_to_random_node
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.utils import json_utils
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
reporter = get_reporter()
logger = logging.getLogger("NeoLogger")
@dataclass
class StorageContainerInfo:
id: str
wallet_file: WalletInfo
class StorageContainer:
def __init__(
self,
storage_container_info: StorageContainerInfo,
shell: Shell,
cluster: Cluster,
) -> None:
self.shell = shell
self.storage_container_info = storage_container_info
self.cluster = cluster
def get_id(self) -> str:
return self.storage_container_info.id
def get_wallet_path(self) -> str:
return self.storage_container_info.wallet_file.path
def get_wallet_config_path(self) -> str:
return self.storage_container_info.wallet_file.config_path
@reporter.step_deco("Generate new object and put in container")
def generate_object(
self,
size: int,
expire_at: Optional[int] = None,
bearer_token: Optional[str] = None,
endpoint: Optional[str] = None,
) -> StorageObjectInfo:
with reporter.step(f"Generate object with size {size}"):
file_path = generate_file(size)
file_hash = get_file_hash(file_path)
container_id = self.get_id()
wallet_path = self.get_wallet_path()
wallet_config = self.get_wallet_config_path()
with reporter.step(f"Put object with size {size} to container {container_id}"):
if endpoint:
object_id = put_object(
wallet=wallet_path,
path=file_path,
cid=container_id,
expire_at=expire_at,
shell=self.shell,
endpoint=endpoint,
bearer=bearer_token,
wallet_config=wallet_config,
)
else:
object_id = put_object_to_random_node(
wallet=wallet_path,
path=file_path,
cid=container_id,
expire_at=expire_at,
shell=self.shell,
cluster=self.cluster,
bearer=bearer_token,
wallet_config=wallet_config,
)
storage_object = StorageObjectInfo(
container_id,
object_id,
size=size,
wallet_file_path=wallet_path,
file_path=file_path,
file_hash=file_hash,
)
return storage_object
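# Usage sketch (assumptions: `wallet_info` is a WalletInfo for an existing
# container `cid`; `shell` and `cluster` come from test fixtures):
#
# container = StorageContainer(StorageContainerInfo(cid, wallet_info), shell, cluster)
# storage_object = container.generate_object(size=1024)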
DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X"
REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X"
@reporter.step_deco("Create Container")
def create_container(
wallet: str,
shell: Shell,
endpoint: str,
rule: str = DEFAULT_PLACEMENT_RULE,
basic_acl: str = "",
attributes: Optional[dict] = None,
session_token: str = "",
session_wallet: str = "",
name: Optional[str] = None,
options: Optional[dict] = None,
await_mode: bool = True,
wait_for_creation: bool = True,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> str:
"""
A wrapper for `frostfs-cli container create` call.
Args:
wallet (str): a wallet on whose behalf a container is created
rule (optional, str): placement rule for container
basic_acl (optional, str): an ACL for container, will be
appended to `--basic-acl` key
attributes (optional, dict): container attributes, will be
appended to `--attributes` key
session_token (optional, str): a path to session token file
session_wallet(optional, str): a path to the wallet which signed
the session token; this parameter makes sense
when paired with `session_token`
shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
options (optional, dict): any other options to pass to the call
name (optional, str): container name attribute
await_mode (bool): block execution until container is persisted
wait_for_creation (bool): wait until the container appears in the container list
timeout: Timeout for the operation.
Returns:
(str): CID of the created container
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
result = cli.container.create(
rpc_endpoint=endpoint,
wallet=session_wallet if session_wallet else wallet,
policy=rule,
basic_acl=basic_acl,
attributes=attributes,
name=name,
session=session_token,
await_mode=await_mode,
timeout=timeout,
**options or {},
)
cid = _parse_cid(result.stdout)
logger.info("Container created; waiting until it is persisted in the sidechain")
if wait_for_creation:
wait_for_container_creation(wallet, cid, shell, endpoint)
return cid
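# Usage sketch (assumptions: `default_wallet`, `shell` and `endpoint` come from
# test fixtures):
#
# cid = create_container(
#     default_wallet,
#     shell=shell,
#     endpoint=endpoint,
#     rule=SINGLE_PLACEMENT_RULE,
#     basic_acl="public-read-write",
#     name="my-container",
# )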
def wait_for_container_creation(
wallet: str, cid: str, shell: Shell, endpoint: str, attempts: int = 15, sleep_interval: int = 1
):
for _ in range(attempts):
containers = list_containers(wallet, shell, endpoint)
if cid in containers:
return
logger.info(f"There is no {cid} in {containers} yet; sleep {sleep_interval} and continue")
sleep(sleep_interval)
raise RuntimeError(
f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting"
)
def wait_for_container_deletion(
wallet: str, cid: str, shell: Shell, endpoint: str, attempts: int = 30, sleep_interval: int = 1
):
for _ in range(attempts):
try:
get_container(wallet, cid, shell=shell, endpoint=endpoint)
sleep(sleep_interval)
continue
except Exception as err:
if "container not found" not in str(err):
raise AssertionError(f'Expected "container not found" in error, got\n{err}')
return
raise AssertionError(f"Expected container deleted during {attempts * sleep_interval} sec.")
@reporter.step_deco("List Containers")
def list_containers(
wallet: str, shell: Shell, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT
) -> list[str]:
"""
A wrapper for `frostfs-cli container list` call. It returns all the
available containers for the given wallet.
Args:
wallet (str): a wallet on whose behalf we list the containers
shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
timeout: Timeout for the operation.
Returns:
(list): list of containers
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
result = cli.container.list(rpc_endpoint=endpoint, wallet=wallet, timeout=timeout)
logger.info(f"Containers: \n{result}")
return result.stdout.split()
@reporter.step_deco("List Objects in container")
def list_objects(
wallet: str,
shell: Shell,
container_id: str,
endpoint: str,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> list[str]:
"""
A wrapper for `frostfs-cli container list-objects` call. It returns all the
available objects in container.
Args:
wallet (str): a wallet on whose behalf we list the containers objects
shell: executor for cli command
container_id: cid of container
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
timeout: Timeout for the operation.
Returns:
(list): list of object IDs in the container
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
result = cli.container.list_objects(
rpc_endpoint=endpoint, wallet=wallet, cid=container_id, timeout=timeout
)
logger.info(f"Container objects: \n{result}")
return result.stdout.split()
@reporter.step_deco("Get Container")
def get_container(
wallet: str,
cid: str,
shell: Shell,
endpoint: str,
json_mode: bool = True,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> Union[dict, str]:
"""
A wrapper for `frostfs-cli container get` call. It extracts container's
attributes and rearranges them into a more compact view.
Args:
wallet (str): path to a wallet on whose behalf we get the container
cid (str): ID of the container to get
shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
json_mode (bool): return container in JSON format
timeout: Timeout for the operation.
Returns:
(dict, str): dict of container attributes, or raw output if json_mode is False
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
result = cli.container.get(
rpc_endpoint=endpoint, wallet=wallet, cid=cid, json_mode=json_mode, timeout=timeout
)
if not json_mode:
return result.stdout
container_info = json.loads(result.stdout)
attributes = dict()
for attr in container_info["attributes"]:
attributes[attr["key"]] = attr["value"]
container_info["attributes"] = attributes
container_info["ownerID"] = json_utils.json_reencode(container_info["ownerID"]["value"])
return container_info
@reporter.step_deco("Delete Container")
# TODO: make the error message about a non-found container more user-friendly
def delete_container(
wallet: str,
cid: str,
shell: Shell,
endpoint: str,
force: bool = False,
session_token: Optional[str] = None,
await_mode: bool = False,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> None:
"""
A wrapper for `frostfs-cli container delete` call.
Args:
wallet (str): path to a wallet on whose behalf we delete the container
cid (str): ID of the container to delete
shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
force (bool): do not check whether container contains locks and remove immediately
session_token: a path to session token file
timeout: Timeout for the operation.
This function doesn't return anything.
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
cli.container.delete(
wallet=wallet,
cid=cid,
rpc_endpoint=endpoint,
force=force,
session=session_token,
await_mode=await_mode,
timeout=timeout,
)
def _parse_cid(output: str) -> str:
"""
Parses container ID from a given CLI output. The input string we expect:
container ID: 2tz86kVTDpJxWHrhw3h6PbKMwkLtBEwoqhHQCKTre1FN
awaiting...
container has been persisted on sidechain
We want to take 'container ID' value from the string.
Args:
output (str): CLI output to parse
Returns:
(str): extracted CID
"""
try:
# taking first line from command's output
first_line = output.split("\n")[0]
except Exception:
first_line = ""
logger.error(f"Got empty output: {output}")
splitted = first_line.split(": ")
if len(splitted) != 2:
raise ValueError(f"no CID was parsed from command output: \t{first_line}")
return splitted[1]
@reporter.step_deco("Search container by name")
def search_container_by_name(wallet: str, name: str, shell: Shell, endpoint: str):
list_cids = list_containers(wallet, shell, endpoint)
for cid in list_cids:
cont_info = get_container(wallet, cid, shell, endpoint, True)
if cont_info.get("attributes", {}).get("Name", None) == name:
return cid
return None

View file

@@ -0,0 +1,727 @@
import json
import logging
import os
import re
import uuid
from typing import Any, Optional
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.cli.neogo import NeoGo
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE
from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG
from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.utils import json_utils
logger = logging.getLogger("NeoLogger")
reporter = get_reporter()
@reporter.step_deco("Get object from random node")
def get_object_from_random_node(
wallet: str,
cid: str,
oid: str,
shell: Shell,
cluster: Cluster,
bearer: Optional[str] = None,
write_object: Optional[str] = None,
xhdr: Optional[dict] = None,
wallet_config: Optional[str] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> str:
"""
GET from FrostFS random storage node
Args:
wallet: wallet on whose behalf GET is done
cid: ID of Container where we get the Object from
oid: Object ID
shell: executor for cli command
cluster: cluster object
bearer (optional, str): path to Bearer Token file, appends to `--bearer` key
write_object (optional, str): path to downloaded file, appends to `--file` key
wallet_config(optional, str): path to the wallet config
no_progress(optional, bool): do not show progress bar
xhdr (optional, dict): Request X-Headers in form of Key=Value
session (optional, dict): path to a JSON-encoded container session token
timeout: Timeout for the operation.
Returns:
(str): path to downloaded file
"""
endpoint = cluster.get_random_storage_rpc_endpoint()
return get_object(
wallet,
cid,
oid,
shell,
endpoint,
bearer,
write_object,
xhdr,
wallet_config,
no_progress,
session,
timeout,
)
@reporter.step_deco("Get object from {endpoint}")
def get_object(
wallet: str,
cid: str,
oid: str,
shell: Shell,
endpoint: str,
bearer: Optional[str] = None,
write_object: Optional[str] = None,
xhdr: Optional[dict] = None,
wallet_config: Optional[str] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> str:
"""
GET from FrostFS.
Args:
wallet (str): wallet on whose behalf GET is done
cid (str): ID of Container where we get the Object from
oid (str): Object ID
shell: executor for cli command
bearer: path to Bearer Token file, appends to `--bearer` key
write_object: path to downloaded file, appends to `--file` key
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
wallet_config(optional, str): path to the wallet config
no_progress(optional, bool): do not show progress bar
xhdr (optional, dict): Request X-Headers in form of Key=Value
session (optional, dict): path to a JSON-encoded container session token
timeout: Timeout for the operation.
Returns:
(str): path to downloaded file
"""
if not write_object:
write_object = str(uuid.uuid4())
file_path = os.path.join(ASSETS_DIR, write_object)
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
cli.object.get(
rpc_endpoint=endpoint,
wallet=wallet,
cid=cid,
oid=oid,
file=file_path,
bearer=bearer,
no_progress=no_progress,
xhdr=xhdr,
session=session,
timeout=timeout,
)
return file_path
@reporter.step_deco("Get Range Hash from {endpoint}")
def get_range_hash(
wallet: str,
cid: str,
oid: str,
range_cut: str,
shell: Shell,
endpoint: str,
bearer: Optional[str] = None,
wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
):
"""
GETRANGEHASH of given Object.
Args:
wallet: wallet on whose behalf GETRANGEHASH is done
cid: ID of Container where we get the Object from
oid: Object ID
shell: executor for cli command
bearer: path to Bearer Token file, appends to `--bearer` key
range_cut: Range to take hash from in the form offset1:length1,...,
value to pass to the `--range` parameter
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
wallet_config: path to the wallet config
xhdr: Request X-Headers in form of Key=Values
session: Filepath to a JSON- or binary-encoded token of the object RANGEHASH session.
timeout: Timeout for the operation.
Returns:
(str): the hash of the requested range
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
result = cli.object.hash(
rpc_endpoint=endpoint,
wallet=wallet,
cid=cid,
oid=oid,
range=range_cut,
bearer=bearer,
xhdr=xhdr,
session=session,
timeout=timeout,
)
# cutting off output about range offset and length
return result.stdout.split(":")[1].strip()
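# Usage note: range_cut uses the offset:length form, e.g. "0:64" hashes the
# first 64 bytes; the CLI prefixes the hash with range information, which the
# split(":")[1] above strips, leaving only the hash string.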
@reporter.step_deco("Put object to random node")
def put_object_to_random_node(
wallet: str,
path: str,
cid: str,
shell: Shell,
cluster: Cluster,
bearer: Optional[str] = None,
attributes: Optional[dict] = None,
xhdr: Optional[dict] = None,
wallet_config: Optional[str] = None,
expire_at: Optional[int] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
):
"""
PUT of given file to a random storage node.
Args:
wallet: wallet on whose behalf PUT is done
path: path to file to be PUT
cid: ID of Container where we get the Object from
shell: executor for cli command
cluster: cluster under test
bearer: path to Bearer Token file, appends to `--bearer` key
attributes: User attributes in form of Key1=Value1,Key2=Value2
wallet_config: path to the wallet config
no_progress: do not show progress bar
expire_at: Last epoch in the life of the object
xhdr: Request X-Headers in form of Key=Value
session: path to a JSON-encoded container session token
timeout: Timeout for the operation.
Returns:
ID of uploaded Object
"""
endpoint = cluster.get_random_storage_rpc_endpoint()
return put_object(
wallet,
path,
cid,
shell,
endpoint,
bearer,
attributes,
xhdr,
wallet_config,
expire_at,
no_progress,
session,
timeout=timeout,
)
@reporter.step_deco("Put object at {endpoint} in container {cid}")
def put_object(
wallet: str,
path: str,
cid: str,
shell: Shell,
endpoint: str,
bearer: Optional[str] = None,
attributes: Optional[dict] = None,
xhdr: Optional[dict] = None,
wallet_config: Optional[str] = None,
expire_at: Optional[int] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
):
"""
PUT of given file.
Args:
wallet: wallet on whose behalf PUT is done
path: path to file to be PUT
cid: ID of Container where we get the Object from
shell: executor for cli command
bearer: path to Bearer Token file, appends to `--bearer` key
attributes: User attributes in form of Key1=Value1,Key2=Value2
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
wallet_config: path to the wallet config
no_progress: do not show progress bar
expire_at: Last epoch in the life of the object
xhdr: Request X-Headers in form of Key=Value
session: path to a JSON-encoded container session token
timeout: Timeout for the operation.
Returns:
(str): ID of uploaded Object
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
result = cli.object.put(
rpc_endpoint=endpoint,
wallet=wallet,
file=path,
cid=cid,
attributes=attributes,
bearer=bearer,
expire_at=expire_at,
no_progress=no_progress,
xhdr=xhdr,
session=session,
timeout=timeout,
)
# Splitting CLI output to separate lines and taking the penultimate line
id_str = result.stdout.strip().split("\n")[-2]
oid = id_str.split(":")[1]
return oid.strip()
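# Parsing note: the CLI is expected to end its output with the object and
# container ids, roughly like this (illustrative, exact wording may differ):
#   [<path>] Object successfully stored
#     ID: <object id>
#     CID: <container id>
# so the penultimate line carries the object id, and the text after ":" is it.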
@reporter.step_deco("Delete object {cid}/{oid} from {endpoint}")
def delete_object(
wallet: str,
cid: str,
oid: str,
shell: Shell,
endpoint: str,
bearer: str = "",
wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
):
"""
DELETE an Object.
Args:
wallet: wallet on whose behalf DELETE is done
cid: ID of Container where we get the Object from
oid: ID of Object we are going to delete
shell: executor for cli command
bearer: path to Bearer Token file, appends to `--bearer` key
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
wallet_config: path to the wallet config
xhdr: Request X-Headers in form of Key=Value
session: path to a JSON-encoded container session token
timeout: Timeout for the operation.
Returns:
(str): Tombstone ID
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
result = cli.object.delete(
rpc_endpoint=endpoint,
wallet=wallet,
cid=cid,
oid=oid,
bearer=bearer,
xhdr=xhdr,
session=session,
timeout=timeout,
)
id_str = result.stdout.split("\n")[1]
tombstone = id_str.split(":")[1]
return tombstone.strip()
@reporter.step_deco("Get Range")
def get_range(
wallet: str,
cid: str,
oid: str,
range_cut: str,
shell: Shell,
endpoint: str,
wallet_config: Optional[str] = None,
bearer: str = "",
xhdr: Optional[dict] = None,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
):
"""
GETRANGE an Object.
Args:
wallet: wallet on whose behalf GETRANGE is done
cid: ID of Container where we get the Object from
oid: ID of Object we are going to request
range_cut: range to take data from in the form offset:length
shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
bearer: path to Bearer Token file, appends to `--bearer` key
wallet_config: path to the wallet config
xhdr: Request X-Headers in form of Key=Value
session: path to a JSON-encoded container session token
timeout: Timeout for the operation.
Returns:
(str, bytes) - path to the file with range content and content of this file as bytes
"""
range_file_path = os.path.join(ASSETS_DIR, str(uuid.uuid4()))
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
cli.object.range(
rpc_endpoint=endpoint,
wallet=wallet,
cid=cid,
oid=oid,
range=range_cut,
file=range_file_path,
bearer=bearer,
xhdr=xhdr,
session=session,
timeout=timeout,
)
with open(range_file_path, "rb") as file:
content = file.read()
return range_file_path, content
@reporter.step_deco("Lock Object")
def lock_object(
wallet: str,
cid: str,
oid: str,
shell: Shell,
endpoint: str,
lifetime: Optional[int] = None,
expire_at: Optional[int] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
session: Optional[str] = None,
wallet_config: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> str:
"""
Locks object in container.
Args:
address: Address of wallet account.
bearer: File with signed JSON or binary encoded bearer token.
cid: Container ID.
oid: Object ID.
lifetime: Lock lifetime.
expire_at: Lock expiration epoch.
shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
session: Path to a JSON-encoded container session token.
ttl: TTL value in request meta header (default 2).
wallet: WIF (NEP-2) string or path to the wallet or binary key.
xhdr: Dict with request X-Headers.
timeout: Timeout for the operation.
Returns:
Lock object ID
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
result = cli.object.lock(
rpc_endpoint=endpoint,
lifetime=lifetime,
expire_at=expire_at,
address=address,
wallet=wallet,
cid=cid,
oid=oid,
bearer=bearer,
xhdr=xhdr,
session=session,
ttl=ttl,
timeout=timeout,
)
# Splitting CLI output to separate lines and taking the first line
id_str = result.stdout.strip().split("\n")[0]
oid = id_str.split(":")[1]
return oid.strip()
@reporter.step_deco("Search object")
def search_object(
wallet: str,
cid: str,
shell: Shell,
endpoint: str,
bearer: str = "",
filters: Optional[dict] = None,
expected_objects_list: Optional[list] = None,
wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None,
session: Optional[str] = None,
phy: bool = False,
root: bool = False,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> list:
"""
SEARCH an Object.
Args:
wallet: wallet on whose behalf SEARCH is done
cid: ID of Container where we get the Object from
shell: executor for cli command
bearer: path to Bearer Token file, appends to `--bearer` key
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
filters: key=value pairs to filter Objects
expected_objects_list: a list of ObjectIDs to compare found Objects with
wallet_config: path to the wallet config
xhdr: Request X-Headers in form of Key=Value
session: path to a JSON-encoded container session token
phy: Search physically stored objects.
root: Search for user objects.
timeout: Timeout for the operation.
Returns:
list of found ObjectIDs
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
result = cli.object.search(
rpc_endpoint=endpoint,
wallet=wallet,
cid=cid,
bearer=bearer,
xhdr=xhdr,
filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()]
if filters
else None,
session=session,
phy=phy,
root=root,
timeout=timeout,
)
found_objects = re.findall(r"(\w{43,44})", result.stdout)
if expected_objects_list:
if sorted(found_objects) == sorted(expected_objects_list):
logger.info(
f"Found objects list '{found_objects}' "
f"is equal for expected list '{expected_objects_list}'"
)
else:
logger.warning(
f"Found object list {found_objects} "
f"is not equal to expected list '{expected_objects_list}'"
)
return found_objects
@reporter.step_deco("Get netmap netinfo")
def get_netmap_netinfo(
wallet: str,
shell: Shell,
endpoint: str,
wallet_config: Optional[str] = None,
address: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> dict[str, Any]:
"""
Get netmap netinfo output from node
Args:
wallet (str): wallet on whose behalf request is done
shell: executor for cli command
endpoint (str): FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
address: Address of wallet account
ttl: TTL value in request meta header (default 2)
wallet_config: path to the wallet config
xhdr: Request X-Headers in form of Key=Value
timeout: Timeout for the operation.
Returns:
(dict): dict of parsed command output
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
output = cli.netmap.netinfo(
wallet=wallet,
rpc_endpoint=endpoint,
address=address,
ttl=ttl,
xhdr=xhdr,
timeout=timeout,
)
settings = dict()
patterns = [
    (re.compile(r"(.*): (\d+)"), int),
    (re.compile(r"(.*): (false|true)"), lambda value: value == "true"),
    (re.compile(r"(.*): (\d+\.\d+)"), float),
]
for pattern, func in patterns:
for setting, value in re.findall(pattern, output.stdout):
settings[setting.lower().strip().replace(" ", "_")] = func(value)
return settings
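# For example, output such as (illustrative)
#   Epoch: 42
#   Maximum object size: 67108864
#   Homomorphic hashing disabled: true
# is parsed into {"epoch": 42, "maximum_object_size": 67108864,
# "homomorphic_hashing_disabled": True} (keys lower-cased, spaces -> underscores).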
@reporter.step_deco("Head object")
def head_object(
wallet: str,
cid: str,
oid: str,
shell: Shell,
endpoint: str,
bearer: str = "",
xhdr: Optional[dict] = None,
json_output: bool = True,
is_raw: bool = False,
is_direct: bool = False,
wallet_config: Optional[str] = None,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
):
"""
HEAD an Object.
Args:
wallet (str): wallet on whose behalf HEAD is done
cid (str): ID of Container where we get the Object from
oid (str): ObjectID to HEAD
shell: executor for cli command
bearer (optional, str): path to Bearer Token file, appends to `--bearer` key
endpoint(optional, str): FrostFS endpoint to send request to
json_output(optional, bool): return response in JSON format or not; this flag
turns into `--json` key
is_raw(optional, bool): send "raw" request or not; this flag
turns into `--raw` key
is_direct(optional, bool): send request directly to the node or not; this flag
turns into `--ttl 1` key
wallet_config(optional, str): path to the wallet config
xhdr (optional, dict): Request X-Headers in form of Key=Value
session (optional, dict): path to a JSON-encoded container session token
timeout: Timeout for the operation.
Returns:
depending on the `json_output` parameter value, the function returns
(dict): HEAD response in JSON format
or
(str): HEAD response as a plain text
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
result = cli.object.head(
rpc_endpoint=endpoint,
wallet=wallet,
cid=cid,
oid=oid,
bearer=bearer,
json_mode=json_output,
raw=is_raw,
ttl=1 if is_direct else None,
xhdr=xhdr,
session=session,
timeout=timeout,
)
if not json_output:
return result
try:
decoded = json.loads(result.stdout)
except Exception as exc:
# If we failed to parse output as JSON, the cause might be
# a plain-text line at the beginning of the output.
# Here we cut off the first line and try to parse again.
logger.info(f"failed to parse output: {exc}")
logger.info("parsing output in another way")
fst_line_idx = result.stdout.find("\n")
decoded = json.loads(result.stdout[fst_line_idx:])
# If response is Complex Object header, it has `splitId` key
if "splitId" in decoded.keys():
logger.info("decoding split header")
return json_utils.decode_split_header(decoded)
# If response is Last or Linking Object header,
# it has `header` dictionary and non-null `split` dictionary
if "split" in decoded["header"].keys():
if decoded["header"]["split"]:
logger.info("decoding linking object")
return json_utils.decode_linking_object(decoded)
if decoded["header"]["objectType"] == "STORAGE_GROUP":
logger.info("decoding storage group")
return json_utils.decode_storage_group(decoded)
if decoded["header"]["objectType"] == "TOMBSTONE":
logger.info("decoding tombstone")
return json_utils.decode_tombstone(decoded)
logger.info("decoding simple header")
return json_utils.decode_simple_header(decoded)
@reporter.step_deco("Run neo-go dump-keys")
def neo_go_dump_keys(shell: Shell, wallet: str) -> dict:
"""
Run neo-go dump keys command
Args:
shell: executor for cli command
wallet: wallet path to dump from
Returns:
dict Address:Wallet Key
"""
neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE)
output = neogo.wallet.dump_keys(wallet=wallet).stdout
first_line = ""
try:
# the first line of the command's output contains the wallet address
first_line = output.split("\n")[0]
except Exception:
logger.error(f"Got empty output (neo-go dump keys): {output}")
address_id = first_line.split()[0]
# the second line of the command's output contains the wallet key
wallet_key = output.split("\n")[1]
return {address_id: wallet_key}
@reporter.step_deco("Run neo-go query height")
def neo_go_query_height(shell: Shell, endpoint: str) -> dict:
"""
Run neo-go query height command
Args:
shell: executor for cli command
endpoint: endpoint to execute
Returns:
dict->
Latest block: {value}
Validated state: {value}
"""
neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE)
output = neogo.query.height(rpc_endpoint=endpoint).stdout
first_line = ""
try:
# the first line of the command's output contains the latest block in the blockchain
first_line = output.split("\n")[0]
except Exception:
logger.error(f"Got empty output (neo-go query height): {output}")
latest_block = first_line.split(":")
# the second line of the command's output contains the validated state height
second_line = output.split("\n")[1]
validated_state = second_line.split(":")
return {
latest_block[0].replace(":", ""): int(latest_block[1]),
validated_state[0].replace(":", ""): int(validated_state[1]),
}
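# For example, output of the form
#   Latest block: 123456
#   Validated state: 123455
# yields {"Latest block": 123456, "Validated state": 123455}.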

View file

@@ -0,0 +1,210 @@
#!/usr/bin/python3
"""
This module contains functions which are used for Large Object assembling:
getting Last Object and split and getting Link Object. It is not enough to
simply perform a "raw" HEAD request.
Therefore, the reliable retrieval of the aforementioned objects must be done
this way: send direct "raw" HEAD request to the every Storage Node and return
the desired OID on first non-null response.
"""
import logging
from typing import Optional, Tuple
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.object import head_object
from frostfs_testlib.storage.cluster import Cluster, StorageNode
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
reporter = get_reporter()
logger = logging.getLogger("NeoLogger")
def get_storage_object_chunks(
storage_object: StorageObjectInfo,
shell: Shell,
cluster: Cluster,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> list[str]:
"""
Get the object IDs of a complex object's chunks (excluding the link object)
Args:
storage_object: storage object to get chunks of
shell: client shell to do cmd requests
cluster: cluster object under test
timeout: Timeout for an operation.
Returns:
list of object ids of complex object chunks
"""
with reporter.step(f"Get complex object chunks (f{storage_object.oid})"):
split_object_id = get_link_object(
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
shell,
cluster.services(StorageNode),
is_direct=False,
timeout=timeout,
)
head = head_object(
storage_object.wallet_file_path,
storage_object.cid,
split_object_id,
shell,
cluster.default_rpc_endpoint,
timeout=timeout,
)
chunks_object_ids = []
if "split" in head["header"] and "children" in head["header"]["split"]:
chunks_object_ids = head["header"]["split"]["children"]
return chunks_object_ids
def get_complex_object_split_ranges(
storage_object: StorageObjectInfo,
shell: Shell,
cluster: Cluster,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> list[Tuple[int, int]]:
"""
Get list of split ranges tuples (offset, length) of a complex object
For example, if the object size is 100 and the max object size in the system is 30,
the returned list should be
[(0, 30), (30, 30), (60, 30), (90, 10)]
Args:
storage_object: storage object to get split ranges of
shell: client shell to do cmd requests
cluster: cluster object under test
timeout: Timeout for an operation.
Returns:
list of (offset, length) tuples for the complex object's chunks
"""
ranges: list = []
offset = 0
chunks_ids = get_storage_object_chunks(storage_object, shell, cluster, timeout)
for chunk_id in chunks_ids:
head = head_object(
storage_object.wallet_file_path,
storage_object.cid,
chunk_id,
shell,
cluster.default_rpc_endpoint,
timeout=timeout,
)
length = int(head["header"]["payloadLength"])
ranges.append((offset, length))
offset = offset + length
return ranges
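# Usage sketch: the ranges pair naturally with object GETRANGE calls to verify
# every chunk of a complex object, e.g. (fixtures assumed):
#
# for offset, length in get_complex_object_split_ranges(storage_object, shell, cluster):
#     range_cut = f"{offset}:{length}"
#     ...  # e.g. get_range(...) / get_range_hash(...) with this range_cut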
@reporter.step_deco("Get Link Object")
def get_link_object(
wallet: str,
cid: str,
oid: str,
shell: Shell,
nodes: list[StorageNode],
bearer: str = "",
wallet_config: str = DEFAULT_WALLET_CONFIG,
is_direct: bool = True,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
):
"""
Args:
wallet (str): path to the wallet on whose behalf the Storage Nodes
are requested
cid (str): Container ID which stores the Large Object
oid (str): Large Object ID
shell: executor for cli command
nodes: list of nodes to do search on
bearer (optional, str): path to Bearer token file
wallet_config (optional, str): path to the frostfs-cli config file
is_direct: send request directly to the node or not; this flag
turns into `--ttl 1` key
timeout: Timeout for an operation.
Returns:
(str): Link Object ID
When no Link Object ID is found after polling all Storage Nodes,
the function logs an error and returns None.
"""
for node in nodes:
endpoint = node.get_rpc_endpoint()
try:
resp = head_object(
wallet,
cid,
oid,
shell=shell,
endpoint=endpoint,
is_raw=True,
is_direct=is_direct,
bearer=bearer,
wallet_config=wallet_config,
timeout=timeout,
)
if resp["link"]:
return resp["link"]
except Exception:
logger.info(f"No Link Object found on {endpoint}; continue")
logger.error(f"No Link Object for {cid}/{oid} found among all Storage Nodes")
return None
@reporter.step_deco("Get Last Object")
def get_last_object(
wallet: str,
cid: str,
oid: str,
shell: Shell,
nodes: list[StorageNode],
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> Optional[str]:
"""
Args:
wallet (str): path to the wallet on whose behalf the Storage Nodes
are requested
cid (str): Container ID which stores the Large Object
oid (str): Large Object ID
shell: executor for cli command
nodes: list of nodes to do search on
timeout: Timeout for an operation.
Returns:
(str): Last Object ID
When no Last Object ID is found after polling all Storage Nodes,
the function logs an error and returns None.
"""
for node in nodes:
endpoint = node.get_rpc_endpoint()
try:
resp = head_object(
wallet,
cid,
oid,
shell=shell,
endpoint=endpoint,
is_raw=True,
is_direct=True,
timeout=timeout,
)
if resp["lastPart"]:
return resp["lastPart"]
except Exception:
logger.info(f"No Last Object found on {endpoint}; continue")
logger.error(f"No Last Object for {cid}/{oid} found among all Storage Nodes")
return None

View file

@@ -0,0 +1,131 @@
import logging
from time import sleep
from typing import Optional
from frostfs_testlib.cli import FrostfsAdm, FrostfsCli, NeoGo
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import (
CLI_DEFAULT_TIMEOUT,
FROSTFS_ADM_CONFIG_PATH,
FROSTFS_ADM_EXEC,
FROSTFS_CLI_EXEC,
NEOGO_EXECUTABLE,
)
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.payment_neogo import get_contract_hash
from frostfs_testlib.storage.cluster import Cluster, StorageNode
from frostfs_testlib.storage.dataclasses.frostfs_services import InnerRing, MorphChain
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils import datetime_utils, wallet_utils
reporter = get_reporter()
logger = logging.getLogger("NeoLogger")
@reporter.step_deco("Get epochs from nodes")
def get_epochs_from_nodes(shell: Shell, cluster: Cluster) -> dict[str, int]:
"""
Get current epochs on each node.
Args:
shell: shell to run commands on.
cluster: cluster under test.
Returns:
Dict of {node_ip: epoch}.
"""
epochs_by_node = {}
for node in cluster.services(StorageNode):
epochs_by_node[node.host.config.address] = get_epoch(shell, cluster, node)
return epochs_by_node
@reporter.step_deco("Ensure fresh epoch")
def ensure_fresh_epoch(
shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None
) -> int:
# ensure new fresh epoch to avoid epoch switch during test session
alive_node = alive_node if alive_node else cluster.services(StorageNode)[0]
current_epoch = get_epoch(shell, cluster, alive_node)
tick_epoch(shell, cluster, alive_node)
epoch = get_epoch(shell, cluster, alive_node)
assert epoch > current_epoch, "Epoch wasn't ticked"
return epoch
@reporter.step_deco("Wait for epochs align in whole cluster")
@wait_for_success(60, 5)
def wait_for_epochs_align(shell: Shell, cluster: Cluster) -> None:
epochs = []
for node in cluster.services(StorageNode):
epochs.append(get_epoch(shell, cluster, node))
unique_epochs = list(set(epochs))
assert (
len(unique_epochs) == 1
), f"unaligned epochs found, {epochs}, count of unique epochs {len(unique_epochs)}"
@reporter.step_deco("Get Epoch")
def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None):
alive_node = alive_node if alive_node else cluster.services(StorageNode)[0]
endpoint = alive_node.get_rpc_endpoint()
wallet_path = alive_node.get_wallet_path()
wallet_config = alive_node.get_wallet_config_path()
cli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config)
epoch = cli.netmap.epoch(endpoint, wallet_path, timeout=CLI_DEFAULT_TIMEOUT)
return int(epoch.stdout)
@reporter.step_deco("Tick Epoch")
def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None):
"""
Tick epoch using frostfs-adm or NeoGo if frostfs-adm is not available (DevEnv)
Args:
shell: local shell to make queries about current epoch. Remote shell will be used to tick new one
cluster: cluster instance under test
alive_node: node to send requests to (first node in cluster by default)
"""
alive_node = alive_node if alive_node else cluster.services(StorageNode)[0]
remote_shell = alive_node.host.get_shell()
if FROSTFS_ADM_EXEC and FROSTFS_ADM_CONFIG_PATH:
# If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests)
frostfs_adm = FrostfsAdm(
shell=remote_shell,
frostfs_adm_exec_path=FROSTFS_ADM_EXEC,
config_file=FROSTFS_ADM_CONFIG_PATH,
)
frostfs_adm.morph.force_new_epoch()
return
# Otherwise we tick epoch using transaction
cur_epoch = get_epoch(shell, cluster, alive_node)
# Use first node by default
ir_node = cluster.services(InnerRing)[0]
# Use the IR node's wallet to sign the newEpoch transaction
ir_wallet_path = ir_node.get_wallet_path()
ir_wallet_pass = ir_node.get_wallet_password()
ir_address = wallet_utils.get_last_address_from_wallet(ir_wallet_path, ir_wallet_pass)
morph_chain = cluster.services(MorphChain)[0]
morph_endpoint = morph_chain.get_endpoint()
neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE)
neogo.contract.invokefunction(
wallet=ir_wallet_path,
wallet_password=ir_wallet_pass,
scripthash=get_contract_hash(morph_chain, "netmap.frostfs", shell=shell),
method="newEpoch",
arguments=f"int:{cur_epoch + 1}",
multisig_hash=f"{ir_address}:Global",
address=ir_address,
rpc_endpoint=morph_endpoint,
force=True,
gas=1,
)
sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
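# Usage sketch (fixtures assumed): force a fresh epoch before a test and wait
# until every storage node observes it:
#
# epoch = ensure_fresh_epoch(shell, cluster)
# wait_for_epochs_align(shell, cluster)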

View file

@@ -0,0 +1,355 @@
import logging
import os
import random
import re
import shutil
import uuid
import zipfile
from typing import Optional
from urllib.parse import quote_plus
import requests
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.common import SIMPLE_OBJECT_SIZE
from frostfs_testlib.s3.aws_cli_client import LONG_TIMEOUT
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.object import get_object
from frostfs_testlib.steps.storage_policy import get_nodes_without_object
from frostfs_testlib.storage.cluster import StorageNode
from frostfs_testlib.utils.cli_utils import _cmd_run
from frostfs_testlib.utils.file_utils import get_file_hash
reporter = get_reporter()
logger = logging.getLogger("NeoLogger")
ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/")
@reporter.step_deco("Get via HTTP Gate")
def get_via_http_gate(cid: str, oid: str, endpoint: str, request_path: Optional[str] = None):
"""
This function gets given object from HTTP gate
cid: container id to get object from
oid: object ID
endpoint: http gate endpoint
    request_path: (optional) http request path; if omitted, the default [{endpoint}/get/{cid}/{oid}] is used
"""
# if `request_path` parameter omitted, use default
if request_path is None:
request = f"{endpoint}/get/{cid}/{oid}"
else:
request = f"{endpoint}{request_path}"
resp = requests.get(request, stream=True)
if not resp.ok:
raise Exception(
f"""Failed to get object via HTTP gate:
request: {resp.request.path_url},
response: {resp.text},
status code: {resp.status_code} {resp.reason}"""
)
logger.info(f"Request: {request}")
_attach_allure_step(request, resp.status_code)
file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}")
with open(file_path, "wb") as file:
shutil.copyfileobj(resp.raw, file)
return file_path
@reporter.step_deco("Get via Zip HTTP Gate")
def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str):
"""
    This function gets a zip archive of objects with the given prefix from HTTP gate
    cid: container id to get objects from
    prefix: common prefix of the objects to download
endpoint: http gate endpoint
"""
request = f"{endpoint}/zip/{cid}/{prefix}"
resp = requests.get(request, stream=True)
if not resp.ok:
raise Exception(
f"""Failed to get object via HTTP gate:
request: {resp.request.path_url},
response: {resp.text},
status code: {resp.status_code} {resp.reason}"""
)
logger.info(f"Request: {request}")
_attach_allure_step(request, resp.status_code)
file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_archive.zip")
with open(file_path, "wb") as file:
shutil.copyfileobj(resp.raw, file)
with zipfile.ZipFile(file_path, "r") as zip_ref:
zip_ref.extractall(ASSETS_DIR)
return os.path.join(os.getcwd(), ASSETS_DIR, prefix)
@reporter.step_deco("Get via HTTP Gate by attribute")
def get_via_http_gate_by_attribute(
cid: str, attribute: dict, endpoint: str, request_path: Optional[str] = None
):
"""
This function gets given object from HTTP gate
cid: CID to get object from
    attribute: attribute {name: value} pair
    endpoint: http gate endpoint
    request_path: (optional) http request path; if omitted, the default [{endpoint}/get_by_attribute/{cid}/{Key}/{Value}] is used
"""
attr_name = list(attribute.keys())[0]
attr_value = quote_plus(str(attribute.get(attr_name)))
    # if `request_path` parameter omitted, use default
if request_path is None:
request = f"{endpoint}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}"
else:
request = f"{endpoint}{request_path}"
resp = requests.get(request, stream=True)
if not resp.ok:
raise Exception(
f"""Failed to get object via HTTP gate:
request: {resp.request.path_url},
response: {resp.text},
status code: {resp.status_code} {resp.reason}"""
)
logger.info(f"Request: {request}")
_attach_allure_step(request, resp.status_code)
file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{str(uuid.uuid4())}")
with open(file_path, "wb") as file:
shutil.copyfileobj(resp.raw, file)
return file_path
@reporter.step_deco("Upload via HTTP Gate")
def upload_via_http_gate(cid: str, path: str, endpoint: str, headers: Optional[dict] = None) -> str:
"""
    This function uploads given object through HTTP gate
    cid: CID of the container to upload the object to
path: File path to upload
endpoint: http gate endpoint
headers: Object header
"""
request = f"{endpoint}/upload/{cid}"
files = {"upload_file": open(path, "rb")}
body = {"filename": path}
resp = requests.post(request, files=files, data=body, headers=headers)
if not resp.ok:
raise Exception(
f"""Failed to get object via HTTP gate:
request: {resp.request.path_url},
response: {resp.text},
status code: {resp.status_code} {resp.reason}"""
)
logger.info(f"Request: {request}")
_attach_allure_step(request, resp.json(), req_type="POST")
    assert resp.json().get("object_id"), f"OID not found in response {resp}"
return resp.json().get("object_id")
@reporter.step_deco("Check is the passed object large")
def is_object_large(filepath: str) -> bool:
"""
    This function checks passed file size and returns True if file_size > SIMPLE_OBJECT_SIZE
    filepath: File path to check
    """
    file_size = os.path.getsize(filepath)
    logger.info(f"Size= {file_size}")
    return file_size > int(SIMPLE_OBJECT_SIZE)
@reporter.step_deco("Upload via HTTP Gate using Curl")
def upload_via_http_gate_curl(
cid: str,
filepath: str,
endpoint: str,
headers: Optional[list] = None,
error_pattern: Optional[str] = None,
) -> str:
"""
    This function uploads given object through HTTP gate using curl utility.
    cid: CID of the container to upload the object to
filepath: File path to upload
headers: Object header
endpoint: http gate endpoint
error_pattern: [optional] expected error message from the command
"""
request = f"{endpoint}/upload/{cid}"
attributes = ""
if headers:
# parse attributes
attributes = " ".join(headers)
large_object = is_object_large(filepath)
if large_object:
# pre-clean
_cmd_run("rm pipe -f")
files = f"file=@pipe;filename={os.path.basename(filepath)}"
cmd = f"mkfifo pipe;cat {filepath} > pipe & curl --no-buffer -F '{files}' {attributes} {request}"
output = _cmd_run(cmd, LONG_TIMEOUT)
# clean up pipe
_cmd_run("rm pipe")
else:
files = f"file=@{filepath};filename={os.path.basename(filepath)}"
cmd = f"curl -F '{files}' {attributes} {request}"
output = _cmd_run(cmd)
if error_pattern:
match = error_pattern.casefold() in str(output).casefold()
assert match, f"Expected {output} to match {error_pattern}"
return ""
oid_re = re.search(r'"object_id": "(.*)"', output)
if not oid_re:
raise AssertionError(f'Could not find "object_id" in {output}')
return oid_re.group(1)
@reporter.step_deco("Get via HTTP Gate using Curl")
def get_via_http_curl(cid: str, oid: str, endpoint: str) -> str:
"""
This function gets given object from HTTP gate using curl utility.
cid: CID to get object from
oid: object OID
endpoint: http gate endpoint
"""
request = f"{endpoint}/get/{cid}/{oid}"
file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}")
cmd = f"curl {request} > {file_path}"
_cmd_run(cmd)
return file_path
def _attach_allure_step(request: str, status_code: int, req_type="GET"):
command_attachment = f"REQUEST: '{request}'\n" f"RESPONSE:\n {status_code}\n"
with reporter.step(f"{req_type} Request"):
reporter.attach(command_attachment, f"{req_type} Request")
@reporter.step_deco("Try to get object and expect error")
def try_to_get_object_and_expect_error(
cid: str, oid: str, error_pattern: str, endpoint: str
) -> None:
try:
get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint)
raise AssertionError(f"Expected error on getting object with cid: {cid}")
except Exception as err:
match = error_pattern.casefold() in str(err).casefold()
assert match, f"Expected {err} to match {error_pattern}"
@reporter.step_deco("Verify object can be get using HTTP header attribute")
def get_object_by_attr_and_verify_hashes(
oid: str, file_name: str, cid: str, attrs: dict, endpoint: str
) -> None:
got_file_path_http = get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint)
got_file_path_http_attr = get_via_http_gate_by_attribute(
cid=cid, attribute=attrs, endpoint=endpoint
)
assert_hashes_are_equal(file_name, got_file_path_http, got_file_path_http_attr)
def verify_object_hash(
oid: str,
file_name: str,
wallet: str,
cid: str,
shell: Shell,
nodes: list[StorageNode],
endpoint: str,
object_getter=None,
) -> None:
nodes_list = get_nodes_without_object(
wallet=wallet,
cid=cid,
oid=oid,
shell=shell,
nodes=nodes,
)
    # nodes_list can be empty if the object resides on all nodes of the container
if nodes_list:
random_node = random.choice(nodes_list)
else:
random_node = random.choice(nodes)
object_getter = object_getter or get_via_http_gate
got_file_path = get_object(
wallet=wallet,
cid=cid,
oid=oid,
shell=shell,
endpoint=random_node.get_rpc_endpoint(),
)
got_file_path_http = object_getter(cid=cid, oid=oid, endpoint=endpoint)
assert_hashes_are_equal(file_name, got_file_path, got_file_path_http)
def assert_hashes_are_equal(orig_file_name: str, got_file_1: str, got_file_2: str) -> None:
msg = "Expected hashes are equal for files {f1} and {f2}"
got_file_hash_http = get_file_hash(got_file_1)
assert get_file_hash(got_file_2) == got_file_hash_http, msg.format(f1=got_file_2, f2=got_file_1)
assert get_file_hash(orig_file_name) == got_file_hash_http, msg.format(
f1=orig_file_name, f2=got_file_1
)
def attr_into_header(attrs: dict) -> dict:
return {f"X-Attribute-{_key}": _value for _key, _value in attrs.items()}
@reporter.step_deco(
"Convert each attribute (Key=Value) to the following format: -H 'X-Attribute-Key: Value'"
)
def attr_into_str_header_curl(attrs: dict) -> list:
headers = []
for k, v in attrs.items():
headers.append(f"-H 'X-Attribute-{k}: {v}'")
logger.info(f"[List of Attrs for curl:] {headers}")
return headers
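# Illustrative example of the conversion above (hypothetical attributes):
#
#   attr_into_str_header_curl({"FileName": "cat.jpg", "Chapter": "1"})
#   # -> ["-H 'X-Attribute-FileName: cat.jpg'", "-H 'X-Attribute-Chapter: 1'"]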
@reporter.step_deco(
"Try to get object via http (pass http_request and optional attributes) and expect error"
)
def try_to_get_object_via_passed_request_and_expect_error(
cid: str,
oid: str,
error_pattern: str,
endpoint: str,
http_request_path: str,
attrs: Optional[dict] = None,
) -> None:
try:
if attrs is None:
get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, request_path=http_request_path)
else:
get_via_http_gate_by_attribute(
cid=cid, attribute=attrs, endpoint=endpoint, request_path=http_request_path
)
raise AssertionError(f"Expected error on getting object with cid: {cid}")
except Exception as err:
match = error_pattern.casefold() in str(err).casefold()
assert match, f"Expected {err} to match {error_pattern}"

View file

@ -0,0 +1,351 @@
import logging
import random
import re
import time
from dataclasses import dataclass
from time import sleep
from typing import Optional
from frostfs_testlib.cli import FrostfsAdm, FrostfsCli
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import (
FROSTFS_ADM_CONFIG_PATH,
FROSTFS_ADM_EXEC,
FROSTFS_CLI_EXEC,
)
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.epoch import tick_epoch
from frostfs_testlib.storage.cluster import Cluster, StorageNode
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
from frostfs_testlib.utils import datetime_utils
reporter = get_reporter()
logger = logging.getLogger("NeoLogger")
@dataclass
class HealthStatus:
network_status: Optional[str] = None
health_status: Optional[str] = None
@staticmethod
def from_stdout(output: str) -> "HealthStatus":
network, health = None, None
for line in output.split("\n"):
if "Network status" in line:
network = line.split(":")[-1].strip()
if "Health status" in line:
health = line.split(":")[-1].strip()
return HealthStatus(network, health)
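# Illustrative parse example (output format assumed from the parser above):
#
#   HealthStatus.from_stdout("Network status: ONLINE\nHealth status: READY")
#   # -> HealthStatus(network_status="ONLINE", health_status="READY")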
@reporter.step_deco("Stop random storage nodes")
def stop_random_storage_nodes(number: int, nodes: list[StorageNode]) -> list[StorageNode]:
"""
Shuts down the given number of randomly selected storage nodes.
Args:
number: the number of storage nodes to stop
nodes: the list of storage nodes to stop
Returns:
the list of nodes that were stopped
"""
nodes_to_stop = random.sample(nodes, number)
for node in nodes_to_stop:
node.stop_service()
return nodes_to_stop
@reporter.step_deco("Start storage node")
def start_storage_nodes(nodes: list[StorageNode]) -> None:
"""
The function starts specified storage nodes.
Args:
nodes: the list of nodes to start
"""
for node in nodes:
node.start_service()
@reporter.step_deco("Stop storage node")
def stop_storage_nodes(nodes: list[StorageNode]) -> None:
"""
    The function stops specified storage nodes.
    Args:
        nodes: the list of nodes to stop
"""
for node in nodes:
node.stop_service()
@reporter.step_deco("Get Locode from random storage node")
def get_locode_from_random_node(cluster: Cluster) -> str:
node = random.choice(cluster.services(StorageNode))
locode = node.get_un_locode()
logger.info(f"Chosen '{locode}' locode from node {node}")
return locode
@reporter.step_deco("Healthcheck for storage node {node}")
def storage_node_healthcheck(node: StorageNode) -> HealthStatus:
"""
The function returns storage node's health status.
Args:
node: storage node for which health status should be retrieved.
Returns:
health status as HealthStatus object.
"""
command = "control healthcheck"
output = _run_control_command_with_retries(node, command)
return HealthStatus.from_stdout(output)
@reporter.step_deco("Set status for {node}")
def storage_node_set_status(node: StorageNode, status: str, retries: int = 0) -> None:
"""
The function sets particular status for given node.
Args:
node: node for which status should be set.
status: online or offline.
retries (optional, int): number of retry attempts if it didn't work from the first time
"""
command = f"control set-status --status {status}"
_run_control_command_with_retries(node, command, retries)
@reporter.step_deco("Get netmap snapshot")
def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str:
"""
The function returns string representation of netmap snapshot.
Args:
node: node from which netmap snapshot should be requested.
Returns:
string representation of netmap
"""
storage_wallet_config = node.get_wallet_config_path()
storage_wallet_path = node.get_wallet_path()
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, config_file=storage_wallet_config)
return cli.netmap.snapshot(
rpc_endpoint=node.get_rpc_endpoint(),
wallet=storage_wallet_path,
).stdout
@reporter.step_deco("Get shard list for {node}")
def node_shard_list(node: StorageNode) -> list[str]:
"""
The function returns list of shards for specified storage node.
Args:
node: node for which shards should be returned.
Returns:
list of shards.
"""
command = "control shards list"
output = _run_control_command_with_retries(node, command)
return re.findall(r"Shard (.*):", output)
@reporter.step_deco("Shard set for {node}")
def node_shard_set_mode(node: StorageNode, shard: str, mode: str) -> str:
"""
The function sets mode for specified shard.
Args:
        node: node on which shard mode should be set.
        shard: ID of the shard.
        mode: mode to set for the shard.
"""
command = f"control shards set-mode --id {shard} --mode {mode}"
return _run_control_command_with_retries(node, command)
@reporter.step_deco("Drop object from {node}")
def drop_object(node: StorageNode, cid: str, oid: str) -> str:
"""
The function drops object from specified node.
Args:
        node: node from which the object should be dropped.
        cid: ID of the container.
        oid: ID of the object.
"""
command = f"control drop-objects -o {cid}/{oid}"
return _run_control_command_with_retries(node, command)
@reporter.step_deco("Delete data from host for node {node}")
def delete_node_data(node: StorageNode) -> None:
node.stop_service()
node.host.delete_storage_node_data(node.name)
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
@reporter.step_deco("Exclude node {node_to_exclude} from network map")
def exclude_node_from_network_map(
node_to_exclude: StorageNode,
alive_node: StorageNode,
shell: Shell,
cluster: Cluster,
) -> None:
node_netmap_key = node_to_exclude.get_wallet_public_key()
storage_node_set_status(node_to_exclude, status="offline")
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
tick_epoch(shell, cluster)
snapshot = get_netmap_snapshot(node=alive_node, shell=shell)
assert (
node_netmap_key not in snapshot
), f"Expected node with key {node_netmap_key} to be absent in network map"
@reporter.step_deco("Include node {node_to_include} into network map")
def include_node_to_network_map(
node_to_include: StorageNode,
alive_node: StorageNode,
shell: Shell,
cluster: Cluster,
) -> None:
storage_node_set_status(node_to_include, status="online")
# Per suggestion of @fyrchik we need to wait for 2 blocks after we set status and after tick epoch.
# First sleep can be omitted after https://github.com/TrueCloudLab/frostfs-node/issues/60 complete.
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2)
tick_epoch(shell, cluster)
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2)
check_node_in_map(node_to_include, shell, alive_node)
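# A typical exclude/include roundtrip (illustrative sketch; `node`, `alive_node`,
# `shell` and `cluster` are assumed from the calling test):
#
#   exclude_node_from_network_map(node, alive_node, shell, cluster)
#   include_node_to_network_map(node, alive_node, shell, cluster)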
@reporter.step_deco("Check node {node} in network map")
def check_node_in_map(
node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None
) -> None:
alive_node = alive_node or node
node_netmap_key = node.get_wallet_public_key()
logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}")
snapshot = get_netmap_snapshot(alive_node, shell)
assert (
node_netmap_key in snapshot
), f"Expected node with key {node_netmap_key} to be in network map"
@reporter.step_deco("Check node {node} NOT in network map")
def check_node_not_in_map(
node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None
) -> None:
alive_node = alive_node or node
node_netmap_key = node.get_wallet_public_key()
logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}")
snapshot = get_netmap_snapshot(alive_node, shell)
assert (
node_netmap_key not in snapshot
), f"Expected node with key {node_netmap_key} to be NOT in network map"
@reporter.step_deco("Wait for node {node} is ready")
def wait_for_node_to_be_ready(node: StorageNode) -> None:
timeout, attempts = 30, 6
for _ in range(attempts):
try:
health_check = storage_node_healthcheck(node)
if health_check.health_status == "READY":
return
except Exception as err:
logger.warning(f"Node {node} is not ready:\n{err}")
sleep(timeout)
raise AssertionError(
f"Node {node} hasn't gone to the READY state after {timeout * attempts} seconds"
)
@reporter.step_deco("Remove nodes from network map trough cli-adm morph command")
def remove_nodes_from_map_morph(
shell: Shell,
cluster: Cluster,
remove_nodes: list[StorageNode],
alive_node: Optional[StorageNode] = None,
):
"""
Move node to the Offline state in the candidates list and tick an epoch to update the netmap
using frostfs-adm
Args:
shell: local shell to make queries about current epoch. Remote shell will be used to tick new one
cluster: cluster instance under test
alive_node: node to send requests to (first node in cluster by default)
remove_nodes: list of nodes which would be removed from map
"""
alive_node = alive_node if alive_node else remove_nodes[0]
remote_shell = alive_node.host.get_shell()
node_netmap_keys = list(map(StorageNode.get_wallet_public_key, remove_nodes))
logger.info(f"Nodes netmap keys are: {' '.join(node_netmap_keys)}")
if FROSTFS_ADM_EXEC and FROSTFS_ADM_CONFIG_PATH:
# If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests)
frostfsadm = FrostfsAdm(
shell=remote_shell,
frostfs_adm_exec_path=FROSTFS_ADM_EXEC,
config_file=FROSTFS_ADM_CONFIG_PATH,
)
frostfsadm.morph.remove_nodes(node_netmap_keys)
def _run_control_command_with_retries(node: StorageNode, command: str, retries: int = 0) -> str:
for attempt in range(1 + retries): # original attempt + specified retries
try:
return _run_control_command(node, command)
except AssertionError as err:
if attempt < retries:
logger.warning(f"Command {command} failed with error {err} and will be retried")
continue
raise AssertionError(f"Command {command} failed with error {err}") from err
def _run_control_command(node: StorageNode, command: str) -> str:
host = node.host
service_config = host.get_service_config(node.name)
wallet_path = service_config.attributes["wallet_path"]
wallet_password = service_config.attributes["wallet_password"]
control_endpoint = service_config.attributes["control_endpoint"]
shell = host.get_shell()
wallet_config_path = f"/tmp/{node.name}-config.yaml"
wallet_config = f'password: "{wallet_password}"'
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
cli_config = host.get_cli_config("frostfs-cli")
# TODO: implement cli.control
# cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
result = shell.exec(
f"{cli_config.exec_path} {command} --endpoint {control_endpoint} "
f"--wallet {wallet_path} --config {wallet_config_path}"
)
return result.stdout
@reporter.step_deco("Start services s3gate ")
def start_s3gates(cluster: Cluster) -> None:
"""
    The function starts all S3 gates in the cluster.
Args:
cluster: cluster instance under test
"""
for gate in cluster.services(S3Gate):
gate.start_service()
@reporter.step_deco("Stop services s3gate ")
def stop_s3gates(cluster: Cluster) -> None:
"""
    The function stops all S3 gates in the cluster.
Args:
cluster: cluster instance under test
"""
for gate in cluster.services(S3Gate):
gate.stop_service()
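# A minimal failover sketch (illustrative): stop one random storage node and
# make sure it comes back healthy; `cluster` is assumed to be a test fixture.
#
#   stopped = stop_random_storage_nodes(1, cluster.services(StorageNode))
#   start_storage_nodes(stopped)
#   for node in stopped:
#       wait_for_node_to_be_ready(node)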

View file

@ -0,0 +1,217 @@
import base64
import json
import logging
import re
import time
from typing import Optional
from neo3.wallet import utils as neo3_utils
from neo3.wallet import wallet as neo3_wallet
from frostfs_testlib.cli import NeoGo
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import NEOGO_EXECUTABLE
from frostfs_testlib.resources.common import FROSTFS_CONTRACT, GAS_HASH, MORPH_BLOCK_TIME
from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.dataclasses.frostfs_services import MainChain, MorphChain
from frostfs_testlib.utils import converting_utils, datetime_utils, wallet_utils
reporter = get_reporter()
logger = logging.getLogger("NeoLogger")
EMPTY_PASSWORD = ""
TX_PERSIST_TIMEOUT = 15 # seconds
ASSET_POWER_MAINCHAIN = 10**8
ASSET_POWER_SIDECHAIN = 10**12
def get_nns_contract_hash(morph_chain: MorphChain) -> str:
return morph_chain.rpc_client.get_contract_state(1)["hash"]
def get_contract_hash(morph_chain: MorphChain, resolve_name: str, shell: Shell) -> str:
nns_contract_hash = get_nns_contract_hash(morph_chain)
neogo = NeoGo(shell=shell, neo_go_exec_path=NEOGO_EXECUTABLE)
out = neogo.contract.testinvokefunction(
scripthash=nns_contract_hash,
method="resolve",
arguments=f"string:{resolve_name} int:16",
rpc_endpoint=morph_chain.get_endpoint(),
)
stack_data = json.loads(out.stdout.replace("\n", ""))["stack"][0]["value"]
return bytes.decode(base64.b64decode(stack_data[0]["value"]))
@reporter.step_deco("Withdraw Mainnet Gas")
def withdraw_mainnet_gas(shell: Shell, main_chain: MainChain, wlt: str, amount: int):
address = wallet_utils.get_last_address_from_wallet(wlt, EMPTY_PASSWORD)
scripthash = neo3_utils.address_to_script_hash(address)
neogo = NeoGo(shell=shell, neo_go_exec_path=NEOGO_EXECUTABLE)
out = neogo.contract.invokefunction(
wallet=wlt,
address=address,
rpc_endpoint=main_chain.get_endpoint(),
scripthash=FROSTFS_CONTRACT,
method="withdraw",
arguments=f"{scripthash} int:{amount}",
multisig_hash=f"{scripthash}:Global",
wallet_password="",
)
m = re.match(r"^Sent invocation transaction (\w{64})$", out.stdout)
if m is None:
raise Exception("Can not get Tx.")
tx = m.group(1)
if not transaction_accepted(main_chain, tx):
raise AssertionError(f"TX {tx} hasn't been processed")
def transaction_accepted(main_chain: MainChain, tx_id: str):
"""
This function returns True in case of accepted TX.
Args:
tx_id(str): transaction ID
Returns:
(bool)
"""
try:
for _ in range(0, TX_PERSIST_TIMEOUT):
time.sleep(1)
resp = main_chain.rpc_client.get_transaction_height(tx_id)
if resp is not None:
logger.info(f"TX is accepted in block: {resp}")
                return True
except Exception as out:
logger.info(f"request failed with error: {out}")
raise out
return False
@reporter.step_deco("Get FrostFS Balance")
def get_balance(shell: Shell, morph_chain: MorphChain, wallet_path: str, wallet_password: str = ""):
"""
This function returns FrostFS balance for given wallet.
"""
with open(wallet_path) as wallet_file:
wallet = neo3_wallet.Wallet.from_json(json.load(wallet_file), password=wallet_password)
acc = wallet.accounts[-1]
payload = [{"type": "Hash160", "value": str(acc.script_hash)}]
try:
resp = morph_chain.rpc_client.invoke_function(
get_contract_hash(morph_chain, "balance.frostfs", shell=shell), "balanceOf", payload
)
logger.info(f"Got response \n{resp}")
value = int(resp["stack"][0]["value"])
return value / ASSET_POWER_SIDECHAIN
except Exception as out:
logger.error(f"failed to get wallet balance: {out}")
raise out
@reporter.step_deco("Transfer Gas")
def transfer_gas(
shell: Shell,
amount: int,
main_chain: MainChain,
wallet_from_path: Optional[str] = None,
wallet_from_password: Optional[str] = None,
address_from: Optional[str] = None,
address_to: Optional[str] = None,
wallet_to_path: Optional[str] = None,
wallet_to_password: Optional[str] = None,
):
"""
    This function transfers GAS in the main chain from the mainnet wallet to
the provided wallet. If the wallet contains more than one address,
the assets will be transferred to the last one.
Args:
shell: Shell instance.
wallet_from_password: Password of the wallet; it is required to decode the wallet
and extract its addresses.
wallet_from_path: Path to chain node wallet.
address_from: The address of the wallet to transfer assets from.
wallet_to_path: The path to the wallet to transfer assets to.
wallet_to_password: The password to the wallet to transfer assets to.
address_to: The address of the wallet to transfer assets to.
amount: Amount of gas to transfer.
"""
wallet_from_path = wallet_from_path or main_chain.get_wallet_path()
wallet_from_password = (
wallet_from_password
if wallet_from_password is not None
else main_chain.get_wallet_password()
)
address_from = address_from or wallet_utils.get_last_address_from_wallet(
wallet_from_path, wallet_from_password
)
address_to = address_to or wallet_utils.get_last_address_from_wallet(
wallet_to_path, wallet_to_password
)
neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE)
out = neogo.nep17.transfer(
rpc_endpoint=main_chain.get_endpoint(),
wallet=wallet_from_path,
wallet_password=wallet_from_password,
amount=amount,
from_address=address_from,
to_address=address_to,
token="GAS",
force=True,
)
txid = out.stdout.strip().split("\n")[-1]
if len(txid) != 64:
raise Exception("Got no TXID after run the command")
if not transaction_accepted(main_chain, txid):
raise AssertionError(f"TX {txid} hasn't been processed")
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
@reporter.step_deco("FrostFS Deposit")
def deposit_gas(
shell: Shell,
main_chain: MainChain,
amount: int,
wallet_from_path: str,
wallet_from_password: str,
):
"""
Transferring GAS from given wallet to FrostFS contract address.
"""
# get FrostFS contract address
deposit_addr = converting_utils.contract_hash_to_address(FROSTFS_CONTRACT)
logger.info(f"FrostFS contract address: {deposit_addr}")
address_from = wallet_utils.get_last_address_from_wallet(
wallet_path=wallet_from_path, wallet_password=wallet_from_password
)
transfer_gas(
shell=shell,
main_chain=main_chain,
amount=amount,
wallet_from_path=wallet_from_path,
wallet_from_password=wallet_from_password,
address_to=deposit_addr,
address_from=address_from,
)
@reporter.step_deco("Get Mainnet Balance")
def get_mainnet_balance(main_chain: MainChain, address: str):
resp = main_chain.rpc_client.get_nep17_balances(address=address)
logger.info(f"Got getnep17balances response: {resp}")
for balance in resp["balance"]:
if balance["assethash"] == GAS_HASH:
return float(balance["amount"]) / ASSET_POWER_MAINCHAIN
return float(0)
@reporter.step_deco("Get Sidechain Balance")
def get_sidechain_balance(morph_chain: MorphChain, address: str):
resp = morph_chain.rpc_client.get_nep17_balances(address=address)
logger.info(f"Got getnep17balances response: {resp}")
for balance in resp["balance"]:
if balance["assethash"] == GAS_HASH:
return float(balance["amount"]) / ASSET_POWER_SIDECHAIN
return float(0)
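# A minimal deposit sketch (illustrative; `shell`, `main_chain`, `morph_chain`
# and the wallet parameters are assumed to come from the calling test):
#
#   transfer_gas(shell, amount=100, main_chain=main_chain,
#                wallet_to_path=wallet_path, wallet_to_password="")
#   deposit_gas(shell, main_chain, amount=50,
#               wallet_from_path=wallet_path, wallet_from_password="")
#   balance = get_balance(shell, morph_chain, wallet_path)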

View file

@ -0,0 +1,247 @@
import json
import logging
import os
import re
import uuid
from datetime import datetime, timedelta
from typing import Optional
from dateutil.parser import parse
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC
from frostfs_testlib.resources.common import CREDENTIALS_CREATE_TIMEOUT
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
from frostfs_testlib.utils.cli_utils import _run_with_passwd
reporter = get_reporter()
logger = logging.getLogger("NeoLogger")
@reporter.step_deco("Expected all objects are presented in the bucket")
def check_objects_in_bucket(
s3_client: S3ClientWrapper,
bucket: str,
expected_objects: list,
unexpected_objects: Optional[list] = None,
) -> None:
unexpected_objects = unexpected_objects or []
bucket_objects = s3_client.list_objects(bucket)
    assert len(bucket_objects) == len(
        expected_objects
    ), f"Expected {len(expected_objects)} objects in the bucket, got {len(bucket_objects)}"
for bucket_object in expected_objects:
assert (
bucket_object in bucket_objects
), f"Expected object {bucket_object} in objects list {bucket_objects}"
for bucket_object in unexpected_objects:
assert (
bucket_object not in bucket_objects
), f"Expected object {bucket_object} not in objects list {bucket_objects}"
@reporter.step_deco("Try to get object and got error")
def try_to_get_objects_and_expect_error(
s3_client: S3ClientWrapper, bucket: str, object_keys: list
) -> None:
for obj in object_keys:
try:
s3_client.get_object(bucket, obj)
raise AssertionError(f"Object {obj} found in bucket {bucket}")
except Exception as err:
assert "The specified key does not exist" in str(
err
), f"Expected error in exception {err}"
@reporter.step_deco("Set versioning status to '{status}' for bucket '{bucket}'")
def set_bucket_versioning(s3_client: S3ClientWrapper, bucket: str, status: VersioningStatus):
s3_client.get_bucket_versioning_status(bucket)
s3_client.put_bucket_versioning(bucket, status=status)
bucket_status = s3_client.get_bucket_versioning_status(bucket)
    assert bucket_status == status.value, f"Expected {status.value} status. Got {bucket_status}"
def object_key_from_file_path(full_path: str) -> str:
return os.path.basename(full_path)
def assert_tags(
actual_tags: list, expected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None
) -> None:
expected_tags = (
[{"Key": key, "Value": value} for key, value in expected_tags] if expected_tags else []
)
unexpected_tags = (
[{"Key": key, "Value": value} for key, value in unexpected_tags] if unexpected_tags else []
)
if expected_tags == []:
        assert not actual_tags, f"Expected no tags, got {actual_tags}"
    assert len(expected_tags) == len(
        actual_tags
    ), f"Expected {len(expected_tags)} tags, got {len(actual_tags)}"
for tag in expected_tags:
assert tag in actual_tags, f"Tag {tag} must be in {actual_tags}"
for tag in unexpected_tags:
assert tag not in actual_tags, f"Tag {tag} should not be in {actual_tags}"
@reporter.step_deco("Expected all tags are presented in object")
def check_tags_by_object(
s3_client: S3ClientWrapper,
bucket: str,
key: str,
expected_tags: list,
unexpected_tags: Optional[list] = None,
) -> None:
actual_tags = s3_client.get_object_tagging(bucket, key)
assert_tags(
expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags
)
@reporter.step_deco("Expected all tags are presented in bucket")
def check_tags_by_bucket(
s3_client: S3ClientWrapper,
bucket: str,
expected_tags: list,
unexpected_tags: Optional[list] = None,
) -> None:
actual_tags = s3_client.get_bucket_tagging(bucket)
assert_tags(
expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags
)
def assert_object_lock_mode(
s3_client: S3ClientWrapper,
bucket: str,
file_name: str,
object_lock_mode: str,
retain_until_date: datetime,
legal_hold_status: str = "OFF",
retain_period: Optional[int] = None,
):
object_dict = s3_client.get_object(bucket, file_name, full_output=True)
assert (
object_dict.get("ObjectLockMode") == object_lock_mode
), f"Expected Object Lock Mode is {object_lock_mode}"
assert (
object_dict.get("ObjectLockLegalHoldStatus") == legal_hold_status
), f"Expected Object Lock Legal Hold Status is {legal_hold_status}"
object_retain_date = object_dict.get("ObjectLockRetainUntilDate")
retain_date = (
parse(object_retain_date) if isinstance(object_retain_date, str) else object_retain_date
)
if retain_until_date:
assert retain_date.strftime("%Y-%m-%dT%H:%M:%S") == retain_until_date.strftime(
"%Y-%m-%dT%H:%M:%S"
), f'Expected Object Lock Retain Until Date is {str(retain_until_date.strftime("%Y-%m-%dT%H:%M:%S"))}'
elif retain_period:
last_modify_date = object_dict.get("LastModified")
last_modify = (
parse(last_modify_date) if isinstance(last_modify_date, str) else last_modify_date
)
assert (
retain_date - last_modify + timedelta(seconds=1)
).days == retain_period, f"Expected retention period is {retain_period} days"
def assert_s3_acl(acl_grants: list, permitted_users: str):
if permitted_users == "AllUsers":
grantees = {"AllUsers": 0, "CanonicalUser": 0}
for acl_grant in acl_grants:
if acl_grant.get("Grantee", {}).get("Type") == "Group":
uri = acl_grant.get("Grantee", {}).get("URI")
permission = acl_grant.get("Permission")
assert (uri, permission) == (
"http://acs.amazonaws.com/groups/global/AllUsers",
"FULL_CONTROL",
), "All Groups should have FULL_CONTROL"
grantees["AllUsers"] += 1
if acl_grant.get("Grantee", {}).get("Type") == "CanonicalUser":
permission = acl_grant.get("Permission")
assert permission == "FULL_CONTROL", "Canonical User should have FULL_CONTROL"
grantees["CanonicalUser"] += 1
assert grantees["AllUsers"] >= 1, "All Users should have FULL_CONTROL"
assert grantees["CanonicalUser"] >= 1, "Canonical User should have FULL_CONTROL"
if permitted_users == "CanonicalUser":
for acl_grant in acl_grants:
if acl_grant.get("Grantee", {}).get("Type") == "CanonicalUser":
permission = acl_grant.get("Permission")
assert permission == "FULL_CONTROL", "Only CanonicalUser should have FULL_CONTROL"
else:
logger.error("FULL_CONTROL is given to All Users")
@reporter.step_deco("Init S3 Credentials")
def init_s3_credentials(
wallet_path: str,
cluster: Cluster,
s3_bearer_rules_file: str,
policy: Optional[dict] = None,
):
bucket = str(uuid.uuid4())
s3gate_node = cluster.services(S3Gate)[0]
gate_public_key = s3gate_node.get_wallet_public_key()
cmd = (
f"{FROSTFS_AUTHMATE_EXEC} --debug --with-log --timeout {CREDENTIALS_CREATE_TIMEOUT} "
f"issue-secret --wallet {wallet_path} --gate-public-key={gate_public_key} "
f"--peer {cluster.default_rpc_endpoint} --container-friendly-name {bucket} "
f"--bearer-rules {s3_bearer_rules_file}"
)
if policy:
cmd += f" --container-policy {policy}'"
logger.info(f"Executing command: {cmd}")
try:
output = _run_with_passwd(cmd)
logger.info(f"Command completed with output: {output}")
# output contains some debug info and then several JSON structures, so we find each
# JSON structure by curly brackets (naive approach, but works while JSON is not nested)
# and then we take JSON containing secret_access_key
json_blocks = re.findall(r"\{.*?\}", output, re.DOTALL)
for json_block in json_blocks:
try:
parsed_json_block = json.loads(json_block)
if "secret_access_key" in parsed_json_block:
return (
parsed_json_block["container_id"],
parsed_json_block["access_key_id"],
parsed_json_block["secret_access_key"],
)
except json.JSONDecodeError:
raise AssertionError(f"Could not parse info from output\n{output}")
raise AssertionError(f"Could not find AWS credentials in output:\n{output}")
except Exception as exc:
raise RuntimeError(f"Failed to init s3 credentials because of error\n{exc}") from exc
@reporter.step_deco("Delete bucket with all objects")
def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str):
versioning_status = s3_client.get_bucket_versioning_status(bucket)
if versioning_status == VersioningStatus.ENABLED.value:
# From versioned bucket we should delete all versions and delete markers of all objects
objects_versions = s3_client.list_objects_versions(bucket)
if objects_versions:
s3_client.delete_object_versions_without_dm(bucket, objects_versions)
objects_delete_markers = s3_client.list_delete_markers(bucket)
if objects_delete_markers:
s3_client.delete_object_versions_without_dm(bucket, objects_delete_markers)
else:
# From non-versioned bucket it's sufficient to delete objects by key
objects = s3_client.list_objects(bucket)
if objects:
s3_client.delete_objects(bucket, objects)
objects_delete_markers = s3_client.list_delete_markers(bucket)
if objects_delete_markers:
s3_client.delete_object_versions_without_dm(bucket, objects_delete_markers)
# Delete the bucket itself
s3_client.delete_bucket(bucket)
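# A minimal cleanup sketch (illustrative; `s3_client` is an S3ClientWrapper
# assumed to be provided by the calling test):
#
#   set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
#   # ... put objects, run checks ...
#   delete_bucket_with_objects(s3_client, bucket)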

View file

@ -0,0 +1,287 @@
import base64
import json
import logging
import os
import uuid
from dataclasses import dataclass
from enum import Enum
from typing import Any, Optional
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG
from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.utils import json_utils, wallet_utils
reporter = get_reporter()
logger = logging.getLogger("NeoLogger")
UNRELATED_KEY = "unrelated key in the session"
UNRELATED_OBJECT = "unrelated object in the session"
UNRELATED_CONTAINER = "unrelated container in the session"
WRONG_VERB = "wrong verb of the session"
INVALID_SIGNATURE = "invalid signature of the session data"
class ObjectVerb(Enum):
PUT = "PUT"
DELETE = "DELETE"
GET = "GET"
RANGEHASH = "RANGEHASH"
RANGE = "RANGE"
HEAD = "HEAD"
SEARCH = "SEARCH"
class ContainerVerb(Enum):
CREATE = "PUT"
DELETE = "DELETE"
SETEACL = "SETEACL"
@dataclass
class Lifetime:
exp: int = 100000000
nbf: int = 0
iat: int = 0
@reporter.step_deco("Generate Session Token")
def generate_session_token(
owner_wallet: WalletInfo,
session_wallet: WalletInfo,
session: dict[str, dict[str, Any]],
tokens_dir: str,
lifetime: Optional[Lifetime] = None,
) -> str:
"""
This function generates session token and writes it to the file.
Args:
owner_wallet: wallet of container owner
session_wallet: wallet to which we grant the access via session token
session: Contains allowed operation with parameters
tokens_dir: Dir for token
lifetime: lifetime options for session
Returns:
The path to the generated session token file
"""
file_path = os.path.join(tokens_dir, str(uuid.uuid4()))
pub_key_64 = wallet_utils.get_wallet_public_key(
session_wallet.path, session_wallet.password, "base64"
)
lifetime = lifetime or Lifetime()
session_token = {
"body": {
"id": f"{base64.b64encode(uuid.uuid4().bytes).decode('utf-8')}",
"ownerID": {"value": f"{json_utils.encode_for_json(owner_wallet.get_address())}"},
"lifetime": {
"exp": f"{lifetime.exp}",
"nbf": f"{lifetime.nbf}",
"iat": f"{lifetime.iat}",
},
"sessionKey": pub_key_64,
}
}
session_token["body"].update(session)
logger.info(f"Got this Session Token: {session_token}")
with open(file_path, "w", encoding="utf-8") as session_token_file:
json.dump(session_token, session_token_file, ensure_ascii=False, indent=4)
return file_path
@reporter.step_deco("Generate Session Token For Container")
def generate_container_session_token(
owner_wallet: WalletInfo,
session_wallet: WalletInfo,
verb: ContainerVerb,
tokens_dir: str,
lifetime: Optional[Lifetime] = None,
cid: Optional[str] = None,
) -> str:
"""
This function generates session token for ContainerSessionContext
and writes it to the file. It is able to prepare session token file
for a specific container (<cid>) or for every container (adds
"wildcard" field).
Args:
owner_wallet: wallet of container owner.
session_wallet: wallet to which we grant the access via session token.
verb: verb to grant access to.
lifetime: lifetime options for session.
cid: container ID of the container
Returns:
The path to the generated session token file
"""
session = {
"container": {
"verb": verb.value,
"wildcard": cid is None,
**(
{"containerID": {"value": f"{json_utils.encode_for_json(cid)}"}}
if cid is not None
else {}
),
},
}
return generate_session_token(
owner_wallet=owner_wallet,
session_wallet=session_wallet,
session=session,
tokens_dir=tokens_dir,
lifetime=lifetime,
)
@reporter.step_deco("Generate Session Token For Object")
def generate_object_session_token(
owner_wallet: WalletInfo,
session_wallet: WalletInfo,
oids: list[str],
cid: str,
verb: ObjectVerb,
tokens_dir: str,
lifetime: Optional[Lifetime] = None,
) -> str:
"""
This function generates session token for ObjectSessionContext
and writes it to the file.
Args:
owner_wallet: wallet of container owner
session_wallet: wallet to which we grant the access via session token
cid: container ID of the container
oids: list of objectIDs to put into session
verb: verb to grant access to; Valid verbs are: ObjectVerb.
lifetime: lifetime options for session
Returns:
The path to the generated session token file
"""
session = {
"object": {
"verb": verb.value,
"target": {
"container": {"value": json_utils.encode_for_json(cid)},
"objects": [{"value": json_utils.encode_for_json(oid)} for oid in oids],
},
},
}
return generate_session_token(
owner_wallet=owner_wallet,
session_wallet=session_wallet,
session=session,
tokens_dir=tokens_dir,
lifetime=lifetime,
)
@reporter.step_deco("Get signed token for container session")
def get_container_signed_token(
owner_wallet: WalletInfo,
user_wallet: WalletInfo,
verb: ContainerVerb,
shell: Shell,
tokens_dir: str,
lifetime: Optional[Lifetime] = None,
) -> str:
"""
Returns signed token file path for static container session
"""
session_token_file = generate_container_session_token(
owner_wallet=owner_wallet,
session_wallet=user_wallet,
verb=verb,
tokens_dir=tokens_dir,
lifetime=lifetime,
)
return sign_session_token(shell, session_token_file, owner_wallet)
@reporter.step_deco("Get signed token for object session")
def get_object_signed_token(
owner_wallet: WalletInfo,
user_wallet: WalletInfo,
cid: str,
storage_objects: list[StorageObjectInfo],
verb: ObjectVerb,
shell: Shell,
tokens_dir: str,
lifetime: Optional[Lifetime] = None,
) -> str:
"""
Returns signed token file path for static object session
"""
storage_object_ids = [storage_object.oid for storage_object in storage_objects]
session_token_file = generate_object_session_token(
owner_wallet=owner_wallet,
session_wallet=user_wallet,
oids=storage_object_ids,
cid=cid,
verb=verb,
tokens_dir=tokens_dir,
lifetime=lifetime,
)
return sign_session_token(shell, session_token_file, owner_wallet)
@reporter.step_deco("Create Session Token")
def create_session_token(
shell: Shell,
owner: str,
wallet_path: str,
wallet_password: str,
rpc_endpoint: str,
) -> str:
"""
Create session token for an object.
Args:
shell: Shell instance.
owner: User that writes the token.
wallet_path: The path to wallet to which we grant the access via session token.
wallet_password: Wallet password.
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
Returns:
The path to the generated session token file.
"""
session_token = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
frostfscli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC)
frostfscli.session.create(
rpc_endpoint=rpc_endpoint,
address=owner,
wallet=wallet_path,
wallet_password=wallet_password,
out=session_token,
)
return session_token
@reporter.step_deco("Sign Session Token")
def sign_session_token(shell: Shell, session_token_file: str, wlt: WalletInfo) -> str:
"""
This function signs the session token by the given wallet.
Args:
shell: Shell instance.
session_token_file: The path to the session token file.
        wlt: WalletInfo of the signing wallet.
Returns:
The path to the signed token.
"""
signed_token_file = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
frostfscli = FrostfsCli(
shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG
)
frostfscli.util.sign_session_token(
wallet=wlt.path, from_file=session_token_file, to_file=signed_token_file
)
return signed_token_file
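# A minimal usage sketch (illustrative; wallets, objects and fixtures are
# assumed to come from the calling test):
#
#   token = get_object_signed_token(
#       owner_wallet, user_wallet, cid, storage_objects,
#       ObjectVerb.GET, shell, tokens_dir,
#   )
#   # `token` is the path to a signed session token file that can then be
#   # passed to object operations.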

View file

@ -0,0 +1,63 @@
import logging
from time import sleep
import pytest
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.error_patterns import OBJECT_ALREADY_REMOVED
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.object import delete_object, get_object
from frostfs_testlib.steps.epoch import tick_epoch
from frostfs_testlib.steps.tombstone import verify_head_tombstone
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
reporter = get_reporter()
logger = logging.getLogger("NeoLogger")
CLEANUP_TIMEOUT = 10
@reporter.step_deco("Delete Objects")
def delete_objects(
storage_objects: list[StorageObjectInfo], shell: Shell, cluster: Cluster
) -> None:
"""
Deletes given storage objects.
Args:
storage_objects: list of objects to delete
        shell: executor for cli command
        cluster: cluster instance under test
"""
with reporter.step("Delete objects"):
for storage_object in storage_objects:
storage_object.tombstone = delete_object(
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
shell=shell,
endpoint=cluster.default_rpc_endpoint,
)
verify_head_tombstone(
wallet_path=storage_object.wallet_file_path,
cid=storage_object.cid,
oid_ts=storage_object.tombstone,
oid=storage_object.oid,
shell=shell,
endpoint=cluster.default_rpc_endpoint,
)
tick_epoch(shell, cluster)
sleep(CLEANUP_TIMEOUT)
with reporter.step("Get objects and check errors"):
for storage_object in storage_objects:
with pytest.raises(Exception, match=OBJECT_ALREADY_REMOVED):
get_object(
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
shell=shell,
endpoint=cluster.default_rpc_endpoint,
)

View file

@ -0,0 +1,173 @@
#!/usr/bin/python3
"""
This module contains keywords which are used for asserting
that storage policies are respected.
"""
import logging
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.object import head_object
from frostfs_testlib.steps.complex_object_actions import get_last_object
from frostfs_testlib.storage.cluster import StorageNode
from frostfs_testlib.utils import string_utils
reporter = get_reporter()
logger = logging.getLogger("NeoLogger")
@reporter.step_deco("Get Object Copies")
def get_object_copies(
complexity: str, wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
) -> int:
"""
The function performs requests to all nodes of the container and
finds out if they store a copy of the object. The procedure is
    different for simple and complex objects, so the function requires
    an indication of object complexity.
Args:
complexity (str): the tag of object size and complexity,
[Simple|Complex]
wallet (str): the path to the wallet on whose behalf the
copies are got
cid (str): ID of the container
oid (str): ID of the Object
shell: executor for cli command
Returns:
(int): the number of object copies in the container
"""
return (
get_simple_object_copies(wallet, cid, oid, shell, nodes)
if complexity == "Simple"
else get_complex_object_copies(wallet, cid, oid, shell, nodes)
)
@reporter.step_deco("Get Simple Object Copies")
def get_simple_object_copies(
wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
) -> int:
"""
    To figure out the number of a simple object's copies, only direct
    HEAD requests should be made to every node of the container.
    We consider a non-empty HEAD response as a stored object copy.
Args:
wallet (str): the path to the wallet on whose behalf the
copies are got
cid (str): ID of the container
oid (str): ID of the Object
shell: executor for cli command
nodes: nodes to search on
Returns:
(int): the number of object copies in the container
"""
copies = 0
for node in nodes:
try:
response = head_object(
wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True
)
if response:
logger.info(f"Found object {oid} on node {node}")
copies += 1
except Exception:
logger.info(f"No {oid} object copy found on {node}, continue")
continue
return copies
@reporter.step_deco("Get Complex Object Copies")
def get_complex_object_copies(
wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
) -> int:
"""
    To figure out the number of a complex object's copies, we first
need to retrieve its Last object. We consider that the number of
complex object copies is equal to the number of its last object
copies. When we have the Last object ID, the task is reduced
to getting simple object copies.
Args:
wallet (str): the path to the wallet on whose behalf the
copies are got
cid (str): ID of the container
oid (str): ID of the Object
shell: executor for cli command
Returns:
(int): the number of object copies in the container
"""
last_oid = get_last_object(wallet, cid, oid, shell, nodes)
assert last_oid, f"No Last Object for {cid}/{oid} found among all Storage Nodes"
return get_simple_object_copies(wallet, cid, last_oid, shell, nodes)
@reporter.step_deco("Get Nodes With Object")
def get_nodes_with_object(
cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
) -> list[StorageNode]:
"""
The function returns list of nodes which store
the given object.
Args:
        cid (str): ID of the container which stores the object
oid (str): object ID
shell: executor for cli command
nodes: nodes to find on
Returns:
(list): nodes which store the object
"""
nodes_list = []
for node in nodes:
wallet = node.get_wallet_path()
wallet_config = node.get_wallet_config_path()
try:
res = head_object(
wallet,
cid,
oid,
shell=shell,
endpoint=node.get_rpc_endpoint(),
is_direct=True,
wallet_config=wallet_config,
)
if res is not None:
logger.info(f"Found object {oid} on node {node}")
nodes_list.append(node)
except Exception:
logger.info(f"No {oid} object copy found on {node}, continue")
continue
return nodes_list
@reporter.step_deco("Get Nodes Without Object")
def get_nodes_without_object(
wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
) -> list[StorageNode]:
"""
The function returns list of nodes which do not store
the given object.
Args:
wallet (str): the path to the wallet on whose behalf
we request the nodes
        cid (str): ID of the container which stores the object
oid (str): object ID
shell: executor for cli command
Returns:
(list): nodes which do not store the object
"""
nodes_list = []
for node in nodes:
try:
res = head_object(
wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True
)
if res is None:
nodes_list.append(node)
except Exception as err:
if string_utils.is_str_match_pattern(err, OBJECT_NOT_FOUND):
nodes_list.append(node)
else:
raise Exception(f"Got error {err} on head object command") from err
return nodes_list
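# A minimal placement check (illustrative): with a REP 2 placement policy two
# stored copies are expected; names are assumed from the calling test.
#
#   copies = get_simple_object_copies(wallet, cid, oid, shell, cluster.storage_nodes)
#   assert copies == 2, f"Expected 2 copies, got {copies}"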

View file

@ -0,0 +1,41 @@
import json
import logging
from neo3.wallet import wallet
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.object import head_object
reporter = get_reporter()
logger = logging.getLogger("NeoLogger")
@reporter.step_deco("Verify Head Tombstone")
def verify_head_tombstone(
wallet_path: str, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str
):
header = head_object(wallet_path, cid, oid_ts, shell=shell, endpoint=endpoint)["header"]
s_oid = header["sessionToken"]["body"]["object"]["target"]["objects"]
logger.info(f"Header Session OIDs is {s_oid}")
logger.info(f"OID is {oid}")
assert header["containerID"] == cid, "Tombstone Header CID is wrong"
with open(wallet_path, "r") as file:
wlt_data = json.loads(file.read())
wlt = wallet.Wallet.from_json(wlt_data, password="")
addr = wlt.accounts[0].address
assert header["ownerID"] == addr, "Tombstone Owner ID is wrong"
assert header["objectType"] == "TOMBSTONE", "Header Type isn't Tombstone"
assert (
header["sessionToken"]["body"]["object"]["verb"] == "DELETE"
), "Header Session Type isn't DELETE"
assert (
header["sessionToken"]["body"]["object"]["target"]["container"] == cid
), "Header Session ID is wrong"
assert (
oid in header["sessionToken"]["body"]["object"]["target"]["objects"]
), "Header Session OID is wrong"

View file

@ -0,0 +1,33 @@
from frostfs_testlib.storage.constants import _FrostfsServicesNames
from frostfs_testlib.storage.dataclasses.frostfs_services import (
HTTPGate,
InnerRing,
MainChain,
MorphChain,
S3Gate,
StorageNode,
)
from frostfs_testlib.storage.service_registry import ServiceRegistry
__class_registry = ServiceRegistry()
# Register default public services
__class_registry.register_service(_FrostfsServicesNames.STORAGE, StorageNode)
__class_registry.register_service(_FrostfsServicesNames.INNER_RING, InnerRing)
__class_registry.register_service(_FrostfsServicesNames.MORPH_CHAIN, MorphChain)
__class_registry.register_service(_FrostfsServicesNames.S3_GATE, S3Gate)
__class_registry.register_service(_FrostfsServicesNames.HTTP_GATE, HTTPGate)
# TODO: Remove this since we no longer have a main chain
__class_registry.register_service(_FrostfsServicesNames.MAIN_CHAIN, MainChain)
def get_service_registry() -> ServiceRegistry:
"""Returns registry with registered classes related to cluster and cluster nodes.
    ServiceClassRegistry is a singleton instance that can be configured with multiple classes that
    represent services on the cluster's physical nodes.
Returns:
Singleton ServiceClassRegistry instance.
"""
return __class_registry

View file

@ -0,0 +1,237 @@
import random
import re
import yaml
from frostfs_testlib.hosting import Host, Hosting
from frostfs_testlib.hosting.config import ServiceConfig
from frostfs_testlib.storage import get_service_registry
from frostfs_testlib.storage.constants import ConfigAttributes
from frostfs_testlib.storage.dataclasses.frostfs_services import (
HTTPGate,
InnerRing,
MorphChain,
S3Gate,
StorageNode,
)
from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass
from frostfs_testlib.storage.service_registry import ServiceRegistry
class ClusterNode:
"""
Represents physical node where multiple different services may be located
"""
class_registry: ServiceRegistry
id: int
host: Host
def __init__(self, host: Host, id: int) -> None:
self.host = host
self.id = id
self.class_registry = get_service_registry()
@property
def host_ip(self):
return self.host.config.address
def __eq__(self, other):
return self.host.config.address == other.host.config.address
def __hash__(self):
        return hash(self.host.config.address)
def __str__(self):
return self.host.config.address
def __repr__(self) -> str:
return self.host.config.address
# for backward compatibility and to not touch other codebase too much
@property
def storage_node(self) -> StorageNode:
return self.service(StorageNode)
# for backward compatibility and to not touch other codebase too much
@property
def ir_node(self) -> InnerRing:
return self.service(InnerRing)
# for backward compatibility and to not touch other codebase too much
@property
def morph_chain(self) -> MorphChain:
return self.service(MorphChain)
# for backward compatibility and to not touch other codebase too much
@property
def http_gate(self) -> HTTPGate:
return self.service(HTTPGate)
# for backward compatibility and to not touch other codebase too much
@property
def s3_gate(self) -> S3Gate:
return self.service(S3Gate)
def service(self, service_type: type[ServiceClass]) -> ServiceClass:
"""
Get a service cluster node of specified type.
Args:
service_type: type of the service which should be returned,
for frostfs it can be StorageNode, S3Gate, HttpGate, MorphChain and InnerRing.
Returns:
service of service_type class.
"""
service_entry = self.class_registry.get_entry(service_type)
service_name = service_entry["hosting_service_name"]
pattern = f"{service_name}{self.id}"
config = self.host.get_service_config(pattern)
return service_type(
self.id,
config.name,
self.host,
)
def get_list_of_services(self) -> list[str]:
return [
config.attributes[ConfigAttributes.SERVICE_NAME] for config in self.host.config.services
]
class Cluster:
"""
This class represents a Cluster object for the whole storage based on provided hosting
"""
default_rpc_endpoint: str
default_s3_gate_endpoint: str
default_http_gate_endpoint: str
def __init__(self, hosting: Hosting) -> None:
self._hosting = hosting
self.class_registry = get_service_registry()
self.default_rpc_endpoint = self.services(StorageNode)[0].get_rpc_endpoint()
self.default_s3_gate_endpoint = self.services(S3Gate)[0].get_endpoint()
self.default_http_gate_endpoint = self.services(HTTPGate)[0].get_endpoint()
@property
def hosts(self) -> list[Host]:
"""
Returns list of Hosts
"""
return self._hosting.hosts
# for backward compatibility and to not touch other codebase too much
@property
def storage_nodes(self) -> list[StorageNode]:
return self.services(StorageNode)
# for backward compatibility and to not touch other codebase too much
@property
def ir_nodes(self) -> list[InnerRing]:
return self.services(InnerRing)
# for backward compatibility and to not touch other codebase too much
@property
def s3_gates(self) -> list[S3Gate]:
return self.services(S3Gate)
@property
def http_gates(self) -> list[HTTPGate]:
return self.services(HTTPGate)
@property
def morph_chain(self) -> list[MorphChain]:
return self.services(MorphChain)
def services(self, service_type: type[ServiceClass]) -> list[ServiceClass]:
"""
Get all services in a cluster of specified type.
Args:
service_type: type of the services which should be returned,
for frostfs it can be StorageNode, S3Gate, HttpGate, MorphChain and InnerRing.
Returns:
list of services of service_type class.
"""
service = self.class_registry.get_entry(service_type)
service_name = service["hosting_service_name"]
cls: type[NodeBase] = service["cls"]
pattern = f"{service_name}\d*$"
configs = self.hosting.find_service_configs(pattern)
found_nodes = []
for config in configs:
# config.name is something like s3-gate01; cut the trailing digits to get the service name prefix
config_service_name = re.findall(r".*\D", config.name)[0]
# skip services of other types (and avoid shadowing the service_type parameter above)
if config_service_name != service_name:
continue
found_nodes.append(
cls(
self._get_id(config.name),
config.name,
self.hosting.get_host_by_service(config.name),
)
)
return found_nodes
@property
def cluster_nodes(self) -> list[ClusterNode]:
"""
Returns list of Cluster Nodes
"""
return [ClusterNode(host, id) for id, host in enumerate(self.hosts, start=1)]
@property
def hosting(self) -> Hosting:
return self._hosting
def _create_wallet_config(self, service: ServiceConfig) -> None:
# the attribute holds the wallet *config* path, so name the variable accordingly
wallet_config_path = service.attributes[ConfigAttributes.LOCAL_WALLET_CONFIG]
wallet_password = service.attributes[ConfigAttributes.WALLET_PASSWORD]
with open(wallet_config_path, "w") as file:
yaml.dump({"password": wallet_password}, file)
def create_wallet_configs(self, hosting: Hosting) -> None:
configs = hosting.find_service_configs(".*")
for config in configs:
if ConfigAttributes.LOCAL_WALLET_CONFIG in config.attributes:
self._create_wallet_config(config)
def is_local_devenv(self) -> bool:
if len(self.hosting.hosts) == 1:
host = self.hosting.hosts[0]
if host.config.address == "localhost" and host.config.plugin_name == "docker":
return True
return False
def _get_id(self, node_name) -> int:
pattern = "\d*$"
matches = re.search(pattern, node_name)
if not matches:
raise RuntimeError(f"Can't parse Id of the node {node_name}")
return int(matches.group())
def get_random_storage_rpc_endpoint(self) -> str:
return random.choice(self.get_storage_rpc_endpoints())
def get_storage_rpc_endpoints(self) -> list[str]:
nodes: list[StorageNode] = self.services(StorageNode)
return [node.get_rpc_endpoint() for node in nodes]
def get_morph_endpoints(self) -> list[str]:
nodes: list[MorphChain] = self.services(MorphChain)
return [node.get_endpoint() for node in nodes]
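Taken together, `ClusterNode` gives a per-host view and `Cluster` a topology-wide view of the same services. A minimal usage sketch, assuming a `Hosting` instance is already configured elsewhere (its construction is out of scope here):

```python
from frostfs_testlib.hosting import Hosting
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode

hosting: Hosting = ...  # assumed to be built from a hosting config elsewhere
cluster = Cluster(hosting)

# topology-wide view: every storage node in the setup
for storage_node in cluster.services(StorageNode):
    print(storage_node.get_rpc_endpoint())

# per-host view: the storage service on the first physical node
first_node = cluster.cluster_nodes[0]
print(first_node.service(StorageNode).get_rpc_endpoint())
```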

View file

@@ -0,0 +1,22 @@
class ConfigAttributes:
SERVICE_NAME = "systemd_service_name"
WALLET_PASSWORD = "wallet_password"
WALLET_PATH = "wallet_path"
WALLET_CONFIG = "wallet_config"
CONFIG_PATH = "config_path"
LOCAL_WALLET_PATH = "local_wallet_path"
LOCAL_WALLET_CONFIG = "local_config_path"
ENDPOINT_DATA_0 = "endpoint_data0"
ENDPOINT_DATA_1 = "endpoint_data1"
ENDPOINT_INTERNAL = "endpoint_internal0"
CONTROL_ENDPOINT = "control_endpoint"
UN_LOCODE = "un_locode"
class _FrostfsServicesNames:
STORAGE = "s"
S3_GATE = "s3-gate"
HTTP_GATE = "http-gate"
MORPH_CHAIN = "morph-chain"
INNER_RING = "ir"
MAIN_CHAIN = "main-chain"

View file

@@ -0,0 +1,207 @@
import frostfs_testlib.resources.optionals as optionals
from frostfs_testlib.load.k6 import K6
from frostfs_testlib.load.load_config import (
EndpointSelectionStrategy,
K6ProcessAllocationStrategy,
LoadParams,
LoadScenario,
LoadType,
)
from frostfs_testlib.load.load_steps import init_s3_client, prepare_k6_instances
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.load_params import (
K6_TEARDOWN_PERIOD,
LOAD_NODE_SSH_PASSWORD,
LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE,
LOAD_NODE_SSH_PRIVATE_KEY_PATH,
LOAD_NODE_SSH_USER,
LOAD_NODES,
)
from frostfs_testlib.shell.interfaces import SshCredentials
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.test_control import run_optionally
reporter = get_reporter()
class BackgroundLoadController:
k6_instances: list[K6]
k6_dir: str
load_params: LoadParams
load_nodes: list[str]
verification_params: LoadParams
nodes_under_load: list[ClusterNode]
ssh_credentials: SshCredentials
loaders_wallet: WalletInfo
endpoints: list[str]
def __init__(
self,
k6_dir: str,
load_params: LoadParams,
loaders_wallet: WalletInfo,
nodes_under_load: list[ClusterNode],
) -> None:
self.k6_dir = k6_dir
self.load_params = load_params
self.nodes_under_load = nodes_under_load
self.load_nodes = LOAD_NODES
self.loaders_wallet = loaders_wallet
if load_params.endpoint_selection_strategy is None:
raise RuntimeError("endpoint_selection_strategy should not be None")
self.endpoints = self._get_endpoints(
load_params.load_type, load_params.endpoint_selection_strategy
)
self.verification_params = LoadParams(
clients=load_params.readers,
scenario=LoadScenario.VERIFY,
registry_file=load_params.registry_file,
verify_time=load_params.verify_time,
load_type=load_params.load_type,
load_id=load_params.load_id,
working_dir=load_params.working_dir,
endpoint_selection_strategy=load_params.endpoint_selection_strategy,
k6_process_allocation_strategy=load_params.k6_process_allocation_strategy,
)
self.ssh_credentials = SshCredentials(
LOAD_NODE_SSH_USER,
LOAD_NODE_SSH_PASSWORD,
LOAD_NODE_SSH_PRIVATE_KEY_PATH,
LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE,
)
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED, [])
def _get_endpoints(
self, load_type: LoadType, endpoint_selection_strategy: EndpointSelectionStrategy
):
all_endpoints = {
LoadType.gRPC: {
EndpointSelectionStrategy.ALL: list(
set(
endpoint
for node_under_load in self.nodes_under_load
for endpoint in node_under_load.service(StorageNode).get_all_rpc_endpoint()
)
),
EndpointSelectionStrategy.FIRST: list(
set(
node_under_load.service(StorageNode).get_rpc_endpoint()
for node_under_load in self.nodes_under_load
)
),
},
# for some reason xk6 appends http protocol on its own
LoadType.S3: {
EndpointSelectionStrategy.ALL: list(
set(
endpoint.replace("http://", "")
for node_under_load in self.nodes_under_load
for endpoint in node_under_load.service(S3Gate).get_all_endpoints()
)
),
EndpointSelectionStrategy.FIRST: list(
set(
node_under_load.service(S3Gate).get_endpoint().replace("http://", "")
for node_under_load in self.nodes_under_load
)
),
},
}
return all_endpoints[load_type][endpoint_selection_strategy]
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@reporter.step_deco("Prepare background load instances")
def prepare(self):
if self.load_params.load_type == LoadType.S3:
init_s3_client(
self.load_nodes,
self.load_params,
self.k6_dir,
self.ssh_credentials,
self.nodes_under_load,
self.loaders_wallet,
)
self._prepare(self.load_params)
def _prepare(self, load_params: LoadParams):
self.k6_instances = prepare_k6_instances(
load_nodes=LOAD_NODES,
ssh_credentials=self.ssh_credentials,
k6_dir=self.k6_dir,
load_params=load_params,
endpoints=self.endpoints,
loaders_wallet=self.loaders_wallet,
)
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@reporter.step_deco("Start background load")
def start(self):
if self.load_params.preset is None:
raise RuntimeError("Preset should not be none at the moment of start")
with reporter.step(
f"Start background load on nodes {self.nodes_under_load}: "
f"writers = {self.load_params.writers}, "
f"obj_size = {self.load_params.object_size}, "
f"load_time = {self.load_params.load_time}, "
f"prepare_json = {self.load_params.preset.pregen_json}, "
f"endpoints = {self.endpoints}"
):
for k6_load_instance in self.k6_instances:
k6_load_instance.start()
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@reporter.step_deco("Stop background load")
def stop(self):
for k6_load_instance in self.k6_instances:
k6_load_instance.stop()
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED, True)
def is_running(self):
for k6_load_instance in self.k6_instances:
if not k6_load_instance.is_running:
return False
return True
def wait_until_finish(self):
if self.load_params.load_time is None:
raise RuntimeError("LoadTime should not be none")
for k6_instance in self.k6_instances:
k6_instance.wait_until_finished(self.load_params.load_time + int(K6_TEARDOWN_PERIOD))
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
def verify(self):
if self.verification_params.verify_time is None:
raise RuntimeError("verify_time should not be none")
self._prepare(self.verification_params)
with reporter.step("Run verify background load data"):
for k6_verify_instance in self.k6_instances:
k6_verify_instance.start()
k6_verify_instance.wait_until_finished(self.verification_params.verify_time)
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@reporter.step_deco("K6 run results")
def get_results(self) -> dict:
results = {}
for k6_instance in self.k6_instances:
if k6_instance.load_params.k6_process_allocation_strategy is None:
raise RuntimeError("k6_process_allocation_strategy should not be none")
result = k6_instance.get_results()
keys_map = {
K6ProcessAllocationStrategy.PER_LOAD_NODE: k6_instance.load_node,
K6ProcessAllocationStrategy.PER_ENDPOINT: k6_instance.endpoints[0],
}
key = keys_map[k6_instance.load_params.k6_process_allocation_strategy]
results[key] = result
return results
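The intended call order of the controller is prepare → start → wait_until_finish → stop → verify. A hedged lifecycle sketch; the `LoadParams` field values below are illustrative assumptions, and `k6_dir`, `loaders_wallet` and `nodes_under_load` are expected to come from fixtures:

```python
from frostfs_testlib.load.load_config import (
    EndpointSelectionStrategy,
    K6ProcessAllocationStrategy,
    LoadParams,
    LoadType,
)

load_params = LoadParams(  # field values are assumptions for illustration only
    load_type=LoadType.S3,
    endpoint_selection_strategy=EndpointSelectionStrategy.ALL,
    k6_process_allocation_strategy=K6ProcessAllocationStrategy.PER_LOAD_NODE,
    writers=4,
    readers=4,
    load_time=600,
)

controller = BackgroundLoadController(k6_dir, load_params, loaders_wallet, nodes_under_load)
controller.prepare()
controller.start()  # requires load_params.preset to be set
controller.wait_until_finish()
controller.stop()
controller.verify()
print(controller.get_results())
```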

View file

@@ -0,0 +1,130 @@
import time
import allure
import frostfs_testlib.resources.optionals as optionals
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.shell import CommandOptions, Shell
from frostfs_testlib.steps import epoch
from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode
from frostfs_testlib.storage.controllers.disk_controller import DiskController
from frostfs_testlib.testing.test_control import run_optionally, wait_for_success
from frostfs_testlib.utils.failover_utils import (
wait_all_storage_nodes_returned,
wait_for_host_offline,
wait_for_host_online,
wait_for_node_online,
)
reporter = get_reporter()
class ClusterStateController:
def __init__(self, shell: Shell, cluster: Cluster) -> None:
self.stopped_nodes: list[ClusterNode] = []
self.detached_disks: dict[str, DiskController] = {}
self.stopped_storage_nodes: list[StorageNode] = []
self.cluster = cluster
self.shell = shell
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Stop host of node {node}")
def stop_node_host(self, node: ClusterNode, mode: str):
with allure.step(f"Stop host {node.host.config.address}"):
node.host.stop_host(mode=mode)
wait_for_host_offline(self.shell, node.storage_node)
self.stopped_nodes.append(node)
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Start host of node {node}")
def start_node_host(self, node: ClusterNode):
with allure.step(f"Start host {node.host.config.address}"):
node.host.start_host()
wait_for_host_online(self.shell, node.storage_node)
wait_for_node_online(node.storage_node)
self.stopped_nodes.remove(node)
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Start stopped hosts")
def start_stopped_hosts(self):
for node in self.stopped_nodes:
node.host.start_host()
self.stopped_nodes = []
wait_all_storage_nodes_returned(self.shell, self.cluster)
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Detach disk {device} at {mountpoint} on node {node}")
def detach_disk(self, node: StorageNode, device: str, mountpoint: str):
disk_controller = self._get_disk_controller(node, device, mountpoint)
self.detached_disks[disk_controller.id] = disk_controller
disk_controller.detach()
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Attach disk {device} at {mountpoint} on node {node}")
def attach_disk(self, node: StorageNode, device: str, mountpoint: str):
disk_controller = self._get_disk_controller(node, device, mountpoint)
disk_controller.attach()
self.detached_disks.pop(disk_controller.id, None)
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Restore detached disks")
def restore_disks(self):
for disk_controller in self.detached_disks.values():
disk_controller.attach()
self.detached_disks = {}
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Stop storage service on {node}")
def stop_storage_service(self, node: ClusterNode):
node.storage_node.stop_service()
self.stopped_storage_nodes.append(node.storage_node)
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Start storage service on {node}")
def start_storage_service(self, node: ClusterNode):
node.storage_node.start_service()
self.stopped_storage_nodes.remove(node.storage_node)
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Start stopped storage services")
def start_stopped_storage_services(self):
for node in self.stopped_storage_nodes:
node.start_service()
self.stopped_storage_nodes = []
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@reporter.step_deco("Hard reboot host {node} via magic SysRq option")
def panic_reboot_host(self, node: ClusterNode):
shell = node.host.get_shell()
shell.exec('sudo sh -c "echo 1 > /proc/sys/kernel/sysrq"')
options = CommandOptions(close_stdin=True, timeout=1, check=False)
shell.exec('sudo sh -c "echo b > /proc/sysrq-trigger"', options)
# Give the node a moment to settle: a short wait here prevents SSH from hanging during the panic
time.sleep(10)
wait_for_host_online(self.shell, node.storage_node)
wait_for_node_online(node.storage_node)
@reporter.step_deco("Wait up to {timeout} seconds for nodes on cluster to align epochs")
def wait_for_epochs_align(self, timeout=60):
@wait_for_success(timeout, 5, None, True)
def check_epochs():
epochs_by_node = epoch.get_epochs_from_nodes(self.shell, self.cluster)
assert (
len(set(epochs_by_node.values())) == 1
), f"unaligned epochs found: {epochs_by_node}"
check_epochs()
def _get_disk_controller(
self, node: StorageNode, device: str, mountpoint: str
) -> DiskController:
disk_controller_id = DiskController.get_id(node, device)
if disk_controller_id in self.detached_disks.keys():
disk_controller = self.detached_disks[disk_controller_id]
else:
disk_controller = DiskController(node, device, mountpoint)
return disk_controller
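A short failover sketch built on the controller above; `shell` and `cluster` are assumed to come from the usual test fixtures, and the `mode` value is an assumption:

```python
controller = ClusterStateController(shell, cluster)
node = cluster.cluster_nodes[0]

controller.stop_node_host(node, mode="hard")  # "hard" is an assumed mode value
try:
    ...  # assertions against the degraded cluster go here
finally:
    controller.start_stopped_hosts()
    controller.wait_for_epochs_align()
```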

View file

@@ -0,0 +1,41 @@
from frostfs_testlib.hosting.interfaces import DiskInfo
from frostfs_testlib.shell import CommandOptions
from frostfs_testlib.storage.cluster import StorageNode
from frostfs_testlib.testing.test_control import wait_for_success
class DiskController:
def __init__(self, node: StorageNode, device: str, mountpoint: str) -> None:
self.node: StorageNode = node
self.device: str = device
self.device_by_label: str
self.mountpoint: str = mountpoint.strip()
self.disk_info: DiskInfo = DiskInfo()
self.id = self.get_id(node, device)
shell = node.host.get_shell()
cmd = f"sudo udevadm info -n {device} | egrep \"S:.*label\" | awk '{{print $2}}'"
self.device_by_label = f"/dev/{shell.exec(cmd).stdout.strip()}"
@wait_for_success(60, 3, False)
def _wait_until_detached(self):
return self.node.host.is_disk_attached(self.device, self.disk_info)
@wait_for_success(60, 3, True)
def _wait_until_attached(self):
return self.node.host.is_disk_attached(self.device, self.disk_info)
def detach(self):
self.disk_info = self.node.host.detach_disk(self.device)
self._wait_until_detached()
def attach(self):
self.node.host.attach_disk(self.device, self.disk_info)
self._wait_until_attached()
remote_shell = self.node.host.get_shell()
remote_shell.exec(f"sudo umount -l {self.device}", options=CommandOptions(check=False))
remote_shell.exec(f"sudo mount {self.device_by_label} {self.mountpoint}")
@staticmethod
def get_id(node: StorageNode, device: str):
return f"{node.host.config.address} - {device}"

View file

@@ -0,0 +1,118 @@
import json
from typing import Any
from frostfs_testlib.cli.frostfs_cli.shards import FrostfsCliShards
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.testing.test_control import wait_for_success
class ShardsWatcher:
def __init__(self, node_under_test: ClusterNode) -> None:
# instance attribute rather than a class attribute: a class-level list would be shared across watchers
self.shards_snapshots: list[dict[str, Any]] = []
self.storage_node = node_under_test.storage_node
self.take_shards_snapshot()
def take_shards_snapshot(self):
snapshot = self.get_shards_snapshot()
self.shards_snapshots.append(snapshot)
def get_shards_snapshot(self):
shards_snapshot: dict[str, Any] = {}
shards = self.get_shards()
for shard in shards:
shards_snapshot[shard["shard_id"]] = shard
return shards_snapshot
def _get_current_snapshot(self):
return self.shards_snapshots[-1]
def _get_previous_snapshot(self):
return self.shards_snapshots[-2]
def _is_shard_present(self, shard_id):
snapshot = self._get_current_snapshot()
return shard_id in snapshot
def get_shards_with_new_errors(self):
current_snapshot = self._get_current_snapshot()
previous_snapshot = self._get_previous_snapshot()
shards_with_new_errors: dict[str, Any] = {}
for shard_id, shard in previous_snapshot.items():
if current_snapshot[shard_id]["error_count"] > shard["error_count"]:
shards_with_new_errors[shard_id] = current_snapshot[shard_id]
return shards_with_new_errors
def get_shards_with_errors(self):
snapshot = self.get_shards_snapshot()
shards_with_errors: dict[str, Any] = {}
for shard_id, shard in snapshot.items():
if shard["error_count"] > 0:
shards_with_errors[shard_id] = shard
return shards_with_errors
def get_shard_status(self, shard_id: str):
snapshot = self.get_shards_snapshot()
assert shard_id in snapshot, f"Shard {shard_id} is missing: {snapshot}"
return snapshot[shard_id]["mode"]
@wait_for_success(60, 2)
def await_for_all_shards_status(self, status: str):
snapshot = self.get_shards_snapshot()
for shard_id in snapshot:
assert snapshot[shard_id]["mode"] == status, f"Shard {shard_id} has wrong status"
@wait_for_success(60, 2)
def await_for_shard_status(self, shard_id: str, status: str):
assert self.get_shard_status(shard_id) == status
@wait_for_success(60, 2)
def await_for_shard_have_new_errors(self, shard_id: str):
self.take_shards_snapshot()
assert self._is_shard_present(shard_id)
shards_with_new_errors = self.get_shards_with_new_errors()
assert (
shard_id in shards_with_new_errors
), f"Expected shard {shard_id} to have new errors, but haven't {self.shards_snapshots[-1]}"
@wait_for_success(300, 5)
def await_for_shards_have_no_new_errors(self):
self.take_shards_snapshot()
shards_with_new_errors = self.get_shards_with_new_errors()
assert len(shards_with_new_errors) == 0
def get_shards(self) -> list[dict[str, Any]]:
shards_cli = FrostfsCliShards(
self.storage_node.host.get_shell(),
self.storage_node.host.get_cli_config("frostfs-cli").exec_path,
)
response = shards_cli.list(
endpoint=self.storage_node.get_control_endpoint(),
wallet=self.storage_node.get_remote_wallet_path(),
wallet_password=self.storage_node.get_wallet_password(),
)
return json.loads(response.stdout.split(">", 1)[1])
def set_shard_mode(self, shard_id: str, mode: str, clear_errors: bool = True):
shards_cli = FrostfsCliShards(
self.storage_node.host.get_shell(),
self.storage_node.host.get_cli_config("frostfs-cli").exec_path,
)
return shards_cli.set_mode(
self.storage_node.get_control_endpoint(),
self.storage_node.get_remote_wallet_path(),
self.storage_node.get_wallet_password(),
mode=mode,
id=[shard_id],
clear_errors=clear_errors,
)
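Typical usage of the watcher is snapshot-act-compare: the constructor takes a baseline snapshot, the test performs a potentially damaging action, and the `await_for_*` helpers re-snapshot and diff. A minimal sketch with the fault-injection step left abstract:

```python
watcher = ShardsWatcher(node_under_test)  # takes the baseline snapshot

shard_id = list(watcher.get_shards_snapshot())[0]  # any shard will do for the demo
...  # inject a fault that should produce errors on this shard

watcher.await_for_shard_have_new_errors(shard_id)
watcher.set_shard_mode(shard_id, mode="read-write")  # mode value is an assumption
```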

View file

@@ -0,0 +1,103 @@
import logging
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, List, Optional, Union
from frostfs_testlib.utils import wallet_utils
logger = logging.getLogger("NeoLogger")
EACL_LIFETIME = 100500
FROSTFS_CONTRACT_CACHE_TIMEOUT = 30
class EACLOperation(Enum):
PUT = "put"
GET = "get"
HEAD = "head"
GET_RANGE = "getrange"
GET_RANGE_HASH = "getrangehash"
SEARCH = "search"
DELETE = "delete"
class EACLAccess(Enum):
ALLOW = "allow"
DENY = "deny"
class EACLRole(Enum):
OTHERS = "others"
USER = "user"
SYSTEM = "system"
class EACLHeaderType(Enum):
REQUEST = "req" # Filter request headers
OBJECT = "obj" # Filter object headers
SERVICE = "SERVICE" # Filter service headers. These are not processed by FrostFS nodes and exist for service use only
class EACLMatchType(Enum):
STRING_EQUAL = "=" # Return true if strings are equal
STRING_NOT_EQUAL = "!=" # Return true if strings are different
@dataclass
class EACLFilter:
header_type: EACLHeaderType = EACLHeaderType.REQUEST
match_type: EACLMatchType = EACLMatchType.STRING_EQUAL
key: Optional[str] = None
value: Optional[str] = None
def to_dict(self) -> Dict[str, Any]:
return {
"headerType": self.header_type,
"matchType": self.match_type,
"key": self.key,
"value": self.value,
}
@dataclass
class EACLFilters:
filters: Optional[List[EACLFilter]] = None
def __str__(self):
return ",".join(
[
f"{filter.header_type.value}:"
f"{filter.key}{filter.match_type.value}{filter.value}"
for filter in self.filters
]
if self.filters
else []
)
@dataclass
class EACLPubKey:
keys: Optional[List[str]] = None
@dataclass
class EACLRule:
operation: Optional[EACLOperation] = None
access: Optional[EACLAccess] = None
role: Optional[Union[EACLRole, str]] = None
filters: Optional[EACLFilters] = None
def to_dict(self) -> Dict[str, Any]:
return {
"Operation": self.operation,
"Access": self.access,
"Role": self.role,
"Filters": self.filters or [],
}
def __str__(self):
role = (
self.role.value
if isinstance(self.role, EACLRole)
else f'pubkey:{wallet_utils.get_wallet_public_key(self.role, "")}'
)
return f'{self.access.value} {self.operation.value} {self.filters or ""} {role}'
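For example, a rule denying GET for OTHERS when an object attribute matches renders as follows (built only from the dataclasses above):

```python
rule = EACLRule(
    operation=EACLOperation.GET,
    access=EACLAccess.DENY,
    role=EACLRole.OTHERS,
    filters=EACLFilters(
        [EACLFilter(header_type=EACLHeaderType.OBJECT, key="Department", value="HR")]
    ),
)
print(rule)  # deny get obj:Department=HR others
```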

View file

@@ -0,0 +1,173 @@
import yaml
from frostfs_testlib.blockchain import RPCClient
from frostfs_testlib.storage.constants import ConfigAttributes
from frostfs_testlib.storage.dataclasses.node_base import NodeBase
class InnerRing(NodeBase):
"""
Class represents an inner ring node in a cluster.
An inner ring node is not always the same as a physical host (or physical node, if you will):
it can be a service running in a container or on a physical host.
From a testing perspective it is not relevant how it actually runs,
since the frostfs network will still treat it as a "node".
"""
def service_healthcheck(self) -> bool:
health_metric = "frostfs_node_ir_health"
output = (
self.host.get_shell()
.exec(f"curl -s localhost:6662 | grep {health_metric} | sed 1,2d")
.stdout
)
return health_metric in output
def get_netmap_cleaner_threshold(self) -> str:
config_file = self.get_remote_config_path()
contents = self.host.get_shell().exec(f"cat {config_file}").stdout
config = yaml.safe_load(contents)
value = config["netmap_cleaner"]["threshold"]
return value
class S3Gate(NodeBase):
"""
Class represents an S3 gateway in a cluster
"""
def get_endpoint(self) -> str:
return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0)
def get_all_endpoints(self) -> list[str]:
return [
self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0),
self._get_attribute(ConfigAttributes.ENDPOINT_DATA_1),
]
def service_healthcheck(self) -> bool:
health_metric = "frostfs_s3_gw_state_health"
output = (
self.host.get_shell()
.exec(f"curl -s localhost:8086 | grep {health_metric} | sed 1,2d")
.stdout
)
return health_metric in output
@property
def label(self) -> str:
return f"{self.name}: {self.get_endpoint()}"
class HTTPGate(NodeBase):
"""
Class represents an HTTP gateway in a cluster
"""
def get_endpoint(self) -> str:
return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0)
def service_healthcheck(self) -> bool:
health_metric = "frostfs_http_gw_state_health"
output = (
self.host.get_shell()
.exec(f"curl -s localhost:5662 | grep {health_metric} | sed 1,2d")
.stdout
)
return health_metric in output
@property
def label(self) -> str:
return f"{self.name}: {self.get_endpoint()}"
class MorphChain(NodeBase):
"""
Class represents a side-chain (aka morph-chain) consensus node in a cluster.
A consensus node is not always the same as a physical host (or physical node, if you will):
it can be a service running in a container or on a physical host.
From a testing perspective it is not relevant how it actually runs,
since the frostfs network will still treat it as a "node".
"""
rpc_client: RPCClient
def construct(self):
self.rpc_client = RPCClient(self.get_endpoint())
def get_endpoint(self) -> str:
return self._get_attribute(ConfigAttributes.ENDPOINT_INTERNAL)
def service_healthcheck(self) -> bool:
# TODO: Rework in 1.3 release, when metrics for each service are available
return True
@property
def label(self) -> str:
return f"{self.name}: {self.get_endpoint()}"
class MainChain(NodeBase):
"""
Class represents a main-chain consensus node in a cluster.
A consensus node is not always the same as a physical host (or physical node, if you will):
it can be a service running in a container or on a physical host.
From a testing perspective it is not relevant how it actually runs,
since the frostfs network will still treat it as a "node".
"""
rpc_client: RPCClient
def construct(self):
self.rpc_client = RPCClient(self.get_endpoint())
def get_endpoint(self) -> str:
return self._get_attribute(ConfigAttributes.ENDPOINT_INTERNAL)
@property
def label(self) -> str:
return f"{self.name}: {self.get_endpoint()}"
class StorageNode(NodeBase):
"""
Class represents a storage node in a storage cluster.
A storage node is not always the same as a physical host (or physical node, if you will):
it can be a service running in a container or on a physical host.
From a testing perspective it is not relevant how it actually runs,
since the frostfs network will still treat it as a "node".
"""
def get_rpc_endpoint(self) -> str:
return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0)
def get_all_rpc_endpoint(self) -> list[str]:
return [
self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0),
self._get_attribute(ConfigAttributes.ENDPOINT_DATA_1),
]
def service_healthcheck(self) -> bool:
health_metric = "frostfs_node_state_health"
output = (
self.host.get_shell()
.exec(f"curl -s localhost:6672 | grep {health_metric} | sed 1,2d")
.stdout
)
return health_metric in output
def get_control_endpoint(self) -> str:
return self._get_attribute(ConfigAttributes.CONTROL_ENDPOINT)
def get_un_locode(self):
return self._get_attribute(ConfigAttributes.UN_LOCODE)
@property
def label(self) -> str:
return f"{self.name}: {self.get_rpc_endpoint()}"

View file

@@ -0,0 +1,122 @@
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Optional, TypedDict, TypeVar
from frostfs_testlib.hosting.config import ServiceConfig
from frostfs_testlib.hosting.interfaces import Host
from frostfs_testlib.storage.constants import ConfigAttributes
from frostfs_testlib.utils import wallet_utils
@dataclass
class NodeBase(ABC):
"""
Represents a node of some underlying service
"""
id: str
name: str
host: Host
def __init__(self, id, name, host) -> None:
self.id = id
self.name = name
self.host = host
self.construct()
def construct(self):
pass
def __eq__(self, other):
return self.name == other.name
def __hash__(self):
return hash(self.name)
def __str__(self):
return self.label
def __repr__(self) -> str:
return self.label
@property
def label(self) -> str:
return self.name
def get_service_systemctl_name(self) -> str:
return self._get_attribute(ConfigAttributes.SERVICE_NAME)
def start_service(self):
self.host.start_service(self.name)
@abstractmethod
def service_healthcheck(self) -> bool:
"""Service healthcheck."""
def stop_service(self):
self.host.stop_service(self.name)
def restart_service(self):
self.host.restart_service(self.name)
def get_wallet_password(self) -> str:
return self._get_attribute(ConfigAttributes.WALLET_PASSWORD)
def get_wallet_path(self) -> str:
return self._get_attribute(
ConfigAttributes.LOCAL_WALLET_PATH,
ConfigAttributes.WALLET_PATH,
)
def get_remote_wallet_path(self) -> str:
"""
Returns node wallet file path located on remote host
"""
return self._get_attribute(
ConfigAttributes.WALLET_PATH,
)
def get_remote_config_path(self) -> str:
"""
Returns node config file path located on remote host
"""
return self._get_attribute(
ConfigAttributes.CONFIG_PATH,
)
def get_wallet_config_path(self):
return self._get_attribute(
ConfigAttributes.LOCAL_WALLET_CONFIG,
ConfigAttributes.WALLET_CONFIG,
)
def get_wallet_public_key(self):
storage_wallet_path = self.get_wallet_path()
storage_wallet_pass = self.get_wallet_password()
return wallet_utils.get_wallet_public_key(storage_wallet_path, storage_wallet_pass)
def _get_attribute(
self, attribute_name: str, default_attribute_name: Optional[str] = None
) -> str:
config = self.host.get_service_config(self.name)
if attribute_name not in config.attributes:
if default_attribute_name is None:
raise RuntimeError(
f"Service {self.name} has no {attribute_name} in config and fallback attribute isn't set either"
)
return config.attributes[default_attribute_name]
return config.attributes[attribute_name]
def _get_service_config(self) -> ServiceConfig:
return self.host.get_service_config(self.name)
ServiceClass = TypeVar("ServiceClass", bound=NodeBase)
class NodeClassDict(TypedDict):
hosting_service_name: str
cls: type[NodeBase]

View file

@@ -0,0 +1,25 @@
from dataclasses import dataclass
from typing import Optional
@dataclass
class ObjectRef:
cid: str
oid: str
@dataclass
class LockObjectInfo(ObjectRef):
lifetime: Optional[int] = None
expire_at: Optional[int] = None
@dataclass
class StorageObjectInfo(ObjectRef):
size: Optional[int] = None
wallet_file_path: Optional[str] = None
file_path: Optional[str] = None
file_hash: Optional[str] = None
attributes: Optional[list[dict[str, str]]] = None
tombstone: Optional[str] = None
locks: Optional[list[LockObjectInfo]] = None

View file

@@ -0,0 +1,90 @@
import json
import logging
import os
import uuid
from dataclasses import dataclass
from typing import Optional
from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG, DEFAULT_WALLET_PASS
from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.cluster import Cluster, NodeBase
from frostfs_testlib.utils.wallet_utils import get_last_address_from_wallet, init_wallet
logger = logging.getLogger("frostfs.testlib.utils")
@dataclass
class WalletInfo:
path: str
password: str = DEFAULT_WALLET_PASS
config_path: str = DEFAULT_WALLET_CONFIG
@staticmethod
def from_node(node: NodeBase):
return WalletInfo(
node.get_wallet_path(), node.get_wallet_password(), node.get_wallet_config_path()
)
def get_address(self) -> str:
"""
Extracts the last address from wallet via neo3 lib.
Returns:
The address of the wallet.
"""
return get_last_address_from_wallet(self.path, self.password)
def get_address_from_json(self, account_id: int = 0) -> str:
"""
Extracts address of the given account id from wallet using json lookup.
(Useful if neo3 fails for some reason and can't be used).
Args:
account_id: id of the account to get address.
Returns:
address string.
"""
with open(self.path, "r") as wallet:
wallet_json = json.load(wallet)
assert abs(account_id) + 1 <= len(
wallet_json["accounts"]
), f"There is no index '{account_id}' in wallet: {wallet_json}"
return wallet_json["accounts"][account_id]["address"]
class WalletFactory:
def __init__(self, wallets_dir: str, shell: Shell, cluster: Cluster) -> None:
self.shell = shell
self.wallets_dir = wallets_dir
self.cluster = cluster
def create_wallet(
self, file_name: Optional[str] = None, password: Optional[str] = None
) -> WalletInfo:
"""
Creates new default wallet.
Args:
file_name: output wallet file name.
password: wallet password.
Returns:
WalletInfo object of new wallet.
"""
if file_name is None:
file_name = str(uuid.uuid4())
if password is None:
password = ""
base_path = os.path.join(self.wallets_dir, file_name)
wallet_path = f"{base_path}.json"
wallet_config_path = f"{base_path}.yaml"
init_wallet(wallet_path, password)
with open(wallet_config_path, "w") as config_file:
config_file.write(f'password: "{password}"')
return WalletInfo(wallet_path, password, wallet_config_path)
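A small sketch of creating a throwaway wallet for a test; the wallets directory is an arbitrary assumption, and `shell` and `cluster` come from fixtures:

```python
factory = WalletFactory("/tmp/wallets", shell, cluster)
wallet = factory.create_wallet(password="secret")  # file name defaults to a random uuid

print(wallet.path)         # /tmp/wallets/<uuid>.json
print(wallet.config_path)  # /tmp/wallets/<uuid>.yaml
print(wallet.get_address())
```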

View file

@@ -0,0 +1,21 @@
from frostfs_testlib.storage.dataclasses.node_base import NodeBase, NodeClassDict, ServiceClass
class ServiceRegistry:
_class_mapping: dict[str, NodeClassDict] = {}
def get_entry(self, service_type: type[ServiceClass]) -> NodeClassDict:
key = service_type.__name__
if key not in self._class_mapping:
raise RuntimeError(
f"Unregistered service type requested: {key}. At this moment registered services are: {self._class_mapping.keys()}"
)
return self._class_mapping[key]
def register_service(self, service_name: str, service_class: type[NodeBase]):
self._class_mapping[service_class.__name__] = {
"cls": service_class,
"hosting_service_name": service_name,
}
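The registry maps a testlib class to the hosting-side service name prefix; the `"s"` prefix below matches `_FrostfsServicesNames.STORAGE`. A minimal sketch:

```python
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode

registry = ServiceRegistry()
registry.register_service("s", StorageNode)

entry = registry.get_entry(StorageNode)
assert entry["hosting_service_name"] == "s"
assert entry["cls"] is StorageNode
```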

View file

@@ -0,0 +1,32 @@
from typing import Optional
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps import epoch
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
reporter = get_reporter()
# To skip adding every mandatory singleton dependency to EACH test function
class ClusterTestBase:
shell: Shell
cluster: Cluster
@reporter.step_deco("Tick {epochs_to_tick} epochs")
def tick_epochs(self, epochs_to_tick: int, alive_node: Optional[StorageNode] = None):
for _ in range(epochs_to_tick):
self.tick_epoch(alive_node)
def tick_epoch(self, alive_node: Optional[StorageNode] = None):
epoch.tick_epoch(self.shell, self.cluster, alive_node=alive_node)
def wait_for_epochs_align(self):
epoch.wait_for_epochs_align(self.shell, self.cluster)
def get_epoch(self):
return epoch.get_epoch(self.shell, self.cluster)
def ensure_fresh_epoch(self):
return epoch.ensure_fresh_epoch(self.shell, self.cluster)
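A hedged example of a test class built on this base; the `shell` and `cluster` singletons are assumed to be injected by session-level fixtures elsewhere:

```python
class TestEpochFlow(ClusterTestBase):
    def test_two_epoch_ticks(self):
        start_epoch = self.get_epoch()
        self.tick_epochs(2)
        self.wait_for_epochs_align()
        assert self.get_epoch() >= start_epoch + 2
```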

View file

@@ -0,0 +1,164 @@
import inspect
import logging
from functools import wraps
from time import sleep, time
from typing import Any
from _pytest.outcomes import Failed
from pytest import fail
logger = logging.getLogger("NeoLogger")
# TODO: we may consider deprecating some methods here and use tenacity instead
class expect_not_raises:
"""
Decorator/Context manager check that some action, method or test does not raise exceptions
Useful to set proper state of failed test cases in allure
Example:
def do_stuff():
raise Exception("Fail")
def test_yellow(): <- this test is marked yellow (Test Defect) in allure
do_stuff()
def test_red(): <- this test is marked red (Failed) in allure
with expect_not_raises():
do_stuff()
@expect_not_raises()
def test_also_red(): <- this test is also marked red (Failed) in allure
do_stuff()
"""
def __enter__(self):
pass
def __exit__(self, exception_type, exception_value, exception_traceback):
if exception_value:
fail(str(exception_value))
def __call__(self, func):
@wraps(func)
def impl(*a, **kw):
with expect_not_raises():
func(*a, **kw)
return impl
def retry(max_attempts: int, sleep_interval: int = 1, expected_result: Any = None):
"""
Decorator to wait for some conditions/functions to pass successfully.
This is useful when you don't know the exact moment something should start passing and do not
want to use sleep(X) with an unnecessarily large X.
Be careful though, wrapped function should only check the state of something, not change it.
"""
assert max_attempts >= 1, "Cannot apply retry decorator with max_attempts < 1"
def wrapper(func):
@wraps(func)
def impl(*a, **kw):
last_exception = None
for _ in range(max_attempts):
try:
actual_result = func(*a, **kw)
if expected_result is not None:
assert expected_result == actual_result
return actual_result
except Exception as ex:
logger.debug(ex)
last_exception = ex
sleep(sleep_interval)
except Failed as ex:
logger.debug(ex)
last_exception = ex
sleep(sleep_interval)
# timeout exceeded with no success, raise last_exception
if last_exception is not None:
raise last_exception
return impl
return wrapper
def run_optionally(enabled: bool, mock_value: Any = True):
"""
Decorator to run something conditionally.
MUST be placed after @pytest.fixture and before @allure decorators.
Args:
enabled: if true, decorated func will be called as usual. if false the decorated func will be skipped and mock_value will be returned.
mock_value: the value to be returned when decorated func is skipped.
"""
def deco(func):
@wraps(func)
def func_impl(*a, **kw):
if enabled:
return func(*a, **kw)
return mock_value
@wraps(func)
def gen_impl(*a, **kw):
if enabled:
yield from func(*a, **kw)
return
yield mock_value
return gen_impl if inspect.isgeneratorfunction(func) else func_impl
return deco
def wait_for_success(
max_wait_time: int = 60,
interval: int = 1,
expected_result: Any = None,
fail_testcase: bool = False,
):
"""
Decorator to wait for some conditions/functions to pass successfully.
This is useful when you don't know the exact moment something should start passing and do not
want to use sleep(X) with an unnecessarily large X.
Be careful though, wrapped function should only check the state of something, not change it.
"""
def wrapper(func):
@wraps(func)
def impl(*a, **kw):
start = int(round(time()))
last_exception = None
while start + max_wait_time >= int(round(time())):
try:
actual_result = func(*a, **kw)
if expected_result is not None:
assert expected_result == actual_result
return actual_result
except Exception as ex:
logger.debug(ex)
last_exception = ex
sleep(interval)
except Failed as ex:
logger.debug(ex)
last_exception = ex
sleep(interval)
if fail_testcase:
fail(str(last_exception))
# timeout exceeded with no success, raise last_exception
if last_exception is not None:
raise last_exception
return impl
return wrapper
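Hedged examples of the two polling decorators; `check_replication_state` and `node_status` are hypothetical helpers, not part of the library:

```python
@retry(max_attempts=5, sleep_interval=2, expected_result=True)
def object_is_replicated() -> bool:
    # must only check state, never change it
    return check_replication_state()  # hypothetical helper


@wait_for_success(max_wait_time=120, interval=5)
def node_reports_ready() -> None:
    assert node_status() == "READY"  # hypothetical helper
```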

View file

@@ -0,0 +1,135 @@
#!/usr/bin/python3.10
# TODO: This file is deprecated and all code which uses these calls should be refactored to use shell classes
"""
Helper functions to use with `frostfs-cli`, `neo-go` and other CLIs.
"""
import json
import logging
import subprocess
import sys
from contextlib import suppress
from datetime import datetime
from textwrap import shorten
from typing import Union
import pexpect
from frostfs_testlib.reporter import get_reporter
reporter = get_reporter()
logger = logging.getLogger("NeoLogger")
COLOR_GREEN = "\033[92m"
COLOR_OFF = "\033[0m"
def _cmd_run(cmd: str, timeout: int = 90) -> str:
"""
Runs the given shell command <cmd>; on success returns its stdout,
on failure raises RuntimeError with the error details.
"""
compl_proc = None
start_time = datetime.utcnow()
try:
logger.info(f"{COLOR_GREEN}Executing command: {cmd}{COLOR_OFF}")
compl_proc = subprocess.run(
cmd,
check=True,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
timeout=timeout,
shell=True,
)
output = compl_proc.stdout
return_code = compl_proc.returncode
end_time = datetime.utcnow()
logger.info(f"{COLOR_GREEN}Output: {output}{COLOR_OFF}")
_attach_allure_log(cmd, output, return_code, start_time, end_time)
return output
except subprocess.CalledProcessError as exc:
logger.info(
f"Command: {cmd}\n" f"Error:\nreturn code: {exc.returncode} " f"\nOutput: {exc.output}"
)
end_time = datetime.utcnow()
return_code, cmd_output = subprocess.getstatusoutput(cmd)
_attach_allure_log(cmd, cmd_output, return_code, start_time, end_time)
raise RuntimeError(
f"Command: {cmd}\n" f"Error:\nreturn code: {exc.returncode}\n" f"Output: {exc.output}"
) from exc
except OSError as exc:
raise RuntimeError(f"Command: {cmd}\n" f"Output: {exc.strerror}") from exc
except Exception as exc:
return_code, cmd_output = subprocess.getstatusoutput(cmd)
end_time = datetime.utcnow()
_attach_allure_log(cmd, cmd_output, return_code, start_time, end_time)
logger.info(
f"Command: {cmd}\n"
f"Error:\nreturn code: {return_code}\n"
f"Output: {exc.output.decode('utf-8') if type(exc.output) is bytes else exc.output}"
)
raise
def _run_with_passwd(cmd: str) -> str:
child = pexpect.spawn(cmd)
child.delaybeforesend = 1
child.expect(".*")
child.sendline("\r")
if sys.platform == "darwin":
child.expect(pexpect.EOF)
cmd = child.before
else:
child.wait()
cmd = child.read()
return cmd.decode()
def _configure_aws_cli(cmd: str, key_id: str, access_key: str, out_format: str = "json") -> str:
child = pexpect.spawn(cmd)
child.delaybeforesend = 1
child.expect("AWS Access Key ID.*")
child.sendline(key_id)
child.expect("AWS Secret Access Key.*")
child.sendline(access_key)
child.expect("Default region name.*")
child.sendline("")
child.expect("Default output format.*")
child.sendline(out_format)
child.wait()
cmd = child.read()
# child.expect(pexpect.EOF)
# cmd = child.before
return cmd.decode()
def _attach_allure_log(
cmd: str, output: str, return_code: int, start_time: datetime, end_time: datetime
) -> None:
command_attachment = (
f"COMMAND: '{cmd}'\n"
f"OUTPUT:\n {output}\n"
f"RC: {return_code}\n"
f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {end_time - start_time}"
)
with reporter.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'):
reporter.attach(command_attachment, "Command execution")
def log_command_execution(cmd: str, output: Union[str, dict]) -> None:
logger.info(f"{cmd}: {output}")
with suppress(Exception):
json_output = json.dumps(output, indent=4, sort_keys=True)
output = json_output
command_attachment = f"COMMAND: '{cmd}'\n" f"OUTPUT:\n {output}\n"
with reporter.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'):
reporter.attach(command_attachment, "Command execution")

View file

@@ -0,0 +1,30 @@
import logging
import re
from frostfs_testlib.reporter import get_reporter
reporter = get_reporter()
logger = logging.getLogger("NeoLogger")
@reporter.step_deco("Read environment.properties")
def read_env_properties(file_path: str) -> dict:
with open(file_path, "r") as file:
raw_content = file.read()
env_properties = {}
for line in raw_content.split("\n"):
m = re.match("(.*?)=(.*)", line)
if not m:
logger.warning(f"Could not parse env property from {line}")
continue
key, value = m.group(1), m.group(2)
env_properties[key] = value
return env_properties
@reporter.step_deco("Update data in environment.properties")
def save_env_properties(file_path: str, env_data: dict) -> None:
with open(file_path, "a+") as env_file:
for env, env_value in env_data.items():
env_file.write(f"{env}={env_value}\n")

View file

@@ -0,0 +1,256 @@
import logging
from dataclasses import dataclass
from time import sleep
from typing import Optional
from frostfs_testlib.hosting import Host
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.common import SERVICE_MAX_STARTUP_TIME
from frostfs_testlib.shell import CommandOptions, Shell
from frostfs_testlib.steps.cli.object import neo_go_dump_keys
from frostfs_testlib.steps.node_management import storage_node_healthcheck
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
from frostfs_testlib.storage.cluster import Cluster, ClusterNode, NodeBase, StorageNode
from frostfs_testlib.storage.dataclasses.frostfs_services import MorphChain
from frostfs_testlib.testing.test_control import retry, wait_for_success
from frostfs_testlib.utils.datetime_utils import parse_time
reporter = get_reporter()
logger = logging.getLogger("NeoLogger")
@reporter.step_deco("Ping node")
def ping_host(shell: Shell, host: Host):
options = CommandOptions(check=False)
return shell.exec(f"ping {host.config.address} -c 1", options).return_code
@reporter.step_deco("Wait for storage nodes returned to cluster")
def wait_all_storage_nodes_returned(shell: Shell, cluster: Cluster) -> None:
with reporter.step("Run health check for all storage nodes"):
for node in cluster.services(StorageNode):
wait_for_host_online(shell, node)
wait_for_node_online(node)
@retry(max_attempts=60, sleep_interval=5, expected_result=0)
@reporter.step_deco("Waiting for host of {node} to go online")
def wait_for_host_online(shell: Shell, node: StorageNode):
try:
# TODO: Quick solution for now, should be replaced by lib interactions
return ping_host(shell, node.host)
except Exception as err:
logger.warning(f"Host ping fails with error {err}")
return 1
@retry(max_attempts=60, sleep_interval=5, expected_result=1)
@reporter.step_deco("Waiting for host of {node} to go offline")
def wait_for_host_offline(shell: Shell, node: StorageNode):
try:
# TODO: Quick solution for now, should be replaced by lib interactions
return ping_host(shell, node.host)
except Exception as err:
logger.warning(f"Host ping fails with error {err}")
return 0
@retry(max_attempts=10, sleep_interval=15, expected_result=True)
@reporter.step_deco("Waiting for node {node} to go online")
def wait_for_node_online(node: StorageNode):
try:
health_check = storage_node_healthcheck(node)
except Exception as err:
logger.warning(f"Node healthcheck fails with error {err}")
return False
return health_check.health_status == "READY" and health_check.network_status == "ONLINE"
@reporter.step_deco("Check and return status of given service")
def service_status(service: str, shell: Shell) -> str:
return shell.exec(f"sudo systemctl is-active {service}").stdout.rstrip()
@dataclass
class TopCommand:
"""
Parses the output of the `top` command via `from_stdout`; a result can be returned for only one PID.
pid: process PID
output: stdout of the `top` command
"""
pid: Optional[str] = None
user: Optional[str] = None
pr: Optional[str] = None
ni: Optional[str] = None
virt: Optional[str] = None
res: Optional[str] = None
shr: Optional[str] = None
status: Optional[str] = None
cpu_percent: Optional[str] = None
mem_percent: Optional[str] = None
time: Optional[str] = None
cmd: Optional[str] = None
STATUS_RUNNING = "R"
STATUS_SLEEP = "S"
STATUS_ZOMBIE = "Z"
STATUS_UNSLEEP = "D"
STATUS_TRACED = "T"
@staticmethod
def from_stdout(output: str, requested_pid: int) -> "TopCommand":
list_var = [None] * 12
for line in output.split("\n"):
if str(requested_pid) in line:
list_var = line.split()
return TopCommand(
pid=list_var[0],
user=list_var[1],
pr=list_var[2],
ni=list_var[3],
virt=list_var[4],
res=list_var[5],
shr=list_var[6],
status=list_var[7],
cpu_percent=list_var[8],
mem_percent=list_var[9],
time=list_var[10],
cmd=list_var[11],
)
@reporter.step_deco("Run `top` command with specified PID")
def service_status_top(service: str, shell: Shell) -> TopCommand:
pid = service_pid(service, shell)
output = shell.exec(f"sudo top -b -n 1 -p {pid}").stdout
return TopCommand.from_stdout(output, pid)
@reporter.step_deco("Restart service n times with sleep")
def multiple_restart(
service_type: type[NodeBase],
node: ClusterNode,
count: int = 5,
sleep_interval: int = 2,
):
service_systemctl_name = node.service(service_type).get_service_systemctl_name()
service_name = node.service(service_type).name
for _ in range(count):
node.host.restart_service(service_name)
logger.info(
f"Restart {service_systemctl_name}; sleep {sleep_interval} seconds and continue"
)
sleep(sleep_interval)
@reporter.step_deco("Get status of list of services and check expected status")
@wait_for_success(60, 5)
def check_services_status(service_list: list[str], expected_status: str, shell: Shell):
cmd = ""
for service in service_list:
cmd += f' sudo systemctl status {service} --lines=0 | grep "Active:";'
result = shell.exec(cmd).stdout.rstrip()
statuses = list()
for line in result.split("\n"):
status_substring = line.split()
statuses.append(status_substring[1])
unique_statuses = list(set(statuses))
assert (
len(unique_statuses) == 1 and expected_status in unique_statuses
), f"Requested status={expected_status} not found in requested services={service_list}, list of statuses={result}"
@reporter.step_deco("Wait for active status of passed service")
@wait_for_success(60, 5)
def wait_service_in_desired_state(
service: str, shell: Shell, expected_status: Optional[str] = "active"
):
real_status = service_status(service=service, shell=shell)
assert (
expected_status == real_status
), f"Service {service}: expected status= {expected_status}, real status {real_status}"
@reporter.step_deco("Run healthcheck against passed service")
@wait_for_success(parse_time(SERVICE_MAX_STARTUP_TIME), 1)
def service_type_healthcheck(
service_type: type[NodeBase],
node: ClusterNode,
):
service = node.service(service_type)
assert (
service.service_healthcheck()
), f"Healthcheck failed for {service.get_service_systemctl_name()}, IP={node.host_ip}"
@reporter.step_deco("Kill by process name")
def kill_by_service_name(service_type: type[NodeBase], node: ClusterNode):
service_systemctl_name = node.service(service_type).get_service_systemctl_name()
pid = service_pid(service_systemctl_name, node.host.get_shell())
node.host.get_shell().exec(f"sudo kill -9 {pid}")
@reporter.step_deco("Service {service} suspend")
def suspend_service(shell: Shell, service: str):
shell.exec(f"sudo kill -STOP {service_pid(service, shell)}")
@reporter.step_deco("Service {service} resume")
def resume_service(shell: Shell, service: str):
shell.exec(f"sudo kill -CONT {service_pid(service, shell)}")
@reporter.step_deco("Retrieve service's pid")
# retry mechanism: when the task has started only recently, a '0' PID may be returned
@wait_for_success(10, 1)
def service_pid(service: str, shell: Shell) -> int:
output = shell.exec(f"systemctl show --property MainPID {service}").stdout.rstrip()
pid = int(output.split("=", 1)[1])
assert pid > 0, f"Service {service} has invalid PID={pid}"
return pid
@reporter.step_deco("Wrapper for neo-go dump keys command")
def dump_keys(shell: Shell, node: ClusterNode) -> dict:
host = node.host
service_config = host.get_service_config(node.service(MorphChain).name)
wallet = service_config.attributes["wallet_path"]
return neo_go_dump_keys(shell=shell, wallet=wallet)
@reporter.step_deco("Wait for object replication")
def wait_object_replication(
cid: str,
oid: str,
expected_copies: int,
shell: Shell,
nodes: list[StorageNode],
sleep_interval: int = 15,
attempts: int = 20,
) -> list[StorageNode]:
nodes_with_object = []
for _ in range(attempts):
nodes_with_object = get_nodes_with_object(cid, oid, shell=shell, nodes=nodes)
if len(nodes_with_object) >= expected_copies:
return nodes_with_object
sleep(sleep_interval)
raise AssertionError(
f"Expected {expected_copies} copies of object, but found {len(nodes_with_object)}. "
f"Waiting time {sleep_interval * attempts}"
)
def is_all_storage_nodes_returned(cluster: Cluster) -> bool:
with reporter.step("Run health check for all storage nodes"):
for node in cluster.services(StorageNode):
try:
health_check = storage_node_healthcheck(node)
except Exception as err:
logger.warning(f"Node healthcheck fails with error {err}")
return False
if health_check.health_status != "READY" or health_check.network_status != "ONLINE":
return False
return True
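A small sketch tying these helpers together after a restart; `cid` and `oid` identify an object stored earlier, and the remaining names come from fixtures:

```python
wait_all_storage_nodes_returned(shell, cluster)
nodes_with_object = wait_object_replication(
    cid,
    oid,
    expected_copies=2,
    shell=shell,
    nodes=cluster.services(StorageNode),
)
assert len(nodes_with_object) >= 2
```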

View file

@@ -0,0 +1,168 @@
import hashlib
import logging
import os
import uuid
from typing import Any, Optional
from frostfs_testlib.reporter import get_reporter
from frostfs_testlib.resources.common import ASSETS_DIR
reporter = get_reporter()
logger = logging.getLogger("NeoLogger")
def generate_file(size: int) -> str:
"""Generates a binary file with the specified size in bytes.
Args:
size: Size in bytes, can be declared as 6e+6 for example.
Returns:
The path to the generated file.
"""
file_path = os.path.join(ASSETS_DIR, str(uuid.uuid4()))
with open(file_path, "wb") as file:
file.write(os.urandom(size))
logger.info(f"File with size {size} bytes has been generated: {file_path}")
return file_path
def generate_file_with_content(
size: int,
file_path: Optional[str] = None,
content: Optional[str] = None,
) -> str:
"""Creates a new file with specified content.
Args:
size: Size of the generated content in bytes; used only when content is not specified.
file_path: Path to the file that should be created. If not specified, then random file
path will be generated.
content: Content that should be stored in the file. If not specified, then random binary
content will be generated.
Returns:
Path to the generated file.
"""
mode = "w+"
if content is None:
content = os.urandom(size)
mode = "wb"
if not file_path:
file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
else:
if not os.path.exists(os.path.dirname(file_path)):
os.makedirs(os.path.dirname(file_path))
with open(file_path, mode) as file:
file.write(content)
return file_path
@reporter.step_deco("Get File Hash")
def get_file_hash(file_path: str, len: Optional[int] = None, offset: Optional[int] = None) -> str:
"""Generates hash for the specified file.
Args:
file_path: Path to the file to generate hash for.
len: How many bytes to read.
offset: Position to start reading from.
Returns:
Hash of the file as hex-encoded string.
"""
file_hash = hashlib.sha256()
with open(file_path, "rb") as out:
if len and not offset:
file_hash.update(out.read(len))
elif len and offset:
out.seek(offset, 0)
file_hash.update(out.read(len))
elif offset and not len:
out.seek(offset, 0)
file_hash.update(out.read())
else:
file_hash.update(out.read())
return file_hash.hexdigest()
@reporter.step_deco("Concatenation set of files to one file")
def concat_files(file_paths: list, resulting_file_path: Optional[str] = None) -> str:
"""Concatenates several files into a single file.
Args:
file_paths: Paths to the files to concatenate.
resulting_file_path: Path to the file where concatenated content should be stored.
Returns:
Path to the resulting file.
"""
if not resulting_file_path:
resulting_file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
with open(resulting_file_path, "wb") as f:
for file in file_paths:
with open(file, "rb") as part_file:
f.write(part_file.read())
return resulting_file_path
def split_file(file_path: str, parts: int) -> list[str]:
"""Splits specified file into several specified number of parts.
Each part is saved under name `{original_file}_part_{i}`.
Args:
file_path: Path to the file that should be split.
parts: Number of parts the file should be split into.
Returns:
Paths to the part files.
"""
with open(file_path, "rb") as file:
content = file.read()
content_size = len(content)
chunk_size = int((content_size + parts) / parts)
part_id = 1
part_file_paths = []
for content_offset in range(0, content_size + 1, chunk_size):
part_file_name = f"{file_path}_part_{part_id}"
part_file_paths.append(part_file_name)
with open(part_file_name, "wb") as out_file:
out_file.write(content[content_offset : content_offset + chunk_size])
part_id += 1
return part_file_paths
def get_file_content(
file_path: str, content_len: Optional[int] = None, mode: str = "r", offset: Optional[int] = None
) -> Any:
"""Returns content of specified file.
Args:
file_path: Path to the file.
content_len: Limit of content length. If None, then entire file content is returned;
otherwise only the first content_len bytes of the content are returned.
mode: Mode of opening the file.
offset: Position to start reading from.
Returns:
Content of the specified file.
"""
with open(file_path, mode) as file:
if content_len and not offset:
content = file.read(content_len)
elif content_len and offset:
file.seek(offset, 0)
content = file.read(content_len)
elif offset and not content_len:
file.seek(offset, 0)
content = file.read()
else:
content = file.read()
return content
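A quick self-check built only on the helpers above (assuming `ASSETS_DIR` exists): split a generated file, concatenate the parts back and compare hashes:

```python
original = generate_file(1024)
parts = split_file(original, parts=4)
rebuilt = concat_files(parts)
assert get_file_hash(original) == get_file_hash(rebuilt)
```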

View file

@@ -0,0 +1,79 @@
import logging
import re
from frostfs_testlib.cli import FrostfsAdm, FrostfsCli
from frostfs_testlib.hosting import Hosting
from frostfs_testlib.resources.cli import (
FROSTFS_ADM_EXEC,
FROSTFS_AUTHMATE_EXEC,
FROSTFS_CLI_EXEC,
NEOGO_EXECUTABLE,
)
from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
from frostfs_testlib.shell import Shell
logger = logging.getLogger("NeoLogger")
def get_local_binaries_versions(shell: Shell) -> dict[str, str]:
versions = {}
for binary in [NEOGO_EXECUTABLE, FROSTFS_AUTHMATE_EXEC]:
out = shell.exec(f"{binary} --version").stdout
versions[binary] = _parse_version(out)
frostfs_cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
versions[FROSTFS_CLI_EXEC] = _parse_version(frostfs_cli.version.get().stdout)
try:
frostfs_adm = FrostfsAdm(shell, FROSTFS_ADM_EXEC)
versions[FROSTFS_ADM_EXEC] = _parse_version(frostfs_adm.version.get().stdout)
except RuntimeError:
logger.info(f"{FROSTFS_ADM_EXEC} not installed")
out = shell.exec("aws --version").stdout
out_lines = out.split("\n")
versions["AWS"] = out_lines[0] if out_lines else "Unknown"
return versions
def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]:
versions_by_host = {}
for host in hosting.hosts:
binary_path_by_name = {} # Maps binary name to executable path
for service_config in host.config.services:
exec_path = service_config.attributes.get("exec_path")
if exec_path:
binary_path_by_name[service_config.name] = exec_path
for cli_config in host.config.clis:
binary_path_by_name[cli_config.name] = cli_config.exec_path
shell = host.get_shell()
versions_at_host = {}
for binary_name, binary_path in binary_path_by_name.items():
try:
result = shell.exec(f"{binary_path} --version")
versions_at_host[binary_name] = _parse_version(result.stdout)
except Exception as exc:
logger.error(f"Cannot get version for {binary_path} because of\n{exc}")
versions_at_host[binary_name] = "Unknown"
versions_by_host[host.config.address] = versions_at_host
# Consolidate versions across all hosts
versions = {}
for host, binary_versions in versions_by_host.items():
for name, version in binary_versions.items():
captured_version = versions.get(name)
if captured_version:
assert (
captured_version == version
), f"Binary {name} has inconsistent version on host {host}"
else:
versions[name] = version
return versions
def _parse_version(version_output: str) -> str:
version = re.search(r"version[:\s]*v?(.+)", version_output, re.IGNORECASE)
return version.group(1).strip() if version else "Unknown"
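For reference, `_parse_version` on two illustrative banners (inputs are made up for the demo):

```python
assert _parse_version("frostfs-adm version: v1.2.3") == "1.2.3"
assert _parse_version("unparseable banner") == "Unknown"
```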