Compare commits

...

10 commits

SHA1        Date                        Message
d50b8b758b  2024-09-26 16:28:20 +03:00  [#295] add billing metrics methods
24b8ca73d7  2024-09-18 12:30:02 +00:00  [#291] get namespace endpoint
cef64e315e  2024-09-18 12:29:54 +00:00  [#267] add no rule found object and morph chain
0d750ed114  2024-09-17 10:19:28 +00:00  [#293] Add in CSC methods change blockchain netmap and update CliWrapper
                                        Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
1bee69042b  2024-09-17 09:38:03 +00:00  [#294] add wipe data using wipefs method
                                        Signed-off-by: m.malygina <m.malygina@yadro.com>
4a2ac8a9b6  2024-09-11 10:42:51 +03:00  [#290] Update restore traffic method
                                        Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
36bfe385d5  2024-09-10 14:05:44 +00:00  Added method get s3 endpoint for namespace
565fd4c72b  2024-09-10 13:28:57 +00:00  [#289] Move temp dir fixture to testlib
                                        Signed-off-by: a.berezin <a.berezin@yadro.com>
84e83487f9  2024-09-10 13:54:51 +03:00  [#288] Update object and chunks Clients
                                        Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
d2f8323fb9  2024-09-03 15:11:43 +03:00  [#286] Change args id in shards.set-mode command
                                        Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
21 changed files with 353 additions and 89 deletions

View file

@ -1,3 +1,3 @@
__version__ = "2.0.1"
from .fixtures import configure_testlib, hosting
from .fixtures import configure_testlib, hosting, temp_directory

View file

@ -350,3 +350,129 @@ class FrostfsAdmMorph(CliCommand):
if param not in ["self", "node_netmap_keys"]
},
)
def add_rule(
self,
endpoint: str,
chain_id: str,
target_name: str,
target_type: str,
rule: Optional[list[str]] = None,
path: Optional[str] = None,
chain_id_hex: Optional[bool] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
address: Address of wallet account
chain-id: Assign ID to the parsed chain
chain-id-hex: Flag to parse chain ID as hex
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
path: Path to encoded chain in JSON or binary format
rule: Rule statement
target-name: Resource name in APE resource name format
target-type: Resource type(container/namespace)
timeout: Timeout for an operation (default 15s)
wallet: Path to the wallet or binary key
Returns:
Command`s result.
"""
return self._execute(
"control add-rule",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def get_rule(
self,
endpoint: str,
chain_id: str,
target_name: str,
target_type: str,
chain_id_hex: Optional[bool] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
address string Address of wallet account
chain-id string Chain id
chain-id-hex Flag to parse chain ID as hex
endpoint string Remote node control address (as 'multiaddr' or '<host>:<port>')
target-name string Resource name in APE resource name format
target-type string Resource type(container/namespace)
timeout duration Timeout for an operation (default 15s)
wallet string Path to the wallet or binary key
Returns:
Command`s result.
"""
return self._execute(
"control get-rule",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def list_rules(
self,
target_type: str,
target_name: Optional[str] = None,
rpc_endpoint: Optional[str] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
address: Address of wallet account
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
target-name: Resource name in APE resource name format
target-type: Resource type(container/namespace)
timeout: Timeout for an operation (default 15s)
wallet: Path to the wallet or binary key
Returns:
Command`s result.
"""
return self._execute(
"morph ape list-rule-chains",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def remove_rule(
self,
endpoint: str,
chain_id: str,
target_name: str,
target_type: str,
all: Optional[bool] = None,
chain_id_hex: Optional[bool] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
address: Address of wallet account
all: Remove all chains
chain-id: Assign ID to the parsed chain
chain-id-hex: Flag to parse chain ID as hex
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
target-name: Resource name in APE resource name format
target-type: Resource type(container/namespace)
timeout: Timeout for an operation (default 15s)
wallet: Path to the wallet or binary key
Returns:
Command`s result.
"""
return self._execute(
"control remove-rule",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
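
A minimal usage sketch of the new APE rule-chain wrappers. The shell setup, import paths, executable path, endpoint and rule values below are illustrative assumptions and are not part of this changeset.

from frostfs_testlib.cli import FrostfsAdm          # import path assumed
from frostfs_testlib.shell import LocalShell        # local shell implementation assumed

# Assumed binary path and control endpoint for illustration only.
frostfs_adm = FrostfsAdm(shell=LocalShell(), frostfs_adm_exec_path="/usr/bin/frostfs-adm")

# Attach a rule chain to a container target, read it back, then remove it.
frostfs_adm.morph.add_rule(
    endpoint="localhost:8090",
    chain_id="allow-get-chain",
    target_name="example-container",
    target_type="container",
    rule=["allow Object.Get *"],
)
print(frostfs_adm.morph.get_rule(
    endpoint="localhost:8090",
    chain_id="allow-get-chain",
    target_name="example-container",
    target_type="container",
).stdout)
frostfs_adm.morph.remove_rule(
    endpoint="localhost:8090",
    chain_id="allow-get-chain",
    target_name="example-container",
    target_type="container",
)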

View file

@ -370,11 +370,11 @@ class FrostfsCliObject(CliCommand):
self,
rpc_endpoint: str,
cid: str,
oid: Optional[str] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
oid: Optional[str] = None,
trace: bool = False,
root: bool = False,
verify_presence_all: bool = False,

View file

@ -40,7 +40,7 @@ class FrostfsCliShards(CliCommand):
self,
endpoint: str,
mode: str,
id: Optional[list[str]],
id: Optional[list[str]] = None,
wallet: Optional[str] = None,
wallet_password: Optional[str] = None,
address: Optional[str] = None,

View file

@ -7,7 +7,7 @@ import yaml
from frostfs_testlib import reporter
from frostfs_testlib.hosting.hosting import Hosting
from frostfs_testlib.resources.common import HOSTING_CONFIG_FILE
from frostfs_testlib.resources.common import ASSETS_DIR, HOSTING_CONFIG_FILE
from frostfs_testlib.storage import get_service_registry
@ -24,6 +24,16 @@ def configure_testlib():
registry.register_service(svc.name, svc.load())
@pytest.fixture(scope="session")
def temp_directory(configure_testlib):
with reporter.step("Prepare tmp directory"):
full_path = ASSETS_DIR
if not os.path.exists(full_path):
os.mkdir(full_path)
return full_path
@pytest.fixture(scope="session")
def hosting(configure_testlib) -> Hosting:
with open(HOSTING_CONFIG_FILE, "r") as file:
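
A hedged example of a test consuming the new session-scoped temp_directory fixture added above; the test name and file content are illustrative assumptions.

import os


def test_artifact_is_written_to_temp_directory(temp_directory: str):
    # temp_directory resolves to ASSETS_DIR and is created once per session.
    artifact_path = os.path.join(temp_directory, "example-artifact.txt")
    with open(artifact_path, "w") as file:
        file.write("payload")
    assert os.path.isfile(artifact_path)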

View file

@ -185,6 +185,12 @@ class DockerHost(Host):
def is_file_exist(self, file_path: str) -> None:
raise NotImplementedError("Not implemented for docker")
def wipefs_storage_node_data(self, service_name: str) -> None:
raise NotImplementedError("Not implemented for docker")
def finish_wipefs(self, service_name: str) -> None:
raise NotImplementedError("Not implemented for docker")
def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None:
volume_path = self.get_data_directory(service_name)
@ -240,7 +246,7 @@ class DockerHost(Host):
until: Optional[datetime] = None,
unit: Optional[str] = None,
exclude_filter: Optional[str] = None,
priority: Optional[str] = None
priority: Optional[str] = None,
) -> str:
client = self._get_docker_client()
filtered_logs = ""

View file

@ -178,6 +178,21 @@ class Host(ABC):
cache_only: To delete cache only.
"""
@abstractmethod
def wipefs_storage_node_data(self, service_name: str) -> None:
"""Erases all data of the storage node with specified name.
Args:
service_name: Name of storage node service.
"""
def finish_wipefs(self, service_name: str) -> None:
"""Completes the wipefs routine for the storage node with specified name.
Args:
service_name: Name of storage node service.
"""
@abstractmethod
def delete_fstree(self, service_name: str) -> None:
"""
@ -297,7 +312,7 @@ class Host(ABC):
until: Optional[datetime] = None,
unit: Optional[str] = None,
exclude_filter: Optional[str] = None,
priority: Optional[str] = None
priority: Optional[str] = None,
) -> str:
"""Get logs from host filtered by regex.
@ -306,7 +321,7 @@ class Host(ABC):
since: If set, limits the time from which logs should be collected. Must be in UTC.
until: If set, limits the time until which logs should be collected. Must be in UTC.
unit: required unit.
priority: logs level, 0 - emergency, 7 - debug. All messages with that code and higher.
priority: logs level, 0 - emergency, 7 - debug. All messages with that code and higher.
For example, if we specify the -p 2 option, journalctl will show all messages with levels 2, 1 and 0.
Returns:
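
A rough sketch of how a concrete Host subclass might satisfy the new wipefs hooks declared above; the device-lookup helper and the exact shell commands are assumptions, not taken from this changeset.

from frostfs_testlib.hosting import Host


class ExampleShellHost(Host):
    def wipefs_storage_node_data(self, service_name: str) -> None:
        device = self._lookup_data_device(service_name)  # hypothetical helper
        self.get_shell().exec(f"wipefs --all --force {device}")

    def finish_wipefs(self, service_name: str) -> None:
        # Hypothetical follow-up: recreate the filesystem once wiping is done.
        device = self._lookup_data_device(service_name)
        self.get_shell().exec(f"mkfs.ext4 {device}")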

View file

@ -29,3 +29,4 @@ S3_MALFORMED_XML_REQUEST = "The XML you provided was not well-formed or did not
RULE_ACCESS_DENIED_CONTAINER = "access to container operation {operation} is denied by access policy engine: Access denied"
RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied"
NO_RULE_FOUND_CONTAINER = "access to container operation {operation} is denied by access policy engine: NoRuleFound"
NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request: method {operation}: NoRuleFound"

View file

@ -1,8 +1,10 @@
import json
import re
from datetime import datetime
from frostfs_testlib import reporter
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.testing.test_control import wait_for_success
@reporter.step("Check metrics result")
@ -22,6 +24,24 @@ def check_metrics_counter(
), f"Expected: {counter_exp} {operator} Actual: {counter_act} in node: {cluster_node}"
@reporter.step("Check Victoria metrics agent metrics result")
def check_vma_metrics_counter(metrics_results: list[dict], bucket_name: str, operation: str, counter_exp: int = 0, operator: str = "=="):
with reporter.step(f"Get metrics value for bucket: {bucket_name}, operation: {operation}"):
counter_act = None
try:
for res in metrics_results:
metric = res["metric"]
if metric.get("bucket") == bucket_name and metric.get("operation") == operation and metric.get("cid"):
counter_act = int(res["values"][0][-1])
break
except Exception as e:
counter_act = 0
with reporter.step("Check metric value"):
assert counter_act, f"Not found metric value for bucket: {bucket_name}, operation: {operation}"
assert eval(f"{counter_act} {operator} {counter_exp}"), f"Expected: {counter_exp} {operator} Actual: {counter_act}"
@reporter.step("Get metrics value from node: {node}")
def get_metrics_value(node: ClusterNode, parse_from_command: bool = False, **metrics_greps: str):
try:
@ -36,6 +56,19 @@ def get_metrics_value(node: ClusterNode, parse_from_command: bool = False, **met
return metrics_counter
@reporter.step("Get VM agent metrics")
@wait_for_success(max_wait_time=300, interval=30)
def get_vm_agent_metrics_values(node: ClusterNode, metric_name: str, since: datetime = None, **greps):
try:
command_result = node.metrics.storage.get_vma_metrics_result(metric_name, since, **greps)
result = json.loads(command_result)
except Exception as e:
...
assert len(result["data"]["result"]) > 0, "Metrics are not available"
return result
@reporter.step("Parse metrics count and calc sum of result")
def calc_metrics_count_from_stdout(metric_result_stdout: str, command: str = None):
if command:
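
A hedged sketch of how the new Victoria Metrics helpers above might be combined in a test; the import path, node object, metric name and bucket are assumptions.

from datetime import datetime, timedelta

# Import path is assumed; the helpers are defined in the metrics module shown above.
from frostfs_testlib.steps.metrics import check_vma_metrics_counter, get_vm_agent_metrics_values


def check_bucket_put_counter(node, bucket_name: str):
    since = datetime.utcnow() - timedelta(minutes=10)
    result = get_vm_agent_metrics_values(node, "frostfs_s3_requests_total", since=since, bucket=bucket_name)
    check_vma_metrics_counter(result["data"]["result"], bucket_name, operation="PUT", counter_exp=1, operator=">=")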

View file

@ -13,6 +13,7 @@ from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.epoch import tick_epoch, wait_for_epochs_align
from frostfs_testlib.storage.cluster import Cluster, StorageNode
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils import datetime_utils
logger = logging.getLogger("NeoLogger")
@ -111,10 +112,7 @@ def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str:
storage_wallet_path = node.get_wallet_path()
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, config_file=storage_wallet_config)
return cli.netmap.snapshot(
rpc_endpoint=node.get_rpc_endpoint(),
wallet=storage_wallet_path,
).stdout
return cli.netmap.snapshot(rpc_endpoint=node.get_rpc_endpoint(), wallet=storage_wallet_path).stdout
@reporter.step("Get shard list for {node}")
@ -202,12 +200,7 @@ def delete_node_data(node: StorageNode) -> None:
@reporter.step("Exclude node {node_to_exclude} from network map")
def exclude_node_from_network_map(
node_to_exclude: StorageNode,
alive_node: StorageNode,
shell: Shell,
cluster: Cluster,
) -> None:
def exclude_node_from_network_map(node_to_exclude: StorageNode, alive_node: StorageNode, shell: Shell, cluster: Cluster) -> None:
node_netmap_key = node_to_exclude.get_wallet_public_key()
storage_node_set_status(node_to_exclude, status="offline")
@ -221,12 +214,7 @@ def exclude_node_from_network_map(
@reporter.step("Include node {node_to_include} into network map")
def include_node_to_network_map(
node_to_include: StorageNode,
alive_node: StorageNode,
shell: Shell,
cluster: Cluster,
) -> None:
def include_node_to_network_map(node_to_include: StorageNode, alive_node: StorageNode, shell: Shell, cluster: Cluster) -> None:
storage_node_set_status(node_to_include, status="online")
# Per suggestion of @fyrchik we need to wait for 2 blocks after we set status and after tick epoch.
@ -236,7 +224,7 @@ def include_node_to_network_map(
tick_epoch(shell, cluster)
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2)
check_node_in_map(node_to_include, shell, alive_node)
await_node_in_map(node_to_include, shell, alive_node)
@reporter.step("Check node {node} in network map")
@ -250,6 +238,11 @@ def check_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[Stor
assert node_netmap_key in snapshot, f"Expected node with key {node_netmap_key} to be in network map"
@wait_for_success(300, 15, title="Await node {node} in network map")
def await_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None:
check_node_in_map(node, shell, alive_node)
@reporter.step("Check node {node} NOT in network map")
def check_node_not_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None:
alive_node = alive_node or node
@ -276,12 +269,7 @@ def wait_for_node_to_be_ready(node: StorageNode) -> None:
@reporter.step("Remove nodes from network map trough cli-adm morph command")
def remove_nodes_from_map_morph(
shell: Shell,
cluster: Cluster,
remove_nodes: list[StorageNode],
alive_node: Optional[StorageNode] = None,
):
def remove_nodes_from_map_morph(shell: Shell, cluster: Cluster, remove_nodes: list[StorageNode], alive_node: Optional[StorageNode] = None):
"""
Move node to the Offline state in the candidates list and tick an epoch to update the netmap
using frostfs-adm
@ -300,9 +288,5 @@ def remove_nodes_from_map_morph(
if FROSTFS_ADM_EXEC and FROSTFS_ADM_CONFIG_PATH:
# If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests)
frostfsadm = FrostfsAdm(
shell=remote_shell,
frostfs_adm_exec_path=FROSTFS_ADM_EXEC,
config_file=FROSTFS_ADM_CONFIG_PATH,
)
frostfsadm = FrostfsAdm(shell=remote_shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH)
frostfsadm.morph.remove_nodes(node_netmap_keys)
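
A hedged usage sketch for the compacted netmap helpers; the import path and the cluster/shell objects are assumptions.

from frostfs_testlib.steps.node_management import (  # import path assumed
    exclude_node_from_network_map,
    include_node_to_network_map,
)


def cycle_node_through_netmap(cluster, shell):
    node_to_cycle, alive_node = cluster.storage_nodes[0], cluster.storage_nodes[1]
    exclude_node_from_network_map(node_to_cycle, alive_node, shell, cluster)
    # include_node_to_network_map now finishes with await_node_in_map, which retries
    # check_node_in_map for up to 300 seconds.
    include_node_to_network_map(node_to_cycle, alive_node, shell, cluster)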

View file

@ -11,10 +11,10 @@ from frostfs_testlib.storage import get_service_registry
from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml
from frostfs_testlib.storage.constants import ConfigAttributes
from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode
from frostfs_testlib.storage.dataclasses.metrics import Metrics
from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass
from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces
from frostfs_testlib.storage.service_registry import ServiceRegistry
from frostfs_testlib.storage.dataclasses.metrics import Metrics
class ClusterNode:
@ -31,7 +31,11 @@ class ClusterNode:
self.host = host
self.id = id
self.class_registry = get_service_registry()
self.metrics = Metrics(host=self.host, metrics_endpoint=self.storage_node.get_metrics_endpoint())
self.metrics = Metrics(
host=self.host,
metrics_endpoint=self.storage_node.get_metrics_endpoint(),
vm_agent_endpoint=self.storage_node.get_vm_agent_endpoint(),
)
@property
def host_ip(self):

View file

@ -12,8 +12,10 @@ class ConfigAttributes:
REMOTE_WALLET_CONFIG = "remote_wallet_config_path"
ENDPOINT_DATA_0 = "endpoint_data0"
ENDPOINT_DATA_1 = "endpoint_data1"
ENDPOINT_DATA_0_NS = "endpoint_data0_namespace"
ENDPOINT_INTERNAL = "endpoint_internal0"
ENDPOINT_PROMETHEUS = "endpoint_prometheus"
ENDPOINT_VM_AGENT = "endpoint_vm_agent"
CONTROL_ENDPOINT = "control_endpoint"
UN_LOCODE = "un_locode"

View file

@ -14,6 +14,7 @@ from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_E
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider
from frostfs_testlib.steps.network import IpHelper
from frostfs_testlib.steps.node_management import include_node_to_network_map, remove_nodes_from_map_morph
from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode
from frostfs_testlib.storage.controllers.disk_controller import DiskController
from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass
@ -39,6 +40,7 @@ class ClusterStateController:
self.stopped_nodes: list[ClusterNode] = []
self.detached_disks: dict[str, DiskController] = {}
self.dropped_traffic: list[ClusterNode] = []
self.excluded_from_netmap: list[StorageNode] = []
self.stopped_services: set[NodeBase] = set()
self.cluster = cluster
self.healthcheck = healthcheck
@ -307,24 +309,17 @@ class ClusterStateController:
self.suspended_services = {}
@reporter.step("Drop traffic to {node}, nodes - {block_nodes}")
def drop_traffic(
self,
node: ClusterNode,
wakeup_timeout: int,
name_interface: str,
block_nodes: list[ClusterNode] = None,
) -> None:
def drop_traffic(self, node: ClusterNode, wakeup_timeout: int, name_interface: str, block_nodes: list[ClusterNode] = None) -> None:
list_ip = self._parse_interfaces(block_nodes, name_interface)
IpHelper.drop_input_traffic_to_node(node, list_ip)
time.sleep(wakeup_timeout)
self.dropped_traffic.append(node)
@reporter.step("Start traffic to {node}")
def restore_traffic(
self,
node: ClusterNode,
) -> None:
def restore_traffic(self, node: ClusterNode) -> None:
IpHelper.restore_input_traffic_to_node(node=node)
index = self.dropped_traffic.index(node)
self.dropped_traffic.pop(index)
@reporter.step("Restore blocked nodes")
def restore_all_traffic(self):
@ -408,9 +403,7 @@ class ClusterStateController:
@reporter.step("Set MaintenanceModeAllowed - {status}")
def set_maintenance_mode_allowed(self, status: str, cluster_node: ClusterNode) -> None:
frostfs_adm = FrostfsAdm(
shell=cluster_node.host.get_shell(),
frostfs_adm_exec_path=FROSTFS_ADM_EXEC,
config_file=FROSTFS_ADM_CONFIG_PATH,
shell=cluster_node.host.get_shell(), frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH
)
frostfs_adm.morph.set_config(set_key_value=f"MaintenanceModeAllowed={status}")
@ -451,6 +444,25 @@ class ClusterStateController:
else:
assert netmap[0].node_status == status, f"Node status should be '{status}', but was '{netmap[0].node_status}'"
def remove_node_from_netmap(self, removes_nodes: list[StorageNode]) -> None:
alive_storage = list(set(self.cluster.storage_nodes) - set(removes_nodes))[0]
remove_nodes_from_map_morph(self.shell, self.cluster, removes_nodes, alive_storage)
self.excluded_from_netmap.extend(removes_nodes)
def include_node_to_netmap(self, include_node: StorageNode, alive_node: StorageNode):
include_node_to_network_map(include_node, alive_node, self.shell, self.cluster)
self.excluded_from_netmap.pop(self.excluded_from_netmap.index(include_node))
def include_all_excluded_nodes(self):
if not self.excluded_from_netmap:
return
alive_node = list(set(self.cluster.storage_nodes) - set(self.excluded_from_netmap))[0]
if not alive_node:
return
for exclude_node in self.excluded_from_netmap.copy():
self.include_node_to_netmap(exclude_node, alive_node)
def _get_cli(
self, local_shell: Shell, local_wallet: WalletInfo, cluster_node: ClusterNode
) -> tuple[FrostfsAdm, FrostfsCli, FrostfsCli]:
@ -467,11 +479,7 @@ class ClusterStateController:
frostfs_adm = FrostfsAdm(shell=shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH)
frostfs_cli = FrostfsCli(local_shell, FROSTFS_CLI_EXEC, local_wallet.config_path)
frostfs_cli_remote = FrostfsCli(
shell=shell,
frostfs_cli_exec_path=FROSTFS_CLI_EXEC,
config_file=wallet_config_path,
)
frostfs_cli_remote = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config_path)
return frostfs_adm, frostfs_cli, frostfs_cli_remote
def _enable_date_synchronizer(self, cluster_node: ClusterNode):
@ -531,11 +539,8 @@ class ClusterStateController:
except Exception as err:
logger.warning(f"Host ping fails with error {err}")
return HostStatus.ONLINE
@reporter.step("Get contract by domain - {domain_name}")
def get_domain_contracts(self, cluster_node: ClusterNode, domain_name: str):
frostfs_adm = FrostfsAdm(
shell=cluster_node.host.get_shell(),
frostfs_adm_exec_path=FROSTFS_ADM_EXEC,
)
frostfs_adm = FrostfsAdm(shell=cluster_node.host.get_shell(), frostfs_adm_exec_path=FROSTFS_ADM_EXEC)
return frostfs_adm.morph.dump_hashes(cluster_node.morph_chain.get_http_endpoint(), domain_name).stdout
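
A hedged teardown-friendly pattern for the new netmap bookkeeping on ClusterStateController; the controller object is an assumption.

def run_with_reduced_netmap(cluster_state_controller):
    victim = cluster_state_controller.cluster.storage_nodes[-1]
    cluster_state_controller.remove_node_from_netmap([victim])
    try:
        ...  # exercise the cluster while the node is excluded
    finally:
        # Returns every node still tracked in excluded_from_netmap.
        cluster_state_controller.include_all_excluded_nodes()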

View file

@ -39,12 +39,18 @@ class S3Gate(NodeBase):
def get_endpoint(self) -> str:
return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0)
def get_ns_endpoint(self, ns_name: str) -> str:
return self._get_attribute(f"{ConfigAttributes.ENDPOINT_DATA_0}_namespace").format(namespace=ns_name)
def get_all_endpoints(self) -> list[str]:
return [
self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0),
self._get_attribute(ConfigAttributes.ENDPOINT_DATA_1),
]
def get_ns_endpoint(self, ns_name: str) -> str:
return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0_NS).format(namespace=ns_name)
def service_healthcheck(self) -> bool:
health_metric = "frostfs_s3_gw_state_health"
output = self.host.get_shell().exec(f"curl -s localhost:8086 | grep {health_metric} | sed 1,2d").stdout
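
A hedged example of resolving a namespace-scoped S3 endpoint with the relocated accessor; the gate object and namespace name are assumptions.

# s3_gate is an S3Gate service instance; "tenant-ns" is an illustrative namespace.
namespace_endpoint = s3_gate.get_ns_endpoint("tenant-ns")
default_endpoint, backup_endpoint = s3_gate.get_all_endpoints()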

View file

@ -1,20 +1,23 @@
from datetime import datetime
from frostfs_testlib.hosting import Host
from frostfs_testlib.shell.interfaces import CommandResult
class Metrics:
def __init__(self, host: Host, metrics_endpoint: str) -> None:
self.storage = StorageMetrics(host, metrics_endpoint)
def __init__(self, host: Host, metrics_endpoint: str, vm_agent_endpoint: str) -> None:
self.storage = StorageMetrics(host, metrics_endpoint, vm_agent_endpoint)
class StorageMetrics:
"""
Class represents storage metrics in a cluster
"""
def __init__(self, host: Host, metrics_endpoint: str) -> None:
def __init__(self, host: Host, metrics_endpoint: str, vm_agent_endpoint: str) -> None:
self.host = host
self.metrics_endpoint = metrics_endpoint
self.vm_agent_endpoint = vm_agent_endpoint
def get_metrics_search_by_greps(self, **greps) -> CommandResult:
"""
@ -29,7 +32,19 @@ class StorageMetrics:
additional_greps = " |grep ".join([grep_command for grep_command in greps.values()])
result = shell.exec(f"curl -s {self.metrics_endpoint} | grep {additional_greps}")
return result
def get_vma_metrics_result(self, metric_name: str, since: datetime = None, **greps):
shell = self.host.get_shell(sudo=False)
if greps:
additional_filters = ",".join([f"{i}='{j}'" for i, j in greps.items()])
metric_name += f"{{{additional_filters}}}"
command = f'curl {self.vm_agent_endpoint}/api/v1/query_range --data-urlencode "query={metric_name}"'
if since:
date_from = since.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
command += f' --data-urlencode "start={date_from}"'
result = shell.exec(command).stdout
return result
def get_all_metrics(self) -> CommandResult:
shell = self.host.get_shell()
result = shell.exec(f"curl -s {self.metrics_endpoint}")

View file

@ -78,6 +78,9 @@ class NodeBase(HumanReadableABC):
def get_metrics_endpoint(self) -> str:
return self._get_attribute(ConfigAttributes.ENDPOINT_PROMETHEUS)
def get_vm_agent_endpoint(self) -> str:
return self._get_attribute(ConfigAttributes.ENDPOINT_VM_AGENT)
def stop_service(self, mask: bool = True):
if mask:
with reporter.step(f"Mask {self.name} service on {self.host.config.address}"):
@ -185,9 +188,7 @@ class NodeBase(HumanReadableABC):
if attribute_name not in config.attributes:
if default_attribute_name is None:
raise RuntimeError(
f"Service {self.name} has no {attribute_name} in config and fallback attribute isn't set either"
)
raise RuntimeError(f"Service {self.name} has no {attribute_name} in config and fallback attribute isn't set either")
return config.attributes[default_attribute_name]
@ -197,9 +198,7 @@ class NodeBase(HumanReadableABC):
return self.host.get_service_config(self.name)
def get_service_uptime(self, service: str) -> datetime:
result = self.host.get_shell().exec(
f"systemctl show {service} --property ActiveEnterTimestamp | cut -d '=' -f 2"
)
result = self.host.get_shell().exec(f"systemctl show {service} --property ActiveEnterTimestamp | cut -d '=' -f 2")
start_time = parser.parse(result.stdout.strip())
current_time = datetime.now(tz=timezone.utc)
active_time = current_time - start_time

View file

@ -90,3 +90,6 @@ class Chunk:
def __str__(self) -> str:
return self.object_id
def __repr__(self) -> str:
return self.object_id

View file

@ -8,6 +8,7 @@ from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.controllers.shards_watcher import ShardsWatcher
from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo
from frostfs_testlib.storage.grpc_operations import interfaces
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils.cli_utils import parse_netmap_output
@ -42,6 +43,7 @@ class ChunksOperations(interfaces.ChunksInterface):
if cluster_node.host_ip == node_info.node:
return (cluster_node, node_info)
@wait_for_success(300, 5, fail_testcase=None)
@reporter.step("Search shard with chunk {chunk}")
def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str:
oid_path = f"{chunk.object_id[0]}/{chunk.object_id[1]}/{chunk.object_id[2]}/{chunk.object_id[3]}"
@ -60,11 +62,10 @@ class ChunksOperations(interfaces.ChunksInterface):
rpc_endpoint: str,
cid: str,
oid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
trace: bool = False,
trace: bool = True,
root: bool = False,
verify_presence_all: bool = False,
json: bool = True,
@ -72,20 +73,33 @@ class ChunksOperations(interfaces.ChunksInterface):
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> list[Chunk]:
object_nodes = self.cli.object.nodes(**{param: value for param, value in locals().items() if param not in ["self"]})
return self._parse_object_nodes(object_nodes.stdout)
object_nodes = self.cli.object.nodes(
rpc_endpoint=rpc_endpoint,
cid=cid,
address=address,
bearer=bearer,
generate_key=generate_key,
oid=oid,
trace=trace,
root=root,
verify_presence_all=verify_presence_all,
json=json,
ttl=ttl,
xhdr=xhdr,
timeout=timeout,
)
return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])
@reporter.step("Get last parity chunk")
def get_parity(
self,
rpc_endpoint: str,
cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
oid: Optional[str] = None,
trace: bool = False,
trace: bool = True,
root: bool = False,
verify_presence_all: bool = False,
json: bool = True,
@ -93,29 +107,56 @@ class ChunksOperations(interfaces.ChunksInterface):
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
) -> Chunk:
object_nodes = self.cli.object.nodes(**{param: value for param, value in locals().items() if param not in ["self"]})
return self._parse_object_nodes(object_nodes.stdout)[-1]
object_nodes = self.cli.object.nodes(
rpc_endpoint=rpc_endpoint,
cid=cid,
address=address,
bearer=bearer,
generate_key=generate_key,
oid=oid,
trace=trace,
root=root,
verify_presence_all=verify_presence_all,
json=json,
ttl=ttl,
xhdr=xhdr,
timeout=timeout,
)
return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[-1]
@reporter.step("Get first data chunk")
def get_first_data(
self,
rpc_endpoint: str,
cid: str,
wallet: Optional[str] = None,
oid: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
oid: Optional[str] = None,
trace: bool = False,
trace: bool = True,
root: bool = False,
verify_presence_all: bool = False,
json: bool = True,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> Chunk:
object_nodes = self.cli.object.nodes(**{param: value for param, value in locals().items() if param not in ["self"]})
return self._parse_object_nodes(object_nodes.stdout)[0]
object_nodes = self.cli.object.nodes(
rpc_endpoint=rpc_endpoint,
cid=cid,
address=address,
bearer=bearer,
generate_key=generate_key,
oid=oid,
trace=trace,
root=root,
verify_presence_all=verify_presence_all,
json=json,
ttl=ttl,
xhdr=xhdr,
timeout=timeout,
)
return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[0]
def _parse_object_nodes(self, object_nodes: str) -> list[Chunk]:
parse_result = json.loads(object_nodes)
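
A hedged sketch of fetching chunks through the reworked getters; the client accessor path, endpoint and identifiers are assumptions drawn from typical test context, not from this changeset.

# grpc_client, cluster, container_id and object_id are assumed to exist in the test.
first_data_chunk = grpc_client.object.chunks.get_first_data(
    rpc_endpoint=cluster.default_rpc_endpoint,
    cid=container_id,
    oid=object_id,
)
parity_chunk = grpc_client.object.chunks.get_parity(
    rpc_endpoint=cluster.default_rpc_endpoint,
    cid=container_id,
    oid=object_id,
)
print(first_data_chunk.object_id, parity_chunk.object_id)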

View file

@ -8,7 +8,7 @@ from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.plugins import load_plugin
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
from frostfs_testlib.s3.interfaces import BucketContainerResolver
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.grpc_operations import interfaces
from frostfs_testlib.utils import json_utils
@ -266,6 +266,7 @@ class ContainerOperations(interfaces.ContainerInterface):
self,
endpoint: str,
cid: str,
cluster: Cluster,
address: Optional[str] = None,
ttl: Optional[int] = None,
from_file: Optional[str] = None,

View file

@ -509,6 +509,7 @@ class ObjectOperations(interfaces.ObjectInterface):
cid: str,
endpoint: str,
bearer: str = "",
oid: Optional[str] = None,
filters: Optional[dict] = None,
expected_objects_list: Optional[list] = None,
xhdr: Optional[dict] = None,
@ -516,6 +517,9 @@ class ObjectOperations(interfaces.ObjectInterface):
phy: bool = False,
root: bool = False,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
ttl: Optional[int] = None,
) -> list:
"""
SEARCH an Object.
@ -541,11 +545,15 @@ class ObjectOperations(interfaces.ObjectInterface):
rpc_endpoint=endpoint,
cid=cid,
bearer=bearer,
oid=oid,
xhdr=xhdr,
filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()] if filters else None,
session=session,
phy=phy,
root=root,
address=address,
generate_key=generate_key,
ttl=ttl,
timeout=timeout,
)
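
A hedged call that exercises the parameters newly plumbed into search; the client object and identifiers are assumptions.

found_oids = grpc_client.object.search(
    cid=container_id,
    endpoint=cluster.default_rpc_endpoint,
    oid=object_id,        # newly exposed optional parameter
    generate_key=True,    # newly exposed optional parameter
    ttl=2,                # newly exposed optional parameter
    timeout="100s",
)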

View file

@ -235,6 +235,7 @@ class ObjectInterface(ABC):
cid: str,
endpoint: str,
bearer: str = "",
oid: Optional[str] = None,
filters: Optional[dict] = None,
expected_objects_list: Optional[list] = None,
xhdr: Optional[dict] = None,
@ -242,6 +243,9 @@ class ObjectInterface(ABC):
phy: bool = False,
root: bool = False,
timeout: Optional[str] = None,
address: Optional[str] = None,
generate_key: Optional[bool] = None,
ttl: Optional[int] = None,
) -> List:
pass
@ -368,6 +372,7 @@ class ContainerInterface(ABC):
self,
endpoint: str,
cid: str,
cluster: Cluster,
address: Optional[str] = None,
ttl: Optional[int] = None,
from_file: Optional[str] = None,
@ -376,7 +381,7 @@ class ContainerInterface(ABC):
xhdr: Optional[dict] = None,
generate_key: Optional[bool] = None,
timeout: Optional[str] = None,
) -> List[str]:
) -> List[ClusterNode]:
"""Show the nodes participating in the container in the current epoch."""
raise NotImplementedError("No implemethed method nodes")