forked from TrueCloudLab/frostfs-testlib
Compare commits
No commits in common. "d50b8b758b0993768185341a593de055b046ab4e" and "eba782e7d26945d75bb1e233b16058e3b1b52f7d" have entirely different histories.
d50b8b758b...eba782e7d2
21 changed files with 89 additions and 353 deletions
@@ -1,3 +1,3 @@
 __version__ = "2.0.1"
-from .fixtures import configure_testlib, hosting, temp_directory
+from .fixtures import configure_testlib, hosting

@@ -350,129 +350,3 @@ class FrostfsAdmMorph(CliCommand):
                 if param not in ["self", "node_netmap_keys"]
             },
         )
-
-    def add_rule(
-        self,
-        endpoint: str,
-        chain_id: str,
-        target_name: str,
-        target_type: str,
-        rule: Optional[list[str]] = None,
-        path: Optional[str] = None,
-        chain_id_hex: Optional[bool] = None,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """Add a new rule chain to the node's local override storage.
-
-        Args:
-            address: Address of wallet account
-            chain-id: Assign ID to the parsed chain
-            chain-id-hex: Flag to parse chain ID as hex
-            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
-            path: Path to encoded chain in JSON or binary format
-            rule: Rule statement
-            target-name: Resource name in APE resource name format
-            target-type: Resource type (container/namespace)
-            timeout: Timeout for an operation (default 15s)
-            wallet: Path to the wallet or binary key
-
-        Returns:
-            Command's result.
-        """
-        return self._execute(
-            "control add-rule",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def get_rule(
-        self,
-        endpoint: str,
-        chain_id: str,
-        target_name: str,
-        target_type: str,
-        chain_id_hex: Optional[bool] = None,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """Get a rule chain from the node's local override storage.
-
-        Args:
-            address: Address of wallet account
-            chain-id: Chain id
-            chain-id-hex: Flag to parse chain ID as hex
-            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
-            target-name: Resource name in APE resource name format
-            target-type: Resource type (container/namespace)
-            timeout: Timeout for an operation (default 15s)
-            wallet: Path to the wallet or binary key
-
-        Returns:
-            Command's result.
-        """
-        return self._execute(
-            "control get-rule",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def list_rules(
-        self,
-        target_type: str,
-        target_name: Optional[str] = None,
-        rpc_endpoint: Optional[str] = None,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """List rule chains defined on the target.
-
-        Args:
-            address: Address of wallet account
-            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
-            target-name: Resource name in APE resource name format
-            target-type: Resource type (container/namespace)
-            timeout: Timeout for an operation (default 15s)
-            wallet: Path to the wallet or binary key
-
-        Returns:
-            Command's result.
-        """
-        return self._execute(
-            "morph ape list-rule-chains",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def remove_rule(
-        self,
-        endpoint: str,
-        chain_id: str,
-        target_name: str,
-        target_type: str,
-        all: Optional[bool] = None,
-        chain_id_hex: Optional[bool] = None,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """Remove a rule chain from the node's local override storage.
-
-        Args:
-            address: Address of wallet account
-            all: Remove all chains
-            chain-id: Assign ID to the parsed chain
-            chain-id-hex: Flag to parse chain ID as hex
-            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
-            target-name: Resource name in APE resource name format
-            target-type: Resource type (container/namespace)
-            timeout: Timeout for an operation (default 15s)
-            wallet: Path to the wallet or binary key
-
-        Returns:
-            Command's result.
-        """
-        return self._execute(
-            "control remove-rule",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
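The four methods removed above all forward their arguments with the same idiom: collect the method's parameters via locals() and splat them into _execute as keyword arguments. A minimal, self-contained sketch of the pattern (the class and the flag rendering are illustrative stand-ins, not the library's API):

    from typing import Optional


    class CliCommandSketch:
        # Illustrative stand-in for the testlib CliCommand base class.
        def _execute(self, command: str, **params) -> str:
            # A real implementation would render flags and invoke the binary;
            # here we only show which keyword arguments arrive.
            flags = " ".join(f"--{name.replace('_', '-')} {value}" for name, value in params.items() if value is not None)
            return f"frostfs-adm {command} {flags}".strip()

        def add_rule(self, endpoint: str, chain_id: str, timeout: Optional[str] = None) -> str:
            # locals() in the first statement of a method contains exactly the
            # bound parameters, so filtering out "self" forwards every CLI
            # option without repeating each name.
            return self._execute("control add-rule", **{k: v for k, v in locals().items() if k != "self"})


    print(CliCommandSketch().add_rule("localhost:8090", "allowAll"))
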
@@ -370,11 +370,11 @@ class FrostfsCliObject(CliCommand):
         self,
         rpc_endpoint: str,
         cid: str,
-        oid: Optional[str] = None,
         wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
         generate_key: Optional[bool] = None,
+        oid: Optional[str] = None,
         trace: bool = False,
         root: bool = False,
         verify_presence_all: bool = False,
@@ -40,7 +40,7 @@ class FrostfsCliShards(CliCommand):
         self,
         endpoint: str,
         mode: str,
-        id: Optional[list[str]] = None,
+        id: Optional[list[str]],
         wallet: Optional[str] = None,
         wallet_password: Optional[str] = None,
         address: Optional[str] = None,
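One behavioural detail in this hunk: the right-hand revision drops the = None default from id, and Optional[...] on its own does not make an argument omittable; it only widens the accepted type to include None. A quick illustration with a hypothetical function:

    from typing import Optional


    def set_shard_mode(mode: str, id: Optional[list[str]]) -> None:
        # Optional means None is an accepted value, not that the
        # parameter has a default.
        print(mode, id)


    set_shard_mode("read-only", None)        # fine: None passed explicitly
    set_shard_mode("read-only", ["shard1"])  # fine: a list passed
    # set_shard_mode("read-only")            # TypeError: 'id' is required
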
@@ -7,7 +7,7 @@ import yaml

 from frostfs_testlib import reporter
 from frostfs_testlib.hosting.hosting import Hosting
-from frostfs_testlib.resources.common import ASSETS_DIR, HOSTING_CONFIG_FILE
+from frostfs_testlib.resources.common import HOSTING_CONFIG_FILE
 from frostfs_testlib.storage import get_service_registry

@@ -24,16 +24,6 @@ def configure_testlib():
         registry.register_service(svc.name, svc.load())


-@pytest.fixture(scope="session")
-def temp_directory(configure_testlib):
-    with reporter.step("Prepare tmp directory"):
-        full_path = ASSETS_DIR
-        if not os.path.exists(full_path):
-            os.mkdir(full_path)
-
-    return full_path
-
-
 @pytest.fixture(scope="session")
 def hosting(configure_testlib) -> Hosting:
     with open(HOSTING_CONFIG_FILE, "r") as file:
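For readers unfamiliar with the fixture wiring above: a pytest fixture that names another fixture as a parameter (here hosting(configure_testlib)) forces that dependency to run first, and scope="session" caches the result for the whole run. A minimal sketch independent of the testlib (the fixture bodies are invented for illustration):

    import pytest


    @pytest.fixture(scope="session")
    def configure_testlib():
        # Runs once per session; stands in for plugin registration.
        return {"configured": True}


    @pytest.fixture(scope="session")
    def hosting(configure_testlib):
        # Depending on configure_testlib guarantees setup happened first.
        return {"hosts": [], "setup": configure_testlib}


    def test_hosting_is_configured(hosting):
        assert hosting["setup"]["configured"]
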
@@ -185,12 +185,6 @@ class DockerHost(Host):
     def is_file_exist(self, file_path: str) -> None:
         raise NotImplementedError("Not implemented for docker")

-    def wipefs_storage_node_data(self, service_name: str) -> None:
-        raise NotImplementedError("Not implemented for docker")
-
-    def finish_wipefs(self, service_name: str) -> None:
-        raise NotImplementedError("Not implemented for docker")
-
     def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None:
         volume_path = self.get_data_directory(service_name)

@@ -246,7 +240,7 @@ class DockerHost(Host):
         until: Optional[datetime] = None,
         unit: Optional[str] = None,
         exclude_filter: Optional[str] = None,
-        priority: Optional[str] = None,
+        priority: Optional[str] = None
     ) -> str:
         client = self._get_docker_client()
         filtered_logs = ""
@@ -178,21 +178,6 @@ class Host(ABC):
             cache_only: To delete cache only.
         """

-    @abstractmethod
-    def wipefs_storage_node_data(self, service_name: str) -> None:
-        """Erases all data of the storage node with specified name.
-
-        Args:
-            service_name: Name of storage node service.
-        """
-
-    def finish_wipefs(self, service_name: str) -> None:
-        """Erases all data of the storage node with specified name.
-
-        Args:
-            service_name: Name of storage node service.
-        """
-
     @abstractmethod
     def delete_fstree(self, service_name: str) -> None:
         """

@@ -312,7 +297,7 @@ class Host(ABC):
         until: Optional[datetime] = None,
         unit: Optional[str] = None,
         exclude_filter: Optional[str] = None,
-        priority: Optional[str] = None,
+        priority: Optional[str] = None
     ) -> str:
         """Get logs from host filtered by regex.
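Note the asymmetry in the deleted Host methods: wipefs_storage_node_data was declared with @abstractmethod (every concrete host had to override it), while finish_wipefs was a plain method, i.e. an optional hook. A compact sketch of the difference (class names are illustrative):

    from abc import ABC, abstractmethod


    class HostSketch(ABC):
        @abstractmethod
        def wipe_data(self, service_name: str) -> None:
            """Must be overridden; instantiating a subclass without it fails."""

        def finish_wipe(self, service_name: str) -> None:
            """Plain method: an optional hook with a usable default (a no-op here)."""


    class DockerHostSketch(HostSketch):
        def wipe_data(self, service_name: str) -> None:
            raise NotImplementedError("Not implemented for docker")


    DockerHostSketch().finish_wipe("storage01")  # inherited default works as-is
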
@@ -29,4 +29,3 @@ S3_MALFORMED_XML_REQUEST = "The XML you provided was not well-formed or did not
 RULE_ACCESS_DENIED_CONTAINER = "access to container operation {operation} is denied by access policy engine: Access denied"
 RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied"
 NO_RULE_FOUND_CONTAINER = "access to container operation {operation} is denied by access policy engine: NoRuleFound"
-NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request: method {operation}: NoRuleFound"
@@ -1,10 +1,8 @@
-import json
 import re
-from datetime import datetime

 from frostfs_testlib import reporter
-from frostfs_testlib.storage.cluster import ClusterNode
 from frostfs_testlib.testing.test_control import wait_for_success
+from frostfs_testlib.storage.cluster import ClusterNode


 @reporter.step("Check metrics result")

@@ -24,24 +22,6 @@ def check_metrics_counter(
     ), f"Expected: {counter_exp} {operator} Actual: {counter_act} in node: {cluster_node}"


-@reporter.step("Check Victoria metrics agent metrics result")
-def check_vma_metrics_counter(metrics_results: list[dict], bucket_name: str, operation: str, counter_exp: int = 0, operator: str = "=="):
-    with reporter.step(f"Get metrics value for bucket: {bucket_name}, operation: {operation}"):
-        counter_act = None
-        try:
-            for res in metrics_results:
-                metric = res["metric"]
-                if metric.get("bucket") == bucket_name and metric.get("operation") == operation and metric.get("cid"):
-                    counter_act = int(res["values"][0][-1])
-                    break
-        except Exception as e:
-            counter_act = 0
-
-    with reporter.step("Check metric value"):
-        assert counter_act, f"Not found metric value for bucket: {bucket_name}, operation: {operation}"
-        assert eval(f"{counter_act} {operator} {counter_exp}"), f"Expected: {counter_exp} {operator} Actual: {counter_act}"
-
-
 @reporter.step("Get metrics value from node: {node}")
 def get_metrics_value(node: ClusterNode, parse_from_command: bool = False, **metrics_greps: str):
     try:

@@ -56,19 +36,6 @@ def get_metrics_value(node: ClusterNode, parse_from_command: bool = False, **met
     return metrics_counter


-@reporter.step("Get VM agent metrics")
-@wait_for_success(max_wait_time=300, interval=30)
-def get_vm_agent_metrics_values(node: ClusterNode, metric_name: str, since: datetime = None, **greps):
-    try:
-        command_result = node.metrics.storage.get_vma_metrics_result(metric_name, since, **greps)
-        result = json.loads(command_result)
-    except Exception as e:
-        ...
-
-    assert len(result["data"]["result"]) > 0, "Metrics are not available"
-    return result
-
-
 @reporter.step("Parse metrics count and calc sum of result")
 def calc_metrics_count_from_stdout(metric_result_stdout: str, command: str = None):
     if command:
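The deleted check_vma_metrics_counter compares counters by building a string and calling eval(f"{counter_act} {operator} {counter_exp}"). If this pattern is ever reworked, the standard operator module expresses the same thing without eval; a sketch of the substitution, not what the library does today:

    import operator

    # Map the comparison spellings accepted by the helpers onto functions.
    OPS = {
        "==": operator.eq,
        "!=": operator.ne,
        ">": operator.gt,
        ">=": operator.ge,
        "<": operator.lt,
        "<=": operator.le,
    }


    def compare(counter_act: int, op: str, counter_exp: int) -> bool:
        return OPS[op](counter_act, counter_exp)


    assert compare(5, ">=", 3)
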
@@ -13,7 +13,6 @@ from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.steps.epoch import tick_epoch, wait_for_epochs_align
 from frostfs_testlib.storage.cluster import Cluster, StorageNode
-from frostfs_testlib.testing.test_control import wait_for_success
 from frostfs_testlib.utils import datetime_utils

 logger = logging.getLogger("NeoLogger")

@@ -112,7 +111,10 @@ def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str:
     storage_wallet_path = node.get_wallet_path()

     cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, config_file=storage_wallet_config)
-    return cli.netmap.snapshot(rpc_endpoint=node.get_rpc_endpoint(), wallet=storage_wallet_path).stdout
+    return cli.netmap.snapshot(
+        rpc_endpoint=node.get_rpc_endpoint(),
+        wallet=storage_wallet_path,
+    ).stdout


 @reporter.step("Get shard list for {node}")

@@ -200,7 +202,12 @@ def delete_node_data(node: StorageNode) -> None:


 @reporter.step("Exclude node {node_to_exclude} from network map")
-def exclude_node_from_network_map(node_to_exclude: StorageNode, alive_node: StorageNode, shell: Shell, cluster: Cluster) -> None:
+def exclude_node_from_network_map(
+    node_to_exclude: StorageNode,
+    alive_node: StorageNode,
+    shell: Shell,
+    cluster: Cluster,
+) -> None:
     node_netmap_key = node_to_exclude.get_wallet_public_key()

     storage_node_set_status(node_to_exclude, status="offline")

@@ -214,7 +221,12 @@ def exclude_node_from_network_map(node_to_exclude: StorageNode, alive_node: Stor


 @reporter.step("Include node {node_to_include} into network map")
-def include_node_to_network_map(node_to_include: StorageNode, alive_node: StorageNode, shell: Shell, cluster: Cluster) -> None:
+def include_node_to_network_map(
+    node_to_include: StorageNode,
+    alive_node: StorageNode,
+    shell: Shell,
+    cluster: Cluster,
+) -> None:
     storage_node_set_status(node_to_include, status="online")

     # Per suggestion of @fyrchik we need to wait for 2 blocks after we set status and after tick epoch.

@@ -224,7 +236,7 @@ def include_node_to_network_map(node_to_include: StorageNode, alive_node: Storag
     tick_epoch(shell, cluster)
     time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2)

-    await_node_in_map(node_to_include, shell, alive_node)
+    check_node_in_map(node_to_include, shell, alive_node)


 @reporter.step("Check node {node} in network map")

@@ -238,11 +250,6 @@ def check_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[Stor
     assert node_netmap_key in snapshot, f"Expected node with key {node_netmap_key} to be in network map"


-@wait_for_success(300, 15, title="Await node {node} in network map")
-def await_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None:
-    check_node_in_map(node, shell, alive_node)
-
-
 @reporter.step("Check node {node} NOT in network map")
 def check_node_not_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None:
     alive_node = alive_node or node

@@ -269,7 +276,12 @@ def wait_for_node_to_be_ready(node: StorageNode) -> None:


 @reporter.step("Remove nodes from network map trough cli-adm morph command")
-def remove_nodes_from_map_morph(shell: Shell, cluster: Cluster, remove_nodes: list[StorageNode], alive_node: Optional[StorageNode] = None):
+def remove_nodes_from_map_morph(
+    shell: Shell,
+    cluster: Cluster,
+    remove_nodes: list[StorageNode],
+    alive_node: Optional[StorageNode] = None,
+):
     """
     Move node to the Offline state in the candidates list and tick an epoch to update the netmap
     using frostfs-adm

@@ -288,5 +300,9 @@ def remove_nodes_from_map_morph(shell: Shell, cluster: Cluster, remove_nodes: li

     if FROSTFS_ADM_EXEC and FROSTFS_ADM_CONFIG_PATH:
         # If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests)
-        frostfsadm = FrostfsAdm(shell=remote_shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH)
+        frostfsadm = FrostfsAdm(
+            shell=remote_shell,
+            frostfs_adm_exec_path=FROSTFS_ADM_EXEC,
+            config_file=FROSTFS_ADM_CONFIG_PATH,
+        )
         frostfsadm.morph.remove_nodes(node_netmap_keys)
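Most of the churn above is signature reflow; the one behavioural change is that include_node_to_network_map now calls check_node_in_map directly instead of the retrying await_node_in_map wrapper, which is deleted along with the wait_for_success import. For orientation, the two helpers are typically paired like this (a sketch; shell, cluster, and the StorageNode objects come from the surrounding test fixtures):

    from frostfs_testlib.steps.node_management import (
        exclude_node_from_network_map,
        include_node_to_network_map,
    )


    def cycle_node_membership(node, alive_node, shell, cluster):
        # Take the node offline and wait until the netmap reflects it...
        exclude_node_from_network_map(node, alive_node, shell, cluster)
        # ...then bring it back online and verify it re-entered the map.
        include_node_to_network_map(node, alive_node, shell, cluster)
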
@@ -11,10 +11,10 @@ from frostfs_testlib.storage import get_service_registry
 from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml
 from frostfs_testlib.storage.constants import ConfigAttributes
 from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode
-from frostfs_testlib.storage.dataclasses.metrics import Metrics
 from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass
 from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces
 from frostfs_testlib.storage.service_registry import ServiceRegistry
+from frostfs_testlib.storage.dataclasses.metrics import Metrics


 class ClusterNode:

@@ -31,11 +31,7 @@ class ClusterNode:
         self.host = host
         self.id = id
         self.class_registry = get_service_registry()
-        self.metrics = Metrics(
-            host=self.host,
-            metrics_endpoint=self.storage_node.get_metrics_endpoint(),
-            vm_agent_endpoint=self.storage_node.get_vm_agent_endpoint(),
-        )
+        self.metrics = Metrics(host=self.host, metrics_endpoint=self.storage_node.get_metrics_endpoint())

     @property
     def host_ip(self):
@@ -12,10 +12,8 @@ class ConfigAttributes:
     REMOTE_WALLET_CONFIG = "remote_wallet_config_path"
     ENDPOINT_DATA_0 = "endpoint_data0"
     ENDPOINT_DATA_1 = "endpoint_data1"
-    ENDPOINT_DATA_0_NS = "endpoint_data0_namespace"
     ENDPOINT_INTERNAL = "endpoint_internal0"
     ENDPOINT_PROMETHEUS = "endpoint_prometheus"
-    ENDPOINT_VM_AGENT = "endpoint_vm_agent"
     CONTROL_ENDPOINT = "control_endpoint"
     UN_LOCODE = "un_locode"
@@ -14,7 +14,6 @@ from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_E
 from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
 from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider
 from frostfs_testlib.steps.network import IpHelper
-from frostfs_testlib.steps.node_management import include_node_to_network_map, remove_nodes_from_map_morph
 from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode
 from frostfs_testlib.storage.controllers.disk_controller import DiskController
 from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass

@@ -40,7 +39,6 @@ class ClusterStateController:
         self.stopped_nodes: list[ClusterNode] = []
         self.detached_disks: dict[str, DiskController] = {}
         self.dropped_traffic: list[ClusterNode] = []
-        self.excluded_from_netmap: list[StorageNode] = []
         self.stopped_services: set[NodeBase] = set()
         self.cluster = cluster
         self.healthcheck = healthcheck

@@ -309,17 +307,24 @@
         self.suspended_services = {}

     @reporter.step("Drop traffic to {node}, nodes - {block_nodes}")
-    def drop_traffic(self, node: ClusterNode, wakeup_timeout: int, name_interface: str, block_nodes: list[ClusterNode] = None) -> None:
+    def drop_traffic(
+        self,
+        node: ClusterNode,
+        wakeup_timeout: int,
+        name_interface: str,
+        block_nodes: list[ClusterNode] = None,
+    ) -> None:
         list_ip = self._parse_interfaces(block_nodes, name_interface)
         IpHelper.drop_input_traffic_to_node(node, list_ip)
         time.sleep(wakeup_timeout)
         self.dropped_traffic.append(node)

     @reporter.step("Start traffic to {node}")
-    def restore_traffic(self, node: ClusterNode) -> None:
+    def restore_traffic(
+        self,
+        node: ClusterNode,
+    ) -> None:
         IpHelper.restore_input_traffic_to_node(node=node)
-        index = self.dropped_traffic.index(node)
-        self.dropped_traffic.pop(index)

     @reporter.step("Restore blocked nodes")
     def restore_all_traffic(self):

@@ -403,7 +408,9 @@
     @reporter.step("Set MaintenanceModeAllowed - {status}")
     def set_maintenance_mode_allowed(self, status: str, cluster_node: ClusterNode) -> None:
         frostfs_adm = FrostfsAdm(
-            shell=cluster_node.host.get_shell(), frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH
+            shell=cluster_node.host.get_shell(),
+            frostfs_adm_exec_path=FROSTFS_ADM_EXEC,
+            config_file=FROSTFS_ADM_CONFIG_PATH,
         )
         frostfs_adm.morph.set_config(set_key_value=f"MaintenanceModeAllowed={status}")

@@ -444,25 +451,6 @@
         else:
             assert netmap[0].node_status == status, f"Node status should be '{status}', but was '{netmap[0].node_status}'"

-    def remove_node_from_netmap(self, removes_nodes: list[StorageNode]) -> None:
-        alive_storage = list(set(self.cluster.storage_nodes) - set(removes_nodes))[0]
-        remove_nodes_from_map_morph(self.shell, self.cluster, removes_nodes, alive_storage)
-        self.excluded_from_netmap.extend(removes_nodes)
-
-    def include_node_to_netmap(self, include_node: StorageNode, alive_node: StorageNode):
-        include_node_to_network_map(include_node, alive_node, self.shell, self.cluster)
-        self.excluded_from_netmap.pop(self.excluded_from_netmap.index(include_node))
-
-    def include_all_excluded_nodes(self):
-        if not self.excluded_from_netmap:
-            return
-        alive_node = list(set(self.cluster.storage_nodes) - set(self.excluded_from_netmap))[0]
-        if not alive_node:
-            return
-
-        for exclude_node in self.excluded_from_netmap.copy():
-            self.include_node_to_netmap(exclude_node, alive_node)
-
     def _get_cli(
         self, local_shell: Shell, local_wallet: WalletInfo, cluster_node: ClusterNode
     ) -> tuple[FrostfsAdm, FrostfsCli, FrostfsCli]:

@@ -479,7 +467,11 @@

         frostfs_adm = FrostfsAdm(shell=shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH)
         frostfs_cli = FrostfsCli(local_shell, FROSTFS_CLI_EXEC, local_wallet.config_path)
-        frostfs_cli_remote = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config_path)
+        frostfs_cli_remote = FrostfsCli(
+            shell=shell,
+            frostfs_cli_exec_path=FROSTFS_CLI_EXEC,
+            config_file=wallet_config_path,
+        )
         return frostfs_adm, frostfs_cli, frostfs_cli_remote

     def _enable_date_synchronizer(self, cluster_node: ClusterNode):

@@ -542,5 +534,8 @@

     @reporter.step("Get contract by domain - {domain_name}")
     def get_domain_contracts(self, cluster_node: ClusterNode, domain_name: str):
-        frostfs_adm = FrostfsAdm(shell=cluster_node.host.get_shell(), frostfs_adm_exec_path=FROSTFS_ADM_EXEC)
+        frostfs_adm = FrostfsAdm(
+            shell=cluster_node.host.get_shell(),
+            frostfs_adm_exec_path=FROSTFS_ADM_EXEC,
+        )
         return frostfs_adm.morph.dump_hashes(cluster_node.morph_chain.get_http_endpoint(), domain_name).stdout
@@ -39,18 +39,12 @@ class S3Gate(NodeBase):
     def get_endpoint(self) -> str:
         return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0)

-    def get_ns_endpoint(self, ns_name: str) -> str:
-        return self._get_attribute(f"{ConfigAttributes.ENDPOINT_DATA_0}_namespace").format(namespace=ns_name)
-
     def get_all_endpoints(self) -> list[str]:
         return [
             self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0),
             self._get_attribute(ConfigAttributes.ENDPOINT_DATA_1),
         ]

-    def get_ns_endpoint(self, ns_name: str) -> str:
-        return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0_NS).format(namespace=ns_name)
-
     def service_healthcheck(self) -> bool:
         health_metric = "frostfs_s3_gw_state_health"
         output = self.host.get_shell().exec(f"curl -s localhost:8086 | grep {health_metric} | sed 1,2d").stdout
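Incidentally, the left-hand revision defines get_ns_endpoint twice in the same class, once before get_all_endpoints and once after; Python keeps whichever def runs last, so the second definition silently shadows the first. The hunk removes both. A tiny demonstration of the shadowing rule:

    class Demo:
        def greet(self) -> str:
            return "first definition"

        def greet(self) -> str:  # rebinding the name replaces the method above
            return "second definition"


    assert Demo().greet() == "second definition"
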
@@ -1,23 +1,20 @@
-from datetime import datetime
-
 from frostfs_testlib.hosting import Host
 from frostfs_testlib.shell.interfaces import CommandResult


 class Metrics:
-    def __init__(self, host: Host, metrics_endpoint: str, vm_agent_endpoint: str) -> None:
-        self.storage = StorageMetrics(host, metrics_endpoint, vm_agent_endpoint)
+    def __init__(self, host: Host, metrics_endpoint: str) -> None:
+        self.storage = StorageMetrics(host, metrics_endpoint)


 class StorageMetrics:
     """
     Class represents storage metrics in a cluster
     """

-    def __init__(self, host: Host, metrics_endpoint: str, vm_agent_endpoint: str) -> None:
+    def __init__(self, host: Host, metrics_endpoint: str) -> None:
         self.host = host
         self.metrics_endpoint = metrics_endpoint
-        self.vm_agent_endpoint = vm_agent_endpoint

     def get_metrics_search_by_greps(self, **greps) -> CommandResult:
         """

@@ -33,18 +30,6 @@ class StorageMetrics:
         result = shell.exec(f"curl -s {self.metrics_endpoint} | grep {additional_greps}")
         return result

-    def get_vma_metrics_result(self, metric_name: str, since: datetime = None, **greps):
-        shell = self.host.get_shell(sudo=False)
-        if greps:
-            additional_filters = ",".join([f"{i}='{j}'" for i, j in greps.items()])
-            metric_name += f"{{{additional_filters}}}"
-        command = f'curl {self.vm_agent_endpoint}/api/v1/query_range --data-urlencode "query={metric_name}"'
-        if since:
-            date_from = since.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
-            command += f' --data-urlencode "start={date_from}"'
-        result = shell.exec(command).stdout
-        return result
-
     def get_all_metrics(self) -> CommandResult:
         shell = self.host.get_shell()
         result = shell.exec(f"curl -s {self.metrics_endpoint}")
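The deleted get_vma_metrics_result builds a PromQL-style selector and queries a VictoriaMetrics agent through curl. The brace and quote nesting is easy to misread in diff form, so here is the same string assembly in isolation (the metric name, label values, and <vm_agent_endpoint> are invented placeholders):

    from datetime import datetime, timezone

    metric_name = "frostfs_s3_gw_request_total"  # placeholder metric
    greps = {"bucket": "test-bucket", "operation": "PUT"}

    # {"bucket": "test-bucket", ...} -> bucket='test-bucket',operation='PUT'
    additional_filters = ",".join(f"{key}='{value}'" for key, value in greps.items())
    metric_name += f"{{{additional_filters}}}"  # triple braces emit literal { } around the filters

    command = f'curl <vm_agent_endpoint>/api/v1/query_range --data-urlencode "query={metric_name}"'
    since = datetime(2024, 1, 1, tzinfo=timezone.utc)
    command += f' --data-urlencode "start={since.strftime("%Y-%m-%dT%H:%M:%S.%fZ")}"'
    print(command)
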
@@ -78,9 +78,6 @@ class NodeBase(HumanReadableABC):
     def get_metrics_endpoint(self) -> str:
         return self._get_attribute(ConfigAttributes.ENDPOINT_PROMETHEUS)

-    def get_vm_agent_endpoint(self) -> str:
-        return self._get_attribute(ConfigAttributes.ENDPOINT_VM_AGENT)
-
     def stop_service(self, mask: bool = True):
         if mask:
             with reporter.step(f"Mask {self.name} service on {self.host.config.address}"):

@@ -188,7 +185,9 @@ class NodeBase(HumanReadableABC):

         if attribute_name not in config.attributes:
             if default_attribute_name is None:
-                raise RuntimeError(f"Service {self.name} has no {attribute_name} in config and fallback attribute isn't set either")
+                raise RuntimeError(
+                    f"Service {self.name} has no {attribute_name} in config and fallback attribute isn't set either"
+                )

             return config.attributes[default_attribute_name]

@@ -198,7 +197,9 @@ class NodeBase(HumanReadableABC):
         return self.host.get_service_config(self.name)

     def get_service_uptime(self, service: str) -> datetime:
-        result = self.host.get_shell().exec(f"systemctl show {service} --property ActiveEnterTimestamp | cut -d '=' -f 2")
+        result = self.host.get_shell().exec(
+            f"systemctl show {service} --property ActiveEnterTimestamp | cut -d '=' -f 2"
+        )
         start_time = parser.parse(result.stdout.strip())
         current_time = datetime.now(tz=timezone.utc)
         active_time = current_time - start_time
@@ -90,6 +90,3 @@ class Chunk:

     def __str__(self) -> str:
         return self.object_id
-
-    def __repr__(self) -> str:
-        return self.object_id
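The removed __repr__ mainly affects how Chunk objects render inside collections: print(chunk) goes through __str__, but print([chunk]) calls each element's __repr__, which falls back to the default <... object at 0x...> form when undefined. For example:

    class WithStrOnly:
        def __str__(self) -> str:
            return "chunk-id"


    class WithBoth(WithStrOnly):
        def __repr__(self) -> str:
            return "chunk-id"


    print(WithStrOnly())    # chunk-id
    print([WithStrOnly()])  # [<__main__.WithStrOnly object at 0x...>]
    print([WithBoth()])     # [chunk-id]
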
@@ -8,7 +8,6 @@ from frostfs_testlib.storage.cluster import Cluster, ClusterNode
 from frostfs_testlib.storage.controllers.shards_watcher import ShardsWatcher
 from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo
 from frostfs_testlib.storage.grpc_operations import interfaces
-from frostfs_testlib.testing.test_control import wait_for_success
 from frostfs_testlib.utils.cli_utils import parse_netmap_output


@@ -43,7 +42,6 @@ class ChunksOperations(interfaces.ChunksInterface):
             if cluster_node.host_ip == node_info.node:
                 return (cluster_node, node_info)

-    @wait_for_success(300, 5, fail_testcase=None)
     @reporter.step("Search shard with chunk {chunk}")
     def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str:
         oid_path = f"{chunk.object_id[0]}/{chunk.object_id[1]}/{chunk.object_id[2]}/{chunk.object_id[3]}"

@@ -62,10 +60,11 @@ class ChunksOperations(interfaces.ChunksInterface):
         rpc_endpoint: str,
         cid: str,
         oid: str,
+        wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
         generate_key: Optional[bool] = None,
-        trace: bool = True,
+        trace: bool = False,
         root: bool = False,
         verify_presence_all: bool = False,
         json: bool = True,

@@ -73,33 +72,20 @@ class ChunksOperations(interfaces.ChunksInterface):
         xhdr: Optional[dict] = None,
         timeout: Optional[str] = None,
     ) -> list[Chunk]:
-        object_nodes = self.cli.object.nodes(
-            rpc_endpoint=rpc_endpoint,
-            cid=cid,
-            address=address,
-            bearer=bearer,
-            generate_key=generate_key,
-            oid=oid,
-            trace=trace,
-            root=root,
-            verify_presence_all=verify_presence_all,
-            json=json,
-            ttl=ttl,
-            xhdr=xhdr,
-            timeout=timeout,
-        )
-        return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])
+        object_nodes = self.cli.object.nodes(**{param: value for param, value in locals().items() if param not in ["self"]})
+        return self._parse_object_nodes(object_nodes.stdout)

     @reporter.step("Get last parity chunk")
     def get_parity(
         self,
         rpc_endpoint: str,
         cid: str,
+        wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
         generate_key: Optional[bool] = None,
         oid: Optional[str] = None,
-        trace: bool = True,
+        trace: bool = False,
         root: bool = False,
         verify_presence_all: bool = False,
         json: bool = True,

@@ -107,56 +93,29 @@ class ChunksOperations(interfaces.ChunksInterface):
         xhdr: Optional[dict] = None,
         timeout: Optional[str] = None,
     ) -> Chunk:
-        object_nodes = self.cli.object.nodes(
-            rpc_endpoint=rpc_endpoint,
-            cid=cid,
-            address=address,
-            bearer=bearer,
-            generate_key=generate_key,
-            oid=oid,
-            trace=trace,
-            root=root,
-            verify_presence_all=verify_presence_all,
-            json=json,
-            ttl=ttl,
-            xhdr=xhdr,
-            timeout=timeout,
-        )
-        return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[-1]
+        object_nodes = self.cli.object.nodes(**{param: value for param, value in locals().items() if param not in ["self"]})
+        return self._parse_object_nodes(object_nodes.stdout)[-1]

     @reporter.step("Get first data chunk")
     def get_first_data(
         self,
         rpc_endpoint: str,
         cid: str,
-        oid: Optional[str] = None,
+        wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
         generate_key: Optional[bool] = None,
-        trace: bool = True,
+        oid: Optional[str] = None,
+        trace: bool = False,
         root: bool = False,
         verify_presence_all: bool = False,
         json: bool = True,
         ttl: Optional[int] = None,
         xhdr: Optional[dict] = None,
-        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+        timeout: Optional[str] = None,
     ) -> Chunk:
-        object_nodes = self.cli.object.nodes(
-            rpc_endpoint=rpc_endpoint,
-            cid=cid,
-            address=address,
-            bearer=bearer,
-            generate_key=generate_key,
-            oid=oid,
-            trace=trace,
-            root=root,
-            verify_presence_all=verify_presence_all,
-            json=json,
-            ttl=ttl,
-            xhdr=xhdr,
-            timeout=timeout,
-        )
-        return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[0]
+        object_nodes = self.cli.object.nodes(**{param: value for param, value in locals().items() if param not in ["self"]})
+        return self._parse_object_nodes(object_nodes.stdout)[0]

     def _parse_object_nodes(self, object_nodes: str) -> list[Chunk]:
         parse_result = json.loads(object_nodes)
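The refactor above swaps the explicit keyword list for the locals() forwarding idiom seen earlier in this diff. One property worth keeping in mind: locals() captures everything bound in the scope so far, so the call forwards exactly the parameters only while it stays the first statement of the method. A small demonstration:

    def forward_early(a: int, b: int = 2) -> dict:
        # First statement: only the parameters are bound at this point.
        return dict(locals())


    def forward_late(a: int, b: int = 2) -> dict:
        extra = a + b  # "extra" is now a local too...
        return dict(locals())  # ...and would be forwarded as well


    assert forward_early(1) == {"a": 1, "b": 2}
    assert forward_late(1) == {"a": 1, "b": 2, "extra": 3}
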
@@ -8,7 +8,7 @@ from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
 from frostfs_testlib.plugins import load_plugin
 from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
 from frostfs_testlib.s3.interfaces import BucketContainerResolver
-from frostfs_testlib.storage.cluster import Cluster, ClusterNode
+from frostfs_testlib.storage.cluster import ClusterNode
 from frostfs_testlib.storage.grpc_operations import interfaces
 from frostfs_testlib.utils import json_utils


@@ -266,7 +266,6 @@ class ContainerOperations(interfaces.ContainerInterface):
         self,
         endpoint: str,
         cid: str,
-        cluster: Cluster,
         address: Optional[str] = None,
         ttl: Optional[int] = None,
         from_file: Optional[str] = None,
@@ -509,7 +509,6 @@ class ObjectOperations(interfaces.ObjectInterface):
         cid: str,
         endpoint: str,
         bearer: str = "",
-        oid: Optional[str] = None,
         filters: Optional[dict] = None,
         expected_objects_list: Optional[list] = None,
         xhdr: Optional[dict] = None,

@@ -517,9 +516,6 @@ class ObjectOperations(interfaces.ObjectInterface):
         phy: bool = False,
         root: bool = False,
         timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
-        address: Optional[str] = None,
-        generate_key: Optional[bool] = None,
-        ttl: Optional[int] = None,
     ) -> list:
         """
         SEARCH an Object.

@@ -545,15 +541,11 @@ class ObjectOperations(interfaces.ObjectInterface):
             rpc_endpoint=endpoint,
             cid=cid,
             bearer=bearer,
-            oid=oid,
             xhdr=xhdr,
             filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()] if filters else None,
             session=session,
             phy=phy,
             root=root,
-            address=address,
-            generate_key=generate_key,
-            ttl=ttl,
             timeout=timeout,
         )
@@ -235,7 +235,6 @@ class ObjectInterface(ABC):
         cid: str,
         endpoint: str,
         bearer: str = "",
-        oid: Optional[str] = None,
         filters: Optional[dict] = None,
         expected_objects_list: Optional[list] = None,
         xhdr: Optional[dict] = None,

@@ -243,9 +242,6 @@ class ObjectInterface(ABC):
         phy: bool = False,
         root: bool = False,
         timeout: Optional[str] = None,
-        address: Optional[str] = None,
-        generate_key: Optional[bool] = None,
-        ttl: Optional[int] = None,
     ) -> List:
         pass

@@ -372,7 +368,6 @@ class ContainerInterface(ABC):
         self,
         endpoint: str,
         cid: str,
-        cluster: Cluster,
         address: Optional[str] = None,
         ttl: Optional[int] = None,
         from_file: Optional[str] = None,

@@ -381,7 +376,7 @@ class ContainerInterface(ABC):
         xhdr: Optional[dict] = None,
         generate_key: Optional[bool] = None,
         timeout: Optional[str] = None,
-    ) -> List[ClusterNode]:
+    ) -> List[str]:
         """Show the nodes participating in the container in the current epoch."""
         raise NotImplementedError("No implemethed method nodes")