forked from TrueCloudLab/frostfs-testcases
Refactor for cluster usage
Signed-off-by: Andrey Berezin <a.berezin@yadro.com>
This commit is contained in:
parent d9e881001e · commit bd05aae585
46 changed files with 3859 additions and 2703 deletions
@@ -1,55 +1,51 @@
 import logging
 from time import sleep
-from typing import Optional
 
 import allure
-from common import NEOFS_NETMAP_DICT
-from neofs_testlib.hosting import Hosting
+from cluster import Cluster, StorageNode
 from neofs_testlib.shell import Shell
-from python_keywords.node_management import node_healthcheck
+from python_keywords.node_management import storage_node_healthcheck
 from storage_policy import get_nodes_with_object
 
 logger = logging.getLogger("NeoLogger")
 
 
 @allure.step("Wait for object replication")
-def wait_object_replication_on_nodes(
-    wallet: str,
+def wait_object_replication(
     cid: str,
     oid: str,
     expected_copies: int,
     shell: Shell,
-    excluded_nodes: Optional[list[str]] = None,
-) -> list[str]:
-    excluded_nodes = excluded_nodes or []
+    nodes: list[StorageNode],
+) -> list[StorageNode]:
     sleep_interval, attempts = 15, 20
-    nodes = []
-    for __attempt in range(attempts):
-        nodes = get_nodes_with_object(wallet, cid, oid, shell=shell, skip_nodes=excluded_nodes)
-        if len(nodes) >= expected_copies:
-            return nodes
+    nodes_with_object = []
+    for _ in range(attempts):
+        nodes_with_object = get_nodes_with_object(cid, oid, shell=shell, nodes=nodes)
+        if len(nodes_with_object) >= expected_copies:
+            return nodes_with_object
         sleep(sleep_interval)
     raise AssertionError(
-        f"Expected {expected_copies} copies of object, but found {len(nodes)}. "
+        f"Expected {expected_copies} copies of object, but found {len(nodes_with_object)}. "
        f"Waiting time {sleep_interval * attempts}"
     )
 
 
-@allure.step("Wait for storage node returned to cluster")
-def wait_all_storage_node_returned(hosting: Hosting) -> None:
+@allure.step("Wait for storage nodes returned to cluster")
+def wait_all_storage_nodes_returned(cluster: Cluster) -> None:
     sleep_interval, attempts = 15, 20
     for __attempt in range(attempts):
-        if is_all_storage_node_returned(hosting):
+        if is_all_storage_nodes_returned(cluster):
             return
         sleep(sleep_interval)
     raise AssertionError("Storage node(s) is broken")
 
 
-def is_all_storage_node_returned(hosting: Hosting) -> bool:
+def is_all_storage_nodes_returned(cluster: Cluster) -> bool:
     with allure.step("Run health check for all storage nodes"):
-        for node_name in NEOFS_NETMAP_DICT.keys():
+        for node in cluster.storage_nodes:
             try:
-                health_check = node_healthcheck(hosting, node_name)
+                health_check = storage_node_healthcheck(node)
             except Exception as err:
                 logger.warning(f"Node healthcheck fails with error {err}")
                 return False
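For illustration only, a minimal sketch of how the refactored helpers might be driven from a test after this change. The fixture names below (cluster, shell, cid, oid) and the expected_copies value are assumptions, not part of the commit; only the helper signatures come from the diff.

# Hypothetical usage of the cluster-based helpers introduced by this refactoring.
def example_check_replication(cluster: Cluster, shell: Shell, cid: str, oid: str) -> None:
    # Block until the object is stored on at least two storage nodes of the cluster.
    nodes_with_object = wait_object_replication(
        cid, oid, expected_copies=2, shell=shell, nodes=cluster.storage_nodes
    )
    logger.info(f"Object {oid} is stored on {len(nodes_with_object)} nodes")

    # After a node restart scenario, wait until every storage node reports healthy again.
    wait_all_storage_nodes_returned(cluster)

The design change visible in the hunk: the global NEOFS_NETMAP_DICT / Hosting plumbing is replaced by an explicit Cluster object, so callers pass the exact StorageNode list they care about instead of relying on module-level configuration.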