forked from TrueCloudLab/frostfs-testcases

commit f97bfed183 (parent b468a06f4e)

Add test for adding node to cluster

Signed-off-by: a.y.volkov <a.y.volkov@yadro.com>

9 changed files with 218 additions and 30 deletions
@@ -16,6 +16,8 @@ import pexpect
 from robot.api import logger
 
 ROBOT_AUTO_KEYWORDS = False
+COLOR_GREEN = "\033[92m"
+COLOR_OFF = "\033[0m"
 
 
 def _cmd_run(cmd: str, timeout: int = 30) -> str:
@@ -26,7 +28,7 @@ def _cmd_run(cmd: str, timeout: int = 30) -> str:
     compl_proc = None
     start_time = datetime.now()
     try:
-        logger.info(f"Executing command: {cmd}")
+        logger.info(f"{COLOR_GREEN}Executing command: {cmd}{COLOR_OFF}")
         start_time = datetime.utcnow()
         compl_proc = subprocess.run(cmd, check=True, universal_newlines=True,
                                     stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
@@ -35,7 +37,7 @@ def _cmd_run(cmd: str, timeout: int = 30) -> str:
         output = compl_proc.stdout
         return_code = compl_proc.returncode
         end_time = datetime.utcnow()
-        logger.info(f"Output: {output}")
+        logger.info(f"{COLOR_GREEN}Output: {output}{COLOR_OFF}")
         _attach_allure_log(cmd, output, return_code, start_time, end_time)
 
         return output
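
The change above wraps the command logging in ANSI escape codes so executed commands stand out in console output. A minimal sketch of the pattern, with print() standing in for robot.api.logger, which passes escape codes through to the console:

    # Sketch of the ANSI color wrapping introduced above.
    COLOR_GREEN = "\033[92m"  # switch foreground to bright green
    COLOR_OFF = "\033[0m"     # reset all terminal attributes

    cmd = "ls -la"  # placeholder command
    print(f"{COLOR_GREEN}Executing command: {cmd}{COLOR_OFF}")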
@@ -1,5 +1,7 @@
 #!/usr/bin/python3.9
+import sys
+
 import allure
 from robot.api import logger
 from robot.api.deco import keyword
@@ -28,18 +30,27 @@ def tick_epoch():
         # If neofs-adm is available, then we tick epoch with it (to be consistent with UAT tests)
         cmd = f"{NEOFS_ADM_EXEC} morph force-new-epoch -c {NEOFS_ADM_CONFIG_PATH}"
         logger.info(f"Executing shell command: {cmd}")
+        out = ''
+        err = ''
         try:
             out = wrappers.run_sh(cmd)
             logger.info(f"Command completed with output: {out}")
         except Exception as exc:
             logger.error(exc)
+            err = str(exc)
             raise RuntimeError("Failed to tick epoch") from exc
+        finally:
+            if 'allure' in sys.modules:
+                allure.attach((
+                    f'COMMAND: {cmd}\n'
+                    f'OUTPUT:\n {out}\n'
+                    f'ERROR: {err}\n'
+                ), 'Tick Epoch', allure.attachment_type.TEXT)
         return
 
     # Otherwise we tick epoch using transaction
     cur_epoch = get_epoch()
     return contract.invoke_contract_multisig(
         contract.get_netmap_contract_hash(MORPH_ENDPOINT),
-        f"newEpoch int:{cur_epoch+1}",
+        f"newEpoch int:{cur_epoch + 1}",
         IR_WALLET_PATH, IR_WALLET_PASS, MORPH_ENDPOINT)
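
The new finally block guards the attachment with a sys.modules lookup, so the keyword keeps working even when allure is not loaded in the running process. A minimal sketch of that guard in isolation; the helper name attach_to_allure is hypothetical, not part of the commit:

    import sys


    def attach_to_allure(name: str, body: str) -> None:
        # Attach only if allure has actually been imported somewhere in this
        # process; the sys.modules lookup avoids a hard dependency on the package.
        if 'allure' in sys.modules:
            allure = sys.modules['allure']
            allure.attach(body, name, allure.attachment_type.TEXT)


    attach_to_allure('Tick Epoch', 'COMMAND: ...\nOUTPUT: ...\nERROR: ...\n')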

robot/resources/lib/python_keywords/failover_utils.py (new file, 49 lines)

@@ -0,0 +1,49 @@
+import logging
+from time import sleep
+from typing import Optional
+
+import allure
+
+from common import NEOFS_NETMAP_DICT
+from python_keywords.node_management import node_healthcheck
+from storage_policy import get_nodes_with_object
+
+logger = logging.getLogger('NeoLogger')
+
+
+@allure.step('Wait for object replication')
+def wait_object_replication_on_nodes(wallet: str, cid: str, oid: str, expected_copies: int,
+                                     excluded_nodes: Optional[list[str]] = None) -> list[str]:
+    excluded_nodes = excluded_nodes or []
+    sleep_interval, attempts = 10, 18
+    nodes = []
+    for __attempt in range(attempts):
+        nodes = get_nodes_with_object(wallet, cid, oid, skip_nodes=excluded_nodes)
+        if len(nodes) == expected_copies:
+            return nodes
+        sleep(sleep_interval)
+    raise AssertionError(f'Expected {expected_copies} copies of object, but found {len(nodes)}. '
+                         f'Waiting time {sleep_interval * attempts}')
+
+
+@allure.step('Wait for storage node returned to cluster')
+def wait_all_storage_node_returned():
+    sleep_interval, attempts = 10, 12
+    for __attempt in range(attempts):
+        if is_all_storage_node_returned():
+            return
+        sleep(sleep_interval)
+    raise AssertionError('Storage node(s) is broken')
+
+
+def is_all_storage_node_returned() -> bool:
+    with allure.step('Run health check for all storage nodes'):
+        for node_name in NEOFS_NETMAP_DICT.keys():
+            try:
+                health_check = node_healthcheck(node_name)
+            except Exception as err:
+                logger.warning(f'Node healthcheck fails with error {err}')
+                return False
+            if health_check.health_status != 'READY' or health_check.network_status != 'ONLINE':
+                return False
+    return True
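
For orientation, a hypothetical call of the new helpers. The wallet path, container ID, object ID and node endpoint below are placeholders, and the import path assumes the repository's python_keywords layout:

    from python_keywords.failover_utils import (wait_all_storage_node_returned,
                                                wait_object_replication_on_nodes)

    # Polls up to 18 times at 10 s intervals (3 minutes total) until exactly
    # two copies of the object are reported, ignoring the one excluded node.
    nodes = wait_object_replication_on_nodes(
        wallet='/path/to/wallet.json',        # placeholder wallet file
        cid='EXAMPLE_CONTAINER_ID',           # placeholder container ID
        oid='EXAMPLE_OBJECT_ID',              # placeholder object ID
        expected_copies=2,
        excluded_nodes=['s01.example:8080'],  # hypothetical node endpoint
    )

    # Blocks until every node in NEOFS_NETMAP_DICT reports READY/ONLINE,
    # or raises AssertionError after 12 attempts (2 minutes).
    wait_all_storage_node_returned()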

robot/resources/lib/python_keywords/node_management.py

@@ -178,7 +178,7 @@ def node_healthcheck(node_name: str) -> HealthStatus:
 
 
 @keyword('Set status for node')
-def node_set_status(node_name: str, status: str) -> None:
+def node_set_status(node_name: str, status: str, retry=False) -> None:
     """
     The function sets particular status for given node.
     Args:
@@ -188,7 +188,12 @@ def node_set_status(node_name: str, status: str) -> None:
         (void)
     """
     command = f"control set-status --status {status}"
-    run_control_command(node_name, command)
+    try:
+        run_control_command(node_name, command)
+    except AssertionError as err:
+        if not retry:
+            raise AssertionError(f'Command control set-status failed with error {err}') from err
+        run_control_command(node_name, command)
 
 
 @keyword('Get netmap snapshot')
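
The new retry flag gives callers one extra attempt before the failure propagates. A usage sketch; 's01' is a placeholder node name:

    from python_keywords.node_management import node_set_status

    # Default: a failed `control set-status` raises AssertionError immediately.
    node_set_status('s01', 'offline')

    # With retry=True the command is re-run once after an AssertionError; a
    # failure on the second attempt still propagates to the caller.
    node_set_status('s01', 'online', retry=True)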