forked from TrueCloudLab/frostfs-testcases
Fix node management tests.
This commit is contained in:
parent 69b3061a90
commit 72e3292b51

3 changed files with 18 additions and 6 deletions

In summary: the HostClient SSH login becomes a required argument, the panic-reboot helper now connects as root, the `start_node_if_needed` fixture is renamed to `after_run_start_all_nodes`, a new `after_run_set_all_nodes_online` fixture restores node status after a test, the expected network status of an offline node is corrected to `OFFLINE`, and the replication test is skipped because the scenario is covered by the Sbercloud failover tests.
Changes to `class HostClient` (SSH helper):

```diff
@@ -65,7 +65,7 @@ class HostClient:
     TIMEOUT_RESTORE_CONNECTION = 10, 24
 
-    def __init__(self, ip: str, login: Optional[str] = None, password: Optional[str] = None,
+    def __init__(self, ip: str, login: str, password: Optional[str] = None,
                  private_key_path: Optional[str] = None, init_ssh_client=True) -> None:
         self.ip = ip
         self.login = login
```
```diff
@@ -154,6 +154,7 @@ class HostClient:
         )
         self.ssh_client.connect(
             hostname=self.ip,
+            username=self.login,
             pkey=RSAKey.from_private_key_file(self.private_key_path, self.password),
             timeout=self.CONNECTION_TIMEOUT
         )
```
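This hunk records one addition and no deletions, so the new line is most plausibly `username=self.login`, which fits the login change above: without an explicit username, paramiko authenticates as the local OS user. A minimal sketch of the same key-based connect pattern; everything outside the paramiko API is illustrative:

```python
from typing import Optional

from paramiko import AutoAddPolicy, RSAKey, SSHClient


def open_ssh(ip: str, login: str, private_key_path: str,
             key_passphrase: Optional[str] = None, timeout: int = 90) -> SSHClient:
    client = SSHClient()
    # Accept unseen host keys; acceptable for disposable test hosts.
    client.set_missing_host_key_policy(AutoAddPolicy())
    client.connect(
        hostname=ip,
        username=login,  # omit this and paramiko falls back to the local OS user
        pkey=RSAKey.from_private_key_file(private_key_path, key_passphrase),
        timeout=timeout,
    )
    return client
```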
Changes to the panic-reboot helper:

```diff
@@ -41,7 +41,7 @@ def return_all_storage_nodes_fixture(sbercloud_client):
 
 
 def panic_reboot_host(ip: str = None):
-    ssh = HostClient(ip=ip, private_key_path=f"{os.getcwd()}/configuration/id_rsa")
+    ssh = HostClient(ip=ip, login="root", private_key_path=f"{os.getcwd()}/configuration/id_rsa")
     ssh.exec('echo 1 > /proc/sys/kernel/sysrq')
     with pytest.raises(HostIsNotAvailable):
         ssh.exec('echo b > /proc/sysrq-trigger', timeout=1)
```
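Writing `1` to `/proc/sys/kernel/sysrq` enables all magic-SysRq functions, and writing `b` to `/proc/sysrq-trigger` reboots the host immediately, without syncing or unmounting; the second `ssh.exec` therefore cannot return cleanly, which is exactly what `pytest.raises(HostIsNotAvailable)` asserts. After such a reboot the client has to wait for SSH to come back. A self-contained sketch of that wait; reading `TIMEOUT_RESTORE_CONNECTION = 10, 24` as an `(interval, attempts)` pair is an assumption, not something the diff confirms:

```python
import socket
import time


def wait_for_ssh_port(ip: str, port: int = 22,
                      interval: int = 10, attempts: int = 24) -> None:
    # Assumed reading of TIMEOUT_RESTORE_CONNECTION = 10, 24: poll every
    # `interval` seconds, at most `attempts` times.
    for _ in range(attempts):
        try:
            with socket.create_connection((ip, port), timeout=5):
                return  # port answers again, host is back
        except OSError:
            time.sleep(interval)
    raise TimeoutError(f"{ip}:{port} did not come back after reboot")
```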
Changes to the node management test suite:

```diff
@@ -54,7 +54,7 @@ def crate_container_and_pick_node(prepare_wallet_and_deposit):
 
 
 @pytest.fixture
-def start_node_if_needed():
+def after_run_start_all_nodes():
     yield
     try:
         start_nodes_remote(list(NEOFS_NETMAP_DICT.keys()))
```
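The rename makes the pytest yield-fixture contract explicit: everything after `yield` is teardown and runs after the test body, whether the test passed or failed, so all nodes are started again unconditionally rather than "if needed". A minimal, self-contained illustration (names are placeholders):

```python
import pytest


@pytest.fixture
def after_run_cleanup():
    # setup would go here
    yield  # the test body runs at this point
    print("teardown: runs even when the test fails")


def test_example(after_run_cleanup):
    assert True
```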
```diff
@@ -62,6 +62,16 @@ def start_node_if_needed():
         logger.error(f'Node start fails with error:\n{err}')
 
 
+@pytest.fixture
+def after_run_set_all_nodes_online():
+    yield
+    for node in list(NEOFS_NETMAP_DICT.keys()):
+        try:
+            node_set_status(node, status="online")
+        except Exception as err:
+            logger.error(f"Node status change fails with error:\n{err}")
+
+
 @allure.title('Control Operations with storage nodes')
 @pytest.mark.node_mgmt
 def test_nodes_management(prepare_tmp_dir):
```
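The new fixture catches `Exception` per node on purpose: one failed status change must not abort cleanup for the remaining nodes. The same best-effort pattern in isolation, with the status call injected as a parameter because `node_set_status` is suite-internal:

```python
import logging
from typing import Callable, Iterable

logger = logging.getLogger(__name__)


def set_all_online(nodes: Iterable[str], set_status: Callable[..., None]) -> None:
    for node in nodes:
        try:
            set_status(node, status="online")
        except Exception as err:  # best effort: keep going on failure
            logger.error(f"Node status change fails with error:\n{err}")
```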
```diff
@@ -86,7 +96,7 @@ def test_nodes_management(prepare_tmp_dir):
 
     with allure.step(f'Check node {random_node} went to offline'):
         health_check = node_healthcheck(random_node)
-        assert health_check.health_status == 'READY' and health_check.network_status == 'STATUS_UNDEFINED'
+        assert health_check.health_status == 'READY' and health_check.network_status == 'OFFLINE'
         snapshot = get_netmap_snapshot(node_name=alive_node)
         assert random_node not in snapshot, f'Expected node {random_node} not in netmap'
 
```
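The fix expects a node that left the netmap to report `OFFLINE` rather than `STATUS_UNDEFINED`. Since status transitions need not be instantaneous, a polling wrapper is a common way to harden such checks; this is a hypothetical sketch, with the healthcheck injected because `node_healthcheck` is suite-internal:

```python
import time
from typing import Callable


def wait_until_network_status(node: str, expected: str, healthcheck: Callable,
                              attempts: int = 12, interval: int = 5):
    # Poll until the node reports the expected network status, e.g.
    # wait_until_network_status(random_node, "OFFLINE", node_healthcheck).
    for _ in range(attempts):
        health = healthcheck(node)
        if health.network_status == expected:
            return health
        time.sleep(interval)
    raise AssertionError(f"{node} never reached network status {expected!r}")
```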
```diff
@@ -166,9 +176,10 @@ def test_placement_policy_negative(prepare_wallet_and_deposit, placement_rule, e
     validate_object_copies(wallet, placement_rule, file_path, expected_copies)
 
 
+@pytest.mark.skip(reason="We cover this scenario for Sbercloud in failover tests")
 @pytest.mark.node_mgmt
-@allure.title('NeoFS object replication on node failover')
-def test_replication(prepare_wallet_and_deposit, start_node_if_needed):
+@allure.title("NeoFS object replication on node failover")
+def test_replication(prepare_wallet_and_deposit, after_run_start_all_nodes):
     """
     Test checks object replication on storage not failover and come back.
     """
```
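`pytest.mark.skip` is unconditional, so the test is now skipped everywhere; the reason string only shows up in runs that report skips (for example `pytest -rs`). If the skip should apply only where failover tests cover the scenario, `skipif` is the conditional variant; the `CLOUD_PROVIDER` environment variable below is hypothetical:

```python
import os

import pytest


@pytest.mark.skipif(
    os.getenv("CLOUD_PROVIDER") == "sbercloud",
    reason="We cover this scenario for Sbercloud in failover tests",
)
def test_replication_stub():
    ...
```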