Fix node management tests

When we call the storage node's control endpoint, we need to override the storage wallet path.

Signed-off-by: Vladimir Domnich <v.domnich@yadro.com>

parent eb5532c08e
commit 2c232c222c

7 changed files with 108 additions and 69 deletions
@@ -65,7 +65,7 @@ class HostClient:
     TIMEOUT_RESTORE_CONNECTION = 10, 24
 
-    def __init__(self, ip: str, login: Optional[str] = None, password: Optional[str] = None,
+    def __init__(self, ip: str, login: str, password: Optional[str] = None,
                  private_key_path: Optional[str] = None, init_ssh_client=True) -> None:
         self.ip = ip
         self.login = login
@@ -154,6 +154,7 @@ class HostClient:
             )
         self.ssh_client.connect(
             hostname=self.ip,
             username=self.login,
+            pkey=RSAKey.from_private_key_file(self.private_key_path, self.password),
             timeout=self.CONNECTION_TIMEOUT
         )
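Not part of the diff: a minimal sketch of how paramiko consumes a private key the way the `pkey` line above does. The host, user, and key path are illustrative only; the second argument of `from_private_key_file` is the key passphrase (may be `None`).

```python
from paramiko import AutoAddPolicy, RSAKey, SSHClient

# Illustrative values, not taken from the test configuration
key = RSAKey.from_private_key_file("/path/to/id_rsa", password=None)

client = SSHClient()
client.set_missing_host_key_policy(AutoAddPolicy())
client.connect(hostname="192.0.2.10", username="root", pkey=key, timeout=60)
client.exec_command("uname -a")
client.close()
```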
@@ -34,14 +34,14 @@ def sbercloud_client():
         pytest.fail('SberCloud infrastructure not available')
 
 
-@pytest.fixture(scope='session', autouse=True)
+@pytest.fixture(autouse=True)
 def return_all_storage_nodes_fixture(sbercloud_client):
     yield
     return_all_storage_nodes(sbercloud_client)
 
 
 def panic_reboot_host(ip: str = None):
-    ssh = HostClient(ip=ip)
+    ssh = HostClient(ip=ip, login="root", private_key_path=f"{os.getcwd()}/configuration/id_rsa")
     ssh.exec('echo 1 > /proc/sys/kernel/sysrq')
     with pytest.raises(HostIsNotAvailable):
         ssh.exec('echo b > /proc/sysrq-trigger', timeout=1)
@@ -51,8 +51,8 @@ def return_all_storage_nodes(sbercloud_client: SberCloud):
     for host in stopped_hosts:
         with allure.step(f'Start storage node {host}'):
             sbercloud_client.start_node(node_ip=host.split(':')[-2])
-            stopped_hosts.remove(host)
     wait_all_storage_node_returned()
+    stopped_hosts.clear()
 
 
 def is_all_storage_node_returned() -> bool:
@@ -54,7 +54,7 @@ def crate_container_and_pick_node(prepare_wallet_and_deposit):
 
 
 @pytest.fixture
-def start_node_if_needed():
+def after_run_start_all_nodes():
     yield
     try:
         start_nodes_remote(list(NEOFS_NETMAP_DICT.keys()))
@@ -62,6 +62,16 @@ def start_node_if_needed():
         logger.error(f'Node start fails with error:\n{err}')
 
 
+@pytest.fixture
+def after_run_set_all_nodes_online():
+    yield
+    for node in list(NEOFS_NETMAP_DICT.keys()):
+        try:
+            node_set_status(node, status="online")
+        except Exception as err:
+            logger.error(f"Node status change fails with error:\n{err}")
+
+
 @allure.title('Control Operations with storage nodes')
 @pytest.mark.node_mgmt
 def test_nodes_management(prepare_tmp_dir):
@@ -86,7 +96,7 @@ def test_nodes_management(prepare_tmp_dir):
 
     with allure.step(f'Check node {random_node} went to offline'):
         health_check = node_healthcheck(random_node)
-        assert health_check.health_status == 'READY' and health_check.network_status == 'STATUS_UNDEFINED'
+        assert health_check.health_status == 'READY' and health_check.network_status == 'OFFLINE'
         snapshot = get_netmap_snapshot(node_name=alive_node)
         assert random_node not in snapshot, f'Expected node {random_node} not in netmap'
@@ -166,9 +176,10 @@ def test_placement_policy_negative(prepare_wallet_and_deposit, placement_rule, e
     validate_object_copies(wallet, placement_rule, file_path, expected_copies)
 
 
+@pytest.mark.skip(reason="We cover this scenario for Sbercloud in failover tests")
 @pytest.mark.node_mgmt
-@allure.title('NeoFS object replication on node failover')
-def test_replication(prepare_wallet_and_deposit, start_node_if_needed):
+@allure.title("NeoFS object replication on node failover")
+def test_replication(prepare_wallet_and_deposit, after_run_start_all_nodes):
     """
     Test checks object replication on storage not failover and come back.
     """
@@ -3,15 +3,15 @@ from time import sleep
 
 import allure
 import pytest
+from common import SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE
+from container import create_container
 from epoch import get_epoch, tick_epoch
+from tombstone import verify_head_tombstone
 from python_keywords.neofs_verbs import (delete_object, get_object, get_range,
                                          get_range_hash, head_object,
                                          put_object, search_object)
 from python_keywords.storage_policy import get_simple_object_copies
 from python_keywords.utility_keywords import generate_file, get_file_hash
-from common import SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE
-from tombstone import verify_head_tombstone
 from utility import get_file_content
 
 logger = logging.getLogger('NeoLogger')
@@ -101,11 +101,12 @@ def test_object_api(prepare_wallet_and_deposit, request, object_size):
 @pytest.mark.sanity
 @pytest.mark.grpc_api
 @pytest.mark.parametrize('object_size', [SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE], ids=['simple object', 'complex object'])
-def test_object_life_time(prepare_container, request, object_size):
+def test_object_api(prepare_wallet_and_deposit, request, object_size):
     """
     Test object deleted after expiration epoch.
     """
-    cid, wallet = prepare_container
+    wallet = prepare_wallet_and_deposit
+    cid = create_container(wallet)
 
     allure.dynamic.title(f'Test object life time for {request.node.callspec.id}')
@@ -99,14 +99,14 @@ class AwsCliClient:
 
         cmd = f'aws --no-verify-ssl s3api delete-objects --bucket {Bucket} --delete file://{file_path} ' \
               f'--endpoint {S3_GATE}'
-        output = _cmd_run(cmd)
+        output = _cmd_run(cmd, timeout=90)
         return self._to_json(output)
 
     def delete_object(self, Bucket: str, Key: str, VersionId: str = None) -> dict:
         version = f' --version-id {VersionId}' if VersionId else ''
         cmd = f'aws --no-verify-ssl s3api delete-object --bucket {Bucket} --key {Key} {version}' \
               f' --endpoint {S3_GATE}'
-        output = _cmd_run(cmd)
+        output = _cmd_run(cmd, timeout=90)
         return self._to_json(output)
 
     def get_object_attributes(self, bucket: str, key: str, *attributes: str, version_id: str = None,
@@ -122,7 +122,7 @@ class AwsCliClient:
 
     def delete_bucket(self, Bucket: str) -> dict:
         cmd = f'aws --no-verify-ssl s3api delete-bucket --bucket {Bucket} --endpoint {S3_GATE}'
-        output = _cmd_run(cmd)
+        output = _cmd_run(cmd, timeout=90)
         return self._to_json(output)
 
     def get_bucket_tagging(self, Bucket: str) -> dict:
@@ -1,4 +1,4 @@
-#!/usr/bin/python3
+#!/usr/bin/python3.9
 
 """
 This module contains keywords for management test stand
@@ -9,14 +9,16 @@ import random
 import re
 from contextlib import contextmanager
 from dataclasses import dataclass
-from typing import List
+from typing import Optional
 
+from cli_helpers import _cmd_run
 import docker
-from common import (NEOFS_NETMAP_DICT, STORAGE_NODE_BIN_PATH, STORAGE_NODE_CONFIG_PATH,
-                    STORAGE_NODE_PRIVATE_CONTROL_ENDPOINT, STORAGE_NODE_PWD, STORAGE_NODE_USER)
+from common import (NEOFS_CLI_EXEC, NEOFS_NETMAP_DICT, STORAGE_CONTROL_ENDPOINT_PRIVATE,
+                    STORAGE_NODE_BIN_PATH, STORAGE_NODE_SSH_PASSWORD,
+                    STORAGE_NODE_SSH_PRIVATE_KEY_PATH, STORAGE_NODE_SSH_USER, WALLET_CONFIG)
 from robot.api import logger
 from robot.api.deco import keyword
-from ssh_helper import HostClient, HostIsNotAvailable
+from ssh_helper import HostClient
 
 ROBOT_AUTO_KEYWORDS = False
@@ -42,12 +44,16 @@ def create_ssh_client(node_name: str) -> HostClient:
     if node_name not in NEOFS_NETMAP_DICT:
         raise AssertionError(f'Node {node_name} is not found!')
 
+    # We use rpc endpoint to determine host address, because control endpoint
+    # (if it is private) will be a local address on the host machine
     node_config = NEOFS_NETMAP_DICT.get(node_name)
-    host = node_config.get('control').split(':')[0]
-    try:
-        ssh_client = HostClient(host, STORAGE_NODE_USER, STORAGE_NODE_PWD)
-    except HostIsNotAvailable:
-        ssh_client = HostClient(host)
+    host = node_config.get('rpc').split(':')[0]
+    ssh_client = HostClient(
+        host,
+        login=STORAGE_NODE_SSH_USER,
+        password=STORAGE_NODE_SSH_PASSWORD,
+        private_key_path=STORAGE_NODE_SSH_PRIVATE_KEY_PATH,
+    )
 
     try:
         yield ssh_client
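Not part of the diff: a rough sketch of why the SSH host is now derived from the `rpc` endpoint. The dictionary contents are illustrative; only the field names follow the code above.

```python
# Illustrative NEOFS_NETMAP_DICT entry for one node (values are made up)
node_config = {
    "rpc": "s01.neofs.devenv:8080",       # reachable from the test runner
    "control": "localhost:8091",          # private control endpoint, local to the node host
    "wallet_path": "/path/to/wallet01.json",
}

# The control endpoint may resolve to a loopback address on the node itself,
# so the SSH target has to be taken from the rpc endpoint instead.
ssh_host = node_config["rpc"].split(":")[0]   # -> "s01.neofs.devenv"
```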
@@ -111,9 +117,9 @@ def get_control_endpoint_and_wallet(endpoint_number: str = ''):
 
     endpoint_values = NEOFS_NETMAP_DICT[f'{endpoint_num}']
     endpoint_control = endpoint_values['control']
-    wlt = endpoint_values['wallet_path']
+    wallet = endpoint_values['wallet_path']
 
-    return endpoint_num, endpoint_control, wlt
+    return endpoint_num, endpoint_control, wallet
 
 
 @keyword('Get Locode')
@@ -166,16 +172,13 @@ def node_healthcheck(node_name: str) -> HealthStatus:
     Returns:
         health status as HealthStatus object.
     """
-    with create_ssh_client(node_name) as ssh_client:
-        cmd = f'{STORAGE_NODE_BIN_PATH}/neofs-cli control healthcheck ' \
-              f'--endpoint {STORAGE_NODE_PRIVATE_CONTROL_ENDPOINT} ' \
-              f'--config {STORAGE_NODE_CONFIG_PATH}'
-        output = ssh_client.exec_with_confirmation(cmd, [''])
-        return HealthStatus.from_stdout(output.stdout)
+    command = "control healthcheck"
+    output = run_control_command(node_name, command)
+    return HealthStatus.from_stdout(output)
 
 
 @keyword('Set status for node')
-def node_set_status(node_name: str, status: str):
+def node_set_status(node_name: str, status: str) -> None:
     """
     The function sets particular status for given node.
     Args:
@@ -184,15 +187,12 @@ def node_set_status(node_name: str, status: str):
     Returns:
         (void)
     """
-    with create_ssh_client(node_name) as ssh_client:
-        cmd = f'{STORAGE_NODE_BIN_PATH}/neofs-cli control set-status ' \
-              f'--endpoint {STORAGE_NODE_PRIVATE_CONTROL_ENDPOINT} ' \
-              f'--config {STORAGE_NODE_CONFIG_PATH} --status {status}'
-        ssh_client.exec_with_confirmation(cmd, [''])
+    command = f"control set-status --status {status}"
+    run_control_command(node_name, command)
 
 
 @keyword('Get netmap snapshot')
-def get_netmap_snapshot(node_name: str = None) -> str:
+def get_netmap_snapshot(node_name: Optional[str] = None) -> str:
     """
     The function returns string representation of netmap-snapshot.
     Args:
@@ -201,17 +201,12 @@ def get_netmap_snapshot(node_name: str = None) -> str:
         string representation of netmap-snapshot
     """
     node_name = node_name or list(NEOFS_NETMAP_DICT)[0]
 
-    with create_ssh_client(node_name) as ssh_client:
-        cmd = f'{STORAGE_NODE_BIN_PATH}/neofs-cli control netmap-snapshot ' \
-              f'--endpoint {STORAGE_NODE_PRIVATE_CONTROL_ENDPOINT} ' \
-              f'--config {STORAGE_NODE_CONFIG_PATH}'
-        output = ssh_client.exec_with_confirmation(cmd, [''])
-        return output.stdout
+    command = "control netmap-snapshot"
+    return run_control_command(node_name, command)
 
 
 @keyword('Shard list for node')
-def node_shard_list(node_name: str) -> List[str]:
+def node_shard_list(node_name: str) -> list[str]:
     """
     The function returns list of shards for particular node.
     Args:
@@ -219,12 +214,9 @@ def node_shard_list(node_name: str) -> List[str]:
     Returns:
         list of shards.
     """
-    with create_ssh_client(node_name) as ssh_client:
-        cmd = f'{STORAGE_NODE_BIN_PATH}/neofs-cli control shards list ' \
-              f'--endpoint {STORAGE_NODE_PRIVATE_CONTROL_ENDPOINT} ' \
-              f'--config {STORAGE_NODE_CONFIG_PATH}'
-        output = ssh_client.exec_with_confirmation(cmd, [''])
-        return re.findall(r'Shard (.*):', output.stdout)
+    command = "control shards list"
+    output = run_control_command(node_name, command)
+    return re.findall(r'Shard (.*):', output)
 
 
 @keyword('Shard list for node')
@@ -236,12 +228,8 @@ def node_shard_set_mode(node_name: str, shard: str, mode: str) -> str:
     Returns:
         health status as HealthStatus object.
    """
-    with create_ssh_client(node_name) as ssh_client:
-        cmd = f'{STORAGE_NODE_BIN_PATH}/neofs-cli control shards set-mode ' \
-              f'--endpoint {STORAGE_NODE_PRIVATE_CONTROL_ENDPOINT} ' \
-              f'--config {STORAGE_NODE_CONFIG_PATH} --id {shard} --mode {mode}'
-        output = ssh_client.exec_with_confirmation(cmd, [''])
-        return output.stdout
+    command = f"control shards set-mode --id {shard} --mode {mode}"
+    return run_control_command(node_name, command)
 
 
 @keyword('Drop object from node {node_name}')
@@ -253,9 +241,40 @@ def drop_object(node_name: str, cid: str, oid: str) -> str:
     Returns:
         health status as HealthStatus object.
     """
-    with create_ssh_client(node_name) as ssh_client:
-        cmd = f'{STORAGE_NODE_BIN_PATH}/neofs-cli control drop-objects ' \
-              f'--endpoint {STORAGE_NODE_PRIVATE_CONTROL_ENDPOINT} ' \
-              f'--config {STORAGE_NODE_CONFIG_PATH} -o {cid}/{oid}'
-        output = ssh_client.exec_with_confirmation(cmd, [''])
+    command = f"control drop-objects -o {cid}/{oid}"
+    return run_control_command(node_name, command)
+
+
+def run_control_command(node_name: str, command: str) -> str:
+    control_endpoint = NEOFS_NETMAP_DICT[node_name]["control"]
+    wallet_path = NEOFS_NETMAP_DICT[node_name]["wallet_path"]
+
+    if not STORAGE_CONTROL_ENDPOINT_PRIVATE:
+        cmd = (
+            f'{NEOFS_CLI_EXEC} {command} --rpc-endpoint {control_endpoint} '
+            f'--wallet {wallet_path} --config {WALLET_CONFIG}'
+        )
+        output = _cmd_run(cmd)
+        return output
+
+    # Private control endpoint is accessible only from the host where storage node is running
+    # So, we connect to storage node host and run CLI command from there
+    with create_ssh_client(node_name) as ssh_client:
+        # Copy wallet content on storage node host
+        with open(wallet_path, "r") as file:
+            wallet = file.read()
+        remote_wallet_path = f"/tmp/{node_name}-wallet.json"
+        ssh_client.exec_with_confirmation(f"echo '{wallet}' > {remote_wallet_path}", [""])
+
+        # Put config on storage node host
+        remote_config_path = f"/tmp/{node_name}-config.yaml"
+        remote_config = 'password: ""'
+        ssh_client.exec_with_confirmation(f"echo '{remote_config}' > {remote_config_path}", [""])
+
+        # Execute command
+        cmd = (
+            f'{STORAGE_NODE_BIN_PATH}/neofs-cli {command} --endpoint {control_endpoint} '
+            f'--wallet {remote_wallet_path} --config {remote_config_path}'
+        )
+        output = ssh_client.exec_with_confirmation(cmd, [""])
+        return output.stdout
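Not part of the diff: a minimal usage sketch of the new helper, assuming it runs in the same keywords module as the functions above. The node key is illustrative and must exist in NEOFS_NETMAP_DICT; the command string is passed to neofs-cli verbatim.

```python
# Illustrative node key; real keys come from NEOFS_NETMAP_DICT in common.py
node = "s01.neofs.devenv"

# Runs either locally (public control endpoint) or over SSH on the node host
# (private control endpoint), depending on STORAGE_CONTROL_ENDPOINT_PRIVATE.
stdout = run_control_command(node, "control healthcheck")
health = HealthStatus.from_stdout(stdout)

run_control_command(node, "control set-status --status online")
```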
@@ -43,6 +43,7 @@ STORAGE_CONTROL_ENDPOINT_1 = os.getenv("STORAGE_CONTROL_ENDPOINT_1", "s01.neofs.
 STORAGE_CONTROL_ENDPOINT_2 = os.getenv("STORAGE_CONTROL_ENDPOINT_2", "s02.neofs.devenv:8081")
 STORAGE_CONTROL_ENDPOINT_3 = os.getenv("STORAGE_CONTROL_ENDPOINT_3", "s03.neofs.devenv:8081")
 STORAGE_CONTROL_ENDPOINT_4 = os.getenv("STORAGE_CONTROL_ENDPOINT_4", "s04.neofs.devenv:8081")
+STORAGE_CONTROL_ENDPOINT_PRIVATE = os.getenv("STORAGE_CONTROL_ENDPOINT_PRIVATE", "false").lower() == "true"
 
 STORAGE_WALLET_PATH_1 = os.getenv("STORAGE_WALLET_PATH_1", f"{DEVENV_PATH}/services/storage/wallet01.json")
 STORAGE_WALLET_PATH_2 = os.getenv("STORAGE_WALLET_PATH_2", f"{DEVENV_PATH}/services/storage/wallet02.json")
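Not part of the diff: how the new flag is intended to be toggled from the environment; the value shown is illustrative, and it defaults to false.

```python
import os

# Illustrative: export this before running the tests when the control endpoint
# is only reachable from the storage node host itself.
os.environ["STORAGE_CONTROL_ENDPOINT_PRIVATE"] = "true"

# common.py then parses it as a boolean, exactly as in the diff above:
private = os.getenv("STORAGE_CONTROL_ENDPOINT_PRIVATE", "false").lower() == "true"
assert private is True
```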
@@ -95,14 +96,20 @@ IR_WALLET_PASS = os.getenv("IR_WALLET_PASS", "one")
 S3_GATE_WALLET_PATH = os.getenv("S3_GATE_WALLET_PATH", f"{DEVENV_PATH}/services/s3_gate/wallet.json")
 S3_GATE_WALLET_PASS = os.getenv("S3_GATE_WALLET_PASS", "s3")
 
-STORAGE_NODE_USER = os.getenv("STORAGE_NODE_USER", "root")
-STORAGE_NODE_PWD = os.getenv("STORAGE_NODE_PWD")
-STORAGE_NODE_BIN_PATH = os.getenv("STORAGE_NODE_BIN_PATH", f"{DEVENV_PATH}/vendor/neofs-cli")
+# Parameters that control SSH connection to storage node
+STORAGE_NODE_SSH_USER = os.getenv("STORAGE_NODE_SSH_USER")
+STORAGE_NODE_SSH_PASSWORD = os.getenv("STORAGE_NODE_SSH_PASSWORD")
+STORAGE_NODE_SSH_PRIVATE_KEY_PATH = os.getenv("STORAGE_NODE_SSH_PRIVATE_KEY_PATH")
+
+# Parameters that control resources located on the storage node:
+# STORAGE_NODE_BIN_PATH - path to directory that contains CLI tools
+# STORAGE_NODE_CONFIG_PATH - configuration file for neofs-cli on the storage node
+STORAGE_NODE_BIN_PATH = os.getenv("STORAGE_NODE_BIN_PATH", f"{DEVENV_PATH}/vendor")
 STORAGE_NODE_CONFIG_PATH = os.getenv("STORAGE_NODE_CONFIG_PATH", f"{DEVENV_PATH}/services/storage/cli-cfg.yml")
-STORAGE_NODE_PRIVATE_CONTROL_ENDPOINT = os.getenv("STORAGE_NODE_PRIVATE_CONTROL_ENDPOINT", "localhost:8091")
+STORAGE_NODE_WALLET_PATH = os.getenv("STORAGE_NODE_WALLET_PATH")  # allows to override path to wallet that was given in STORAGE_NODE_CONFIG_PATH, this is temp parameter, should be fixed in environment setup
 
 # Path and config for neofs-adm utility. Optional if tests are running against devenv
 NEOFS_ADM_EXEC = os.getenv("NEOFS_ADM_EXEC")
 NEOFS_ADM_CONFIG_PATH = os.getenv("NEOFS_ADM_CONFIG_PATH")
 
-FREE_STORAGE = os.getenv('FREE_STORAGE', "false").lower() == "true"
+FREE_STORAGE = os.getenv("FREE_STORAGE", "false").lower() == "true"