Add tests that start or stop services on remote vm
Signed-off-by: Vladimir Domnich <v.domnich@yadro.com>
parent f97bfed183
commit 91197335ba
6 changed files with 175 additions and 140 deletions
pytest_tests/helpers/service_helper.py (new file, 131 lines)

@@ -0,0 +1,131 @@
from contextlib import contextmanager
import logging

import docker

from cli_helpers import _cmd_run
from common import (INFRASTRUCTURE_TYPE, NEOFS_CLI_EXEC, NEOFS_NETMAP_DICT, STORAGE_NODE_BIN_PATH,
                    STORAGE_NODE_SSH_PASSWORD, STORAGE_NODE_SSH_PRIVATE_KEY_PATH,
                    STORAGE_NODE_SSH_USER, WALLET_CONFIG)
from ssh_helper import HostClient


logger = logging.getLogger('NeoLogger')


class LocalDevEnvStorageServiceHelper:
    """
    Manages storage services running on local devenv.
    """
    def stop_node(self, node_name: str) -> None:
        container_name = node_name.split('.')[0]
        client = docker.APIClient()
        client.stop(container_name)

    def start_node(self, node_name: str) -> None:
        container_name = node_name.split('.')[0]
        client = docker.APIClient()
        client.start(container_name)

    def run_control_command(self, node_name: str, command: str) -> str:
        control_endpoint = NEOFS_NETMAP_DICT[node_name]["control"]
        wallet_path = NEOFS_NETMAP_DICT[node_name]["wallet_path"]

        cmd = (
            f'{NEOFS_CLI_EXEC} {command} --endpoint {control_endpoint} '
            f'--wallet {wallet_path} --config {WALLET_CONFIG}'
        )
        output = _cmd_run(cmd)
        return output


class CloudVmStorageServiceHelper:
    def stop_node(self, node_name: str) -> None:
        with _create_ssh_client(node_name) as ssh_client:
            cmd = "systemctl stop neofs-storage"
            output = ssh_client.exec_with_confirmation(cmd, [""])
            logger.info(f"Stop command output: {output.stdout}")

    def start_node(self, node_name: str) -> None:
        with _create_ssh_client(node_name) as ssh_client:
            cmd = "systemctl start neofs-storage"
            output = ssh_client.exec_with_confirmation(cmd, [""])
            logger.info(f"Start command output: {output.stdout}")

    def run_control_command(self, node_name: str, command: str) -> str:
        control_endpoint = NEOFS_NETMAP_DICT[node_name]["control"]
        wallet_path = NEOFS_NETMAP_DICT[node_name]["wallet_path"]

        # Private control endpoint is accessible only from the host where storage node is running
        # So, we connect to storage node host and run CLI command from there
        with _create_ssh_client(node_name) as ssh_client:
            # Copy wallet content on storage node host
            with open(wallet_path, "r") as file:
                wallet = file.read()
remote_wallet_path = "/tmp/{node_name}-wallet.json"
            ssh_client.exec_with_confirmation(f"echo '{wallet}' > {remote_wallet_path}", [""])

            # Put config on storage node host
remote_config_path = "/tmp/{node_name}-config.yaml"
            remote_config = 'password: ""'
            ssh_client.exec_with_confirmation(f"echo '{remote_config}' > {remote_config_path}", [""])

            # Execute command
            cmd = (
                f'{STORAGE_NODE_BIN_PATH}/neofs-cli {command} --endpoint {control_endpoint} '
                f'--wallet {remote_wallet_path} --config {remote_config_path}'
            )
            output = ssh_client.exec_with_confirmation(cmd, [""])
            return output.stdout


class RemoteDevEnvStorageServiceHelper:
    """
    Manages storage services running on remote devenv.
    """
    def stop_node(self, node_name: str) -> None:
        container_name = node_name.split('.')[0]
        with _create_ssh_client(node_name) as ssh_client:
            ssh_client.exec(f'docker stop {container_name}')

    def start_node(self, node_name: str) -> None:
        container_name = node_name.split('.')[0]
        with _create_ssh_client(node_name) as ssh_client:
            ssh_client.exec(f'docker start {container_name}')

    def run_control_command(self, node_name: str, command: str) -> str:
        # On remote devenv it works same way as in cloud
        return CloudVmStorageServiceHelper().run_control_command(node_name, command)


def get_storage_service_helper():
    if INFRASTRUCTURE_TYPE == "LOCAL_DEVENV":
        return LocalDevEnvStorageServiceHelper()
    if INFRASTRUCTURE_TYPE == "REMOTE_DEVENV":
        return RemoteDevEnvStorageServiceHelper()
    if INFRASTRUCTURE_TYPE == "CLOUD_VM":
        return CloudVmStorageServiceHelper()

    raise EnvironmentError(f"Infrastructure type is not supported: {INFRASTRUCTURE_TYPE}")


@contextmanager
def _create_ssh_client(node_name: str) -> HostClient:
    if node_name not in NEOFS_NETMAP_DICT:
        raise AssertionError(f'Node {node_name} is not found!')

    # We use rpc endpoint to determine host address, because control endpoint
    # (if it is private) will be a local address on the host machine
    node_config = NEOFS_NETMAP_DICT.get(node_name)
    host = node_config.get('rpc').split(':')[0]
    ssh_client = HostClient(
        host,
        login=STORAGE_NODE_SSH_USER,
        password=STORAGE_NODE_SSH_PASSWORD,
        private_key_path=STORAGE_NODE_SSH_PRIVATE_KEY_PATH,
    )

    try:
        yield ssh_client
    finally:
        ssh_client.drop()
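
For orientation, a minimal usage sketch of the new helper API introduced above (not part of the diff); the node name is hypothetical, real names come from the keys of NEOFS_NETMAP_DICT:

    from service_helper import get_storage_service_helper

    helper = get_storage_service_helper()   # picks the helper matching INFRASTRUCTURE_TYPE
    helper.stop_node('s01.neofs.devenv')    # hypothetical node name
    helper.start_node('s01.neofs.devenv')
    print(helper.run_control_command('s01.neofs.devenv', 'control healthcheck'))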

@@ -9,10 +9,10 @@ from robot.api import deco
import wallet
from cli_helpers import _cmd_run
from common import ASSETS_DIR, FREE_STORAGE, MAINNET_WALLET_PATH, NEOFS_NETMAP_DICT
from common import (ASSETS_DIR, FREE_STORAGE, INFRASTRUCTURE_TYPE, MAINNET_WALLET_PATH,
                    NEOFS_NETMAP_DICT)
from payment_neogo import neofs_deposit, transfer_mainnet_gas
from python_keywords.node_management import node_healthcheck, create_ssh_client
from sbercloud_helper import SberCloudConfig


def robot_keyword_adapter(name=None, tags=(), types=()):

@@ -32,8 +32,7 @@ def cloud_infrastructure_check():

def is_cloud_infrastructure():
    cloud_config = SberCloudConfig.from_env()
    return cloud_config.project_id is not None
    return INFRASTRUCTURE_TYPE == "CLOUD_VM"


@pytest.fixture(scope='session', autouse=True)

@@ -118,22 +117,16 @@ def prepare_tmp_dir():
    shutil.rmtree(full_path)


@pytest.fixture(scope='session')
@allure.title('Init wallet with address')
def init_wallet_with_address(prepare_tmp_dir):
    yield wallet.init_wallet(ASSETS_DIR)


@pytest.fixture(scope='session')
@allure.title('Prepare wallet and deposit')
def prepare_wallet_and_deposit(init_wallet_with_address):
    wallet, addr, _ = init_wallet_with_address
    logger.info(f'Init wallet: {wallet},\naddr: {addr}')
    allure.attach.file(wallet, os.path.basename(wallet), allure.attachment_type.JSON)
def prepare_wallet_and_deposit(prepare_tmp_dir):
    wallet_path, addr, _ = wallet.init_wallet(ASSETS_DIR)
    logger.info(f'Init wallet: {wallet_path},\naddr: {addr}')
    allure.attach.file(wallet_path, os.path.basename(wallet_path), allure.attachment_type.JSON)

    if not FREE_STORAGE:
        deposit = 30
        transfer_mainnet_gas(wallet, deposit + 1, wallet_path=MAINNET_WALLET_PATH)
        neofs_deposit(wallet, deposit)
        transfer_mainnet_gas(wallet_path, deposit + 1, wallet_path=MAINNET_WALLET_PATH)
        neofs_deposit(wallet_path, deposit)

    return wallet
    return wallet_path
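
A short sketch (not part of the diff) of how a test consumes the reworked fixture, which now returns the wallet path directly instead of going through a separate init_wallet_with_address fixture; the test name and assertion are illustrative only:

    def test_example(prepare_wallet_and_deposit):
        wallet_path = prepare_wallet_and_deposit   # path to the session wallet JSON
        assert wallet_path                         # illustrative check only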

@@ -2,7 +2,6 @@ import logging
import allure
import pytest

from common import (STORAGE_NODE_SSH_PRIVATE_KEY_PATH, STORAGE_NODE_SSH_USER,
                    STORAGE_NODE_SSH_PASSWORD)
from failover_utils import wait_all_storage_node_returned, wait_object_replication_on_nodes

@@ -13,13 +13,13 @@ from python_keywords.failover_utils import wait_object_replication_on_nodes
from python_keywords.neofs_verbs import delete_object, get_object, head_object, put_object
from python_keywords.node_management import (create_ssh_client, drop_object, get_netmap_snapshot,
                                             get_locode, node_healthcheck, node_set_status,
                                             node_shard_list, node_shard_set_mode,
                                             start_nodes_remote, stop_nodes_remote)
                                             node_shard_list, node_shard_set_mode)
from storage_policy import get_nodes_with_object, get_simple_object_copies
from utility import placement_policy_from_container, robot_time_to_int, wait_for_gc_pass_on_storage_nodes
from utility_keywords import generate_file
from wellknown_acl import PUBLIC_ACL


logger = logging.getLogger('NeoLogger')
check_nodes = []

@@ -55,7 +55,7 @@ def crate_container_and_pick_node(prepare_wallet_and_deposit):
def after_run_start_all_nodes():
    yield
    try:
        start_nodes_remote(list(NEOFS_NETMAP_DICT.keys()))
        start_nodes(list(NEOFS_NETMAP_DICT.keys()))
    except Exception as err:
        logger.error(f'Node start fails with error:\n{err}')

@@ -334,11 +334,11 @@ def test_replication(prepare_wallet_and_deposit, after_run_start_all_nodes):
    assert len(nodes) == expected_nodes_count, f'Expected {expected_nodes_count} copies, got {len(nodes)}'

    node_names = [name for name, config in NEOFS_NETMAP_DICT.items() if config.get('rpc') in nodes]
    stopped_nodes = stop_nodes_remote(1, node_names)
    stopped_nodes = stop_nodes(1, node_names)

    wait_for_expected_object_copies(wallet, cid, oid)

    start_nodes_remote(stopped_nodes)
    start_nodes(stopped_nodes)
    tick_epoch()

    for node_name in node_names:

@@ -1,24 +1,18 @@
#!/usr/bin/python3.9

"""
This module contains keywords for management test stand
nodes. It assumes that nodes are docker containers.
This module contains keywords for tests that check management of storage nodes.
"""

import random
import re
from contextlib import contextmanager
from dataclasses import dataclass
from typing import Optional
from cli_helpers import _cmd_run

import docker
from common import (NEOFS_CLI_EXEC, NEOFS_NETMAP_DICT, STORAGE_CONTROL_ENDPOINT_PRIVATE,
                    STORAGE_NODE_BIN_PATH, STORAGE_NODE_SSH_PASSWORD,
                    STORAGE_NODE_SSH_PRIVATE_KEY_PATH, STORAGE_NODE_SSH_USER, WALLET_CONFIG)
from common import NEOFS_NETMAP_DICT
from robot.api import logger
from robot.api.deco import keyword
from ssh_helper import HostClient
from service_helper import get_storage_service_helper

ROBOT_AUTO_KEYWORDS = False

@@ -39,28 +33,6 @@ class HealthStatus:
        return HealthStatus(network, health)


@contextmanager
def create_ssh_client(node_name: str) -> HostClient:
    if node_name not in NEOFS_NETMAP_DICT:
        raise AssertionError(f'Node {node_name} is not found!')

    # We use rpc endpoint to determine host address, because control endpoint
    # (if it is private) will be a local address on the host machine
    node_config = NEOFS_NETMAP_DICT.get(node_name)
    host = node_config.get('rpc').split(':')[0]
    ssh_client = HostClient(
        host,
        login=STORAGE_NODE_SSH_USER,
        password=STORAGE_NODE_SSH_PASSWORD,
        private_key_path=STORAGE_NODE_SSH_PRIVATE_KEY_PATH,
    )

    try:
        yield ssh_client
    finally:
        ssh_client.drop()


@keyword('Stop Nodes')
def stop_nodes(number: int, nodes: list) -> list:
    """

@@ -72,12 +44,11 @@ def stop_nodes(number: int, nodes: list) -> list:
    Returns:
        (list): the list of nodes which have been shut down
    """
    nodes = random.sample(nodes, number)
    client = docker.APIClient()
    for node in nodes:
        node = node.split('.')[0]
        client.stop(node)
    return nodes
    helper = get_storage_service_helper()
    nodes_to_stop = random.sample(nodes, number)
    for node in nodes_to_stop:
        helper.stop_node(node)
    return nodes_to_stop


@keyword('Start Nodes')

@@ -89,10 +60,9 @@ def start_nodes(nodes: list) -> None:
    Returns:
        (void)
    """
    client = docker.APIClient()
    helper = get_storage_service_helper()
    for node in nodes:
        node = node.split('.')[0]
        client.start(node)
        helper.start_node(node)


@keyword('Get control endpoint and wallet')

@@ -131,38 +101,6 @@ def get_locode():
    return locode


@keyword('Stop Nodes Remote')
def stop_nodes_remote(number: int, nodes: list) -> list:
    """
    The function shuts down the given number of randomly
    selected nodes in docker.
    Args:
        number (int): the number of nodes to shut down
        nodes (list): the list of nodes for possible shut down
    Returns:
        (list): the list of nodes which have been shut down
    """
    nodes = random.sample(nodes, number)
    for node in nodes:
        node = node.split('.')[0]
        with create_ssh_client(node) as ssh_client:
            ssh_client.exec(f'docker stop {node}')
    return nodes


@keyword('Start Nodes Remote')
def start_nodes_remote(nodes: list) -> None:
    """
    The function starts nodes in docker.
    Args:
        nodes (list): the list of nodes for possible shut down
    """
    for node in nodes:
        node = node.split('.')[0]
        with create_ssh_client(node) as ssh_client:
            ssh_client.exec(f'docker start {node}')


@keyword('Healthcheck for node')
def node_healthcheck(node_name: str) -> HealthStatus:
    """

@@ -173,27 +111,23 @@ def node_healthcheck(node_name: str) -> HealthStatus:
        health status as HealthStatus object.
    """
    command = "control healthcheck"
    output = run_control_command(node_name, command)
    output = _run_control_command(node_name, command)
    return HealthStatus.from_stdout(output)


@keyword('Set status for node')
def node_set_status(node_name: str, status: str, retry=False) -> None:
def node_set_status(node_name: str, status: str, retries: int = 0) -> None:
    """
    The function sets particular status for given node.
    Args:
        node_name str: node name to use for netmap snapshot operation
        status str: online or offline.
        retries (optional, int): number of retry attempts if it didn't work from the first time
    Returns:
        (void)
    """
    command = f"control set-status --status {status}"
    try:
        run_control_command(node_name, command)
    except AssertionError as err:
        if not retry:
            raise AssertionError(f'Command control set-status failed with error {err}') from err
        run_control_command(node_name, command)
    _run_control_command(node_name, command, retries)


@keyword('Get netmap snapshot')

@@ -207,7 +141,7 @@ def get_netmap_snapshot(node_name: Optional[str] = None) -> str:
    """
    node_name = node_name or list(NEOFS_NETMAP_DICT)[0]
    command = "control netmap-snapshot"
    return run_control_command(node_name, command)
    return _run_control_command(node_name, command)


@keyword('Shard list for node')

@@ -220,7 +154,7 @@ def node_shard_list(node_name: str) -> list[str]:
        list of shards.
    """
    command = "control shards list"
    output = run_control_command(node_name, command)
    output = _run_control_command(node_name, command)
    return re.findall(r'Shard (.*):', output)

@@ -234,7 +168,7 @@ def node_shard_set_mode(node_name: str, shard: str, mode: str) -> str:
        health status as HealthStatus object.
    """
    command = f"control shards set-mode --id {shard} --mode {mode}"
    return run_control_command(node_name, command)
    return _run_control_command(node_name, command)


@keyword('Drop object from node {node_name}')

@@ -247,39 +181,16 @@ def drop_object(node_name: str, cid: str, oid: str) -> str:
        health status as HealthStatus object.
    """
    command = f"control drop-objects -o {cid}/{oid}"
    return run_control_command(node_name, command)
    return _run_control_command(node_name, command)


def run_control_command(node_name: str, command: str) -> str:
    control_endpoint = NEOFS_NETMAP_DICT[node_name]["control"]
    wallet_path = NEOFS_NETMAP_DICT[node_name]["wallet_path"]

    if not STORAGE_CONTROL_ENDPOINT_PRIVATE:
        cmd = (
            f'{NEOFS_CLI_EXEC} {command} --endpoint {control_endpoint} '
            f'--wallet {wallet_path} --config {WALLET_CONFIG}'
        )
        output = _cmd_run(cmd)
        return output

    # Private control endpoint is accessible only from the host where storage node is running
    # So, we connect to storage node host and run CLI command from there
    with create_ssh_client(node_name) as ssh_client:
        # Copy wallet content on storage node host
        with open(wallet_path, "r") as file:
            wallet = file.read()
        remote_wallet_path = "/tmp/{node_name}-wallet.json"
        ssh_client.exec_with_confirmation(f"echo '{wallet}' > {remote_wallet_path}", [""])

        # Put config on storage node host
        remote_config_path = "/tmp/{node_name}-config.yaml"
        remote_config = 'password: ""'
        ssh_client.exec_with_confirmation(f"echo '{remote_config}' > {remote_config_path}", [""])

        # Execute command
        cmd = (
            f'{STORAGE_NODE_BIN_PATH}/neofs-cli {command} --endpoint {control_endpoint} '
            f'--wallet {remote_wallet_path} --config {remote_config_path}'
        )
        output = ssh_client.exec_with_confirmation(cmd, [""])
        return output.stdout
def _run_control_command(node_name: str, command: str, retries: int = 0) -> str:
    helper = get_storage_service_helper()
    for attempt in range(1 + retries):  # original attempt + specified retries
        try:
            return helper.run_control_command(node_name, command)
        except AssertionError as err:
            if attempt < retries:
                logger.warn(f'Command {command} failed with error {err} and will be retried')
                continue
            raise AssertionError(f'Command {command} failed with error {err}') from err
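
For illustration, a hedged sketch (not part of the diff) of the new retries behaviour: the control command is attempted once and re-run up to the given number of extra times when it raises AssertionError; the node name below is hypothetical:

    node_set_status('s01.neofs.devenv', status='offline', retries=2)
    node_set_status('s01.neofs.devenv', status='online', retries=2)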

@@ -110,4 +110,5 @@ STORAGE_NODE_BIN_PATH = os.getenv("STORAGE_NODE_BIN_PATH", f"{DEVENV_PATH}/vendo
NEOFS_ADM_EXEC = os.getenv("NEOFS_ADM_EXEC")
NEOFS_ADM_CONFIG_PATH = os.getenv("NEOFS_ADM_CONFIG_PATH")

INFRASTRUCTURE_TYPE = os.getenv("INFRASTRUCTURE_TYPE", "LOCAL_DEVENV")
FREE_STORAGE = os.getenv("FREE_STORAGE", "false").lower() == "true"