diff --git a/pytest_tests/helpers/service_helper.py b/pytest_tests/helpers/service_helper.py
index 38a226ab..ca27ed87 100644
--- a/pytest_tests/helpers/service_helper.py
+++ b/pytest_tests/helpers/service_helper.py
@@ -51,11 +51,15 @@ class LocalDevEnvStorageServiceHelper:
         output = _cmd_run(cmd)
         return output

-    def destroy_node(self, node_name: str) -> None:
-        container_name = _get_storage_container_name(node_name)
+    def delete_node_data(self, node_name: str) -> None:
+        volume_name = _get_storage_volume_name(node_name)
+
         client = docker.APIClient()
-        client.remove_container(container_name, force=True)
-
+        volume_info = client.inspect_volume(volume_name)
+        volume_path = volume_info["Mountpoint"]
+
+        _cmd_run(f"rm -rf {volume_path}/*")
+
     def get_binaries_version(self) -> dict:
         return {}

@@ -121,9 +125,8 @@ class CloudVmStorageServiceHelper:
             output = ssh_client.exec_with_confirmation(cmd, [""])
             return output.stdout

-    def destroy_node(self, node_name: str) -> None:
+    def delete_node_data(self, node_name: str) -> None:
         with _create_ssh_client(node_name) as ssh_client:
-            ssh_client.exec(f'systemctl stop {self.STORAGE_SERVICE}')
             ssh_client.exec('rm -rf /srv/neofs/*')

     def get_binaries_version(self, binaries: list = None) -> dict:
@@ -161,6 +164,7 @@ class CloudVmStorageServiceHelper:
                 f'(mismatch on node {node_name})'
         return version_map

+
 class RemoteDevEnvStorageServiceHelper:
     """
     Manages storage services running on remote devenv.
@@ -189,17 +193,21 @@ class RemoteDevEnvStorageServiceHelper:
         # On remote devenv it works same way as in cloud
         return CloudVmStorageServiceHelper().run_control_command(node_name, command)

-    def destroy_node(self, node_name: str) -> None:
-        container_name = _get_storage_container_name(node_name)
+    def delete_node_data(self, node_name: str) -> None:
+        volume_name = _get_storage_volume_name(node_name)
         with _create_ssh_client(node_name) as ssh_client:
-            ssh_client.exec(f'docker rm {container_name} --force')
+            volume_info_raw = ssh_client.exec(f'docker volume inspect {volume_name}').stdout
+            volume_info = json.loads(volume_info_raw)
+            volume_path = volume_info[0]["Mountpoint"]
+
+            ssh_client.exec(f'rm -rf {volume_path}/*')

     def get_binaries_version(self) -> dict:
         return {}

     def _get_container_by_name(self, node_name: str, container_name: str) -> dict:
         with _create_ssh_client(node_name) as ssh_client:
-            output = ssh_client.exec('docker ps -a --format "{{json .}}"')
+            output = ssh_client.exec('docker ps -a --format "{{json .}}"').stdout
             containers = json.loads(output)

             for container in containers:
@@ -244,7 +252,16 @@ def _create_ssh_client(node_name: str) -> HostClient:

 def _get_storage_container_name(node_name: str) -> str:
     """
-    Converts name of storage name (as it is listed in netmap) into the name of docker container
+    Converts name of storage node (as it is listed in netmap) into the name of docker container
     that runs instance of this storage node.
     """
     return node_name.split('.')[0]
+
+
+def _get_storage_volume_name(node_name: str) -> str:
+    """
+    Converts name of storage node (as it is listed in netmap) into the name of docker volume
+    that contains data of this storage node.
+    """
+    container_name = _get_storage_container_name(node_name)
+    return f"storage_storage_{container_name}"
diff --git a/pytest_tests/testsuites/network/test_node_management.py b/pytest_tests/testsuites/network/test_node_management.py
index 628fab0d..97d13c1b 100644
--- a/pytest_tests/testsuites/network/test_node_management.py
+++ b/pytest_tests/testsuites/network/test_node_management.py
@@ -11,9 +11,12 @@ from epoch import tick_epoch
 from python_keywords.container import create_container, get_container
 from python_keywords.failover_utils import wait_object_replication_on_nodes
 from python_keywords.neofs_verbs import delete_object, get_object, head_object, put_object
-from python_keywords.node_management import (check_node_in_map, delete_node, drop_object, exclude_node_from_network_map, get_netmap_snapshot, get_locode, include_node_to_network_map,
+from python_keywords.node_management import (check_node_in_map, delete_node_data, drop_object,
+                                             exclude_node_from_network_map, get_netmap_snapshot,
+                                             get_locode, include_node_to_network_map,
                                              node_healthcheck, node_set_status,
-                                             node_shard_list, node_shard_set_mode, start_nodes, stop_nodes)
+                                             node_shard_list, node_shard_set_mode,
+                                             start_nodes, stop_nodes)
 from service_helper import get_storage_service_helper
 from storage_policy import get_nodes_with_object, get_simple_object_copies
 from utility import (placement_policy_from_container, robot_time_to_int,
@@ -77,7 +80,7 @@ def return_nodes(alive_node: str = None):
             helper.wait_for_node_to_start(node)

         with allure.step(f'Move node {node} to online state'):
-            node_set_status(node, status='online', retry=True)
+            node_set_status(node, status='online', retries=2)

         check_nodes.remove(node)
         sleep(robot_time_to_int(MAINNET_BLOCK_TIME))
@@ -108,9 +111,10 @@ def test_add_nodes(prepare_tmp_dir, prepare_wallet_and_deposit, return_nodes_aft

     check_node_in_map(additional_node, alive_node)

-    with allure.step(f'Exclude node {additional_node} from map and clean it up'):
-        delete_node(additional_node, alive_node)
-        check_nodes.append(additional_node)
+    # Add node to recovery list before messing with it
+    check_nodes.append(additional_node)
+    exclude_node_from_network_map(additional_node, alive_node)
+    delete_node_data(additional_node)

     cid = create_container(wallet, rule=placement_rule_3, basic_acl=PUBLIC_ACL)
     oid = put_object(wallet, source_file_path, cid, endpoint=NEOFS_NETMAP_DICT[alive_node].get('rpc'))
diff --git a/robot/resources/lib/python_keywords/node_management.py b/robot/resources/lib/python_keywords/node_management.py
index c0ffa2d3..92654ff7 100644
--- a/robot/resources/lib/python_keywords/node_management.py
+++ b/robot/resources/lib/python_keywords/node_management.py
@@ -188,11 +188,10 @@ def drop_object(node_name: str, cid: str, oid: str) -> str:
     return _run_control_command(node_name, command)


-def delete_node(node_name: str, alive_node: str) -> None:
-    exclude_node_from_network_map(node_name, alive_node)
-
+@keyword('Delete data of node {node_name}')
+def delete_node_data(node_name: str) -> None:
     helper = get_storage_service_helper()
-    helper.destroy_node(node_name)
+    helper.delete_node_data(node_name)
     time.sleep(robot_time_to_int(MAINNET_BLOCK_TIME))
