forked from TrueCloudLab/frostfs-testcases
Fix node cleanup step
The intention of the test was not to delete the node entirely, but only to erase its data.

Signed-off-by: Vladimir Domnich <v.domnich@yadro.com>
commit c131bb04ba (parent d6861f4f62)
3 changed files with 41 additions and 21 deletions
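The rename runs through all three storage-service helpers: destroy_node, which removed the node's container or service, becomes delete_node_data, which only erases the node's storage contents. A minimal sketch of the shared shape the helpers converge on; the abstract base class is purely illustrative, since the real helpers are independent classes returned by get_storage_service_helper():

from abc import ABC, abstractmethod


class StorageServiceHelper(ABC):
    """Illustrative interface only; the repo picks a concrete helper at runtime."""

    @abstractmethod
    def delete_node_data(self, node_name: str) -> None:
        """Erase the node's stored data while leaving the node itself in place."""

    @abstractmethod
    def get_binaries_version(self) -> dict:
        """Return a mapping of binary name to version (may be empty on devenv)."""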
@@ -51,10 +51,14 @@ class LocalDevEnvStorageServiceHelper:
         output = _cmd_run(cmd)
         return output
 
-    def destroy_node(self, node_name: str) -> None:
-        container_name = _get_storage_container_name(node_name)
+    def delete_node_data(self, node_name: str) -> None:
+        volume_name = _get_storage_volume_name(node_name)
 
         client = docker.APIClient()
-        client.remove_container(container_name, force=True)
+        volume_info = client.inspect_volume(volume_name)
+        volume_path = volume_info["Mountpoint"]
+
+        _cmd_run(f"rm -rf {volume_path}/*")
 
     def get_binaries_version(self) -> dict:
         return {}
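For the local devenv the new cleanup finds the node's Docker volume, reads its host mountpoint, and clears the contents instead of removing the container. A standalone sketch of the same idea using only the Docker SDK; the volume name and the shell call are illustrative:

import subprocess

import docker


def wipe_docker_volume(volume_name: str) -> None:
    client = docker.APIClient()
    # inspect_volume() returns a dict whose "Mountpoint" is the host path of the data.
    mountpoint = client.inspect_volume(volume_name)["Mountpoint"]
    # Delete the volume's contents but keep the volume and its container,
    # so the node can be restarted with empty storage.
    subprocess.run(f"rm -rf {mountpoint}/*", shell=True, check=True)


# Hypothetical volume name following the devenv convention:
# wipe_docker_volume("storage_storage_s01")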
@@ -121,9 +125,8 @@ class CloudVmStorageServiceHelper:
             output = ssh_client.exec_with_confirmation(cmd, [""])
             return output.stdout
 
-    def destroy_node(self, node_name: str) -> None:
+    def delete_node_data(self, node_name: str) -> None:
         with _create_ssh_client(node_name) as ssh_client:
             ssh_client.exec(f'systemctl stop {self.STORAGE_SERVICE}')
             ssh_client.exec('rm -rf /srv/neofs/*')
 
     def get_binaries_version(self, binaries: list = None) -> dict:
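On a cloud VM the same step stops the storage service and clears its data directory over SSH. A self-contained sketch with paramiko in place of the repo's HostClient; the host, credentials, and service unit name are placeholders, and root access on the VM is assumed:

import paramiko


def delete_node_data_over_ssh(host: str, user: str, key_path: str,
                              storage_service: str = "neofs-storage.service") -> None:
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(hostname=host, username=user, key_filename=key_path)
    try:
        # Stop the storage service first so no process holds the data files open.
        client.exec_command(f"systemctl stop {storage_service}")
        # Erase the node's data; the VM and the installed service stay intact.
        client.exec_command("rm -rf /srv/neofs/*")
    finally:
        client.close()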
@@ -161,6 +164,7 @@ class CloudVmStorageServiceHelper:
                 f'(mismatch on node {node_name})'
         return version_map
 
+
 class RemoteDevEnvStorageServiceHelper:
     """
     Manages storage services running on remote devenv.
@@ -189,17 +193,21 @@ class RemoteDevEnvStorageServiceHelper:
         # On remote devenv it works same way as in cloud
         return CloudVmStorageServiceHelper().run_control_command(node_name, command)
 
-    def destroy_node(self, node_name: str) -> None:
-        container_name = _get_storage_container_name(node_name)
+    def delete_node_data(self, node_name: str) -> None:
+        volume_name = _get_storage_volume_name(node_name)
         with _create_ssh_client(node_name) as ssh_client:
-            ssh_client.exec(f'docker rm {container_name} --force')
+            volume_info_raw = ssh_client.exec(f'docker volume inspect {volume_name}').stdout
+            volume_info = json.loads(volume_info_raw)
+            volume_path = volume_info[0]["Mountpoint"]
+
+            ssh_client.exec(f'rm -rf {volume_path}/*')
 
     def get_binaries_version(self) -> dict:
        return {}
 
     def _get_container_by_name(self, node_name: str, container_name: str) -> dict:
         with _create_ssh_client(node_name) as ssh_client:
-            output = ssh_client.exec('docker ps -a --format "{{json .}}"')
+            output = ssh_client.exec('docker ps -a --format "{{json .}}"').stdout
             containers = json.loads(output)
 
             for container in containers:
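On the remote devenv the mountpoint comes from `docker volume inspect`, which prints a JSON array with one entry per volume. A sketch of that parsing; run_ssh is a placeholder for the HostClient used in the diff:

import json
import subprocess


def run_ssh(host: str, command: str) -> str:
    # Placeholder: run a command on the remote host and return its stdout.
    return subprocess.run(["ssh", host, command],
                          capture_output=True, text=True, check=True).stdout


def wipe_remote_volume(host: str, volume_name: str) -> None:
    raw = run_ssh(host, f"docker volume inspect {volume_name}")
    mountpoint = json.loads(raw)[0]["Mountpoint"]
    # Clear the volume's contents; the container and the volume itself survive.
    run_ssh(host, f"rm -rf {mountpoint}/*")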
@@ -244,7 +252,16 @@ def _create_ssh_client(node_name: str) -> HostClient:
 
 def _get_storage_container_name(node_name: str) -> str:
     """
-    Converts name of storage name (as it is listed in netmap) into the name of docker container
+    Converts name of storage node (as it is listed in netmap) into the name of docker container
     that runs instance of this storage node.
     """
     return node_name.split('.')[0]
+
+
+def _get_storage_volume_name(node_name: str) -> str:
+    """
+    Converts name of storage node (as it is listed in netmap) into the name of docker volume
+    that contains data of this storage node.
+    """
+    container_name = _get_storage_container_name(node_name)
+    return f"storage_storage_{container_name}"
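A quick usage example for the two naming helpers; the node name is hypothetical, in the `<container>.<domain>` form the code expects:

node_name = "s01.neofs.devenv"                        # hypothetical netmap name

container_name = node_name.split('.')[0]              # -> "s01"
volume_name = f"storage_storage_{container_name}"     # -> "storage_storage_s01"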
@@ -11,9 +11,12 @@ from epoch import tick_epoch
 from python_keywords.container import create_container, get_container
 from python_keywords.failover_utils import wait_object_replication_on_nodes
 from python_keywords.neofs_verbs import delete_object, get_object, head_object, put_object
-from python_keywords.node_management import (check_node_in_map, delete_node, drop_object, exclude_node_from_network_map, get_netmap_snapshot, get_locode, include_node_to_network_map,
+from python_keywords.node_management import (check_node_in_map, delete_node_data, drop_object,
+                                             exclude_node_from_network_map, get_netmap_snapshot,
+                                             get_locode, include_node_to_network_map,
                                              node_healthcheck, node_set_status,
-                                             node_shard_list, node_shard_set_mode, start_nodes, stop_nodes)
+                                             node_shard_list, node_shard_set_mode,
+                                             start_nodes, stop_nodes)
 from service_helper import get_storage_service_helper
 from storage_policy import get_nodes_with_object, get_simple_object_copies
 from utility import (placement_policy_from_container, robot_time_to_int,
@@ -77,7 +80,7 @@ def return_nodes(alive_node: str = None):
             helper.wait_for_node_to_start(node)
 
         with allure.step(f'Move node {node} to online state'):
-            node_set_status(node, status='online', retry=True)
+            node_set_status(node, status='online', retries=2)
 
         check_nodes.remove(node)
         sleep(robot_time_to_int(MAINNET_BLOCK_TIME))
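The boolean retry flag becomes an explicit retry count. The node_set_status implementation is not part of this diff; a generic sketch of how such a retries parameter is commonly handled:

import time


def call_with_retries(action, retries: int = 2, delay: float = 5.0) -> None:
    # One initial attempt plus up to `retries` repeats on failure.
    for attempt in range(retries + 1):
        try:
            action()
            return
        except Exception:
            if attempt == retries:
                raise
            time.sleep(delay)


# e.g. call_with_retries(lambda: node_set_status(node, status='online'), retries=2)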
@@ -108,9 +111,10 @@ def test_add_nodes(prepare_tmp_dir, prepare_wallet_and_deposit, return_nodes_aft
 
     check_node_in_map(additional_node, alive_node)
 
     with allure.step(f'Exclude node {additional_node} from map and clean it up'):
-        delete_node(additional_node, alive_node)
+        # Add node to recovery list before messing with it
         check_nodes.append(additional_node)
+        exclude_node_from_network_map(additional_node, alive_node)
+        delete_node_data(additional_node)
 
     cid = create_container(wallet, rule=placement_rule_3, basic_acl=PUBLIC_ACL)
     oid = put_object(wallet, source_file_path, cid, endpoint=NEOFS_NETMAP_DICT[alive_node].get('rpc'))
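The test now registers the node for recovery before any destructive step and then splits the old delete_node call into an explicit netmap exclusion followed by a data wipe. A simplified sketch of that ordering; only the imported calls come from the diff, the wrapper itself is illustrative:

from python_keywords.node_management import delete_node_data, exclude_node_from_network_map


def exclude_and_wipe(additional_node: str, alive_node: str, check_nodes: list) -> None:
    # Register the node first so the restore fixture can bring it back
    # even if one of the following calls fails.
    check_nodes.append(additional_node)
    # Take the node out of the network map, but do not destroy it.
    exclude_node_from_network_map(additional_node, alive_node)
    # Erase its data so it rejoins the network in a clean state.
    delete_node_data(additional_node)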
@@ -188,11 +188,10 @@ def drop_object(node_name: str, cid: str, oid: str) -> str:
     return _run_control_command(node_name, command)
 
 
-def delete_node(node_name: str, alive_node: str) -> None:
-    exclude_node_from_network_map(node_name, alive_node)
-
+@keyword('Delete data of node {node_name}')
+def delete_node_data(node_name: str) -> None:
     helper = get_storage_service_helper()
-    helper.destroy_node(node_name)
+    helper.delete_node_data(node_name)
     time.sleep(robot_time_to_int(MAINNET_BLOCK_TIME))
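With the @keyword decorator the new function doubles as a Robot Framework keyword with an embedded argument, while Python tests import it directly. A usage sketch (the node name is hypothetical):

# Python usage, as in the updated test module:
from python_keywords.node_management import delete_node_data

delete_node_data("s01.neofs.devenv")

# Robot Framework usage (embedded-argument keyword from the decorator):
#     Delete data of node s01.neofs.devenv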