[#193] Refactoring old functions for FrostfsCli

Refactoring old functions for FrostfsCli

    Signed-off-by: Mikhail Kadilov <m.kadilov@yadro.com>
Mikhail Kadilov 2024-02-11 03:44:48 +03:00
parent e5058d38ca
commit f746b1fcd8
6 changed files with 78 additions and 83 deletions
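
The change moves the node-management helpers behind a single NodeManagement facade: each test file drops its per-function imports from frostfs_testlib.steps.node_management and routes the same calls through an instance of the new class. Below is a minimal sketch of that call-site migration, under the assumption that the methods keep the old free-function signatures; ExampleTest and drop_and_check are hypothetical names used only for illustration, while the import path and the drop_object(...) call mirror the diffs that follow.

# Sketch of the migration pattern applied throughout this commit; the
# NodeManagement class itself lives in frostfs-testlib and is not shown here.
from frostfs_testlib.steps.node_management import NodeManagement


class ExampleTest:  # hypothetical test class, for illustration only
    def drop_and_check(self, storage_node, cid: str, oid: str) -> None:
        # Before: drop_object(storage_node, cid=cid, oid=oid), imported as a free function.
        # After: the same helper is called as a method on a NodeManagement instance.
        node_management = NodeManagement()
        node_management.drop_object(storage_node, cid=cid, oid=oid)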

View File

@ -5,7 +5,7 @@ from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.acl import create_eacl, set_eacl, wait_for_cache_expired
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import put_object_to_random_node
from frostfs_testlib.steps.node_management import drop_object
from frostfs_testlib.steps.node_management import NodeManagement
from frostfs_testlib.storage.dataclasses.acl import EACLAccess, EACLOperation, EACLRole, EACLRule
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.failover_utils import wait_object_replication
@ -215,7 +215,8 @@ class TestEACLContainer(ClusterTestBase):
wait_for_cache_expired()
with reporter.step("Drop object to check replication"):
drop_object(storage_node, cid=cid, oid=oid)
node_management = NodeManagement()
node_management.drop_object(storage_node, cid=cid, oid=oid)
storage_wallet_path = storage_node.get_wallet_path()
with reporter.step("Wait for dropped object replicated"):
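
TestEACLContainer above creates the facade inline, inside the step that needs it. The test classes in the remaining files instead keep a single shared instance as a class attribute; a sketch of that style is below (ExampleClusterTest is a hypothetical stand-in for TestPolicy, TestFailoverServer and the others, and the wait_node_in_map body mirrors the diff). Since NodeManagement() is constructed without arguments at every call site in this commit, the inline and class-attribute styles appear interchangeable.

from frostfs_testlib.steps.node_management import NodeManagement


class ExampleClusterTest:  # hypothetical stand-in for the real test classes
    # One shared facade instance per test class, created at class-definition time.
    node_management = NodeManagement()

    def wait_node_in_map(self, *args, **kwargs):
        # Helpers are reached through self, e.g. self.node_management.check_node_in_map(...).
        self.node_management.check_node_in_map(*args, **kwargs)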

View File

@ -4,7 +4,7 @@ from frostfs_testlib import reporter
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.cli.container import create_container, delete_container, get_container
from frostfs_testlib.steps.cli.object import delete_object, put_object_to_random_node
from frostfs_testlib.steps.node_management import get_netmap_snapshot
from frostfs_testlib.steps.node_management import NodeManagement
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager
@ -28,9 +28,11 @@ from pytest_tests.resources.policy_error_patterns import (
@pytest.mark.container
@pytest.mark.policy
class TestPolicy(ClusterTestBase):
node_management = NodeManagement()
@wait_for_success(1200, 60, title="Wait for full field price on node", expected_result=True)
def await_for_price_attribute_on_nodes(self):
netmap = parse_netmap_output(get_netmap_snapshot(node=self.cluster.storage_nodes[0], shell=self.shell))
netmap = parse_netmap_output(self.node_management.get_netmap_snapshot(node=self.cluster.storage_nodes[0], shell=self.shell))
netmap = self.get_netmap_param(netmap)
for node in self.cluster.storage_nodes:
node_address = node.get_rpc_endpoint().split(":")[0]
@ -229,7 +231,7 @@ class TestPolicy(ClusterTestBase):
), f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = parse_netmap_output(self.node_management.get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = self.get_netmap_param(netmap)
node_address = resulting_copies[0].get_rpc_endpoint().split(":")[0]
with reporter.step(f"Check the node is selected from {placement_params['country']}"):
@ -276,7 +278,7 @@ class TestPolicy(ClusterTestBase):
), f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = parse_netmap_output(self.node_management.get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = self.get_netmap_param(netmap)
node_address = resulting_copies[0].get_rpc_endpoint().split(":")[0]
with reporter.step(f"Check the node is selected from {placement_params['country'][0]}"):
@ -326,7 +328,7 @@ class TestPolicy(ClusterTestBase):
), f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = parse_netmap_output(self.node_management.get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = self.get_netmap_param(netmap)
with reporter.step(f"Check two nodes are selected from any country"):
for node in resulting_copies:
@ -376,7 +378,7 @@ class TestPolicy(ClusterTestBase):
), f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = parse_netmap_output(self.node_management.get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = self.get_netmap_param(netmap)
with reporter.step(f"Check two nodes are selected from {placement_params['country']}"):
for node in resulting_copies:
@ -499,7 +501,7 @@ class TestPolicy(ClusterTestBase):
), f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = parse_netmap_output(self.node_management.get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = self.get_netmap_param(netmap)
node_address = resulting_copies[0].get_rpc_endpoint().split(":")[0]
with reporter.step(f"Check the node is selected with price <= {placement_params['Price']}"):
@ -547,7 +549,7 @@ class TestPolicy(ClusterTestBase):
), f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = parse_netmap_output(self.node_management.get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = self.get_netmap_param(netmap)
with reporter.step(f"Check the node is selected with price between 1 and 10"):
for node in resulting_copies:
@ -597,7 +599,7 @@ class TestPolicy(ClusterTestBase):
), f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = parse_netmap_output(self.node_management.get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = self.get_netmap_param(netmap)
with reporter.step(f"Check two nodes are selected with max and min prices"):
for node in resulting_copies:
@ -721,7 +723,7 @@ class TestPolicy(ClusterTestBase):
), f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = parse_netmap_output(self.node_management.get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = self.get_netmap_param(netmap)
with reporter.step(f"Check two nodes are selected with price > {placement_params['Price']}"):
for node in resulting_copies:
@ -770,7 +772,7 @@ class TestPolicy(ClusterTestBase):
), f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = parse_netmap_output(self.node_management.get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = self.get_netmap_param(netmap)
with reporter.step(
f"Check two nodes are selected not with country code '{placement_params['country_code']}'"
@ -823,7 +825,7 @@ class TestPolicy(ClusterTestBase):
), f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = parse_netmap_output(self.node_management.get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = self.get_netmap_param(netmap)
with reporter.step(f"Check all nodes are selected"):
for node in resulting_copies:
@ -955,7 +957,7 @@ class TestPolicy(ClusterTestBase):
), f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = parse_netmap_output(self.node_management.get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = self.get_netmap_param(netmap)
with reporter.step(f"Check two nodes are selected with price < {placement_params['Price']}"):
for node in resulting_copies:
@ -1004,7 +1006,7 @@ class TestPolicy(ClusterTestBase):
), f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = parse_netmap_output(self.node_management.get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = self.get_netmap_param(netmap)
with reporter.step(f"Check three nodes are selected not from {placement_params['continent']}"):
for node in resulting_copies:
@ -1056,7 +1058,7 @@ class TestPolicy(ClusterTestBase):
), f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = parse_netmap_output(self.node_management.get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = self.get_netmap_param(netmap)
with reporter.step(f"Check all nodes are selected"):
for node in resulting_copies:
@ -1187,7 +1189,7 @@ class TestPolicy(ClusterTestBase):
), f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = parse_netmap_output(self.node_management.get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = self.get_netmap_param(netmap)
node_address = resulting_copies[0].get_rpc_endpoint().split(":")[0]
with reporter.step(f"Check the node is selected with price >= {placement_params['Price']}"):
@ -1234,7 +1236,7 @@ class TestPolicy(ClusterTestBase):
), f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = parse_netmap_output(self.node_management.get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = self.get_netmap_param(netmap)
with reporter.step(f"Check two nodes are selected from {' or '.join(placement_params['country'])}"):
for node in resulting_copies:
@ -1285,7 +1287,7 @@ class TestPolicy(ClusterTestBase):
), f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = parse_netmap_output(self.node_management.get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = self.get_netmap_param(netmap)
with reporter.step(f"Check all node are selected"):
for node in resulting_copies:
@ -1407,7 +1409,7 @@ class TestPolicy(ClusterTestBase):
), f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = parse_netmap_output(self.node_management.get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = self.get_netmap_param(netmap)
with reporter.step(f"Check two nodes are selected not from {placement_params['country'][0]}"):
for node in resulting_copies:
@ -1457,7 +1459,7 @@ class TestPolicy(ClusterTestBase):
), f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = parse_netmap_output(self.node_management.get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = self.get_netmap_param(netmap)
with reporter.step(f"Check three nodes are selected from any country"):
for node in resulting_copies:
@ -1507,7 +1509,7 @@ class TestPolicy(ClusterTestBase):
), f"Expected {expected_copies} or {expected_copies + 1} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = parse_netmap_output(self.node_management.get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = self.get_netmap_param(netmap)
list_of_location = []
for node in resulting_copies:
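
Every policy test above repeats the same two-step read: take a netmap snapshot through the facade, then parse it and extract the per-node attributes. A small helper capturing that pattern is sketched below; the wrapper function and its parser parameters are illustrative only, while parse_netmap_output and get_netmap_param are the helpers TestPolicy already uses and are passed in here to keep the sketch self-contained.

from typing import Callable

from frostfs_testlib.steps.node_management import NodeManagement

node_management = NodeManagement()


def netmap_params(node, shell, parse_netmap_output: Callable, get_netmap_param: Callable):
    # Old free-function call: get_netmap_snapshot(node=node, shell=shell).
    snapshot = node_management.get_netmap_snapshot(node=node, shell=shell)
    # Same post-processing as in TestPolicy: parse the snapshot, then extract per-node parameters.
    return get_netmap_param(parse_netmap_output(snapshot))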

View File

@ -9,7 +9,7 @@ from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.cli.container import StorageContainer, StorageContainerInfo, create_container
from frostfs_testlib.steps.cli.object import get_object, get_object_nodes, put_object
from frostfs_testlib.steps.node_management import check_node_in_map, check_node_not_in_map
from frostfs_testlib.steps.node_management import NodeManagement
from frostfs_testlib.storage.cluster import ClusterNode, StorageNode
from frostfs_testlib.storage.controllers import ClusterStateController
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
@ -28,13 +28,16 @@ logger = logging.getLogger("NeoLogger")
@pytest.mark.failover
@pytest.mark.failover_server
class TestFailoverServer(ClusterTestBase):
node_management = NodeManagement()
@wait_for_success(max_wait_time=120, interval=1)
def wait_node_not_in_map(self, *args, **kwargs):
check_node_not_in_map(*args, **kwargs)
self.node_management.check_node_not_in_map(*args, **kwargs)
@wait_for_success(max_wait_time=120, interval=1)
def wait_node_in_map(self, *args, **kwargs):
check_node_in_map(*args, **kwargs)
self.node_management.check_node_in_map(*args, **kwargs)
@reporter.step("Create {count_containers} containers and {count_files} objects")
@pytest.fixture

View File

@ -11,14 +11,7 @@ from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
from frostfs_testlib.steps.cli.container import StorageContainer, StorageContainerInfo, create_container
from frostfs_testlib.steps.cli.object import get_object, put_object_to_random_node
from frostfs_testlib.steps.node_management import (
check_node_in_map,
check_node_not_in_map,
exclude_node_from_network_map,
include_node_to_network_map,
remove_nodes_from_map_morph,
wait_for_node_to_be_ready,
)
from frostfs_testlib.steps.node_management import NodeManagement
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode
from frostfs_testlib.storage.controllers import ClusterStateController, ShardsWatcher
@ -61,6 +54,9 @@ def after_run_return_all_stopped_services(cluster_state_controller: ClusterState
@pytest.mark.failover
@pytest.mark.failover_storage
class TestFailoverStorage(ClusterTestBase):
node_management = NodeManagement()
@allure.title("Shutdown and start node (stop_mode={stop_mode})")
@pytest.mark.parametrize("stop_mode", ["hard", "soft"])
@pytest.mark.failover_reboot
@ -170,6 +166,9 @@ class TestFailoverStorage(ClusterTestBase):
@pytest.mark.failover
@pytest.mark.failover_empty_map
class TestEmptyMap(ClusterTestBase):
node_management = NodeManagement()
"""
A set of tests for makes map empty and verify that we can read objects after that
"""
@ -180,7 +179,7 @@ class TestEmptyMap(ClusterTestBase):
yield
with reporter.step("Return all storage nodes to network map"):
for node in stopped_nodes:
include_node_to_network_map(node, node, shell=self.shell, cluster=self.cluster)
self.node_management.include_node_to_network_map(node, node, shell=self.shell, cluster=self.cluster)
stopped_nodes.remove(node)
@pytest.mark.failover_empty_map_offlne
@ -223,11 +222,11 @@ class TestEmptyMap(ClusterTestBase):
with reporter.step("Exclude all storage nodes from network map"):
for node in storage_nodes:
stopped_nodes.append(node)
exclude_node_from_network_map(node, node, shell=self.shell, cluster=self.cluster)
self.node_management.exclude_node_from_network_map(node, node, shell=self.shell, cluster=self.cluster)
with reporter.step("Return all storage nodes to network map"):
for node in storage_nodes:
include_node_to_network_map(node, node, shell=self.shell, cluster=self.cluster)
self.node_management.include_node_to_network_map(node, node, shell=self.shell, cluster=self.cluster)
stopped_nodes.remove(node)
with reporter.step("Check that we can read object"):
@ -240,7 +239,7 @@ class TestEmptyMap(ClusterTestBase):
with reporter.step("Return all storage nodes to network map"):
cluster_state_controller.start_all_stopped_services()
for node in stopped_nodes:
check_node_in_map(node, shell=self.shell, alive_node=node)
self.node_management.check_node_in_map(node, shell=self.shell, alive_node=node)
@pytest.mark.failover_empty_map_stop_service
@allure.title("Empty network map via stop all storage services (s3_client={s3_client})")
@ -286,7 +285,7 @@ class TestEmptyMap(ClusterTestBase):
cluster_state_controller.stop_services_of_type(StorageNode)
with reporter.step("Remove all nodes from network map"):
remove_nodes_from_map_morph(
self.node_management.remove_nodes_from_map_morph(
shell=self.shell, cluster=self.cluster, remove_nodes=self.cluster.services(StorageNode)
)
@ -302,21 +301,21 @@ class TestEmptyMap(ClusterTestBase):
with reporter.step("Start first node and check network map"):
cluster_state_controller.start_service_of_type(self.cluster.cluster_nodes[0], StorageNode)
wait_for_node_to_be_ready(first_node)
self.node_management.wait_for_node_to_be_ready(first_node)
for check_node in self.cluster.storage_nodes:
check_node_not_in_map(check_node, shell=self.shell, alive_node=first_node)
self.node_management.check_node_not_in_map(check_node, shell=self.shell, alive_node=first_node)
for node in self.cluster.cluster_nodes[1:]:
storage_node = node.service(StorageNode)
cluster_state_controller.start_service_of_type(node, StorageNode)
wait_for_node_to_be_ready(storage_node)
self.node_management.wait_for_node_to_be_ready(storage_node)
sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
self.tick_epochs(1)
check_node_in_map(storage_node, shell=self.shell, alive_node=first_node)
self.node_management.check_node_in_map(storage_node, shell=self.shell, alive_node=first_node)
@allure.title("Object loss from fstree/blobovnicza (versioning=enabled, s3_client={s3_client})")
def test_s3_fstree_blobovnicza_loss_versioning_on(
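
The empty-map recovery above follows a fixed sequence per node: start the storage service, wait until the node reports ready, let a morph block pass, tick an epoch, and only then assert the node is back in the network map. A sketch of that loop through the facade is below; cluster_state_controller, the cluster node list, first_node and tick_epochs come from the surrounding test and are passed in as parameters, and the datetime_utils import path is an assumption since it is not part of this hunk.

from time import sleep

from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.steps.node_management import NodeManagement
from frostfs_testlib.storage.cluster import StorageNode
from frostfs_testlib.utils import datetime_utils  # assumed import path

node_management = NodeManagement()


def restart_remaining_nodes(cluster_nodes, cluster_state_controller, shell, first_node, tick_epochs):
    # Mirrors the TestEmptyMap recovery loop after the first node is back online.
    for node in cluster_nodes[1:]:
        storage_node = node.service(StorageNode)
        cluster_state_controller.start_service_of_type(node, StorageNode)
        node_management.wait_for_node_to_be_ready(storage_node)
        sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
        tick_epochs(1)
        node_management.check_node_in_map(storage_node, shell=shell, alive_node=first_node)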

View File

@ -24,18 +24,7 @@ from frostfs_testlib.steps.cli.object import (
put_object_to_random_node,
search_object,
)
from frostfs_testlib.steps.node_management import (
check_node_in_map,
delete_node_data,
drop_object,
exclude_node_from_network_map,
get_locode_from_random_node,
include_node_to_network_map,
node_shard_list,
node_shard_set_mode,
storage_node_set_status,
wait_for_node_to_be_ready,
)
from frostfs_testlib.steps.node_management import NodeManagement
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
from frostfs_testlib.storage.cluster import ClusterNode, StorageNode
from frostfs_testlib.storage.controllers import ClusterStateController
@ -57,6 +46,9 @@ check_nodes_cluster_node_hosts: list[Host] = []
@pytest.mark.add_nodes
@pytest.mark.node_mgmt
class TestNodeManagement(ClusterTestBase):
node_management = NodeManagement()
@pytest.fixture
@allure.title("Create container and pick the node with data")
def create_container_and_pick_node(
@ -81,13 +73,13 @@ class TestNodeManagement(ClusterTestBase):
yield cid, node
shards = node_shard_list(node)
shards = self.node_management.node_shard_list(node)
assert shards
for shard in shards:
node_shard_set_mode(node, shard, "read-write")
self.node_management.node_shard_set_mode(node, shard, "read-write")
node_shard_list(node)
self.node_management.node_shard_list(node)
@reporter.step("Tick epoch with retries")
def tick_epoch_with_retries(self, attempts: int = 3, timeout: int = 3, wait_block: int = None):
@ -117,7 +109,7 @@ class TestNodeManagement(ClusterTestBase):
with reporter.step(f"Start node {node}"):
node.start_service()
with reporter.step(f"Waiting status ready for node {node}"):
wait_for_node_to_be_ready(node)
self.node_management.wait_for_node_to_be_ready(node)
# We need to wait for node to establish notifications from morph-chain
# Otherwise it will hang up when we will try to set status
@ -125,13 +117,13 @@ class TestNodeManagement(ClusterTestBase):
with reporter.step(f"Move node {node} to online state"):
node_cluster_node_host = check_nodes_cluster_node_hosts[check_nodes.index(node)]
storage_node_set_status(node, node_cluster_node_host, status="online", retries=2)
self.node_management.storage_node_set_status(node, node_cluster_node_host, status="online", retries=2)
check_nodes_cluster_node_hosts.pop(check_nodes.index(node))
check_nodes.remove(node)
sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
self.tick_epoch_with_retries(3, wait_block=2)
check_node_in_map(node, shell=self.shell, alive_node=alive_node)
self.node_management.check_node_in_map(node, shell=self.shell, alive_node=alive_node)
@allure.title("Add one node to cluster")
@pytest.mark.add_nodes
@ -162,14 +154,14 @@ class TestNodeManagement(ClusterTestBase):
alive_node = random.choice(
[storage_node for storage_node in storage_nodes if storage_node.id != random_node.id]
)
check_node_in_map(random_node, shell=self.shell, alive_node=alive_node)
self.node_management.check_node_in_map(random_node, shell=self.shell, alive_node=alive_node)
# Add node to recovery list before messing with it
check_nodes.append(random_node)
check_nodes_cluster_node_hosts.append(random_node_host)
exclude_node_from_network_map(random_node, alive_node, shell=self.shell, cluster=self.cluster)
delete_node_data(random_node)
self.node_management.exclude_node_from_network_map(random_node, alive_node, shell=self.shell, cluster=self.cluster)
self.node_management.delete_node_data(random_node)
cid = create_container(
wallet,
@ -194,7 +186,7 @@ class TestNodeManagement(ClusterTestBase):
# Add node to recovery list before messing with it
check_nodes.append(random_node)
check_nodes_cluster_node_hosts.append(random_node_host)
exclude_node_from_network_map(random_node, alive_node, shell=self.shell, cluster=self.cluster)
self.node_management.exclude_node_from_network_map(random_node, alive_node, shell=self.shell, cluster=self.cluster)
wait_object_replication(
cid,
@ -203,7 +195,7 @@ class TestNodeManagement(ClusterTestBase):
shell=self.shell,
nodes=list(set(storage_nodes) - {random_node}),
)
include_node_to_network_map(random_node, alive_node, shell=self.shell, cluster=self.cluster)
self.node_management.include_node_to_network_map(random_node, alive_node, shell=self.shell, cluster=self.cluster)
wait_object_replication(cid, oid, 3, shell=self.shell, nodes=storage_nodes)
with reporter.step("Check container could be created with new node"):
@ -234,7 +226,7 @@ class TestNodeManagement(ClusterTestBase):
file_path_simple = generate_file(simple_object_size.value)
file_path_complex = generate_file(complex_object_size.value)
locode = get_locode_from_random_node(self.cluster)
locode = self.node_management.get_locode_from_random_node(self.cluster)
rule = f"REP 1 IN SE CBF 1 SELECT 1 FROM LOC AS SE FILTER 'UN-LOCODE' EQ '{locode}' AS LOC"
cid = create_container(wallet, rule=rule, shell=self.shell, endpoint=endpoint)
oid_simple = put_object_to_random_node(wallet, file_path_simple, cid, shell=self.shell, cluster=self.cluster)
@ -251,7 +243,7 @@ class TestNodeManagement(ClusterTestBase):
with reporter.step(f"Drop object {oid}"):
get_object_from_random_node(wallet, cid, oid, shell=self.shell, cluster=self.cluster)
head_object(wallet, cid, oid, shell=self.shell, endpoint=endpoint)
drop_object(random_node, cid, oid)
self.node_management.drop_object(random_node, cid, oid)
self.wait_for_obj_dropped(wallet, cid, oid, endpoint, get_object)
self.wait_for_obj_dropped(wallet, cid, oid, endpoint, head_object)
@ -272,13 +264,13 @@ class TestNodeManagement(ClusterTestBase):
# for mode in ('read-only', 'degraded'):
for mode in ("degraded",):
shards = node_shard_list(node)
shards = self.node_management.node_shard_list(node)
assert shards
for shard in shards:
node_shard_set_mode(node, shard, mode)
self.node_management.node_shard_set_mode(node, shard, mode)
shards = node_shard_list(node)
shards = self.node_management.node_shard_list(node)
assert shards
with pytest.raises(RuntimeError):
@ -290,9 +282,9 @@ class TestNodeManagement(ClusterTestBase):
get_object_from_random_node(wallet, cid, original_oid, self.shell, self.cluster)
for shard in shards:
node_shard_set_mode(node, shard, "read-write")
self.node_management.node_shard_set_mode(node, shard, "read-write")
shards = node_shard_list(node)
shards = self.node_management.node_shard_list(node)
assert shards
oid = put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster)
@ -304,11 +296,6 @@ class TestNodeManagement(ClusterTestBase):
wallet = default_wallet
placement_rule = "REP 3 IN X SELECT 4 FROM * AS X"
source_file_path = generate_file(simple_object_size.value)
# storage_nodes = self.cluster.storage_nodes
# random_node = random.choice(storage_nodes[1:])
# alive_node = random.choice(
# [storage_node for storage_node in storage_nodes if storage_node.id != random_node.id]
# )
storage_nodes = []
corresponding_cluster_node_hosts = []
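
The create_container_and_pick_node fixture above restores every shard on the picked node to read-write during teardown. A compact sketch of that step through the facade (the standalone function is illustrative; the calls mirror the diff):

from frostfs_testlib.steps.node_management import NodeManagement

node_management = NodeManagement()


def restore_shards_to_read_write(node) -> None:
    # List the node's shards, flip each back to read-write, then re-list as a sanity check.
    shards = node_management.node_shard_list(node)
    assert shards
    for shard in shards:
        node_management.node_shard_set_mode(node, shard, "read-write")
    node_management.node_shard_list(node)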

View File

@ -19,7 +19,7 @@ from frostfs_testlib.steps.cli.container import StorageContainer, StorageContain
from frostfs_testlib.steps.cli.object import delete_object, head_object, lock_object
from frostfs_testlib.steps.complex_object_actions import get_link_object, get_storage_object_chunks
from frostfs_testlib.steps.epoch import ensure_fresh_epoch, get_epoch, tick_epoch
from frostfs_testlib.steps.node_management import drop_object
from frostfs_testlib.steps.node_management import NodeManagement
from frostfs_testlib.steps.storage_object import delete_objects
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
from frostfs_testlib.storage.cluster import Cluster
@ -137,6 +137,9 @@ def verify_object_available(wallet_file_path: str, cid: str, oid: str, shell: Sh
@pytest.mark.grpc_object_lock
class TestObjectLockWithGrpc(ClusterTestBase):
node_management = NodeManagement()
@pytest.fixture()
def new_locked_storage_object(self, user_container: StorageContainer, object_size: ObjectSize) -> StorageObjectInfo:
"""
@ -514,7 +517,7 @@ class TestObjectLockWithGrpc(ClusterTestBase):
)
for node in nodes_with_object:
with expect_not_raises():
drop_object(node, new_locked_storage_object.cid, link_object_id)
self.node_management.drop_object(node, new_locked_storage_object.cid, link_object_id)
@allure.title("Drop chunks of locked complex object")
@pytest.mark.grpc_control
@ -537,7 +540,7 @@ class TestObjectLockWithGrpc(ClusterTestBase):
)
for node in nodes_with_object:
with expect_not_raises():
drop_object(node, new_locked_storage_object.cid, chunk_object_id)
self.node_management.drop_object(node, new_locked_storage_object.cid, chunk_object_id)
@allure.title("Drop locked object (obj_size={object_size})")
@pytest.mark.grpc_control
@ -551,7 +554,7 @@ class TestObjectLockWithGrpc(ClusterTestBase):
for node in nodes_with_object:
with expect_not_raises():
drop_object(node, new_locked_storage_object.cid, new_locked_storage_object.oid)
self.node_management.drop_object(node, new_locked_storage_object.cid, new_locked_storage_object.oid)
@allure.title("Link object of complex object is protected from deletion")
@pytest.mark.parametrize(