Updates after testlib #151

Merged
abereziny merged 1 commit from abereziny/frostfs-testcases:feature-updates into master 2023-11-22 14:56:31 +00:00
3 changed files with 19 additions and 48 deletions
Showing only changes of commit 0e57ad79cd

File 1 of 3

@@ -23,7 +23,6 @@ from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper
 from frostfs_testlib.shell import LocalShell, Shell
 from frostfs_testlib.steps.cli.container import list_containers
 from frostfs_testlib.steps.cli.object import get_netmap_netinfo
-from frostfs_testlib.steps.node_management import storage_node_healthcheck
 from frostfs_testlib.steps.s3 import s3_helper
 from frostfs_testlib.storage.cluster import Cluster, ClusterNode
 from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
@@ -345,19 +344,11 @@ def readiness_on_node(cluster_node: ClusterNode):
 
 @allure.title("[Autouse/Test] Run health check for all nodes")
 @pytest.fixture(autouse=True)
-def run_health_check(cluster: Cluster, request: pytest.FixtureRequest):
+def run_health_check(healthcheck: Healthcheck, cluster: Cluster, request: pytest.FixtureRequest):
     if request.node.get_closest_marker("no_healthcheck"):
         # Skip healthcheck for tests marked with no_healthcheck
        return
-    parallel(healthcheck_on_node, cluster.cluster_nodes)
-
-
-@allure.title("Run health check for {cluster_node}")
-def healthcheck_on_node(cluster_node: ClusterNode):
-    health_check = storage_node_healthcheck(cluster_node.storage_node)
-    assert (
-        health_check.health_status == "READY" and health_check.network_status == "ONLINE"
-    ), f"Node {cluster_node} is not healthy"
+    parallel(healthcheck.storage_healthcheck, cluster.cluster_nodes)
 
 @allure.step("Prepare wallet and deposit")
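
Note on this first file: the hand-rolled healthcheck_on_node helper (storage_node_healthcheck plus a READY/ONLINE assertion) is gone; the autouse fixture now takes the Healthcheck interface from frostfs_testlib.healthcheck.interfaces and fans storage_healthcheck out across nodes with parallel. Tests can still opt out through the no_healthcheck marker. A minimal sketch of the opt-out, assuming the marker is registered in the project's pytest settings:

import pytest

# get_closest_marker("no_healthcheck") in run_health_check sees this marker
# and returns before any node is checked
@pytest.mark.no_healthcheck
def test_that_deliberately_leaves_nodes_unhealthy():
    ...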

File 2 of 3

@@ -5,6 +5,7 @@ from time import sleep
 
 import allure
 import pytest
+from frostfs_testlib.healthcheck.interfaces import Healthcheck
 from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
 from frostfs_testlib.steps.cli.container import create_container
 from frostfs_testlib.steps.cli.object import (
@@ -20,7 +21,8 @@ from frostfs_testlib.storage.controllers import ClusterStateController
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, StorageObjectInfo
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
-from frostfs_testlib.utils.failover_utils import wait_all_storage_nodes_returned, wait_object_replication
+from frostfs_testlib.testing.parallel import parallel
+from frostfs_testlib.utils.failover_utils import wait_object_replication
 from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
 
 logger = logging.getLogger("NeoLogger")
@@ -42,7 +44,7 @@ OBJECT_ATTRIBUTES = [
 class TestFailoverNetwork(ClusterTestBase):
     @pytest.fixture(autouse=True)
     @allure.title("Restore network")
-    def restore_network(self, cluster_state_controller: ClusterStateController):
+    def restore_network(self, healthcheck: Healthcheck, cluster_state_controller: ClusterStateController):
         yield
         with allure.step(f"Count blocked nodes {len(blocked_nodes)}"):
             not_empty = len(blocked_nodes) != 0
@@ -51,7 +53,7 @@ class TestFailoverNetwork(ClusterTestBase):
                 cluster_state_controller.restore_traffic(mode="ports", node=node)
                 blocked_nodes.remove(node)
         if not_empty:
-            wait_all_storage_nodes_returned(self.shell, self.cluster)
+            parallel(healthcheck.storage_healthcheck, self.cluster.cluster_nodes)
 
     @pytest.fixture()
     @allure.title("Restore drop traffic to system")

File 3 of 3

@@ -1,11 +1,9 @@
 import logging
 import os.path
 import random
-import time
 
 import allure
 import pytest
-from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
 from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
 from frostfs_testlib.steps.cli.container import StorageContainer, StorageContainerInfo, create_container
 from frostfs_testlib.steps.cli.object import get_object
@@ -17,8 +15,7 @@ from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.testing.test_control import wait_for_success
-from frostfs_testlib.utils import datetime_utils
-from frostfs_testlib.utils.failover_utils import wait_for_host_offline, wait_object_replication
+from frostfs_testlib.utils.failover_utils import wait_object_replication
 from frostfs_testlib.utils.file_utils import get_file_hash
 from pytest import FixtureRequest
@@ -75,7 +72,7 @@ class TestFailoverServer(ClusterTestBase):
     ) -> StorageObjectInfo:
         count_object = request.param
         object_sizes = [simple_object_size, complex_object_size]
-        object_list = []
+        object_list: list[StorageObjectInfo] = []
         for cont in containers:
             for _ in range(count_object):
                 object_list.append(cont.generate_object(size=random.choice(object_sizes).value))
@@ -133,9 +130,7 @@ class TestFailoverServer(ClusterTestBase):
     @pytest.mark.parametrize("containers, storage_objects", [(5, 10)], indirect=True)
     def test_complete_node_shutdown(
         self,
-        containers: list[StorageContainer],
         storage_objects: list[StorageObjectInfo],
-        default_wallet: str,
         node_to_stop: ClusterNode,
         cluster_state_controller: ClusterStateController,
     ):
@@ -145,17 +140,12 @@ class TestFailoverServer(ClusterTestBase):
         storage_nodes = [cluster.storage_node for cluster in alive_nodes]
 
-        with allure.step("Tick epoch"):
-            self.tick_epochs(1, storage_nodes[0])
-        with allure.step("Wait 2 block time"):
-            time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2)
+        with allure.step("Tick epoch and wait for 2 blocks"):
+            self.tick_epochs(1, storage_nodes[0], wait_block=2)
 
         with allure.step(f"Stop node"):
             cluster_state_controller.stop_node_host(node=node_to_stop, mode="hard")
-        with allure.step(f"Check if the node {node_to_stop.storage_node} has stopped"):
-            wait_for_host_offline(self.shell, node_to_stop.storage_node)
 
         with allure.step("Verify that there are no corrupted objects"):
             corrupted_objects_list = self.get_corrupted_objects_list(storage_nodes, storage_objects)
@ -166,13 +156,8 @@ class TestFailoverServer(ClusterTestBase):
count_tick_epoch = int(alive_nodes[0].ir_node.get_netmap_cleaner_threshold()) + 4
with allure.step(f"Tick {count_tick_epoch} epoch, in {storage_nodes[0]} node"):
for tick in range(count_tick_epoch):
self.tick_epoch(storage_nodes[0])
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2)
with allure.step(f"Check if the node {node_to_stop.storage_node} has stopped"):
wait_for_host_offline(self.shell, node_to_stop.storage_node)
with allure.step(f"Tick {count_tick_epoch} epochs and wait for 2 blocks"):
self.tick_epochs(count_tick_epoch, wait_block=2)
with allure.step(f"Check {node_to_stop} in not map"):
self.wait_node_not_in_map(node_to_stop.storage_node, self.shell, alive_node=storage_nodes[0])
@@ -185,26 +170,19 @@ class TestFailoverServer(ClusterTestBase):
     @pytest.mark.parametrize("containers, storage_objects", [(5, 10)], indirect=True)
     def test_temporarily_disable_a_node(
         self,
-        containers: list[StorageContainer],
         storage_objects: list[StorageObjectInfo],
-        default_wallet: str,
         node_to_stop: ClusterNode,
         cluster_state_controller: ClusterStateController,
     ):
         with allure.step(f"Remove {node_to_stop} from the list of nodes"):
             storage_nodes = list(set(self.cluster.storage_nodes) - {node_to_stop.storage_node})
 
-        with allure.step("Tick epoch"):
-            self.tick_epochs(1, storage_nodes[0])
-        with allure.step("Wait 2 block time"):
-            time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2)
+        with allure.step("Tick epoch and wait for 2 blocks"):
+            self.tick_epochs(1, storage_nodes[0], wait_block=2)
 
         with allure.step(f"Stop node"):
             cluster_state_controller.stop_node_host(node=node_to_stop, mode="hard")
-        with allure.step(f"Check if the node {node_to_stop} has stopped"):
-            wait_for_host_offline(self.shell, node_to_stop.storage_node)
 
         with allure.step("Verify that there are no corrupted objects"):
             corrupted_objects_list = self.get_corrupted_objects_list(storage_nodes, storage_objects)
             assert not corrupted_objects_list
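
Across this last file the waiting logic folds into tick_epochs itself: wait_block=2 replaces the hand-written sleep of two Morph block times, and the per-epoch loop in the netmap-cleaner step collapses to a single call. Side by side, as taken from this diff:

# before: tick an epoch, then sleep two block times by hand
self.tick_epochs(1, storage_nodes[0])
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2)

# after: the helper waits for the blocks itself
self.tick_epochs(1, storage_nodes[0], wait_block=2)

The wait_for_host_offline checks after stop_node_host(mode="hard") are likewise dropped, along with the now-unused time, datetime_utils, and MORPH_BLOCK_TIME imports.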