Fixture for restoring stopped storage nodes in test_failover_storage

support/v0.37
Yaroslava Lukoyanova 2023-05-26 07:49:23 +03:00 committed by ylukoyan
parent e36e18dc57
commit d0660d626b
2 changed files with 31 additions and 29 deletions
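
In short: instead of each failover test tracking stopped nodes in a module-level stopped_nodes list and restoring them by hand (stop_service, start_service, wait_for_node_to_be_ready), the tests now delegate to frostfs_testlib's ClusterStateController, and a new autouse fixture restarts whatever is still stopped after every test, including tests that fail midway. A minimal sketch of the bookkeeping the controller is assumed to keep internally (illustrative only, not the frostfs_testlib source):

# Illustrative sketch; the real ClusterStateController lives in
# frostfs_testlib.storage.controllers and may differ in detail.
class ClusterStateControllerSketch:
    def __init__(self, shell, cluster):
        self.shell = shell
        self.cluster = cluster
        self.stopped_storage_nodes = []  # replaces the per-test stopped_nodes lists

    def stop_storage_service(self, node):
        # Stop the storage service on one cluster node and remember it.
        node.storage_node.stop_service()
        self.stopped_storage_nodes.append(node)

    def start_stopped_storage_services(self):
        # Restart everything previously stopped; a no-op when nothing is stopped.
        for node in list(self.stopped_storage_nodes):
            node.storage_node.start_service()
            self.stopped_storage_nodes.remove(node)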


@@ -23,6 +23,7 @@ from frostfs_testlib.steps.cli.object import get_netmap_netinfo
 from frostfs_testlib.steps.node_management import storage_node_healthcheck
 from frostfs_testlib.steps.s3 import s3_helper
 from frostfs_testlib.storage.cluster import Cluster
+from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
 from frostfs_testlib.storage.dataclasses.wallet import WalletFactory
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.utils import env_utils, version_utils
@@ -149,6 +150,12 @@ def s3_policy(request: pytest.FixtureRequest):
     return policy
 
 
+@pytest.fixture(scope="session")
+def cluster_state_controller(client_shell: Shell, cluster: Cluster) -> ClusterStateController:
+    controller = ClusterStateController(client_shell, cluster)
+    yield controller
+
+
 @allure.step("[Class]: Create S3 client")
 @pytest.fixture(scope="class")
 def s3_client(
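
Because the fixture above is session-scoped, all tests share one controller instance, so the services a test stops through it are exactly what the autouse teardown fixture in the next file restarts. A hypothetical consuming test, using only the fixture and controller methods that appear in this diff:

import pytest

from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase


@pytest.mark.failover
class TestExample(ClusterTestBase):
    def test_single_node_stop(self, cluster_state_controller: ClusterStateController):
        # Stop the storage service on one node; the controller records it.
        node = self.cluster.cluster_nodes[0]
        cluster_state_controller.stop_storage_service(node)
        # ... exercise the cluster with one storage service down ...
        # Explicit restart; if an assertion above raised instead, the autouse
        # start_stopped_services fixture would run the same call at teardown.
        cluster_state_controller.start_stopped_storage_services()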


@@ -22,6 +22,7 @@ from frostfs_testlib.steps.node_management import (
 )
 from frostfs_testlib.steps.s3 import s3_helper
 from frostfs_testlib.storage.cluster import Cluster, StorageNode
+from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.utils import datetime_utils
 from frostfs_testlib.utils.failover_utils import (
@@ -60,6 +61,11 @@ def return_stopped_hosts(shell: Shell, cluster: Cluster) -> None:
 
 @pytest.mark.failover
 class TestFailoverStorage(ClusterTestBase):
+    @pytest.fixture(scope="function", autouse=True)
+    def start_stopped_services(self, cluster_state_controller: ClusterStateController):
+        yield
+        cluster_state_controller.start_stopped_storage_services()
+
     @allure.title("Lose and return storage node's host")
     @pytest.mark.parametrize("hard_reboot", [True, False])
     @pytest.mark.failover_reboot
@@ -362,7 +368,10 @@ class TestEmptyMap(ClusterTestBase):
 
     @allure.title("Test S3 Object loss from fstree/blobovnicza, versioning is enabled")
     def test_s3_fstree_blobovnicza_loss_versioning_on(
-        self, s3_client: S3ClientWrapper, simple_object_size
+        self,
+        s3_client: S3ClientWrapper,
+        simple_object_size: int,
+        cluster_state_controller: ClusterStateController,
     ):
         bucket = s3_client.create_bucket()
         s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
@@ -377,10 +386,9 @@ class TestEmptyMap(ClusterTestBase):
                 object_versions.append(put_object)
 
         with allure.step("Stop all storage nodes"):
-            for node in self.cluster.storage_nodes:
+            for node in self.cluster.cluster_nodes:
                 with allure.step(f"Stop storage service on node: {node}"):
-                    node.stop_service()
-                    stopped_nodes.append(node)
+                    cluster_state_controller.stop_storage_service(node)
 
         with allure.step("Delete blobovnicza and fstree from all nodes"):
             for node in self.cluster.storage_nodes:
@@ -388,12 +396,7 @@ class TestEmptyMap(ClusterTestBase):
                 node.delete_fstree()
 
         with allure.step("Start all storage nodes"):
-            for node in list(stopped_nodes):
-                with allure.step(f"Start node {node}"):
-                    node.start_service()
-                    stopped_nodes.remove(node)
-                with allure.step(f"Waiting status ready for node {node}"):
-                    wait_for_node_to_be_ready(node)
+            cluster_state_controller.start_stopped_storage_services()
 
         # need to get Delete Marker first
         with allure.step("Delete the object from the bucket"):
@@ -410,7 +413,10 @@ class TestEmptyMap(ClusterTestBase):
 
     @allure.title("Test S3 Object loss from fstree/blobovnicza, versioning is disabled")
     def test_s3_fstree_blobovnicza_loss_versioning_off(
-        self, s3_client: S3ClientWrapper, simple_object_size
+        self,
+        s3_client: S3ClientWrapper,
+        simple_object_size: int,
+        cluster_state_controller: ClusterStateController,
     ):
         bucket = s3_client.create_bucket()
 
@@ -422,10 +428,9 @@ class TestEmptyMap(ClusterTestBase):
         s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
 
         with allure.step("Stop all storage nodes"):
-            for node in self.cluster.storage_nodes:
+            for node in self.cluster.cluster_nodes:
                 with allure.step(f"Stop storage service on node: {node}"):
-                    node.stop_service()
-                    stopped_nodes.append(node)
+                    cluster_state_controller.stop_storage_service(node)
 
         with allure.step("Delete blobovnicza and fstree from all nodes"):
             for node in self.cluster.storage_nodes:
@@ -433,12 +438,7 @@ class TestEmptyMap(ClusterTestBase):
                 node.delete_fstree()
 
         with allure.step("Start all storage nodes"):
-            for node in list(stopped_nodes):
-                with allure.step(f"Start node {node}"):
-                    node.start_service()
-                    stopped_nodes.remove(node)
-                with allure.step(f"Waiting status ready for node {node}"):
-                    wait_for_node_to_be_ready(node)
+            cluster_state_controller.start_stopped_storage_services()
 
         with allure.step("Delete the object from the bucket"):
             s3_client.delete_object(bucket, file_name)
@@ -460,6 +460,7 @@ class TestEmptyMap(ClusterTestBase):
         s3_client: S3ClientWrapper,
         simple_object_size: int,
         versioning_status: VersioningStatus,
+        cluster_state_controller: ClusterStateController,
     ):
         bucket = s3_client.create_bucket()
         if versioning_status:
@@ -473,22 +474,16 @@ class TestEmptyMap(ClusterTestBase):
         s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
 
         with allure.step("Stop all storage nodes"):
-            for node in self.cluster.storage_nodes:
+            for node in self.cluster.cluster_nodes:
                 with allure.step(f"Stop storage service on node: {node}"):
-                    node.stop_service()
-                    stopped_nodes.append(node)
+                    cluster_state_controller.stop_storage_service(node)
 
         with allure.step("Delete pilorama.db from all nodes"):
             for node in self.cluster.storage_nodes:
                 node.delete_pilorama()
 
         with allure.step("Start all storage nodes"):
-            for node in list(stopped_nodes):
-                with allure.step(f"Start node {node}"):
-                    node.start_service()
-                    stopped_nodes.remove(node)
-                with allure.step(f"Waiting status ready for node {node}"):
-                    wait_for_node_to_be_ready(node)
+            cluster_state_controller.start_stopped_storage_services()
 
         with allure.step("Check list objects first time"):
             objects_list = s3_client.list_objects(bucket)
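
Note the division of labor after this change: the tests still call start_stopped_storage_services() explicitly mid-test, because they need the storage services back up before their final checks can pass; the autouse start_stopped_services fixture repeats the same call at teardown as a safety net, so a test that fails between the stop and start steps no longer leaves the cluster down for the tests that follow.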