Move Time test
Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
parent b0c9502bd3
commit 850b8533a8
2 changed files with 82 additions and 75 deletions
pytest_tests/testsuites/failovers/test_discrete_time.py (new file, 78 additions)
@@ -0,0 +1,78 @@
import datetime
from time import sleep

import allure
import pytest
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.steps.cli.object import neo_go_query_height
from frostfs_testlib.storage.controllers import ClusterStateController
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils import datetime_utils


@pytest.mark.time
@pytest.mark.failover
class TestTime(ClusterTestBase):
    @allure.step("Neo-go should continue to release blocks")
    def check_nodes_block(self, cluster_state_controller: ClusterStateController):
        count_blocks = {}
        with allure.step("Get current block id"):
            for cluster_node in self.cluster.cluster_nodes:
                cluster_state_controller.get_node_date(cluster_node)
                count_blocks[cluster_node] = neo_go_query_height(
                    shell=cluster_node.host.get_shell(), endpoint=cluster_node.morph_chain.get_http_endpoint()
                )["Latest block"]
        with allure.step("Wait for 3 blocks"):
            sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 3)
        with allure.step("Current block id should be higher than before"):
            for cluster_node in self.cluster.cluster_nodes:
                shell = cluster_node.host.get_shell()
                now_block = neo_go_query_height(shell=shell, endpoint=cluster_node.morph_chain.get_http_endpoint())[
                    "Latest block"
                ]
                assert count_blocks[cluster_node] < now_block

    @pytest.fixture()
    def node_time_synchronizer(self, cluster_state_controller: ClusterStateController) -> None:
        cluster_state_controller.set_sync_date_all_nodes(status="inactive")
        yield
        cluster_state_controller.set_sync_date_all_nodes(status="active")

    @allure.title("Changing hardware and system time")
    def test_system_time(self, cluster_state_controller: ClusterStateController, node_time_synchronizer: None):
        cluster_nodes = self.cluster.cluster_nodes
        timezone_utc = datetime.timezone.utc
        node_1, node_2, node_3 = cluster_nodes[0:3]

        with allure.step("On node 1, move the system time forward by 5 days"):
            cluster_state_controller.change_node_date(
                node_1, (datetime.datetime.now(timezone_utc) + datetime.timedelta(days=5))
            )

        self.check_nodes_block(cluster_state_controller)

        with allure.step("On node 2, move the system time back 5 days."):
            cluster_state_controller.change_node_date(
                node_2, (datetime.datetime.now(timezone_utc) - datetime.timedelta(days=5))
            )

        self.check_nodes_block(cluster_state_controller)

        with allure.step("On node 3, move the system time forward by 10 days"):
            cluster_state_controller.change_node_date(
                node_3, (datetime.datetime.now(timezone_utc) + datetime.timedelta(days=10))
            )

        self.check_nodes_block(cluster_state_controller)

        with allure.step("Return the time on all nodes to the current one"):
            for cluster_node in self.cluster.cluster_nodes:
                cluster_state_controller.restore_node_date(cluster_node)

        self.check_nodes_block(cluster_state_controller)

        with allure.step("Reboot all nodes"):
            cluster_state_controller.shutdown_cluster(mode="soft")
            cluster_state_controller.start_stopped_hosts()

        self.check_nodes_block(cluster_state_controller)
@@ -1,4 +1,3 @@
import datetime
import logging
import random
from time import sleep
@@ -6,20 +5,18 @@ from typing import Optional, Tuple

import allure
import pytest
from frostfs_testlib.resources.common import FROSTFS_CONTRACT_CACHE_TIMEOUT, MORPH_BLOCK_TIME
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.cli.container import create_container, get_container
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import (
    delete_object,
    get_object,
    get_object_from_random_node,
    head_object,
    neo_go_query_height,
    put_object,
    put_object_to_random_node,
)
from frostfs_testlib.steps.epoch import tick_epoch
from frostfs_testlib.steps.node_management import (
    check_node_in_map,
    delete_node_data,
@@ -29,20 +26,18 @@ from frostfs_testlib.steps.node_management import (
    include_node_to_network_map,
    node_shard_list,
    node_shard_set_mode,
    storage_node_healthcheck,
    storage_node_set_status,
    wait_for_node_to_be_ready,
)
from frostfs_testlib.steps.storage_policy import get_nodes_with_object, get_simple_object_copies
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
from frostfs_testlib.storage.cluster import StorageNode
from frostfs_testlib.storage.controllers import ClusterStateController
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils import datetime_utils, string_utils
from frostfs_testlib.utils.failover_utils import wait_object_replication
from frostfs_testlib.utils.file_utils import generate_file

from pytest_tests.helpers.utility import placement_policy_from_container, wait_for_gc_pass_on_storage_nodes
from pytest_tests.helpers.utility import wait_for_gc_pass_on_storage_nodes

logger = logging.getLogger("NeoLogger")
check_nodes: list[StorageNode] = []
@@ -325,69 +320,3 @@ class TestNodeManagement(ClusterTestBase):
                raise AssertionError(f'Expected "{OBJECT_NOT_FOUND}" error, got\n{err}')

        raise AssertionError(f"Object {oid} was not dropped from node")


@pytest.mark.time
class TestTime(ClusterTestBase):
    @allure.step("Neo-go should continue to release blocks")
    def check_nodes_block(self, cluster_state_controller: ClusterStateController):
        count_blocks = {}
        with allure.step("Get current block id"):
            for cluster_node in self.cluster.cluster_nodes:
                cluster_state_controller.get_node_date(cluster_node)
                count_blocks[cluster_node] = neo_go_query_height(
                    shell=cluster_node.host.get_shell(), endpoint=cluster_node.morph_chain.get_http_endpoint()
                )["Latest block"]
        with allure.step("Wait for 3 blocks"):
            sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 3)
        with allure.step("Current block id should be higher than before"):
            for cluster_node in self.cluster.cluster_nodes:
                shell = cluster_node.host.get_shell()
                now_block = neo_go_query_height(shell=shell, endpoint=cluster_node.morph_chain.get_http_endpoint())[
                    "Latest block"
                ]
                assert count_blocks[cluster_node] < now_block

    @pytest.fixture()
    def node_time_synchronizer(self, cluster_state_controller: ClusterStateController) -> None:
        cluster_state_controller.set_sync_date_all_nodes(status="inactive")
        yield
        cluster_state_controller.set_sync_date_all_nodes(status="active")

    def test_system_time(self, cluster_state_controller: ClusterStateController, node_time_synchronizer: None):
        cluster_nodes = self.cluster.cluster_nodes
        timezone_utc = datetime.timezone.utc
        node_1, node_2, node_3 = cluster_nodes[0:3]

        with allure.step("On node 1, move the system time forward by 2 years"):
            cluster_state_controller.change_node_date(
                node_1, (datetime.datetime.now(timezone_utc) + datetime.timedelta(days=(365 * 2)))
            )

        self.check_nodes_block(cluster_state_controller)

        with allure.step("On node 2, move the system time back 2 years."):
            cluster_state_controller.change_node_date(
                node_2, (datetime.datetime.now(timezone_utc) - datetime.timedelta(days=(365 * 2)))
            )

        self.check_nodes_block(cluster_state_controller)

        with allure.step("On node 3, move the system time forward by 3 years"):
            cluster_state_controller.change_node_date(
                node_3, (datetime.datetime.now(timezone_utc) + datetime.timedelta(days=(365 * 3)))
            )

        self.check_nodes_block(cluster_state_controller)

        with allure.step("Return the time on all nodes to the current one"):
            for cluster_node in self.cluster.cluster_nodes:
                cluster_state_controller.restore_node_date(cluster_node)

        self.check_nodes_block(cluster_state_controller)

        with allure.step("Reboot all nodes"):
            cluster_state_controller.shutdown_cluster(mode="soft")
            cluster_state_controller.start_stopped_hosts()

        self.check_nodes_block(cluster_state_controller)