2023-11-02 07:02:44 +00:00
|
|
|
import datetime
|
2022-07-08 17:24:55 +00:00
|
|
|
import logging
|
2022-12-05 22:31:45 +00:00
|
|
|
import random
|
2022-07-08 17:24:55 +00:00
|
|
|
from time import sleep
|
2022-12-05 22:31:45 +00:00
|
|
|
from typing import Optional, Tuple
|
2022-07-08 17:24:55 +00:00
|
|
|
|
|
|
|
import allure
|
|
|
|
import pytest
|
2023-05-15 09:59:33 +00:00
|
|
|
from frostfs_testlib.resources.common import FROSTFS_CONTRACT_CACHE_TIMEOUT, MORPH_BLOCK_TIME
|
|
|
|
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
|
|
|
|
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
|
|
|
|
from frostfs_testlib.steps.cli.container import create_container, get_container
|
|
|
|
from frostfs_testlib.steps.cli.object import (
|
2022-12-05 22:31:45 +00:00
|
|
|
delete_object,
|
|
|
|
get_object,
|
|
|
|
get_object_from_random_node,
|
|
|
|
head_object,
|
2023-11-02 07:02:44 +00:00
|
|
|
neo_go_query_height,
|
2022-12-05 22:31:45 +00:00
|
|
|
put_object,
|
|
|
|
put_object_to_random_node,
|
|
|
|
)
|
2023-05-15 09:59:33 +00:00
|
|
|
from frostfs_testlib.steps.epoch import tick_epoch
|
|
|
|
from frostfs_testlib.steps.node_management import (
|
2022-09-23 11:09:41 +00:00
|
|
|
check_node_in_map,
|
|
|
|
delete_node_data,
|
|
|
|
drop_object,
|
|
|
|
exclude_node_from_network_map,
|
2022-12-05 22:31:45 +00:00
|
|
|
get_locode_from_random_node,
|
2022-09-23 11:09:41 +00:00
|
|
|
include_node_to_network_map,
|
|
|
|
node_shard_list,
|
|
|
|
node_shard_set_mode,
|
2022-12-05 22:31:45 +00:00
|
|
|
storage_node_healthcheck,
|
|
|
|
storage_node_set_status,
|
2023-04-19 07:23:08 +00:00
|
|
|
wait_for_node_to_be_ready,
|
2022-09-23 11:09:41 +00:00
|
|
|
)
|
2023-05-15 09:59:33 +00:00
|
|
|
from frostfs_testlib.steps.storage_policy import get_nodes_with_object, get_simple_object_copies
|
|
|
|
from frostfs_testlib.storage.cluster import StorageNode
|
2023-11-02 07:02:44 +00:00
|
|
|
from frostfs_testlib.storage.controllers import ClusterStateController
|
2023-08-02 11:54:03 +00:00
|
|
|
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
2023-05-15 09:59:33 +00:00
|
|
|
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
|
|
|
|
from frostfs_testlib.utils import datetime_utils, string_utils
|
|
|
|
from frostfs_testlib.utils.failover_utils import wait_object_replication
|
|
|
|
from frostfs_testlib.utils.file_utils import generate_file
|
|
|
|
|
2023-10-13 08:52:42 +00:00
|
|
|
from pytest_tests.helpers.utility import placement_policy_from_container, wait_for_gc_pass_on_storage_nodes
|
2022-07-08 17:24:55 +00:00
|
|
|
|
2022-09-23 11:09:41 +00:00
|
|
|
# Shared logger for all tests in this module.
logger = logging.getLogger("NeoLogger")

# Registry of storage nodes that a test has stopped / excluded and that must be
# returned to the cluster afterwards (consumed by TestNodeManagement.return_nodes).
check_nodes: list[StorageNode] = []
|
2022-07-08 17:24:55 +00:00
|
|
|
|
|
|
|
|
2022-12-05 22:31:45 +00:00
|
|
|
# NOTE(review): the allure.title and add_nodes mark below duplicate the ones on
# test_add_nodes; applying a test title to the whole class looks unintended — confirm.
@allure.title("Add one node to cluster")
@pytest.mark.add_nodes
@pytest.mark.node_mgmt
class TestNodeManagement(ClusterTestBase):
    """Tests for storage-node management: add/remove nodes, shard modes, drop-objects."""

    @pytest.fixture
    @allure.title("Create container and pick the node with data")
    def create_container_and_pick_node(
        self, default_wallet: str, simple_object_size: ObjectSize
    ) -> Tuple[str, StorageNode]:
        """Yield (container id, node holding the data); on teardown restore shard modes.

        Uses a REP 1 placement rule so that exactly one storage node ends up
        holding the uploaded object — that node is the one yielded.
        """
        file_path = generate_file(simple_object_size.value)
        # Single replica on a single selected node — pins the object to one node.
        placement_rule = "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
        endpoint = self.cluster.default_rpc_endpoint

        cid = create_container(
            default_wallet,
            shell=self.shell,
            endpoint=endpoint,
            rule=placement_rule,
            basic_acl=PUBLIC_ACL,
        )

        oid = put_object_to_random_node(default_wallet, file_path, cid, self.shell, self.cluster)

        nodes = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
        # REP 1 guarantees a single holder; anything else means placement misbehaved.
        assert len(nodes) == 1
        node = nodes[0]

        yield cid, node

        # Teardown: the test may have switched shards to read-only/degraded —
        # put every shard back into read-write so the node is usable again.
        shards = node_shard_list(node)
        assert shards

        for shard in shards:
            node_shard_set_mode(node, shard, "read-write")

        # Final listing is informational (logged by the helper), result unused.
        node_shard_list(node)
|
|
|
|
|
|
|
|
@allure.step("Tick epoch with retries")
|
2023-10-13 08:52:42 +00:00
|
|
|
def tick_epoch_with_retries(self, attempts: int = 3, timeout: int = 3, wait_block: int = None):
|
2022-12-05 22:31:45 +00:00
|
|
|
for attempt in range(attempts):
|
|
|
|
try:
|
2023-10-13 08:52:42 +00:00
|
|
|
self.tick_epoch(wait_block=wait_block)
|
2022-12-05 22:31:45 +00:00
|
|
|
except RuntimeError:
|
|
|
|
sleep(timeout)
|
|
|
|
if attempt >= attempts - 1:
|
|
|
|
raise
|
|
|
|
continue
|
|
|
|
return
|
2022-08-03 15:20:50 +00:00
|
|
|
|
2022-12-05 22:31:45 +00:00
|
|
|
    @pytest.fixture
    def after_run_start_all_nodes(self):
        """Teardown-only fixture: after the test, return all nodes recorded in check_nodes."""
        yield
        self.return_nodes()
|
|
|
|
|
|
|
|
    @pytest.fixture
    def return_nodes_after_test_run(self):
        """Teardown-only fixture: after the test, return all nodes recorded in check_nodes.

        NOTE(review): identical in behavior to the after_run_start_all_nodes fixture
        defined in this class — consider consolidating.
        """
        yield
        self.return_nodes()
|
|
|
|
|
|
|
|
@allure.step("Return node to cluster")
|
|
|
|
def return_nodes(self, alive_node: Optional[StorageNode] = None) -> None:
|
|
|
|
for node in list(check_nodes):
|
|
|
|
with allure.step(f"Start node {node}"):
|
|
|
|
node.start_service()
|
|
|
|
with allure.step(f"Waiting status ready for node {node}"):
|
2023-03-09 10:19:41 +00:00
|
|
|
wait_for_node_to_be_ready(node)
|
2022-12-05 22:31:45 +00:00
|
|
|
|
|
|
|
# We need to wait for node to establish notifications from morph-chain
|
|
|
|
# Otherwise it will hang up when we will try to set status
|
2023-02-19 23:58:07 +00:00
|
|
|
sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
|
2022-12-05 22:31:45 +00:00
|
|
|
|
|
|
|
with allure.step(f"Move node {node} to online state"):
|
|
|
|
storage_node_set_status(node, status="online", retries=2)
|
|
|
|
|
|
|
|
check_nodes.remove(node)
|
2023-02-19 23:58:07 +00:00
|
|
|
sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
|
2023-10-13 08:52:42 +00:00
|
|
|
self.tick_epoch_with_retries(3, wait_block=2)
|
2022-12-05 22:31:45 +00:00
|
|
|
check_node_in_map(node, shell=self.shell, alive_node=alive_node)
|
|
|
|
|
|
|
|
    @allure.title("Add one node to cluster")
    @pytest.mark.add_nodes
    def test_add_nodes(
        self,
        default_wallet: str,
        simple_object_size: ObjectSize,
        return_nodes_after_test_run,
    ):
        """
        This test remove one node from frostfs_testlib.storage.cluster then add it back. Test uses base control operations with storage nodes (healthcheck, netmap-snapshot, set-status).
        """
        wallet = default_wallet
        placement_rule_3 = "REP 3 IN X CBF 1 SELECT 3 FROM * AS X"
        placement_rule_4 = "REP 4 IN X CBF 1 SELECT 4 FROM * AS X"
        source_file_path = generate_file(simple_object_size.value)

        storage_nodes = self.cluster.storage_nodes
        # Never pick the first node as the victim; alive_node must differ from it.
        random_node = random.choice(storage_nodes[1:])
        alive_node = random.choice(
            [storage_node for storage_node in storage_nodes if storage_node.id != random_node.id]
        )

        check_node_in_map(random_node, shell=self.shell, alive_node=alive_node)

        # Add node to recovery list before messing with it
        check_nodes.append(random_node)
        exclude_node_from_network_map(random_node, alive_node, shell=self.shell, cluster=self.cluster)
        delete_node_data(random_node)

        # With the victim excluded, REP 3 must still be satisfiable by the rest.
        cid = create_container(
            wallet,
            rule=placement_rule_3,
            basic_acl=PUBLIC_ACL,
            shell=self.shell,
            endpoint=alive_node.get_rpc_endpoint(),
        )
        oid = put_object(
            wallet,
            source_file_path,
            cid,
            shell=self.shell,
            endpoint=alive_node.get_rpc_endpoint(),
        )
        wait_object_replication(cid, oid, 3, shell=self.shell, nodes=storage_nodes)

        # Bring the excluded node back (also clears it from check_nodes).
        self.return_nodes(alive_node)

        with allure.step("Check data could be replicated to new node"):
            # Pick a different victim this time (not the previous one, not alive_node).
            random_node = random.choice(list(set(storage_nodes) - {random_node, alive_node}))
            # Add node to recovery list before messing with it
            check_nodes.append(random_node)
            exclude_node_from_network_map(random_node, alive_node, shell=self.shell, cluster=self.cluster)

            # Replication factor must recover on the remaining nodes.
            wait_object_replication(
                cid,
                oid,
                3,
                shell=self.shell,
                nodes=list(set(storage_nodes) - {random_node}),
            )
            include_node_to_network_map(random_node, alive_node, shell=self.shell, cluster=self.cluster)
            wait_object_replication(cid, oid, 3, shell=self.shell, nodes=storage_nodes)

        with allure.step("Check container could be created with new node"):
            # REP 4 requires the re-included node to participate in placement.
            cid = create_container(
                wallet,
                rule=placement_rule_4,
                basic_acl=PUBLIC_ACL,
                shell=self.shell,
                endpoint=alive_node.get_rpc_endpoint(),
            )
            oid = put_object(
                wallet,
                source_file_path,
                cid,
                shell=self.shell,
                endpoint=alive_node.get_rpc_endpoint(),
            )
            wait_object_replication(cid, oid, 4, shell=self.shell, nodes=storage_nodes)
|
|
|
|
|
|
|
|
    @pytest.mark.node_mgmt
    @allure.title("Drop object using control command")
    def test_drop_object(self, default_wallet, complex_object_size: ObjectSize, simple_object_size: ObjectSize):
        """
        Test checks object could be dropped using `frostfs-cli control drop-objects` command.
        """
        wallet = default_wallet
        endpoint = self.cluster.default_rpc_endpoint
        file_path_simple = generate_file(simple_object_size.value)
        file_path_complex = generate_file(complex_object_size.value)

        # Pin placement to one locode so both objects land on a known node set.
        locode = get_locode_from_random_node(self.cluster)
        rule = f"REP 1 IN SE CBF 1 SELECT 1 FROM LOC AS SE FILTER 'UN-LOCODE' EQ '{locode}' AS LOC"
        cid = create_container(wallet, rule=rule, shell=self.shell, endpoint=endpoint)
        oid_simple = put_object_to_random_node(wallet, file_path_simple, cid, shell=self.shell, cluster=self.cluster)
        oid_complex = put_object_to_random_node(wallet, file_path_complex, cid, shell=self.shell, cluster=self.cluster)

        # Sanity check: both objects are retrievable before dropping anything.
        for oid in (oid_simple, oid_complex):
            get_object_from_random_node(wallet, cid, oid, shell=self.shell, cluster=self.cluster)
            head_object(wallet, cid, oid, shell=self.shell, endpoint=endpoint)

        # Both objects share a placement rule, so the node holding the simple
        # object holds the complex one too — presumably; confirmed by the REP 1 rule.
        nodes_with_object = get_nodes_with_object(cid, oid_simple, shell=self.shell, nodes=self.cluster.storage_nodes)
        random_node = random.choice(nodes_with_object)

        for oid in (oid_simple, oid_complex):
            with allure.step(f"Drop object {oid}"):
                get_object_from_random_node(wallet, cid, oid, shell=self.shell, cluster=self.cluster)
                head_object(wallet, cid, oid, shell=self.shell, endpoint=endpoint)
                drop_object(random_node, cid, oid)
                # Both get and head must eventually report OBJECT_NOT_FOUND.
                self.wait_for_obj_dropped(wallet, cid, oid, endpoint, get_object)
                self.wait_for_obj_dropped(wallet, cid, oid, endpoint, head_object)
|
|
|
|
|
|
|
|
    @pytest.mark.node_mgmt
    @pytest.mark.skip(reason="Need to clarify scenario")
    @allure.title("Control Operations with storage nodes")
    def test_shards(
        self,
        default_wallet,
        create_container_and_pick_node,
        simple_object_size: ObjectSize,
    ):
        """Check shard mode transitions: degraded shards reject writes/deletes but allow reads."""
        wallet = default_wallet
        file_path = generate_file(simple_object_size.value)

        cid, node = create_container_and_pick_node
        original_oid = put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster)

        # for mode in ('read-only', 'degraded'):
        for mode in ("degraded",):
            shards = node_shard_list(node)
            assert shards

            for shard in shards:
                node_shard_set_mode(node, shard, mode)

            shards = node_shard_list(node)
            assert shards

            # Writes must fail while every shard is in a non-read-write mode.
            with pytest.raises(RuntimeError):
                put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster)

            # Deletes must fail as well.
            with pytest.raises(RuntimeError):
                delete_object(wallet, cid, original_oid, self.shell, self.cluster.default_rpc_endpoint)

            # Reads of the pre-existing object must still succeed.
            get_object_from_random_node(wallet, cid, original_oid, self.shell, self.cluster)

            # Restore read-write mode on all shards.
            for shard in shards:
                node_shard_set_mode(node, shard, "read-write")

            shards = node_shard_list(node)
            assert shards

            # After restore, put and delete must work again.
            oid = put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster)
            delete_object(wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)
|
|
|
|
|
2023-04-19 07:23:08 +00:00
|
|
|
    @pytest.mark.node_mgmt
    @allure.title("Put object with stopped node")
    def test_stop_node(self, default_wallet, return_nodes_after_test_run, simple_object_size: ObjectSize):
        """Put must succeed while one of the container nodes is stopped (REP 3 of SELECT 4)."""
        wallet = default_wallet
        # SELECT 4 with REP 3 leaves one node of slack, so a single stopped node
        # must not prevent a successful put.
        placement_rule = "REP 3 IN X SELECT 4 FROM * AS X"
        source_file_path = generate_file(simple_object_size.value)
        storage_nodes = self.cluster.storage_nodes
        random_node = random.choice(storage_nodes[1:])
        alive_node = random.choice(
            [storage_node for storage_node in storage_nodes if storage_node.id != random_node.id]
        )

        cid = create_container(
            wallet,
            rule=placement_rule,
            basic_acl=PUBLIC_ACL,
            shell=self.shell,
            endpoint=random_node.get_rpc_endpoint(),
        )
        with allure.step("Stop the random node"):
            # Record the node first so teardown restores it even if the test fails.
            check_nodes.append(random_node)
            random_node.stop_service()
        with allure.step("Try to put an object and expect success"):
            put_object(
                wallet,
                source_file_path,
                cid,
                shell=self.shell,
                endpoint=alive_node.get_rpc_endpoint(),
            )
        self.return_nodes(alive_node)
|
|
|
|
|
2022-12-05 22:31:45 +00:00
|
|
|
@allure.step("Wait for object to be dropped")
|
|
|
|
def wait_for_obj_dropped(self, wallet: str, cid: str, oid: str, endpoint: str, checker) -> None:
|
|
|
|
for _ in range(3):
|
|
|
|
try:
|
|
|
|
checker(wallet, cid, oid, shell=self.shell, endpoint=endpoint)
|
|
|
|
wait_for_gc_pass_on_storage_nodes()
|
|
|
|
except Exception as err:
|
2023-02-19 23:58:07 +00:00
|
|
|
if string_utils.is_str_match_pattern(err, OBJECT_NOT_FOUND):
|
2022-12-05 22:31:45 +00:00
|
|
|
return
|
|
|
|
raise AssertionError(f'Expected "{OBJECT_NOT_FOUND}" error, got\n{err}')
|
|
|
|
|
|
|
|
raise AssertionError(f"Object {oid} was not dropped from node")
|
2023-11-02 07:02:44 +00:00
|
|
|
|
|
|
|
|
|
|
|
@pytest.mark.time
class TestTime(ClusterTestBase):
    """Tests that the cluster keeps producing blocks under system-clock skew."""

    @allure.step("Neo-go should continue to release blocks")
    def check_nodes_block(self, cluster_state_controller: ClusterStateController):
        """Assert every node's latest morph block height grows over ~3 block times."""
        count_blocks = {}
        with allure.step("Get current block id"):
            for cluster_node in self.cluster.cluster_nodes:
                # Logs the node's current date for the report; result unused.
                cluster_state_controller.get_node_date(cluster_node)
                count_blocks[cluster_node] = neo_go_query_height(
                    shell=cluster_node.host.get_shell(), endpoint=cluster_node.morph_chain.get_http_endpoint()
                )["Latest block"]
        with allure.step("Wait for 3 blocks"):
            sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 3)
        with allure.step("Current block id should be higher than before"):
            for cluster_node in self.cluster.cluster_nodes:
                shell = cluster_node.host.get_shell()
                now_block = neo_go_query_height(shell=shell, endpoint=cluster_node.morph_chain.get_http_endpoint())[
                    "Latest block"
                ]
                # Chain must have advanced on every node.
                assert count_blocks[cluster_node] < now_block
|
|
|
|
|
|
|
|
    @pytest.fixture()
    def node_time_synchronizer(self, cluster_state_controller: ClusterStateController) -> None:
        """Disable NTP-style date sync on all nodes for the test; re-enable on teardown."""
        cluster_state_controller.set_sync_date_all_nodes(status="inactive")
        yield
        cluster_state_controller.set_sync_date_all_nodes(status="active")
|
|
|
|
|
|
|
|
    def test_system_time(self, cluster_state_controller: ClusterStateController, node_time_synchronizer: None):
        """Skew the system clock on individual nodes and verify block production survives."""
        cluster_nodes = self.cluster.cluster_nodes
        timezone_utc = datetime.timezone.utc
        node_1, node_2, node_3 = cluster_nodes[0:3]

        with allure.step("On node 1, move the system time forward by 2 years"):
            cluster_state_controller.change_node_date(
                node_1, (datetime.datetime.now(timezone_utc) + datetime.timedelta(days=(365 * 2)))
            )

        self.check_nodes_block(cluster_state_controller)

        with allure.step("On node 2, move the system time back 2 years."):
            cluster_state_controller.change_node_date(
                node_2, (datetime.datetime.now(timezone_utc) - datetime.timedelta(days=(365 * 2)))
            )

        self.check_nodes_block(cluster_state_controller)

        with allure.step("On node 3, move the system time forward by 3 years"):
            cluster_state_controller.change_node_date(
                node_3, (datetime.datetime.now(timezone_utc) + datetime.timedelta(days=(365 * 3)))
            )

        self.check_nodes_block(cluster_state_controller)

        with allure.step("Return the time on all nodes to the current one"):
            for cluster_node in self.cluster.cluster_nodes:
                cluster_state_controller.restore_node_date(cluster_node)

        self.check_nodes_block(cluster_state_controller)

        # Final sanity check: the cluster must also survive a full restart.
        with allure.step("Reboot all nodes"):
            cluster_state_controller.shutdown_cluster(mode="soft")
            cluster_state_controller.start_stopped_hosts()

        self.check_nodes_block(cluster_state_controller)
|