Update pilorama loss and shards test cases #177

Merged
ylukoyan merged 1 commit from ylukoyan/frostfs-testcases:pilorama_fix into master 2023-12-22 16:07:26 +00:00
2 changed files with 21 additions and 120 deletions


@@ -425,7 +425,8 @@ class TestEmptyMap(ClusterTestBase):
         with reporter.step("Delete pilorama.db from all nodes"):
             for node in self.cluster.storage_nodes:
abereziny marked this conversation as resolved (outdated):

> let's use `get_shards` directly
-                node.delete_pilorama()
+                for shard in node.get_shards():
+                    node.delete_file(shard.pilorama)
         with reporter.step("Start all storage nodes"):
             cluster_state_controller.start_all_stopped_services()
@@ -445,17 +446,6 @@ class TestEmptyMap(ClusterTestBase):
 @pytest.mark.failover
 @pytest.mark.failover_data_loss
 class TestStorageDataLoss(ClusterTestBase):
-    @reporter.step("Get list of all piloramas on node")
-    def get_piloramas_list(self, node: StorageNode) -> list:
-        data_directory_path = node.get_data_directory()
-        cmd = f"sudo ls -1 {data_directory_path}/meta*/pilorama*"
-        shell = node.host.get_shell()
-        stdout = shell.exec(cmd).stdout
-        piloramas = stdout.split("\n")
-        return piloramas
-
     @allure.title(
         "After metabase loss on all nodes operations on objects and buckets should be still available via S3 (s3_client={s3_client})"
     )
@@ -663,15 +653,22 @@ class TestStorageDataLoss(ClusterTestBase):
             object_versions.append(put_object)

         node_to_check = self.cluster.storage_nodes[0]
-        piloramas_list_before_removing = {}
abereziny marked this conversation as resolved (outdated):

> It should be `reporter.step` everywhere. Looks like branch is not rebased on actual master
-        with reporter.step("Get list of all pilorama.db"):
+        piloramas_list_before_removing = []
abereziny marked this conversation as resolved (outdated):

> Let's use `get_shards` directly
-            piloramas_list_before_removing = self.get_piloramas_list(node_to_check)
+        with reporter.step("Get list of all pilorama.db on shards"):
+            for shard in node_to_check.get_shards():
+                piloramas_list_before_removing.append(shard.pilorama)
+        with reporter.step("Check that all pilorama.db files exist on node"):
+            for pilorama in piloramas_list_before_removing:
+                assert node_to_check.is_file_exist(pilorama), f"File {pilorama} does not exist"
         with reporter.step("Stop all storage nodes"):
             cluster_state_controller.stop_services_of_type(StorageNode)
         with reporter.step("Delete pilorama.db from one node"):
-            node_to_check.delete_pilorama()
+            for pilorama in piloramas_list_before_removing:
+                node_to_check.delete_file(pilorama)
         with reporter.step("Start all storage nodes"):
             cluster_state_controller.start_all_stopped_services()
@@ -680,10 +677,9 @@ class TestStorageDataLoss(ClusterTestBase):
             self.tick_epochs(1)
             sleep(120)
-        piloramas_list_afrer_removing = {}
         with reporter.step("Get list of all pilorama.db after sync"):
-            piloramas_list_afrer_removing = self.get_piloramas_list(node_to_check)
-        assert piloramas_list_afrer_removing == piloramas_list_before_removing, "List of pilorama.db is different"
+            for pilorama in piloramas_list_before_removing:
+                assert node_to_check.is_file_exist(pilorama), f"File {pilorama} does not exist"
         with reporter.step("Check bucket versioning"):
             bucket_versioning = s3_client.get_bucket_versioning_status(bucket)
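
For readers outside this repo: the rewritten tests lean on `StorageNode` and `Shard` helpers from frostfs_testlib instead of the removed `delete_pilorama()` / `get_piloramas_list()` helpers. Below is a minimal sketch of the interface the diff appears to assume; the method names are taken from the diff, but the signatures, docstrings, and the `drop_piloramas` helper are assumptions for illustration, not the actual testlib code.

```python
from dataclasses import dataclass


@dataclass
class Shard:
    metabase: str
    writecache: str
    pilorama: str  # path to this shard's pilorama.db, accessed as shard.pilorama in the tests


class StorageNode:
    """Stand-in for frostfs_testlib's StorageNode; only the methods used in this diff."""

    def get_shards(self) -> list[Shard]:
        """Parse the node's shard config and return one Shard per configured shard."""
        raise NotImplementedError

    def is_file_exist(self, path: str) -> bool:
        """Check whether a path exists on the node's host (e.g. via the node shell)."""
        raise NotImplementedError

    def delete_file(self, path: str) -> None:
        """Remove a file on the node's host."""
        raise NotImplementedError


def drop_piloramas(node: StorageNode) -> None:
    """Hypothetical helper mirroring the updated test: delete every shard's pilorama.db."""
    for shard in node.get_shards():
        node.delete_file(shard.pilorama)
```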


@@ -1,108 +1,13 @@
 import json
 import pathlib
-import re
-from dataclasses import dataclass
-from io import StringIO

 import allure
 import pytest
-import yaml
-from configobj import ConfigObj
 from frostfs_testlib.cli import FrostfsCli
 from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
 from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
 from frostfs_testlib.storage.cluster import Cluster, StorageNode
+from frostfs_testlib.storage.dataclasses.shard import Shard

-SHARD_PREFIX = "FROSTFS_STORAGE_SHARD_"
-BLOBSTOR_PREFIX = "_BLOBSTOR_"
-
-
-@dataclass
-class Blobstor:
-    path: str
-    path_type: str
-
-    def __eq__(self, other) -> bool:
-        if not isinstance(other, self.__class__):
-            raise RuntimeError(f"Only two {self.__class__.__name__} instances can be compared")
-        return self.path == other.path and self.path_type == other.path_type
-
-    def __hash__(self):
-        return hash((self.path, self.path_type))
-
-    @staticmethod
-    def from_config_object(section: ConfigObj, shard_id: str, blobstor_id: str):
-        var_prefix = f"{SHARD_PREFIX}{shard_id}{BLOBSTOR_PREFIX}{blobstor_id}"
-        return Blobstor(section.get(f"{var_prefix}_PATH"), section.get(f"{var_prefix}_TYPE"))
-
-
-@dataclass
-class Shard:
-    blobstor: list[Blobstor]
-    metabase: str
-    writecache: str
-
-    def __eq__(self, other) -> bool:
-        if not isinstance(other, self.__class__):
-            raise RuntimeError(f"Only two {self.__class__.__name__} instances can be compared")
-        return (
-            set(self.blobstor) == set(other.blobstor)
-            and self.metabase == other.metabase
-            and self.writecache == other.writecache
-        )
-
-    def __hash__(self):
-        return hash((self.metabase, self.writecache))
-
-    @staticmethod
-    def _get_blobstor_count_from_section(config_object: ConfigObj, shard_id: int):
-        pattern = f"{SHARD_PREFIX}{shard_id}{BLOBSTOR_PREFIX}"
-        blobstors = {key[: len(pattern) + 2] for key in config_object.keys() if pattern in key}
-        return len(blobstors)
-
-    @staticmethod
-    def from_config_object(config_object: ConfigObj, shard_id: int):
-        var_prefix = f"{SHARD_PREFIX}{shard_id}"
-
-        blobstor_count = Shard._get_blobstor_count_from_section(config_object, shard_id)
-        blobstors = [
-            Blobstor.from_config_object(config_object, shard_id, blobstor_id) for blobstor_id in range(blobstor_count)
-        ]
-
-        write_cache_enabled = config_object.as_bool(f"{var_prefix}_WRITECACHE_ENABLED")
-
-        return Shard(
-            blobstors,
-            config_object.get(f"{var_prefix}_METABASE_PATH"),
-            config_object.get(f"{var_prefix}_WRITECACHE_PATH") if write_cache_enabled else "",
-        )
-
-    @staticmethod
-    def from_object(shard):
-        metabase = shard["metabase"]["path"] if "path" in shard["metabase"] else shard["metabase"]
-        writecache = shard["writecache"]["path"] if "path" in shard["writecache"] else shard["writecache"]
-
-        return Shard(
-            blobstor=[Blobstor(path=blobstor["path"], path_type=blobstor["type"]) for blobstor in shard["blobstor"]],
-            metabase=metabase,
-            writecache=writecache,
-        )
-
-
-def shards_from_yaml(contents: str) -> list[Shard]:
-    config = yaml.safe_load(contents)
-    config["storage"]["shard"].pop("default")
-
-    return [Shard.from_object(shard) for shard in config["storage"]["shard"].values()]
-
-
-def shards_from_env(contents: str) -> list[Shard]:
-    configObj = ConfigObj(StringIO(contents))
-
-    pattern = f"{SHARD_PREFIX}\d*"
-    num_shards = len(set(re.findall(pattern, contents)))
-
-    return [Shard.from_config_object(configObj, shard_id) for shard_id in range(num_shards)]
-

 @pytest.mark.shard
@@ -111,15 +16,14 @@ class TestControlShard:
     def get_shards_from_config(node: StorageNode) -> list[Shard]:
         config_file = node.get_shard_config_path()
         file_type = pathlib.Path(config_file).suffix
-        contents = node.host.get_shell().exec(f"cat {config_file}").stdout

         parser_method = {
-            ".env": shards_from_env,
-            ".yaml": shards_from_yaml,
-            ".yml": shards_from_yaml,
+            ".env": node.get_shards_from_env,
+            ".yaml": node.get_shards,
+            ".yml": node.get_shards,
         }

-        shards = parser_method[file_type](contents)
+        shards = parser_method[file_type]()
         return shards

     @staticmethod
@@ -145,4 +49,5 @@ class TestControlShard:
         for storage_node in cluster.storage_nodes:
             shards_from_config = self.get_shards_from_config(storage_node)
             shards_from_cli = self.get_shards_from_cli(storage_node)
             assert set(shards_from_config) == set(shards_from_cli)
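
A note on the final assertion: comparing the shards parsed from the node config with the shards reported via FrostfsCli as sets only works because `Shard` is hashable and compares by value; the local dataclass removed by this PR implemented `__eq__`/`__hash__` over the metabase and writecache paths, and the frostfs_testlib `Shard` is presumably expected to behave the same way. A small illustrative sketch (field names follow the removed dataclass, the paths are made up):

```python
from dataclasses import dataclass


@dataclass(frozen=True)  # frozen + the default eq=True generate __eq__ and __hash__
class Shard:
    metabase: str
    writecache: str


# Same shards reported in a different order still compare equal as sets.
shards_from_config = [Shard("/srv/meta0.db", "/srv/wc0"), Shard("/srv/meta1.db", "/srv/wc1")]
shards_from_cli = [Shard("/srv/meta1.db", "/srv/wc1"), Shard("/srv/meta0.db", "/srv/wc0")]
assert set(shards_from_config) == set(shards_from_cli)
```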