[#228] Add container metrics test

Ilyas Niyazov 2024-05-15 11:18:03 +03:00
parent 9cf083fac9
commit 3941619431


@@ -0,0 +1,97 @@
import math
import re
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.steps.cli.container import create_container, delete_container
from frostfs_testlib.steps.cli.object import delete_object, get_object_nodes, put_object_to_random_node
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils.file_utils import generate_file


@pytest.mark.container
class TestContainerMetrics(ClusterTestBase):
    @wait_for_success(interval=10)
    def check_sum_counter_metrics_in_nodes(
        self, cluster_nodes: list[ClusterNode], cid: str, phy_exp: int, logic_exp: int, user_exp: int
    ):
        # Retried by @wait_for_success until the summed counters match the expected values.
        counter_phy = 0
        counter_logic = 0
        counter_user = 0
        for cluster_node in cluster_nodes:
            metric_result = cluster_node.metrics.storage.get_metric_container("container_objects_total", cid)
            counter_phy += self.get_count_metric_type_from_stdout(metric_result.stdout, "phy")
            counter_logic += self.get_count_metric_type_from_stdout(metric_result.stdout, "logic")
            counter_user += self.get_count_metric_type_from_stdout(metric_result.stdout, "user")

        assert counter_phy == phy_exp, f"Expected metric Phy={phy_exp}, Actual: {counter_phy} in nodes: {cluster_nodes}"
        assert (
            counter_logic == logic_exp
        ), f"Expected metric Logic={logic_exp}, Actual: {counter_logic} in nodes: {cluster_nodes}"
        assert (
            counter_user == user_exp
        ), f"Expected metric User={user_exp}, Actual: {counter_user} in nodes: {cluster_nodes}"

    @staticmethod
    def get_count_metric_type_from_stdout(metric_result_stdout: str, metric_type: str) -> int:
        result = re.findall(rf'type="{metric_type}"}}\s(\d+)', metric_result_stdout)
        return sum(map(int, result))
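
    # A sketch of the metric output the helper above is assumed to parse; the exact
    # metric name and label set come from the storage node exporter and may differ:
    #   frostfs_node_engine_container_objects_total{cid="<cid>",type="phy"} 2
    #   frostfs_node_engine_container_objects_total{cid="<cid>",type="logic"} 2
    #   frostfs_node_engine_container_objects_total{cid="<cid>",type="user"} 1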

    @allure.title("Container metrics (obj_size={object_size})")
    def test_container_metrics(
        self, object_size: ObjectSize, max_object_size: int, default_wallet: WalletInfo, cluster: Cluster
    ):
        file_path = generate_file(object_size.value)
        placement_policy = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
        copies = 2
        object_chunks = 0
        head_object = 1
        link_object = 0

        if object_size.value > max_object_size:
            object_chunks = math.ceil(object_size.value / max_object_size)
            link_object = 1
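
        # Worked example with hypothetical sizes: for max_object_size = 64 MiB and a
        # 128 MiB object, object_chunks = ceil(128 / 64) = 2; together with one link
        # and one head object, the expected count below is (2 + 1 + 1) * 2 copies = 8.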
with reporter.step(f"Create container with policy {placement_policy}"):
cid = create_container(
default_wallet,
rule=placement_policy,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
with reporter.step("Put object to random node"):
storage_object_id = put_object_to_random_node(
wallet=default_wallet,
path=file_path,
cid=cid,
shell=self.shell,
cluster=cluster,
)
with reporter.step("Check metric appears in node where the object is located"):
object_nodes = get_object_nodes(
cluster=cluster, cid=cid, oid=storage_object_id, alive_node=cluster.cluster_nodes[0]
)
count_metrics_exp = (object_chunks + head_object + link_object) * copies
self.check_sum_counter_metrics_in_nodes(
object_nodes, cid, phy_exp=count_metrics_exp, logic_exp=count_metrics_exp, user_exp=copies
)
with reporter.step("Delete file, wait until gc remove object"):
delete_object(default_wallet, cid, storage_object_id, self.shell, self.cluster.default_rpc_endpoint)
count_metrics_exp = len(object_nodes)
self.check_sum_counter_metrics_in_nodes(
object_nodes, cid, phy_exp=count_metrics_exp, logic_exp=count_metrics_exp, user_exp=0
)
with reporter.step("Check metrics(Phy, Logic, User) in each nodes"):
# Phy and Logic metrics are 4, because in rule 'CBF 2 SELECT 2 FROM', cbf2*sel2=4
self.check_sum_counter_metrics_in_nodes(cluster.cluster_nodes, cid, phy_exp=4, logic_exp=4, user_exp=0)
with reporter.step("Delete container"):
delete_container(default_wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)