[] fix and extend container metrics tests

Ilyas Niyazov 2024-10-08 08:56:15 +03:00 committed by Ilyas Niyazov
parent 8dcb3ccf3c
commit 6442a52abd
4 changed files with 58 additions and 2 deletions

View file

@@ -35,3 +35,16 @@ def wait_for_gc_pass_on_storage_nodes() -> None:
    wait_time = datetime_utils.parse_time(STORAGE_GC_TIME)
    with reporter.step(f"Wait {wait_time}s until GC completes on storage nodes"):
        time.sleep(wait_time)


def are_numbers_similar(num1, num2, tolerance_percentage: float = 1.0):
    """
    If the difference between the numbers is within the permissible deviation, they are considered similar.
    """
    # Calculate the permissible deviation
    average = (num1 + num2) / 2
    tolerance = average * (tolerance_percentage / 100)

    # Calculate the real difference
    difference = abs(num1 - num2)
    return difference <= tolerance
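
A quick worked example of the tolerance check this helper performs (the numbers below are invented, not taken from a test run): with a 2% tolerance, a metric reading that differs from the expected payload size by about 57 KB still counts as similar, because the permissible deviation is computed from the average of the two values.

# Worked example of the tolerance check above; all values are illustrative.
num1, num2 = 5_300_000, 5_242_880       # hypothetical metric reading vs. expected payload size
average = (num1 + num2) / 2             # 5_271_440.0
tolerance = average * (2 / 100)         # ~105_428.8 with tolerance_percentage=2
difference = abs(num1 - num2)           # 57_120
print(difference <= tolerance)          # True -> the numbers are considered similar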

View file

@@ -1,7 +1,7 @@
import math
-import time
import allure
+from frostfs_testlib.testing.parallel import parallel
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.steps.cli.container import create_container, search_nodes_with_container
@@ -14,10 +14,17 @@ from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file

from ...helpers.utility import are_numbers_similar


@pytest.mark.nightly
@pytest.mark.container
class TestContainerMetrics(ClusterTestBase):
    @reporter.step("Put object to container: {cid}")
    def put_object_parallel(self, file_path: str, wallet: WalletInfo, cid: str):
        oid = put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster)
        return oid

    @allure.title("Container metrics (obj_size={object_size},policy={policy})")
    @pytest.mark.parametrize("placement_policy, policy", [("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", "REP"), ("EC 1.1 CBF 1", "EC")])
    def test_container_metrics(
@@ -121,3 +128,39 @@ class TestContainerMetrics(ClusterTestBase):
        with reporter.step(f"Check container size metrics"):
            act_metric = get_metrics_value(object_nodes[0], command="frostfs_node_engine_container_size_bytes", cid=cid)
            assert act_metric == int(tombstone["header"]["payloadLength"])
@allure.title("Container size metrics put {objects_count} objects (obj_size={object_size})")
@pytest.mark.parametrize("objects_count", [5, 10, 20])
def test_container_size_metrics_more_objects(
self,
object_size: ObjectSize,
default_wallet: WalletInfo,
objects_count: int
):
with reporter.step(f"Create container"):
cid = create_container(default_wallet, self.shell, self.cluster.default_rpc_endpoint)
with reporter.step(f"Put {objects_count} objects"):
files_path = [generate_file(object_size.value) for _ in range(objects_count)]
futures = parallel(self.put_object_parallel, files_path, wallet=default_wallet, cid=cid)
oids = [future.result() for future in futures]
with reporter.step("Check metric appears in all nodes"):
metric_values = [get_metrics_value(node, command="frostfs_node_engine_container_size_bytes", cid=cid) for node in self.cluster.cluster_nodes]
actual_value = sum(metric_values) // 2 # for policy REP 2, value divide by 2
expected_value = object_size.value * objects_count
assert are_numbers_similar(actual_value, expected_value, tolerance_percentage=2), "metric container size bytes value not correct"
with reporter.step("Delete file, wait until gc remove object"):
tombstones_size = 0
for oid in oids:
tombstone_id = delete_object(default_wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)
tombstone = head_object(default_wallet, cid, tombstone_id, self.shell, self.cluster.default_rpc_endpoint)
tombstones_size += int(tombstone["header"]["payloadLength"])
with reporter.step(f"Check container size metrics, 'should be positive in all nodes'"):
futures = parallel(get_metrics_value, self.cluster.cluster_nodes, command="frostfs_node_engine_container_size_bytes", cid=cid)
metrics_value_nodes = [future.result() for future in futures]
for act_metric in metrics_value_nodes:
assert act_metric >= 0, "Metrics value is negative"
assert sum(metrics_value_nodes) // len(self.cluster.cluster_nodes) == tombstones_size, "tomstone size of objects not correct"
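
A standalone sketch of the size aggregation the new test relies on (the per-node readings below are made up, not actual metric values): under REP 2 every object is stored on two nodes, so summing frostfs_node_engine_container_size_bytes across the cluster yields roughly twice the logical payload size, which is why the test divides the sum by 2 before comparing it with objects_count * object_size.

# Illustrative aggregation only; node readings are hypothetical.
objects_count, object_size = 5, 1_048_576
per_node_metrics = [5_242_880, 5_242_880, 0, 0]   # e.g. both replicas landed on the first two nodes

actual_value = sum(per_node_metrics) // 2         # divide by 2 because of REP 2
expected_value = objects_count * object_size
print(actual_value == expected_value)             # True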

View file

@@ -42,7 +42,7 @@ class TestLogsMetrics(ClusterTestBase):
            logs = cluster_node.host.get_filtered_logs(
                log_level, unit="frostfs-storage", since=after_time, until=until_time, priority=log_priority
            )
-           result = re.findall(rf"\s+{log_level}\s+", logs)
+           result = re.findall(rf":\s+{log_level}\s+", logs)
            count_logs += len(result)
        except RuntimeError as e:
            ...
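
The one-character regex change above anchors the log-level token to the colon that ends the journald prefix, so occurrences of the level word inside a message body are no longer counted. A standalone sketch of the effect (the sample log lines are invented for illustration; the two patterns are the ones from the diff):

# Compare the loose and anchored patterns on made-up journald-style lines.
import re

log_level = "error"
logs = "\n".join([
    "Oct 08 08:56:15 node1 frostfs-storage[1]: error   failed to connect",                        # real level marker
    "Oct 08 08:56:16 node1 frostfs-storage[1]: info    retry scheduled after error  in session",  # level word inside the message
])

loose = re.findall(rf"\s+{log_level}\s+", logs)      # old pattern: matches both lines
anchored = re.findall(rf":\s+{log_level}\s+", logs)  # new pattern: matches only the real marker
print(len(loose), len(anchored))                     # 2 1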