[#313] Extend container metrics tests #313
4 changed files with 109 additions and 5 deletions
@@ -35,3 +35,16 @@ def wait_for_gc_pass_on_storage_nodes() -> None:
     wait_time = datetime_utils.parse_time(STORAGE_GC_TIME)
     with reporter.step(f"Wait {wait_time}s until GC completes on storage nodes"):
         time.sleep(wait_time)
+
+
+def are_numbers_similar(num1, num2, tolerance_percentage: float = 1.0):
+    """
+    If the difference between the numbers is within the permissible deviation, the numbers are similar.
+    """
+    # Calculate the permissible deviation
+    average = (num1 + num2) / 2
+    tolerance = average * (tolerance_percentage / 100)
+
+    # Calculate the actual difference
+    difference = abs(num1 - num2)
+    return difference <= tolerance
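A quick sanity check of the helper's semantics (a sketch; the absolute import path is assumed from the relative import used in the test module below, since this hunk does not show its file name):

    # tolerance is a percentage of the average of the two numbers
    from pytest_tests.helpers.utility import are_numbers_similar  # path assumed

    assert are_numbers_similar(1000, 1005)                          # diff 5 <= 1% of 1002.5
    assert not are_numbers_similar(1000, 1030)                      # diff 30 > 1% of 1015
    assert are_numbers_similar(1000, 1030, tolerance_percentage=5)  # diff 30 <= 5% of 1015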
pytest_tests/testsuites/metrics/__init__.py (new, empty file, 0 additions)
@@ -1,22 +1,37 @@
 import math
+import time
 
 import allure
 import pytest
 from frostfs_testlib import reporter
-from frostfs_testlib.steps.cli.container import create_container, search_nodes_with_container
+from frostfs_testlib.steps.cli.container import create_container, delete_container, search_nodes_with_container, wait_for_container_deletion
 from frostfs_testlib.steps.cli.object import delete_object, head_object, put_object_to_random_node
-from frostfs_testlib.steps.metrics import check_metrics_counter, get_metrics_value
+from frostfs_testlib.steps.metrics import calc_metrics_count_from_stdout, check_metrics_counter, get_metrics_value
 from frostfs_testlib.steps.storage_policy import get_nodes_with_object
-from frostfs_testlib.storage.cluster import Cluster
+from frostfs_testlib.storage.cluster import Cluster, ClusterNode
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
 from frostfs_testlib.testing.parallel import parallel
 from frostfs_testlib.utils.file_utils import generate_file
+
+from ...helpers.utility import are_numbers_similar
 
 
 @pytest.mark.container
 class TestContainerMetrics(ClusterTestBase):
+    @reporter.step("Put object to container: {cid}")
+    def put_object_parallel(self, file_path: str, wallet: WalletInfo, cid: str):
+        oid = put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster)
+        return oid
+
+    @reporter.step("Get metrics value from node")
+    def get_metrics_search_by_greps_parallel(self, node: ClusterNode, **greps):
+        try:
+            content_stdout = node.metrics.storage.get_metrics_search_by_greps(greps)
+            return calc_metrics_count_from_stdout(content_stdout)
+        except Exception:
+            return None
+
     @allure.title("Container metrics (obj_size={object_size},policy={policy})")
     @pytest.mark.parametrize("placement_policy, policy", [("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", "REP"), ("EC 1.1 CBF 1", "EC")])
     def test_container_metrics(
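The two helpers above are built around the testlib `parallel` fan-out. A minimal sketch of the call pattern as it is used in this diff (the keyword-argument broadcast and future-based result collection are inferred from the usage here, not from separate documentation; `upload` is a hypothetical stand-in):

    from frostfs_testlib.testing.parallel import parallel

    def upload(file_path: str, wallet, cid: str) -> str:
        # stand-in for put_object_parallel: one call per input file
        return f"oid-for-{file_path}"

    # the iterable supplies the first positional argument of each call;
    # wallet and cid are passed to every call, and one future is returned per item
    futures = parallel(upload, ["a.bin", "b.bin"], wallet=None, cid="<cid>")
    oids = [future.result() for future in futures]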
@@ -120,3 +135,79 @@ class TestContainerMetrics(ClusterTestBase):
         with reporter.step(f"Check container size metrics"):
             act_metric = get_metrics_value(object_nodes[0], command="frostfs_node_engine_container_size_bytes", cid=cid)
             assert act_metric == int(tombstone["header"]["payloadLength"])
+
+    @allure.title("Container size metrics put {objects_count} objects (obj_size={object_size})")
+    @pytest.mark.parametrize("objects_count", [5, 10, 20])
+    def test_container_size_metrics_more_objects(
+        self,
+        object_size: ObjectSize,
+        default_wallet: WalletInfo,
+        objects_count: int,
+    ):
+        with reporter.step("Create container"):
+            cid = create_container(default_wallet, self.shell, self.cluster.default_rpc_endpoint)
+
+        with reporter.step(f"Put {objects_count} objects"):
+            files_path = [generate_file(object_size.value) for _ in range(objects_count)]
+            futures = parallel(self.put_object_parallel, files_path, wallet=default_wallet, cid=cid)
+            oids = [future.result() for future in futures]
+
+        with reporter.step("Check the metric appears on all nodes"):
+            metric_values = [get_metrics_value(node, command="frostfs_node_engine_container_size_bytes", cid=cid) for node in self.cluster.cluster_nodes]
+            actual_value = sum(metric_values) // 2  # for policy REP 2, the value is divided by 2
+            expected_value = object_size.value * objects_count
+            assert are_numbers_similar(actual_value, expected_value, tolerance_percentage=2), "Container size bytes metric value is not correct"
+
+        with reporter.step("Delete objects and wait until GC removes them"):
+            tombstones_size = 0
+            for oid in oids:
+                tombstone_id = delete_object(default_wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)
+                tombstone = head_object(default_wallet, cid, tombstone_id, self.shell, self.cluster.default_rpc_endpoint)
+                tombstones_size += int(tombstone["header"]["payloadLength"])
+
+        with reporter.step("Check container size metrics, should be non-negative on all nodes"):
+            futures = parallel(get_metrics_value, self.cluster.cluster_nodes, command="frostfs_node_engine_container_size_bytes", cid=cid)
+            metrics_value_nodes = [future.result() for future in futures]
+            for act_metric in metrics_value_nodes:
+                assert act_metric >= 0, "Metrics value is negative"
+            assert sum(metrics_value_nodes) // len(self.cluster.cluster_nodes) == tombstones_size, "Tombstone size of objects is not correct"
+
+    @allure.title("Container metrics (policy={policy})")
+    @pytest.mark.parametrize("placement_policy, policy", [("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", "REP"), ("EC 1.1 CBF 1", "EC")])
+    def test_container_metrics_delete_complex_objects(
+        self,
+        complex_object_size: ObjectSize,
+        default_wallet: WalletInfo,
+        cluster: Cluster,
+        placement_policy: str,
+        policy: str,
+    ):
+        copies = 2 if policy == "REP" else 1
+        objects_count = 2
+        metric_name = "frostfs_node_engine_container_objects_total"
+
+        with reporter.step("Create container"):
+            cid = create_container(default_wallet, self.shell, cluster.default_rpc_endpoint, rule=placement_policy)
+
+        with reporter.step(f"Put {objects_count} objects"):
+            files_path = [generate_file(complex_object_size.value) for _ in range(objects_count)]
+            futures = parallel(self.put_object_parallel, files_path, wallet=default_wallet, cid=cid)
+            oids = [future.result() for future in futures]
+
+        with reporter.step(f"Check the metric value on each node, should be {objects_count} for type 'user'"):
+            check_metrics_counter(cluster.cluster_nodes, counter_exp=objects_count * copies, command=metric_name, cid=cid, type="user")
+
+        with reporter.step("Delete objects and container"):
+            for oid in oids:
+                delete_object(default_wallet, cid, oid, self.shell, cluster.default_rpc_endpoint)
+
+            delete_container(default_wallet, cid, self.shell, cluster.default_rpc_endpoint)
+
+        with reporter.step("Tick epoch and check the container was deleted"):
+            self.tick_epoch()
+            wait_for_container_deletion(default_wallet, cid, shell=self.shell, endpoint=cluster.default_rpc_endpoint)
+
+        with reporter.step("Check the metric value on each node, should not return any result"):
+            futures = parallel(self.get_metrics_search_by_greps_parallel, cluster.cluster_nodes, command=metric_name, cid=cid)
+            metrics_results = [future.result() for future in futures if future.result() is not None]
+            assert len(metrics_results) == 0, f"Metrics value is not empty in Prometheus, actual value on nodes: {metrics_results}"
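A worked example of the REP 2 size check in test_container_size_metrics_more_objects (the numbers are illustrative, not taken from a real run): every object is stored on two nodes, so the per-node size metrics sum to roughly twice the uploaded payload, which is why the sum is halved before comparing.

    from pytest_tests.helpers.utility import are_numbers_similar  # path assumed, see the first hunk

    object_size = 1_000_000                        # bytes per object (illustrative)
    objects_count = 5
    # hypothetical frostfs_node_engine_container_size_bytes values from four nodes
    metric_values = [2_500_000, 2_500_000, 2_500_000, 2_500_000]

    actual_value = sum(metric_values) // 2         # 5_000_000: halved because REP 2 keeps two copies
    expected_value = object_size * objects_count   # 5_000_000
    assert are_numbers_similar(actual_value, expected_value, tolerance_percentage=2)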
@@ -1,7 +1,7 @@
 allure-pytest==2.13.2
 allure-python-commons==2.13.2
 base58==2.1.0
-boto3==1.16.33
+boto3==1.35.30
 botocore==1.19.33
 configobj==5.0.6
 neo-mamba==1.0.0