frostfs-testcases/pytest_tests/testsuites/metrics/test_logs_metrics.py

import random
import re
from datetime import datetime, timezone

import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.steps.metrics import get_metrics_value
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import wait_for_success


class TestLogsMetrics(ClusterTestBase):
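    """Check that the log counter metric exposed by storage nodes matches the log entries they actually write."""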
    @pytest.fixture
    def restart_storage_service(self, cluster_state_controller: ClusterStateController) -> datetime:
        # Restart all storage services and remember the restart time so that only
        # log entries written after it are counted.
        config_manager = cluster_state_controller.manager(ConfigStateManager)
        config_manager.csc.stop_services_of_type(StorageNode)
        restart_time = datetime.now(timezone.utc)
        config_manager.csc.start_services_of_type(StorageNode)
        yield restart_time
        cluster_state_controller.manager(ConfigStateManager).revert_all()

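    # Retries via wait_for_success until the metric value reported by the node equals
    # the number of matching log lines written since the restart.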
    @wait_for_success(interval=10)
    def check_metrics_in_node(self, cluster_node: ClusterNode, restart_time: datetime, log_priority: str = None, **metrics_greps):
        counter_logs = self.get_count_logs_by_level(cluster_node, metrics_greps.get("level"), restart_time, log_priority)
        counter_metrics = get_metrics_value(cluster_node, **metrics_greps)
        assert counter_logs == counter_metrics, f"counter_logs: {counter_logs}, counter_metrics: {counter_metrics} in node: {cluster_node}"

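    # Count occurrences of the given log level in the frostfs-storage unit logs written after `after_time`.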
    @staticmethod
    def get_count_logs_by_level(cluster_node: ClusterNode, log_level: str, after_time: datetime, log_priority: str):
        count_logs = 0
        try:
            logs = cluster_node.host.get_filtered_logs(log_level, unit="frostfs-storage", since=after_time, priority=log_priority)
            result = re.findall(rf"\s+{log_level}\s+", logs)
            count_logs += len(result)
        except RuntimeError:
            # If the log query fails, keep the count accumulated so far (zero by default).
            pass
        return count_logs

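    # End-to-end check: restart the storage services, then verify the logger metric
    # against the actual log output for the 'info' and 'error' levels.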
    @allure.title("Metrics for the log counter")
    def test_log_counter_metrics(self, cluster: Cluster, restart_storage_service: datetime):
        restart_time = restart_storage_service

        with reporter.step("Select random node"):
            node = random.choice(cluster.cluster_nodes)

        with reporter.step("Check metrics count logs with level 'info'"):
            self.check_metrics_in_node(
                node,
                restart_time,
                log_priority="6..6",
                command="frostfs_node_logger_entry_count",
                level="info",
                dropped="false",
            )

        with reporter.step("Check metrics count logs with level 'error'"):
            self.check_metrics_in_node(node, restart_time, command="frostfs_node_logger_entry_count", level="error", dropped="false")