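"""Check that the frostfs_node_logger_entry_count metric matches the number of
frostfs-storage log entries of the corresponding level written since a service restart."""
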
import random
import re
from datetime import datetime, timezone

import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.steps.metrics import get_metrics_value
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import wait_for_success


class TestLogsMetrics(ClusterTestBase):
    @pytest.fixture
    def revert_all(self, cluster_state_controller: ClusterStateController):
        yield
        # Revert all configuration changes made during the test.
        cluster_state_controller.manager(ConfigStateManager).revert_all()

    def restart_storage_service(self, cluster_state_controller: ClusterStateController) -> datetime:
        # Restart all storage services and return the timestamp taken between stop and start,
        # so that only log entries written after the restart are counted.
        config_manager = cluster_state_controller.manager(ConfigStateManager)
        config_manager.csc.stop_services_of_type(StorageNode)
        restart_time = datetime.now(timezone.utc)
        config_manager.csc.start_services_of_type(StorageNode)
        return restart_time

    @wait_for_success(interval=10)
    def check_metrics_in_node(self, cluster_node: ClusterNode, restart_time: datetime, log_priority: str = None, **metrics_greps):
        # Compare the metric value with the number of matching log entries written since the restart.
        current_time = datetime.now(timezone.utc)
        counter_metrics = get_metrics_value(cluster_node, **metrics_greps)
        counter_logs = self.get_count_logs_by_level(cluster_node, metrics_greps.get("level"), restart_time, current_time, log_priority)
        assert counter_logs == counter_metrics, f"counter_logs: {counter_logs}, counter_metrics: {counter_metrics} in node: {cluster_node}"

    @staticmethod
    def get_count_logs_by_level(cluster_node: ClusterNode, log_level: str, after_time: datetime, until_time: datetime, log_priority: str):
        count_logs = 0
        try:
            logs = cluster_node.host.get_filtered_logs(
                log_level, unit="frostfs-storage", since=after_time, until=until_time, priority=log_priority
            )
            # Count journal entries whose level field matches the requested log level.
            result = re.findall(rf"\s+{log_level}\s+", logs)
            count_logs += len(result)
        except RuntimeError:
            # If logs cannot be fetched for the given filter, treat it as zero matching entries.
            pass
        return count_logs

@allure.title("Metrics for the log counter")
|
|
def test_log_counter_metrics(self, cluster_state_controller: ClusterStateController, revert_all):
|
|
restart_time = self.restart_storage_service(cluster_state_controller)
|
|
with reporter.step("Select random node"):
|
|
node = random.choice(self.cluster.cluster_nodes)
|
|
|
|
with reporter.step(f"Check metrics count logs with level 'info'"):
|
|
self.check_metrics_in_node(
|
|
node,
|
|
restart_time,
|
|
log_priority="6..6",
|
|
command="frostfs_node_logger_entry_count",
|
|
level="info",
|
|
dropped="false",
|
|
)
|
|
|
|
with reporter.step(f"Check metrics count logs with level 'error'"):
|
|
self.check_metrics_in_node(node, restart_time, command="frostfs_node_logger_entry_count", level="error", dropped="false")
|