[#240] Added test log counter metrics
1 changed file with 74 additions and 0 deletions
pytest_tests/testsuites/metrics/test_logs_metrics.py (new file, +74)

@@ -0,0 +1,74 @@
import random
import re
from datetime import datetime

import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import wait_for_success


class TestLogsMetrics(ClusterTestBase):
    @pytest.fixture
    def restart_storage_service(self, cluster_state_controller: ClusterStateController) -> str:
        # Restart the storage services and remember the restart time, so that only
        # log entries written after the restart are counted in the test.
        config_manager = cluster_state_controller.manager(ConfigStateManager)
        config_manager.csc.stop_services_of_type(StorageNode)
        restart_time = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
        config_manager.csc.start_services_of_type(StorageNode)
        yield restart_time

        cluster_state_controller.manager(ConfigStateManager).revert_all()

    @wait_for_success(interval=10)
    def check_metrics_in_node(self, cluster_node: ClusterNode, counter_exp: int, **metrics_greps: str):
        counter_act = self.get_metrics_value(cluster_node, **metrics_greps)
        assert counter_act == counter_exp, f"Expected: {counter_exp}, Actual: {counter_act} in node: {cluster_node}"

    def get_metrics_value(self, node: ClusterNode, **metrics_greps: str):
        try:
            command_result = node.metrics.storage.get_metrics_search_by_greps(**metrics_greps)
            metrics_counter = self.calc_metrics_count_from_stdout(command_result.stdout)
        except RuntimeError:
            # The metric may not be exposed yet; treat a failed query as a zero counter.
            metrics_counter = 0

        return metrics_counter

    @staticmethod
    def calc_metrics_count_from_stdout(metric_result_stdout: str):
        # Sum the integer values that follow the closing '}' on each matched metric line.
        result = re.findall(r"}\s(\d+)", metric_result_stdout)
        return sum(map(int, result))

    @staticmethod
    def get_count_logs_by_level(shell: Shell, log_level: str, after_time: str):
        count_logs = 0
        try:
            logs = shell.exec(f"journalctl -u frostfs-storage --grep='{log_level}' --since '{after_time}'")
            # Count only lines whose level field (right after the timestamp) matches,
            # not every line that merely contains the level as a substring.
            result = re.findall(rf"Z\s+{log_level}\s+", logs.stdout)
            count_logs += len(result)
        except RuntimeError:
            # journalctl may exit with a non-zero code (e.g. when nothing matches);
            # in that case leave the counter at zero.
            pass
        return count_logs

    @allure.title("Metrics for the log counter")
    def test_log_counter_metrics(self, cluster: Cluster, restart_storage_service: str):
        restart_time = restart_storage_service
        with reporter.step("Select random node"):
            node = random.choice(cluster.cluster_nodes)

        with reporter.step("Get count of logs from journalctl with level 'info'"):
            count_logs_info = self.get_count_logs_by_level(node.host.get_shell(), "info", restart_time)

        with reporter.step("Check metrics count of logs with level 'info'"):
            self.check_metrics_in_node(node, count_logs_info, command="frostfs_node_logger_entry_count", level="info")

        with reporter.step("Get count of logs from journalctl with level 'error'"):
            count_logs_error = self.get_count_logs_by_level(node.host.get_shell(), "error", restart_time)

        with reporter.step("Check metrics count of logs with level 'error'"):
            self.check_metrics_in_node(node, count_logs_error, command="frostfs_node_logger_entry_count", level="error")
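
For context, a minimal sketch of what `calc_metrics_count_from_stdout` does with the metrics output. The metric lines below are made up (the real output of `get_metrics_search_by_greps` is not shown in this diff); the assumption is a Prometheus-style text exposition where each matched line ends with `{labels} value`:

```python
import re

# Hypothetical metrics output; label sets are illustrative only.
sample = """\
frostfs_node_logger_entry_count{level="info"} 150
frostfs_node_logger_entry_count{level="info",component="shard"} 25
"""

# Same logic as TestLogsMetrics.calc_metrics_count_from_stdout:
# take the integer value after each closing '}' and sum them.
values = re.findall(r"}\s(\d+)", sample)
print(sum(map(int, values)))  # -> 175
```

Summing all matches means that if the same counter is exposed with several label sets, the test compares the journal count against the combined total.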
Reviewer: Maybe it will be easier this way? Without any calculations on the test side.

Author: In the regular expression I match the log level text located right after the timestamp, because `--grep='error' | wc -l` would count every line containing the substring 'error' in any position. For example, logs with level 'info' can contain the word 'error' in the message, and `wc -l` would count them as logs with level='error'.
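
To illustrate the point above, a small self-contained sketch with made-up journalctl lines (the format, an ISO-8601 timestamp ending in 'Z' followed by the level, is an assumption derived from the regex used in the test):

```python
import re

# Illustrative log lines: timestamp ending in "Z", then the level, then the message.
sample_stdout = "\n".join(
    [
        "2024-01-01T10:00:00.000Z  info   morph client connected",
        "2024-01-01T10:00:01.000Z  info   failed to resolve error in message body",  # 'error' only in the message
        "2024-01-01T10:00:02.000Z  error  object service: connection refused",
    ]
)

log_level = "error"

# Plain substring counting (what `--grep='error' | wc -l` effectively does)
# also picks up the 'info' line whose message merely contains the word 'error'.
naive_count = sum("error" in line for line in sample_stdout.splitlines())  # -> 2

# The test's regex anchors the level to the position right after the timestamp,
# so only lines whose level field is 'error' are counted.
strict_count = len(re.findall(rf"Z\s+{log_level}\s+", sample_stdout))  # -> 1

print(naive_count, strict_count)
```

The naive count flags two lines, while anchoring the level to the position after the timestamp counts only the line whose level field is actually 'error'.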