From ed19a83068345546951edcff83587bb68000326f Mon Sep 17 00:00:00 2001
From: Ilyas Niyazov
Date: Wed, 14 Aug 2024 19:54:42 +0300
Subject: [PATCH] [#290] Fixed tests logs metrics

---
 .../testsuites/metrics/test_logs_metrics.py | 25 +++++++++++--------
 1 file changed, 15 insertions(+), 10 deletions(-)

diff --git a/pytest_tests/testsuites/metrics/test_logs_metrics.py b/pytest_tests/testsuites/metrics/test_logs_metrics.py
index 56ec36e7..795d0971 100644
--- a/pytest_tests/testsuites/metrics/test_logs_metrics.py
+++ b/pytest_tests/testsuites/metrics/test_logs_metrics.py
@@ -16,37 +16,42 @@ from frostfs_testlib.testing.test_control import wait_for_success
 
 class TestLogsMetrics(ClusterTestBase):
     @pytest.fixture
+    def revert_all(self, cluster_state_controller: ClusterStateController):
+        yield
+        cluster_state_controller.manager(ConfigStateManager).revert_all()
+
     def restart_storage_service(self, cluster_state_controller: ClusterStateController) -> datetime:
         config_manager = cluster_state_controller.manager(ConfigStateManager)
         config_manager.csc.stop_services_of_type(StorageNode)
         restart_time = datetime.now(timezone.utc)
         config_manager.csc.start_services_of_type(StorageNode)
-        yield restart_time
-
-        cluster_state_controller.manager(ConfigStateManager).revert_all()
+        return restart_time
 
     @wait_for_success(interval=10)
     def check_metrics_in_node(self, cluster_node: ClusterNode, restart_time: datetime, log_priority: str = None, **metrics_greps):
-        counter_logs = self.get_count_logs_by_level(cluster_node, metrics_greps.get("level"), restart_time, log_priority)
+        current_time = datetime.now(timezone.utc)
         counter_metrics = get_metrics_value(cluster_node, **metrics_greps)
+        counter_logs = self.get_count_logs_by_level(cluster_node, metrics_greps.get("level"), restart_time, current_time, log_priority)
         assert counter_logs == counter_metrics, f"counter_logs: {counter_logs}, counter_metrics: {counter_metrics} in node: {cluster_node}"
 
     @staticmethod
-    def get_count_logs_by_level(cluster_node: ClusterNode, log_level: str, after_time: datetime, log_priority: str):
+    def get_count_logs_by_level(cluster_node: ClusterNode, log_level: str, after_time: datetime, until_time: datetime, log_priority: str):
         count_logs = 0
         try:
-            logs = cluster_node.host.get_filtered_logs(log_level, unit="frostfs-storage", since=after_time, priority=log_priority)
-            result = re.findall(rf"\s+{log_level}\s+", logs)
+            logs = cluster_node.host.get_filtered_logs(
+                log_level, unit="frostfs-storage", since=after_time, until=until_time, priority=log_priority
+            )
+            result = re.findall(rf"Z\s+{log_level}\s+", logs)
             count_logs += len(result)
         except RuntimeError as e:
             ...
         return count_logs
 
     @allure.title("Metrics for the log counter")
-    def test_log_counter_metrics(self, cluster: Cluster, restart_storage_service: datetime):
-        restart_time = restart_storage_service
+    def test_log_counter_metrics(self, cluster_state_controller: ClusterStateController, revert_all):
+        restart_time = self.restart_storage_service(cluster_state_controller)
         with reporter.step("Select random node"):
-            node = random.choice(cluster.cluster_nodes)
+            node = random.choice(self.cluster.cluster_nodes)
         with reporter.step(f"Check metrics count logs with level 'info'"):
             self.check_metrics_in_node(
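
The core of the change is to bound the log scan to the window between the service restart and the moment the metric is read (`since=after_time, until=until_time`), and to anchor the level regex on the trailing `Z` of the ISO-8601 timestamp so that level keywords appearing inside message bodies are not counted. Below is a minimal, self-contained sketch of that counting idea only; the sample log format and the names `sample_logs` / `count_level` are illustrative assumptions, not part of the testsuite or the testlib API.

```python
import re

# Hypothetical journald-style lines: ISO-8601 UTC timestamp, level column, message.
sample_logs = """\
2024-08-14T16:55:01.123Z info    node started
2024-08-14T16:55:02.456Z debug   info requested by client
2024-08-14T16:55:03.789Z info    metrics endpoint ready
"""


def count_level(logs: str, log_level: str) -> int:
    """Count lines whose level column equals log_level.

    Anchoring the pattern on the 'Z' that terminates the timestamp keeps
    matches on the level column only, so the word 'info' inside a message
    body (second sample line) is not counted.
    """
    return len(re.findall(rf"Z\s+{log_level}\s+", logs))


if __name__ == "__main__":
    # Only the first and third lines carry 'info' in the level column.
    assert count_level(sample_logs, "info") == 2
    print(count_level(sample_logs, "info"))
```

In the patched test, the same regex runs over logs already filtered by the host to the restart-to-now window, which is what keeps the log count comparable to the counter metric read at the same moment.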