Compare commits

...

1 commit

Author SHA1 Message Date
96d7488f6d Fixed test logs metrics 2024-09-03 16:51:34 +03:00

View file

@@ -31,21 +31,52 @@ class TestLogsMetrics(ClusterTestBase):
def check_metrics_in_node(self, cluster_node: "ClusterNode", restart_time: datetime, log_priority: str = None, **metrics_greps):
    """Assert that the logger metric counter matches the number of emitted log lines.

    For small totals the two counters must agree exactly; once the combined
    count reaches 1000 a small relative deviation is tolerated, because log
    scraping and metric sampling are not perfectly synchronized at volume.

    Args:
        cluster_node: node whose metrics and journal are inspected.
        restart_time: lower bound (UTC) of the log window.
        log_priority: optional journald priority range filter, e.g. "6..6".
        **metrics_greps: filters for the metric lookup; must contain "level".
    """
    current_time = datetime.now(timezone.utc)
    counter_metrics = get_metrics_value(cluster_node, **metrics_greps)
    counter_logs = self.get_logs_count_by_level(cluster_node, metrics_greps.get("level"), restart_time, current_time, log_priority)
    if counter_metrics + counter_logs < 1000:
        # Low volume: require exact agreement.
        assert (
            counter_logs == counter_metrics
        ), f"counter_logs: {counter_logs}, counter_metrics: {counter_metrics} in node: {cluster_node}"
    else:
        # High volume: allow a small relative deviation between the counters.
        assert self.are_numbers_similar(
            counter_logs, counter_metrics
        ), f"counter_logs: {counter_logs}, counter_metrics: {counter_metrics} in node: {cluster_node}"
@staticmethod
def get_count_logs_by_level(cluster_node: ClusterNode, log_level: str, after_time: datetime, until_time: datetime, log_priority: str):
count_logs = 0
def are_numbers_similar(num1, num2, tolerance_percentage=1):
"""
if difference of numbers is less than permissible deviation than numbers are similar
"""
# Calculate the permissible deviation
average = (num1 + num2) / 2
tolerance = average * (tolerance_percentage / 100)
# Calculate the real difference
difference = abs(num1 - num2)
return difference <= tolerance
@staticmethod
def get_logs_count_by_level(node: ClusterNode, level: str, since: datetime, until: datetime, priority: str):
try:
logs = cluster_node.host.get_filtered_logs(
log_level, unit="frostfs-storage", since=after_time, until=until_time, priority=log_priority
result = node.host.get_filtered_logs(
filter_regex=level, unit="frostfs-storage", since=since, until=until, priority=priority, calc_count=True
)
result = re.findall(rf"Z\s+{log_level}\s+", logs)
count_logs += len(result)
except RuntimeError as e:
...
return count_logs
return int(result)
except Exception as e:
return e
# @staticmethod
# def get_count_logs_by_level(cluster_node: ClusterNode, log_level: str, after_time: datetime, until_time: datetime, log_priority: str):
# count_logs = 0
# try:
# logs = cluster_node.host.get_filtered_logs(
# log_level, unit="frostfs-storage", since=after_time, until=until_time, priority=log_priority
# )
# result = re.findall(rf"Z\s+{log_level}\s+", logs)
# count_logs += len(result)
# except RuntimeError as e:
# ...
# return count_logs
@allure.title("Metrics for the log counter")
def test_log_counter_metrics(self, cluster_state_controller: ClusterStateController, revert_all):
@@ -56,12 +87,22 @@
with reporter.step(f"Check metrics count logs with level 'info'"):
self.check_metrics_in_node(
node,
restart_time,
restart_time=restart_time,
log_priority="6..6",
command="frostfs_node_logger_entry_count",
level="info",
dropped="false",
)
with reporter.step(f"Check metrics count logs with level 'error'"):
self.check_metrics_in_node(
node,
restart_time=restart_time,
log_priority="3..3",
command="frostfs_node_logger_entry_count",
level="error",
dropped="false",
)
with reporter.step(f"Check metrics count logs with level 'error'"):
self.check_metrics_in_node(node, restart_time, command="frostfs_node_logger_entry_count", level="error", dropped="false")