[#266] Fix tests for metrics object and logs

fix-metrics-tests-object-v0.39
Ilyas Niyazov 2024-07-02 16:46:04 +03:00
parent b9ee97eb24
commit ba79bf46cb
3 changed files with 31 additions and 12 deletions

View File

@@ -26,17 +26,25 @@ class TestLogsMetrics(ClusterTestBase):
         cluster_state_controller.manager(ConfigStateManager).revert_all()

     @wait_for_success(interval=10)
-    def check_metrics_in_node(self, cluster_node: ClusterNode, restart_time: datetime, **metrics_greps):
-        counter_exp = self.get_count_logs_by_level(cluster_node, metrics_greps.get("level"), restart_time)
-        counter_act = get_metrics_value(cluster_node, **metrics_greps)
-        assert counter_act == counter_exp, f"Expected: {counter_exp}, Actual: {counter_act} in node: {cluster_node}"
+    def check_metrics_in_node(
+        self, cluster_node: ClusterNode, restart_time: datetime, log_priority: str = None, **metrics_greps
+    ):
+        counter_logs = self.get_count_logs_by_level(
+            cluster_node, metrics_greps.get("level"), restart_time, log_priority
+        )
+        counter_metrics = get_metrics_value(cluster_node, **metrics_greps)
+        assert (
+            counter_logs == counter_metrics
+        ), f"counter_logs: {counter_logs}, counter_metrics: {counter_metrics} in node: {cluster_node}"

     @staticmethod
-    def get_count_logs_by_level(cluster_node: ClusterNode, log_level: str, after_time: datetime):
+    def get_count_logs_by_level(cluster_node: ClusterNode, log_level: str, after_time: datetime, log_priority: str):
         count_logs = 0
         try:
-            logs = cluster_node.host.get_filtered_logs(log_level, unit="frostfs-storage", since=after_time)
-            result = re.findall(rf"Z\s+{log_level}\s+", logs)
+            logs = cluster_node.host.get_filtered_logs(
+                log_level, unit="frostfs-storage", since=after_time, priority=log_priority
+            )
+            result = re.findall(rf"\s+{log_level}\s+", logs)
             count_logs += len(result)
         except RuntimeError as e:
             ...
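Side note on the relaxed pattern: the old expression anchored the level token to a literal "Z" at the end of a UTC timestamp, so journal records whose timestamps end in a numeric offset were presumably never counted, which is the likely motivation for dropping the anchor. A standalone sketch of the counting step, with invented log text (only the regex comes from the change above):

import re

log_level = "info"
# Made-up journal excerpt: one UTC ("Z") timestamp, one with a numeric offset.
logs = (
    "2024-07-02T13:46:04.123Z\tinfo\tmetrics: service is running\n"
    "2024-07-02T13:46:05.456+03:00\tinfo\tobject service started\n"
    "2024-07-02T13:46:06.789Z\terror\tcan't connect to peer\n"
)

# The old pattern rf"Z\s+{log_level}\s+" would count only the first line;
# the new pattern counts every whitespace-delimited occurrence of the level.
count_logs = len(re.findall(rf"\s+{log_level}\s+", logs))
print(count_logs)  # 2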
@@ -49,7 +57,16 @@ class TestLogsMetrics(ClusterTestBase):
         node = random.choice(cluster.cluster_nodes)

         with reporter.step(f"Check metrics count logs with level 'info'"):
-            self.check_metrics_in_node(node, restart_time, command="frostfs_node_logger_entry_count", level="info")
+            self.check_metrics_in_node(
+                node,
+                restart_time,
+                log_priority="6..6",
+                command="frostfs_node_logger_entry_count",
+                level="info",
+                dropped="false",
+            )

         with reporter.step(f"Check metrics count logs with level 'error'"):
-            self.check_metrics_in_node(node, restart_time, command="frostfs_node_logger_entry_count", level="error")
+            self.check_metrics_in_node(
+                node, restart_time, command="frostfs_node_logger_entry_count", level="error", dropped="false"
+            )
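The new log_priority="6..6" argument is meaningful only if get_filtered_logs forwards it to journald's priority filter (journalctl -p), where severities are numeric and 6 means informational; that forwarding is an assumption here, not something visible in this diff. A hypothetical mapping with invented names, just to spell out the correspondence:

# Hypothetical helper; neither the name nor the table exists in the repository.
# Syslog numeric severities: 3 = err, 4 = warning, 6 = info, 7 = debug.
LEVEL_TO_JOURNALD_PRIORITY = {
    "error": "3..3",
    "warn": "4..4",
    "info": "6..6",
    "debug": "7..7",
}

def priority_range_for(level: str) -> str:
    """Return a journalctl-style priority range such as "6..6" for a log level."""
    return LEVEL_TO_JOURNALD_PRIORITY[level]

print(priority_range_for("info"))  # "6..6", the value passed for the 'info' check above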

View File

@@ -62,7 +62,9 @@ class TestObjectMetrics(ClusterTestBase):
             )

             for node in object_nodes:
-                all_metrics = node.metrics.storage.get_all_metrics()
+                all_metrics = node.metrics.storage.get_metrics_search_by_greps(
+                    command="frostfs_node_engine_container_size_byte"
+                )
                 assert (
                     cid not in all_metrics.stdout
                 ), "metrics of removed containers shouldn't appear in the storage node"

View File

@@ -80,12 +80,12 @@ class TestLogs:
             "SSH_privKey": r"([-]+BEGIN [^\s]+ PRIVATE KEY[-]+[\s]*[^-]*[-]+END [^\s]+ PRIVATE KEY[-]+)",
             "possible_Creds": r"(?i)("
             r"password\s*[`=:]+\s*[^\s]+|"
-            r"password is\s*[`=:]*\s*[^\s]+|"
+            r"password is\s*[`=:]+\s*[^\s]+|"
             r"passwd\s*[`=:]+\s*[^\s]+)",
         }
         issues_regex = "|".join(_regex.values())
-        exclude_filter = "COMMAND="
+        exclude_filter = r"COMMAND=\|--\sBoot\s"

         time.sleep(2)
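Two tightenings in the log error search: the "password is" alternative now requires at least one of the delimiters ` = : before the captured value, which removes plain-prose false positives, and the escaped pipe in the new exclude_filter suggests it is applied as a grep alternation, so journald's "-- Boot ..." marker lines are skipped along with sudo COMMAND= entries. A quick check of the regex change on invented sample lines (both patterns are copied from the diff):

import re

OLD = r"password is\s*[`=:]*\s*[^\s]+"  # zero or more delimiters allowed
NEW = r"password is\s*[`=:]+\s*[^\s]+"  # at least one delimiter required

samples = [
    "password is: hunter2",       # credential-like, still flagged
    "password is required here",  # plain prose, no longer flagged
]

for line in samples:
    print(line, "->", "old:", bool(re.search(OLD, line, re.I)), "new:", bool(re.search(NEW, line, re.I)))
# password is: hunter2 -> old: True new: True
# password is required here -> old: True new: False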