forked from TrueCloudLab/frostfs-testlib

add latency report

This commit is contained in:
commit f2d34dbf2e (parent e14896400f)

2 changed files with 47 additions and 2 deletions
load_metrics.py (filename inferred: this file defines MetricsBase/GrpcMetrics/S3Metrics/LocalMetrics, imported below as frostfs_testlib.load.load_metrics)

@@ -8,12 +8,15 @@ class MetricsBase(ABC):
     _WRITE_SUCCESS = ""
     _WRITE_ERRORS = ""
     _WRITE_THROUGHPUT = "data_sent"
+    _WRITE_LATENCY = ""

     _READ_SUCCESS = ""
     _READ_ERRORS = ""
+    _READ_LATENCY = ""
     _READ_THROUGHPUT = "data_received"

     _DELETE_SUCCESS = ""
+    _DELETE_LATENCY = ""
     _DELETE_ERRORS = ""

     def __init__(self, summary) -> None:
@@ -27,6 +30,10 @@ class MetricsBase(ABC):
     @property
     def write_success_iterations(self) -> int:
         return self._get_metric(self._WRITE_SUCCESS)

+    @property
+    def write_latency(self) -> dict:
+        return self._get_metric(self._WRITE_LATENCY)
+
     @property
     def write_rate(self) -> float:
@@ -47,6 +54,10 @@ class MetricsBase(ABC):
     @property
     def read_success_iterations(self) -> int:
         return self._get_metric(self._READ_SUCCESS)

+    @property
+    def read_latency(self) -> dict:
+        return self._get_metric(self._READ_LATENCY)
+
     @property
     def read_rate(self) -> int:
@@ -67,6 +78,10 @@ class MetricsBase(ABC):
     @property
     def delete_success_iterations(self) -> int:
         return self._get_metric(self._DELETE_SUCCESS)

+    @property
+    def delete_latency(self) -> dict:
+        return self._get_metric(self._DELETE_LATENCY)
+
     @property
     def delete_failed_iterations(self) -> int:
@@ -77,7 +92,7 @@ class MetricsBase(ABC):
         return self._get_metric_rate(self._DELETE_SUCCESS)

     def _get_metric(self, metric: str) -> int:
-        metrics_method_map = {"counter": self._get_counter_metric, "gauge": self._get_gauge_metric}
+        metrics_method_map = {"counter": self._get_counter_metric, "gauge": self._get_gauge_metric, "trend" : self._get_trend_metrics}

         if metric not in self.metrics:
             return 0
@@ -114,28 +129,37 @@ class MetricsBase(ABC):

     def _get_gauge_metric(self, metric: str) -> int:
         return metric["values"]["value"]

+    def _get_trend_metrics(self, metric: str) -> int:
+        return metric["values"]
+

 class GrpcMetrics(MetricsBase):
     _WRITE_SUCCESS = "frostfs_obj_put_total"
     _WRITE_ERRORS = "frostfs_obj_put_fails"
+    _WRITE_LATENCY = "frostfs_obj_put_duration"

     _READ_SUCCESS = "frostfs_obj_get_total"
     _READ_ERRORS = "frostfs_obj_get_fails"
+    _READ_LATENCY = "frostfs_obj_get_duration"

     _DELETE_SUCCESS = "frostfs_obj_delete_total"
     _DELETE_ERRORS = "frostfs_obj_delete_fails"
+    _DELETE_LATENCY = "frostfs_obj_delete_duration"


 class S3Metrics(MetricsBase):
     _WRITE_SUCCESS = "aws_obj_put_total"
     _WRITE_ERRORS = "aws_obj_put_fails"
+    _WRITE_LATENCY = "aws_obj_put_duration"

     _READ_SUCCESS = "aws_obj_get_total"
     _READ_ERRORS = "aws_obj_get_fails"
+    _READ_LATENCY = "aws_obj_get_duration"

     _DELETE_SUCCESS = "aws_obj_delete_total"
     _DELETE_ERRORS = "aws_obj_delete_fails"
+    _DELETE_LATENCY = "aws_obj_delete_duration"


 class LocalMetrics(MetricsBase):
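For context: _get_metric now dispatches on the metric's type, and the new _get_trend_metrics returns the trend's whole "values" dictionary rather than a single number, so callers receive every statistic k6 computed for the duration metric. A minimal stand-alone sketch of that dispatch, assuming k6-style summary JSON; the metric names, numbers, and the get_metric() helper below are illustrative, not from the commit:

# Stand-alone sketch (not part of the diff); values are invented.
summary_metrics = {
    "frostfs_obj_put_total": {"type": "counter", "values": {"count": 150, "rate": 5.0}},
    "frostfs_obj_put_duration": {
        "type": "trend",
        "values": {"avg": 42.1, "min": 8.4, "med": 39.7, "max": 101.2, "p(90)": 77.5, "p(95)": 88.0},
    },
}

def get_metric(metrics: dict, name: str):
    handlers = {
        "counter": lambda m: m["values"]["count"],  # assumed counter shape
        "gauge": lambda m: m["values"]["value"],    # matches _get_gauge_metric
        "trend": lambda m: m["values"],             # matches _get_trend_metrics
    }
    if name not in metrics:
        return 0
    metric = metrics[name]
    return handlers[metric["type"]](metric)

print(get_metric(summary_metrics, "frostfs_obj_put_total"))     # 150
print(get_metric(summary_metrics, "frostfs_obj_put_duration"))  # the whole trend dict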
load_report.py (filename inferred from the class LoadReport hunk context)

@@ -2,6 +2,7 @@ from datetime import datetime
 from typing import Optional

 import yaml
+import os

 from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario
 from frostfs_testlib.load.load_metrics import get_metrics_object
@@ -109,6 +110,7 @@ class LoadReport:
         total_rate: float,
         throughput: float,
         errors: dict[str, int],
+        latency: dict[str, dict],
     ):
         throughput_html = ""
         if throughput > 0:
@@ -127,6 +129,15 @@ class LoadReport:
         ):
             per_node_errors_html += self._row(f"At {node_key}", errors)

+        latency_html = ""
+        if latency:
+            for node_key, param_dict in latency.items():
+                latency_values = ""
+                for param_name, param_val in param_dict.items():
+                    latency_values += f"{param_name}={param_val:.2f}ms "
+
+                latency_html += self._row(f"Put latency {node_key.split(':')[0]}", latency_values)
+
         object_size, object_size_unit = calc_unit(self.load_params.object_size, 1)
         duration = self._seconds_to_formatted_duration(self.load_params.load_time)
         model = self._get_model_string()
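The latency_html block renders one table row per node, flattening that node's trend dictionary into name=value pairs. It assumes the duration trends are reported in milliseconds, and note that the row label is fixed to "Put latency" whichever operation section is being rendered. A stand-alone sketch of the formatting, with an invented node key and trend values, and print() standing in for self._row():

# Invented data; mirrors the loop in the hunk above.
latency = {"10.78.70.1:8080": {"avg": 42.1, "min": 8.4, "med": 39.7, "max": 101.2}}

for node_key, param_dict in latency.items():
    latency_values = ""
    for param_name, param_val in param_dict.items():
        latency_values += f"{param_name}={param_val:.2f}ms "

    print(f"Put latency {node_key.split(':')[0]} | {latency_values}")
# -> Put latency 10.78.70.1 | avg=42.10ms min=8.40ms med=39.70ms max=101.20ms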
@@ -135,6 +146,7 @@ class LoadReport:
         errors_percent = 0
         if total_operations:
             errors_percent = total_errors/total_operations*100.0
+
         html = f"""
         <table border="1" cellpadding="5px"><tbody>
             <tr><th colspan="2" bgcolor="gainsboro">{short_summary}</th></tr>
@@ -142,7 +154,7 @@ class LoadReport:
             {self._row("Total operations", total_operations)}
             {self._row("OP/sec", f"{total_rate:.2f}")}
             {throughput_html}
-
+            {latency_html}
             <tr><th colspan="2" bgcolor="gainsboro">Errors</th></tr>
             {per_node_errors_html}
             {self._row("Total", f"{total_errors} ({errors_percent:.2f}%)")}
@@ -160,6 +172,7 @@ class LoadReport:
         write_operations = 0
         write_op_sec = 0
         write_throughput = 0
+        write_latency = {}
         write_errors = {}
         requested_write_rate = self.load_params.write_rate
         requested_write_rate_str = (
@@ -169,12 +182,14 @@ class LoadReport:
         read_operations = 0
         read_op_sec = 0
         read_throughput = 0
+        read_latency = {}
         read_errors = {}
         requested_read_rate = self.load_params.read_rate
         requested_read_rate_str = f"{requested_read_rate}op/sec" if requested_read_rate else ""

         delete_operations = 0
         delete_op_sec = 0
+        delete_latency = {}
         delete_errors = {}
         requested_delete_rate = self.load_params.delete_rate
         requested_delete_rate_str = (
@@ -210,6 +225,7 @@ class LoadReport:
         if write_operations:
             write_section_required = True
             write_op_sec += metrics.write_rate
+            write_latency[node_key] = metrics.write_latency
             write_throughput += metrics.write_throughput
             if metrics.write_failed_iterations:
                 write_errors[node_key] = metrics.write_failed_iterations
@@ -219,6 +235,7 @@ class LoadReport:
             read_section_required = True
             read_op_sec += metrics.read_rate
             read_throughput += metrics.read_throughput
+            read_latency[node_key] = metrics.read_latency
             if metrics.read_failed_iterations:
                 read_errors[node_key] = metrics.read_failed_iterations

@@ -226,6 +243,7 @@ class LoadReport:
         if delete_operations:
             delete_section_required = True
             delete_op_sec += metrics.delete_rate
+            delete_latency[node_key] = metrics.delete_latency
             if metrics.delete_failed_iterations:
                 delete_errors[node_key] = metrics.delete_failed_iterations

@@ -238,6 +256,7 @@ class LoadReport:
                 write_op_sec,
                 write_throughput,
                 write_errors,
+                write_latency,
             )

         if read_section_required:
@@ -249,6 +268,7 @@ class LoadReport:
                 read_op_sec,
                 read_throughput,
                 read_errors,
+                read_latency,
             )

         if delete_section_required:
@@ -260,6 +280,7 @@ class LoadReport:
                 delete_op_sec,
                 0,
                 delete_errors,
+                delete_latency,
             )

         return html
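Taken together, the report now keeps one latency dictionary per operation, keyed by node, filled only for nodes that actually performed that operation, and passes it through to the section renderer. A self-contained sketch of that aggregation pattern; the node names, counts, and the per_node_summaries shape are invented for illustration:

# Invented per-node data; only the aggregation pattern mirrors the diff.
per_node_summaries = {
    "node1:8080": {"write_ops": 100, "write_latency": {"avg": 40.0, "p(95)": 90.0}},
    "node2:8080": {"write_ops": 0, "write_latency": {}},
}

write_operations = 0
write_latency: dict[str, dict] = {}
for node_key, summary in per_node_summaries.items():
    if summary["write_ops"]:  # a section is only rendered if the operation ran
        write_operations += summary["write_ops"]
        write_latency[node_key] = summary["write_latency"]

print(write_operations)  # 100
print(write_latency)     # only node1 contributes a latency row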