Add node addresses metric #186

Merged
dkirillov merged 1 commit from ironbee/frostfs-s3-gw:add-node-addresses-metric into master 2024-09-04 19:51:12 +00:00
3 changed files with 19 additions and 0 deletions


@@ -15,6 +15,7 @@ This document outlines major changes between releases.
 - Fix parsing signed headers in presigned urls (#182)
 ### Added
+- Add a metric with addresses of nodes of the same and highest priority that are currently healthy (#51)
 - Support dump metrics descriptions (#80)
 - Add `copies_numbers` section to `placement_policy` in config file and support vectors of copies numbers (#70, #101)
 - Support impersonate bearer token (#81, #105)
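
As a rough illustration of the metric this changelog entry describes (not code from this PR), the sketch below registers an equivalent gauge with client_golang and serves it over `/metrics`. The `frostfs_s3_gw` namespace, the node address, and the port are placeholders; the gateway takes the real names from the metric descriptions changed below.

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Gauge mirroring the one added in this PR; namespace/subsystem values
	// here are assumptions, the gateway derives them from its descriptions.
	currentNodes := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "frostfs_s3_gw",
		Subsystem: "pool",
		Name:      "current_nodes",
		Help:      "Addresses of nodes of the same and highest priority that are currently healthy",
	}, []string{"address"})

	reg := prometheus.NewRegistry()
	reg.MustRegister(currentNodes)

	// One series per healthy priority node, value fixed at 1, e.g.
	// frostfs_s3_gw_pool_current_nodes{address="node1.example:8080"} 1
	currentNodes.WithLabelValues("node1.example:8080").Set(1)

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":9090", nil))
}
```

A query such as `count(frostfs_s3_gw_pool_current_nodes)` (same assumed prefix) then gives the number of healthy same/highest-priority nodes at scrape time.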


@@ -48,6 +48,14 @@ var appMetricsDesc = map[string]map[string]Description{
 			Help:           "Average request duration (in milliseconds) for specific method on node in pool",
 			VariableLabels: []string{"node", "method"},
 		},
+		currentNodesMetric: Description{
+			Type:           dto.MetricType_GAUGE,
+			Namespace:      namespace,
+			Subsystem:      poolSubsystem,
+			Name:           currentNodesMetric,
+			Help:           "Addresses of nodes of the same and highest priority that are currently healthy",
+			VariableLabels: []string{"address"},
+		},
 	},
 	billingSubsystem: {
 		userRequestsMetric: Description{
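
The `Description` type itself is not part of this diff; judging from the fields used above, its shape is roughly the sketch below. This is inferred, not taken from the PR, and the real definition in the metrics package may carry more fields.

```go
// Inferred shape of Description based on the fields referenced above.
// dto is the Prometheus client model (github.com/prometheus/client_model/go).
type Description struct {
	Type           dto.MetricType
	Namespace      string
	Subsystem      string
	Name           string
	Help           string
	VariableLabels []string
}
```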


@@ -15,6 +15,7 @@ const (
 	overallNodeRequestsMetric = "overall_node_requests"
 	currentErrorMetric        = "current_errors"
 	avgRequestDurationMetric  = "avg_request_duration"
+	currentNodesMetric        = "current_nodes"
 )

 const (
@@ -42,6 +43,7 @@ type poolMetricsCollector struct {
 	overallNodeRequests *prometheus.GaugeVec
 	currentErrors       *prometheus.GaugeVec
 	requestDuration     *prometheus.GaugeVec
+	currentNodes        *prometheus.GaugeVec
 }

 func newPoolMetricsCollector(scraper StatisticScraper) *poolMetricsCollector {
@@ -52,6 +54,7 @@ func newPoolMetricsCollector(scraper StatisticScraper) *poolMetricsCollector {
 		overallNodeRequests: mustNewGaugeVec(appMetricsDesc[poolSubsystem][overallNodeRequestsMetric]),
 		currentErrors:       mustNewGaugeVec(appMetricsDesc[poolSubsystem][currentErrorMetric]),
 		requestDuration:     mustNewGaugeVec(appMetricsDesc[poolSubsystem][avgRequestDurationMetric]),
+		currentNodes:        mustNewGaugeVec(appMetricsDesc[poolSubsystem][currentNodesMetric]),
 	}
 }
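
`mustNewGaugeVec` is likewise outside this diff; a minimal sketch of such a helper, assuming it simply maps a `Description` onto `prometheus.GaugeOpts`, could look like this (the real helper lives elsewhere in the metrics package and may differ in details).

```go
// Hypothetical sketch of mustNewGaugeVec: build a GaugeVec from a Description,
// panicking if the description does not actually describe a gauge.
func mustNewGaugeVec(desc Description) *prometheus.GaugeVec {
	if desc.Type != dto.MetricType_GAUGE {
		panic("unexpected metric type for gauge description")
	}
	return prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: desc.Namespace,
		Subsystem: desc.Subsystem,
		Name:      desc.Name,
		Help:      desc.Help,
	}, desc.VariableLabels)
}
```
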
@@ -62,6 +65,7 @@ func (m *poolMetricsCollector) Collect(ch chan<- prometheus.Metric) {
 	m.overallNodeRequests.Collect(ch)
 	m.currentErrors.Collect(ch)
 	m.requestDuration.Collect(ch)
+	m.currentNodes.Collect(ch)
 }

 func (m *poolMetricsCollector) Describe(descs chan<- *prometheus.Desc) {
@@ -70,6 +74,7 @@ func (m *poolMetricsCollector) Describe(descs chan<- *prometheus.Desc) {
 	m.overallNodeRequests.Describe(descs)
 	m.currentErrors.Describe(descs)
 	m.requestDuration.Describe(descs)
+	m.currentNodes.Describe(descs)
 }

 func (m *poolMetricsCollector) updateStatistic() {
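
Because `Collect` and `Describe` have the signatures required by `prometheus.Collector`, the collector can be registered with a Prometheus registry directly; a compile-time assertion (not part of this change) would make that explicit.

```go
// Compile-time check that poolMetricsCollector satisfies prometheus.Collector;
// shown here only to make the relationship explicit.
var _ prometheus.Collector = (*poolMetricsCollector)(nil)
```
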
@@ -79,6 +84,7 @@ func (m *poolMetricsCollector) updateStatistic() {
 	m.overallNodeRequests.Reset()
 	m.currentErrors.Reset()
 	m.requestDuration.Reset()
+	m.currentNodes.Reset()

 	for _, node := range stat.Nodes() {
 		m.overallNodeErrors.WithLabelValues(node.Address()).Set(float64(node.OverallErrors()))
@@ -88,6 +94,10 @@ func (m *poolMetricsCollector) updateStatistic() {
 		m.updateRequestsDuration(node)
 	}

+	for _, addr := range stat.CurrentNodes() {
+		m.currentNodes.WithLabelValues(addr).Set(1)
+	}
+
 	m.overallErrors.Set(float64(stat.OverallErrors()))
 }
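
The update path resets the gauge before repopulating it, so series for addresses that have dropped out of the healthy set disappear on the next scrape. A self-contained sketch of that reset-then-set pattern, with a hypothetical helper name:

```go
// Reset-then-set keeps the label set in sync with the current node list:
// stale address series are removed, current ones are recreated with value 1.
func updateCurrentNodes(gauge *prometheus.GaugeVec, addrs []string) {
	gauge.Reset()
	for _, addr := range addrs {
		gauge.WithLabelValues(addr).Set(1)
	}
}
```

Calling something like `updateCurrentNodes(m.currentNodes, stat.CurrentNodes())` would reproduce the loop above; the helper only illustrates why the `Reset()` added to `updateStatistic` matters.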