[#1653] qos: Add metrics

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>

Author: Dmitrii Stepanov, 2025-03-05 13:53:32 +03:00 (committed by Dmitrii Stepanov)
Parent: d36afa31c7
Commit: 3727d60331

10 changed files with 214 additions and 21 deletions


@@ -4,6 +4,8 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"sync"
+	"sync/atomic"
 	"time"

 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
@@ -15,6 +17,9 @@ import (
 const (
 	defaultIdleTimeout time.Duration = 0
 	defaultShare       float64       = 1.0
+	minusOne                         = ^uint64(0)
+
+	defaultMetricsCollectTimeout = 5 * time.Second
 )

 type ReleaseFunc scheduling.ReleaseFunc
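
Aside: `minusOne = ^uint64(0)` is the unsigned encoding of -1, the standard sync/atomic idiom for decrementing a uint64 counter via Add. This diff adds the constant but shows no caller, so the snippet below only illustrates the idiom, not committed code:

    var gauge atomic.Uint64
    gauge.Add(1)        // increment
    gauge.Add(minusOne) // adding ^uint64(0) wraps around, i.e. subtracts 1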
@@ -22,6 +27,8 @@ type ReleaseFunc scheduling.ReleaseFunc
 type Limiter interface {
 	ReadRequest(context.Context) (ReleaseFunc, error)
 	WriteRequest(context.Context) (ReleaseFunc, error)
+	SetParentID(string)
+	SetMetrics(Metrics)
 	Close()
 }
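
The `Metrics` type accepted by `SetMetrics` is defined elsewhere in the package and is not part of this diff. From its call sites below (`SetOperationTagCounters` in the collector goroutine and `Close` in `mClockLimiter.Close`), its shape is roughly the following sketch; the parameter names are assumptions, not the committed declaration:

    type Metrics interface {
        SetOperationTagCounters(shardID, operation, tag string, pending, inProgress, completed, resourceExhausted uint64)
        Close(shardID string)
    }

    // Inferred from the metricsHolder{metrics: ...} literals in this diff.
    type metricsHolder struct {
        metrics Metrics
    }

    // noopMetrics discards everything; NewLimiter stores it until
    // SetMetrics installs a real implementation.
    type noopMetrics struct{}

    func (*noopMetrics) SetOperationTagCounters(string, string, string, uint64, uint64, uint64, uint64) {}
    func (*noopMetrics) Close(string)                                                                   {}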
@@ -34,10 +41,6 @@ func NewLimiter(c *limits.Config) (Limiter, error) {
 	if err := validateConfig(c); err != nil {
 		return nil, err
 	}
-	read, write := c.Read(), c.Write()
-	if isNoop(read, write) {
-		return noopLimiterInstance, nil
-	}
 	readScheduler, err := createScheduler(c.Read())
 	if err != nil {
 		return nil, fmt.Errorf("create read scheduler: %w", err)
@@ -46,10 +49,18 @@ func NewLimiter(c *limits.Config) (Limiter, error) {
 	if err != nil {
 		return nil, fmt.Errorf("create write scheduler: %w", err)
 	}
-	return &mClockLimiter{
+	l := &mClockLimiter{
 		readScheduler:  readScheduler,
 		writeScheduler: writeScheduler,
-	}, nil
+		closeCh:        make(chan struct{}),
+		wg:             &sync.WaitGroup{},
+		readStats:      createStats(),
+		writeStats:     createStats(),
+	}
+	l.shardID.Store(&shardID{})
+	l.metrics.Store(&metricsHolder{metrics: &noopMetrics{}})
+	l.startMetricsCollect()
+	return l, nil
 }

 func createScheduler(config limits.OpConfig) (scheduler, error) {
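
`stat`, `createStats`, and `getStat` are likewise outside this diff. Given the atomic `Add`/`Load` calls in `requestArrival` and the collector goroutine, a plausible sketch looks like this; the exact tag list and lookup behavior are assumptions:

    // stat aggregates per-tag counters, updated atomically on the request
    // paths and read by the collector goroutine.
    type stat struct {
        pending           atomic.Uint64
        inProgress        atomic.Uint64
        completed         atomic.Uint64
        resourceExhausted atomic.Uint64
    }

    // createStats pre-allocates one stat per IO tag, so request paths only
    // ever read the map and need no lock.
    func createStats() map[string]*stat {
        result := make(map[string]*stat)
        for _, tag := range []IOTag{IOTagClient, IOTagCritical /* ...remaining tags... */} {
            result[tag.String()] = &stat{}
        }
        return result
    }

    func getStat(tag string, stats map[string]*stat) *stat {
        return stats[tag] // assumes every tag seen at runtime was pre-created
    }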
@@ -91,7 +102,7 @@
 )

 func NewNoopLimiter() Limiter {
-	return &noopLimiter{}
+	return noopLimiterInstance
 }

 type noopLimiter struct{}
@@ -104,43 +115,109 @@ func (n *noopLimiter) WriteRequest(context.Context) (ReleaseFunc, error) {
 	return releaseStub, nil
 }

+func (n *noopLimiter) SetParentID(string) {}
+
 func (n *noopLimiter) Close() {}

+func (n *noopLimiter) SetMetrics(Metrics) {}
+
 var _ Limiter = (*mClockLimiter)(nil)

+type shardID struct {
+	id string
+}
+
 type mClockLimiter struct {
 	readScheduler  scheduler
 	writeScheduler scheduler
+
+	readStats  map[string]*stat
+	writeStats map[string]*stat
+
+	shardID atomic.Pointer[shardID]
+	metrics atomic.Pointer[metricsHolder]
+	closeCh chan struct{}
+	wg      *sync.WaitGroup
 }

 func (n *mClockLimiter) ReadRequest(ctx context.Context) (ReleaseFunc, error) {
-	return requestArrival(ctx, n.readScheduler)
+	return requestArrival(ctx, n.readScheduler, n.readStats)
 }

 func (n *mClockLimiter) WriteRequest(ctx context.Context) (ReleaseFunc, error) {
-	return requestArrival(ctx, n.writeScheduler)
+	return requestArrival(ctx, n.writeScheduler, n.writeStats)
 }

-func requestArrival(ctx context.Context, s scheduler) (ReleaseFunc, error) {
+func requestArrival(ctx context.Context, s scheduler, stats map[string]*stat) (ReleaseFunc, error) {
 	tag, ok := tagging.IOTagFromContext(ctx)
 	if !ok {
 		tag = IOTagClient.String()
 	}
+	stat := getStat(tag, stats)
+	stat.pending.Add(1)
 	if tag == IOTagCritical.String() {
-		return releaseStub, nil
+		stat.inProgress.Add(1)
+		return func() {
+			stat.completed.Add(1)
+		}, nil
 	}
 	rel, err := s.RequestArrival(ctx, tag)
+	stat.inProgress.Add(1)
 	if err != nil {
 		if errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) ||
 			errors.Is(err, errSemaphoreLimitExceeded) {
+			stat.resourceExhausted.Add(1)
 			return nil, &apistatus.ResourceExhausted{}
 		}
+		stat.completed.Add(1)
 		return nil, err
 	}
-	return ReleaseFunc(rel), nil
+	return func() {
+		rel()
+		stat.completed.Add(1)
+	}, nil
 }

 func (n *mClockLimiter) Close() {
 	n.readScheduler.Close()
 	n.writeScheduler.Close()
+	close(n.closeCh)
+	n.wg.Wait()
+	n.metrics.Load().metrics.Close(n.shardID.Load().id)
 }
+
+func (n *mClockLimiter) SetParentID(parentID string) {
+	n.shardID.Store(&shardID{id: parentID})
+}
+
+func (n *mClockLimiter) SetMetrics(m Metrics) {
+	n.metrics.Store(&metricsHolder{metrics: m})
+}
+
+func (n *mClockLimiter) startMetricsCollect() {
+	n.wg.Add(1)
+	go func() {
+		defer n.wg.Done()
+		ticker := time.NewTicker(defaultMetricsCollectTimeout)
+		defer ticker.Stop()
+		for {
+			select {
+			case <-n.closeCh:
+				return
+			case <-ticker.C:
+				shardID := n.shardID.Load().id
+				if shardID == "" {
+					continue
+				}
+				metrics := n.metrics.Load().metrics
+				for tag, s := range n.readStats {
+					metrics.SetOperationTagCounters(shardID, "read", tag, s.pending.Load(), s.inProgress.Load(), s.completed.Load(), s.resourceExhausted.Load())
+				}
+				for tag, s := range n.writeStats {
+					metrics.SetOperationTagCounters(shardID, "write", tag, s.pending.Load(), s.inProgress.Load(), s.completed.Load(), s.resourceExhausted.Load())
+				}
+			}
+		}
+	}()
+}
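
Note on counter semantics: all per-tag counters are monotonic. `pending` is bumped on every arrival, `inProgress` on every admission (including the IOTagCritical fast path, which bypasses the scheduler), and `completed` on every release and on non-exhaustion errors; a request rejected with ResourceExhausted increments `inProgress` and `resourceExhausted` but never `completed`. A Metrics implementation can therefore derive point-in-time gauges from the reported values, folding the exhausted count in (a sketch, assuming the consumer keeps the raw counters):

    queued := pending - inProgress                         // still waiting in the scheduler
    inFlight := inProgress - completed - resourceExhausted // admitted and not yet released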