frostfs-node/pkg/services/policer/process.go
Dmitrii Stepanov f0355a453e
All checks were successful
ci/woodpecker/push/pre-commit Pipeline was successful
[#463] policer: Remove capacity rebalance logic
Current implementation has some quirks. For example,
using only half of the object.put.pool_size_remote threads
tells the replicator that the node is 50% loaded,
but in reality we could be putting lots of big objects.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-18 10:52:12 +00:00

66 lines
1.4 KiB
Go

package policer
import (
"context"
"errors"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
"go.uber.org/zap"
)
// Run drives the policer's main worker loop until ctx is cancelled,
// emitting a log entry once the routine has stopped.
func (p *Policer) Run(ctx context.Context) {
	defer p.log.Info(logs.PolicerRoutineStopped)
	p.shardPolicyWorker(ctx)
}
// shardPolicyWorker repeatedly pulls batches of object addresses from the
// key-space iterator and submits each address to the task pool for policy
// processing. It returns only when ctx is cancelled. Recently processed
// addresses (tracked in p.cache with p.evictDuration) and addresses already
// in flight (p.objsInWork) are skipped.
func (p *Policer) shardPolicyWorker(ctx context.Context) {
	for {
		// Non-blocking cancellation check before fetching the next batch.
		select {
		case <-ctx.Done():
			return
		default:
		}

		addrs, err := p.keySpaceIterator.Next(ctx, p.batchSize)
		switch {
		case errors.Is(err, engine.ErrEndOfListing):
			// Completed a full pass over the key space: start over after a pause.
			p.keySpaceIterator.Rewind()
			time.Sleep(p.sleepDuration) // finished whole cycle, sleep a bit
			continue
		case err != nil:
			// NOTE(review): we fall through and process whatever (possibly
			// empty) batch was returned; on a persistent error this loop may
			// spin without backoff — confirm this is intended.
			p.log.Warn(logs.PolicerFailureAtObjectSelectForReplication, zap.Error(err))
		}

		for i := range addrs {
			// Re-check cancellation between submissions.
			select {
			case <-ctx.Done():
				return
			default:
			}

			// Copy per iteration so the closure below captures its own value.
			candidate := addrs[i]
			if p.objsInWork.inWork(candidate.Address) {
				// Skip objects that are already being processed.
				continue
			}

			submitErr := p.taskPool.Submit(func() {
				// Skip objects handled within the eviction window.
				if seen, ok := p.cache.Get(candidate.Address); ok && time.Since(seen) < p.evictDuration {
					return
				}
				// add() wins the race at most once; only the winner processes.
				if p.objsInWork.add(candidate.Address) {
					p.processObject(ctx, candidate)
					p.cache.Add(candidate.Address, time.Now())
					p.objsInWork.remove(candidate.Address)
				}
			})
			if submitErr != nil {
				p.log.Warn(logs.PolicerPoolSubmission, zap.Error(submitErr))
			}
		}
	}
}