forked from TrueCloudLab/frostfs-node
Aleksey Savchuk
f0c43c8d80
Use `zap.Error` instead of `zap.String` for logging errors: change all expressions like `zap.String("error", err.Error())` or `zap.String("err", err.Error())` to `zap.Error(err)`. Leave similar expressions with other messages unchanged, for example, `zap.String("last_error", lastErr.Error())` or `zap.String("reason", ctx.Err().Error())`. This change was made by applying the following patch: ```diff @@ var err expression @@ -zap.String("error", err.Error()) +zap.Error(err) @@ var err expression @@ -zap.String("err", err.Error()) +zap.Error(err) ``` Signed-off-by: Aleksey Savchuk <a.savchuk@yadro.com>
110 lines
2.5 KiB
Go
110 lines
2.5 KiB
Go
package policer
|
|
|
|
import (
|
|
"context"
|
|
"errors"
|
|
"sync"
|
|
"time"
|
|
|
|
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
|
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
|
|
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
"go.uber.org/zap"
|
|
)
|
|
|
|
// Run starts the policer's main processing loop and blocks until ctx is
// cancelled. A routine-stopped message is logged on exit.
func (p *Policer) Run(ctx context.Context) {
	p.shardPolicyWorker(ctx)
	p.log.Info(ctx, logs.PolicerRoutineStopped)
}
|
|
|
|
// shardPolicyWorker is the policer's main loop: it repeatedly pulls batches
// of object addresses from the key space iterator and submits one policing
// task per address to the worker pool, until ctx is cancelled. On
// cancellation the task pool is released before returning.
func (p *Policer) shardPolicyWorker(ctx context.Context) {
	for {
		// Non-blocking cancellation check before fetching the next batch.
		select {
		case <-ctx.Done():
			p.taskPool.Release()
			return
		default:
		}

		addrs, err := p.keySpaceIterator.Next(ctx, p.batchSize)
		if err != nil {
			if errors.Is(err, engine.ErrEndOfListing) {
				// Whole key space traversed: start over after a pause.
				p.keySpaceIterator.Rewind()
				time.Sleep(p.sleepDuration) // finished whole cycle, sleep a bit
				continue
			}
			// NOTE(review): on any other error the loop still falls through
			// and ranges over addrs (possibly empty/partial) — presumably
			// intentional best-effort behavior; confirm.
			p.log.Warn(ctx, logs.PolicerFailureAtObjectSelectForReplication, zap.Error(err))
		}

		// Fresh per-batch map of already-reported errors, so repeated
		// failures with the same cause in one container are logged once.
		skipMap := newSkipMap()
		for i := range addrs {
			// Re-check cancellation between submissions.
			select {
			case <-ctx.Done():
				p.taskPool.Release()
				return
			default:
				// addr is declared per iteration so the closure below
				// captures its own copy.
				addr := addrs[i]
				if p.objsInWork.inWork(addr.Address) {
					// do not process an object
					// that is in work
					continue
				}

				err := p.taskPool.Submit(func() {
					// Skip objects processed recently (within evictDuration).
					v, ok := p.cache.Get(addr.Address)
					if ok && time.Since(v) < p.evictDuration {
						return
					}

					// add returns false if another worker claimed the object
					// between the inWork check above and now.
					if p.objsInWork.add(addr.Address) {
						err := p.processObject(ctx, addr)
						if err != nil && !skipMap.addSeenError(addr.Address.Container(), err) {
							p.log.Error(ctx, logs.PolicerUnableToProcessObj,
								zap.Stringer("object", addr.Address),
								zap.Error(err))
						}
						p.cache.Add(addr.Address, time.Now())
						p.objsInWork.remove(addr.Address)
						p.metrics.IncProcessedObjects()
					}
				})
				if err != nil {
					// Submission failure (e.g. pool closed); the object will
					// be revisited on a later cycle.
					p.log.Warn(ctx, logs.PolicerPoolSubmission, zap.Error(err))
				}
			}
		}
	}
}
|
|
|
|
// errMap records, per container, the errors already reported during one
// policing batch so that duplicates can be suppressed. Safe for concurrent
// use by the worker-pool goroutines.
type errMap struct {
	sync.Mutex // guards skipMap

	// skipMap holds the distinct errors seen so far for each container;
	// each list is capped (see addSeenError) to bound memory use.
	skipMap map[cid.ID][]error
}
|
|
|
|
func newSkipMap() *errMap {
|
|
return &errMap{
|
|
skipMap: make(map[cid.ID][]error),
|
|
}
|
|
}
|
|
|
|
// addSeenError marks err as seen error for the container.
|
|
// Returns true is the error has already been added.
|
|
func (m *errMap) addSeenError(cnr cid.ID, err error) bool {
|
|
m.Lock()
|
|
defer m.Unlock()
|
|
|
|
for _, e := range m.skipMap[cnr] {
|
|
if errors.Is(err, e) {
|
|
return true
|
|
}
|
|
}
|
|
|
|
// Restrict list length to avoid possible OOM if some random error is added in future.
|
|
const maxErrListLength = 10
|
|
|
|
lst := m.skipMap[cnr]
|
|
if len(lst) < maxErrListLength {
|
|
m.skipMap[cnr] = append(lst, err)
|
|
}
|
|
return false
|
|
}
|