diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index c625a041f..b42275538 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -1072,6 +1072,7 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
 
             return pool
         }),
+        shard.WithLimiter(shCfg.limiter),
     }
     return sh
 }
diff --git a/internal/qos/limiter.go b/internal/qos/limiter.go
index d2a1919f1..996cebea1 100644
--- a/internal/qos/limiter.go
+++ b/internal/qos/limiter.go
@@ -79,6 +79,10 @@ var (
     noopLimiterInstance = &noopLimiter{}
 )
 
+func NewNoopLimiter() Limiter {
+    return &noopLimiter{}
+}
+
 type noopLimiter struct{}
 
 func (n *noopLimiter) ReadRequest(context.Context) (ReleaseFunc, error) {
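The patch uses `qos.Limiter` throughout but never shows its definition; only `NewNoopLimiter` and the call sites below are visible. Inferred from that usage, the contract is roughly the sketch below (the real method set in internal/qos/limiter.go may be wider, e.g. metrics hooks — treat this as an assumption, not the actual declaration):

```go
package qos

import "context"

// ReleaseFunc returns the acquired slot to the limiter.
type ReleaseFunc func()

// Limiter gates a shard's metabase/pilorama accesses, split into read and
// write classes. Reconstructed from the call sites in this patch.
type Limiter interface {
	// ReadRequest blocks or fails until a read slot is available.
	ReadRequest(context.Context) (ReleaseFunc, error)
	// WriteRequest blocks or fails until a write slot is available.
	WriteRequest(context.Context) (ReleaseFunc, error)
	// Close frees limiter resources; called from Shard.Close below.
	Close()
}
```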
diff --git a/pkg/local_object_storage/engine/container.go b/pkg/local_object_storage/engine/container.go
index b2d7a1037..3160d7f83 100644
--- a/pkg/local_object_storage/engine/container.go
+++ b/pkg/local_object_storage/engine/container.go
@@ -74,7 +74,7 @@ func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm)
         var csPrm shard.ContainerSizePrm
         csPrm.SetContainerID(prm.cnr)
 
-        csRes, err := sh.Shard.ContainerSize(csPrm)
+        csRes, err := sh.Shard.ContainerSize(ctx, csPrm)
         if err != nil {
             e.reportShardError(ctx, sh, "can't get container size", err,
                 zap.Stringer("container_id", prm.cnr))
diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go
index fb802ef2a..c8ee33b53 100644
--- a/pkg/local_object_storage/engine/inhume.go
+++ b/pkg/local_object_storage/engine/inhume.go
@@ -339,7 +339,7 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid
         var drop []cid.ID
         for id := range idMap {
             prm.SetContainerID(id)
-            s, err := sh.ContainerSize(prm)
+            s, err := sh.ContainerSize(ctx, prm)
             if err != nil {
                 e.log.Warn(ctx, logs.EngineFailedToGetContainerSize, zap.Stringer("container_id", id), zap.Error(err))
                 failed = true
diff --git a/pkg/local_object_storage/shard/container.go b/pkg/local_object_storage/shard/container.go
index 0309f0c81..b4015ae8d 100644
--- a/pkg/local_object_storage/shard/container.go
+++ b/pkg/local_object_storage/shard/container.go
@@ -26,7 +26,7 @@ func (r ContainerSizeRes) Size() uint64 {
     return r.size
 }
 
-func (s *Shard) ContainerSize(prm ContainerSizePrm) (ContainerSizeRes, error) {
+func (s *Shard) ContainerSize(ctx context.Context, prm ContainerSizePrm) (ContainerSizeRes, error) {
     s.m.RLock()
     defer s.m.RUnlock()
 
@@ -34,6 +34,12 @@ func (s *Shard) ContainerSize(prm ContainerSizePrm) (ContainerSizeRes, error) {
         return ContainerSizeRes{}, ErrDegradedMode
     }
 
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        return ContainerSizeRes{}, err
+    }
+    defer release()
+
     size, err := s.metaBase.ContainerSize(prm.cnr)
     if err != nil {
         return ContainerSizeRes{}, fmt.Errorf("get container size: %w", err)
@@ -69,6 +75,12 @@ func (s *Shard) ContainerCount(ctx context.Context, prm ContainerCountPrm) (Cont
         return ContainerCountRes{}, ErrDegradedMode
     }
 
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        return ContainerCountRes{}, err
+    }
+    defer release()
+
     counters, err := s.metaBase.ContainerCount(ctx, prm.ContainerID)
     if err != nil {
         return ContainerCountRes{}, fmt.Errorf("get container counters: %w", err)
@@ -100,6 +112,12 @@ func (s *Shard) DeleteContainerSize(ctx context.Context, id cid.ID) error {
         return ErrDegradedMode
     }
 
+    release, err := s.opsLimiter.WriteRequest(ctx)
+    if err != nil {
+        return err
+    }
+    defer release()
+
     return s.metaBase.DeleteContainerSize(ctx, id)
 }
 
@@ -122,5 +140,11 @@ func (s *Shard) DeleteContainerCount(ctx context.Context, id cid.ID) error {
         return ErrDegradedMode
     }
 
+    release, err := s.opsLimiter.WriteRequest(ctx)
+    if err != nil {
+        return err
+    }
+    defer release()
+
     return s.metaBase.DeleteContainerCount(ctx, id)
 }
diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go
index fedde2206..3520277c0 100644
--- a/pkg/local_object_storage/shard/control.go
+++ b/pkg/local_object_storage/shard/control.go
@@ -395,6 +395,10 @@ func (s *Shard) Close(ctx context.Context) error {
         s.gc.stop(ctx)
     }
 
+    if s.opsLimiter != nil {
+        s.opsLimiter.Close()
+    }
+
     return lastErr
 }
 
diff --git a/pkg/local_object_storage/shard/count.go b/pkg/local_object_storage/shard/count.go
index b3bc6a30b..8dc1f0522 100644
--- a/pkg/local_object_storage/shard/count.go
+++ b/pkg/local_object_storage/shard/count.go
@@ -23,6 +23,12 @@ func (s *Shard) LogicalObjectsCount(ctx context.Context) (uint64, error) {
         return 0, ErrDegradedMode
     }
 
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        return 0, err
+    }
+    defer release()
+
     cc, err := s.metaBase.ObjectCounters()
     if err != nil {
         return 0, err
diff --git a/pkg/local_object_storage/shard/delete.go b/pkg/local_object_storage/shard/delete.go
index 55231b032..0101817a8 100644
--- a/pkg/local_object_storage/shard/delete.go
+++ b/pkg/local_object_storage/shard/delete.go
@@ -54,6 +54,12 @@ func (s *Shard) delete(ctx context.Context, prm DeletePrm, skipFailed bool) (Del
         return DeleteRes{}, ErrDegradedMode
     }
 
+    release, err := s.opsLimiter.WriteRequest(ctx)
+    if err != nil {
+        return DeleteRes{}, err
+    }
+    defer release()
+
     result := DeleteRes{}
     for _, addr := range prm.addr {
         select {
diff --git a/pkg/local_object_storage/shard/exists.go b/pkg/local_object_storage/shard/exists.go
index 82ce48dde..2c11b6b01 100644
--- a/pkg/local_object_storage/shard/exists.go
+++ b/pkg/local_object_storage/shard/exists.go
@@ -53,10 +53,6 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) {
     ))
     defer span.End()
 
-    var exists bool
-    var locked bool
-    var err error
-
     s.m.RLock()
     defer s.m.RUnlock()
 
@@ -64,7 +60,18 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) {
         return ExistsRes{}, ErrShardDisabled
     } else if s.info.EvacuationInProgress {
         return ExistsRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
-    } else if s.info.Mode.NoMetabase() {
+    }
+
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        return ExistsRes{}, err
+    }
+    defer release()
+
+    var exists bool
+    var locked bool
+
+    if s.info.Mode.NoMetabase() {
         var p common.ExistsPrm
         p.Address = prm.Address
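The concrete limiter is outside this patch. Purely as a mental model of the acquire/release semantics the shard methods above now rely on, a weighted-semaphore limiter satisfying the inferred contract could look like this (hypothetical illustration, not the implementation in internal/qos):

```go
package qos

import (
	"context"

	"golang.org/x/sync/semaphore"
)

// semLimiter is a toy Limiter: one weighted semaphore per request class.
type semLimiter struct {
	read, write *semaphore.Weighted
}

// NewSemLimiter is illustrative only; the real constructor lives in internal/qos.
func NewSemLimiter(maxReads, maxWrites int64) Limiter {
	return &semLimiter{
		read:  semaphore.NewWeighted(maxReads),
		write: semaphore.NewWeighted(maxWrites),
	}
}

func (l *semLimiter) ReadRequest(ctx context.Context) (ReleaseFunc, error) {
	// Blocks until a slot is free or ctx is cancelled; the error is what the
	// shard methods above propagate to their callers.
	if err := l.read.Acquire(ctx, 1); err != nil {
		return nil, err
	}
	return func() { l.read.Release(1) }, nil
}

func (l *semLimiter) WriteRequest(ctx context.Context) (ReleaseFunc, error) {
	if err := l.write.Acquire(ctx, 1); err != nil {
		return nil, err
	}
	return func() { l.write.Release(1) }, nil
}

func (l *semLimiter) Close() {}
```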
diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go
index 4a5ec7a71..32a377cd5 100644
--- a/pkg/local_object_storage/shard/gc.go
+++ b/pkg/local_object_storage/shard/gc.go
@@ -291,28 +291,7 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) {
     s.log.Debug(ctx, logs.ShardGCRemoveGarbageStarted)
     defer s.log.Debug(ctx, logs.ShardGCRemoveGarbageCompleted)
 
-    buf := make([]oid.Address, 0, s.rmBatchSize)
-
-    var iterPrm meta.GarbageIterationPrm
-    iterPrm.SetHandler(func(g meta.GarbageObject) error {
-        select {
-        case <-ctx.Done():
-            return ctx.Err()
-        default:
-        }
-
-        buf = append(buf, g.Address())
-
-        if len(buf) == s.rmBatchSize {
-            return meta.ErrInterruptIterator
-        }
-
-        return nil
-    })
-
-    // iterate over metabase's objects with GC mark
-    // (no more than s.rmBatchSize objects)
-    err := s.metaBase.IterateOverGarbage(ctx, iterPrm)
+    buf, err := s.getGarbage(ctx)
     if err != nil {
         s.log.Warn(ctx, logs.ShardIteratorOverMetabaseGraveyardFailed,
             zap.Error(err),
@@ -344,6 +323,39 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) {
     return
 }
 
+func (s *Shard) getGarbage(ctx context.Context) ([]oid.Address, error) {
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        return nil, err
+    }
+    defer release()
+
+    buf := make([]oid.Address, 0, s.rmBatchSize)
+
+    var iterPrm meta.GarbageIterationPrm
+    iterPrm.SetHandler(func(g meta.GarbageObject) error {
+        select {
+        case <-ctx.Done():
+            return ctx.Err()
+        default:
+        }
+
+        buf = append(buf, g.Address())
+
+        if len(buf) == s.rmBatchSize {
+            return meta.ErrInterruptIterator
+        }
+
+        return nil
+    })
+
+    if err := s.metaBase.IterateOverGarbage(ctx, iterPrm); err != nil {
+        return nil, err
+    }
+
+    return buf, nil
+}
+
 func (s *Shard) getExpiredObjectsParameters() (workerCount, batchSize int) {
     workerCount = max(minExpiredWorkers, s.gc.gcCfg.expiredCollectorWorkerCount)
     batchSize = max(minExpiredBatchSize, s.gc.gcCfg.expiredCollectorBatchSize)
@@ -422,18 +434,9 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address)
         return
     }
 
-    var inhumePrm meta.InhumePrm
-
-    inhumePrm.SetAddresses(expired...)
-    inhumePrm.SetGCMark()
-
-    // inhume the collected objects
-    res, err := s.metaBase.Inhume(ctx, inhumePrm)
+    res, err := s.inhumeGC(ctx, expired)
     if err != nil {
-        s.log.Warn(ctx, logs.ShardCouldNotInhumeTheObjects,
-            zap.Error(err),
-        )
-
+        s.log.Warn(ctx, logs.ShardCouldNotInhumeTheObjects, zap.Error(err))
         return
     }
 
@@ -451,6 +454,12 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address)
 }
 
 func (s *Shard) getExpiredWithLinked(ctx context.Context, source []oid.Address) ([]oid.Address, error) {
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        return nil, err
+    }
+    defer release()
+
     result := make([]oid.Address, 0, len(source))
     parentToChildren, err := s.metaBase.GetChildren(ctx, source)
     if err != nil {
@@ -464,6 +473,19 @@ func (s *Shard) getExpiredWithLinked(ctx context.Context, source []oid.Address)
     return result, nil
 }
 
+func (s *Shard) inhumeGC(ctx context.Context, addrs []oid.Address) (meta.InhumeRes, error) {
+    release, err := s.opsLimiter.WriteRequest(ctx)
+    if err != nil {
+        return meta.InhumeRes{}, err
+    }
+    defer release()
+
+    var inhumePrm meta.InhumePrm
+    inhumePrm.SetAddresses(addrs...)
+    inhumePrm.SetGCMark()
+    return s.metaBase.Inhume(ctx, inhumePrm)
+}
+
 func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
     var err error
     startedAt := time.Now()
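Note that removeGarbage and handleExpiredObjects no longer call the metabase directly: the access is moved into the new getGarbage and inhumeGC helpers so that `defer release()` fires as soon as the metabase work finishes rather than at the end of the whole GC pass. The generic shape of that refactoring, shown only as an illustration (limitedRead is not part of the patch):

```go
// limitedRead is an illustrative helper, not code from this patch: wrapping a
// metabase access in its own function scopes `defer release()` to that access
// instead of the caller's (potentially long) GC iteration.
func (s *Shard) limitedRead(ctx context.Context, op func() error) error {
	release, err := s.opsLimiter.ReadRequest(ctx)
	if err != nil {
		return err
	}
	defer release()

	return op()
}
```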
@@ -505,11 +527,17 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
             return
         }
 
-        err = s.metaBase.IterateOverGraveyard(ctx, iterPrm)
+        release, err := s.opsLimiter.ReadRequest(ctx)
+        if err != nil {
+            log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
+            s.m.RUnlock()
+            return
+        }
+        err = s.metaBase.IterateOverGraveyard(ctx, iterPrm)
+        release()
         if err != nil {
             log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
             s.m.RUnlock()
-
             return
         }
 
@@ -598,7 +626,13 @@ func (s *Shard) getExpiredObjects(ctx context.Context, epoch uint64, onExpiredFo
         return ErrDegradedMode
     }
 
-    err := s.metaBase.IterateExpired(ctx, epoch, func(expiredObject *meta.ExpiredObject) error {
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        return err
+    }
+    defer release()
+
+    err = s.metaBase.IterateExpired(ctx, epoch, func(expiredObject *meta.ExpiredObject) error {
         select {
         case <-ctx.Done():
             return meta.ErrInterruptIterator
@@ -621,6 +655,12 @@ func (s *Shard) selectExpired(ctx context.Context, epoch uint64, addresses []oid
         return nil, ErrDegradedMode
     }
 
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        return nil, err
+    }
+    defer release()
+
     return s.metaBase.FilterExpired(ctx, epoch, addresses)
 }
 
@@ -636,12 +676,15 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.Tombston
         return
     }
 
-    res, err := s.metaBase.InhumeTombstones(ctx, tss)
+    release, err := s.opsLimiter.WriteRequest(ctx)
     if err != nil {
-        s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage,
-            zap.Error(err),
-        )
-
+        s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, zap.Error(err))
+        return
+    }
+    res, err := s.metaBase.InhumeTombstones(ctx, tss)
+    release()
+    if err != nil {
+        s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, zap.Error(err))
         return
     }
 
@@ -664,11 +707,16 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
     if s.GetMode().NoMetabase() {
         return
     }
-    unlocked, err := s.metaBase.FreeLockedBy(lockers)
+
+    release, err := s.opsLimiter.WriteRequest(ctx)
     if err != nil {
-        s.log.Warn(ctx, logs.ShardFailureToUnlockObjects,
-            zap.Error(err),
-        )
+        s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err))
+        return
+    }
+    unlocked, err := s.metaBase.FreeLockedBy(lockers)
+    release()
+    if err != nil {
+        s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err))
         return
     }
 
@@ -676,13 +724,15 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
     var pInhume meta.InhumePrm
     pInhume.SetAddresses(lockers...)
     pInhume.SetForceGCMark()
-
-    res, err := s.metaBase.Inhume(ctx, pInhume)
+    release, err = s.opsLimiter.WriteRequest(ctx)
     if err != nil {
-        s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage,
-            zap.Error(err),
-        )
-
+        s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, zap.Error(err))
+        return
+    }
+    res, err := s.metaBase.Inhume(ctx, pInhume)
+    release()
+    if err != nil {
+        s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, zap.Error(err))
         return
     }
 
@@ -721,12 +771,15 @@ func (s *Shard) HandleDeletedLocks(ctx context.Context, lockers []oid.Address) {
         return
     }
 
-    _, err := s.metaBase.FreeLockedBy(lockers)
+    release, err := s.opsLimiter.WriteRequest(ctx)
     if err != nil {
-        s.log.Warn(ctx, logs.ShardFailureToUnlockObjects,
-            zap.Error(err),
-        )
-
+        s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err))
+        return
+    }
+    _, err = s.metaBase.FreeLockedBy(lockers)
+    release()
+    if err != nil {
+        s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err))
         return
     }
 }
@@ -750,7 +803,13 @@ func (s *Shard) collectExpiredMetrics(ctx context.Context, e Event) {
 }
 
 func (s *Shard) collectExpiredContainerSizeMetrics(ctx context.Context, epoch uint64) {
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err))
+        return
+    }
     ids, err := s.metaBase.ZeroSizeContainers(ctx)
+    release()
     if err != nil {
         s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err))
         return
@@ -762,7 +821,13 @@ func (s *Shard) collectExpiredContainerCountMetrics(ctx context.Context, epoch ui
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err))
+        return
+    }
     ids, err := s.metaBase.ZeroCountContainers(ctx)
+    release()
     if err != nil {
         s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err))
         return
diff --git a/pkg/local_object_storage/shard/get.go b/pkg/local_object_storage/shard/get.go
index 05823c62b..28f8912be 100644
--- a/pkg/local_object_storage/shard/get.go
+++ b/pkg/local_object_storage/shard/get.go
@@ -111,6 +111,12 @@ func (s *Shard) Get(ctx context.Context, prm GetPrm) (GetRes, error) {
         return c.Get(ctx, prm.addr)
     }
 
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        return GetRes{}, err
+    }
+    defer release()
+
     skipMeta := prm.skipMeta || s.info.Mode.NoMetabase()
     obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc)
 
diff --git a/pkg/local_object_storage/shard/head.go b/pkg/local_object_storage/shard/head.go
index ff57e3bf9..34b8290d6 100644
--- a/pkg/local_object_storage/shard/head.go
+++ b/pkg/local_object_storage/shard/head.go
@@ -81,6 +81,12 @@ func (s *Shard) Head(ctx context.Context, prm HeadPrm) (HeadRes, error) {
         headParams.SetAddress(prm.addr)
         headParams.SetRaw(prm.raw)
 
+        release, limitErr := s.opsLimiter.ReadRequest(ctx)
+        if limitErr != nil {
+            return HeadRes{}, limitErr
+        }
+        defer release()
+
         var res meta.GetRes
         res, err = s.metaBase.Get(ctx, headParams)
         obj = res.Header()
diff --git a/pkg/local_object_storage/shard/inhume.go b/pkg/local_object_storage/shard/inhume.go
index 9d5f66063..c0fd65f4b 100644
--- a/pkg/local_object_storage/shard/inhume.go
+++ b/pkg/local_object_storage/shard/inhume.go
@@ -81,6 +81,12 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
         return InhumeRes{}, ErrDegradedMode
     }
 
+    release, err := s.opsLimiter.WriteRequest(ctx)
+    if err != nil {
+        return InhumeRes{}, err
+    }
+    defer release()
+
     if s.hasWriteCache() {
         for i := range prm.target {
             _ = s.writeCache.Delete(ctx, prm.target[i])
diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go
index 7bc5ead1d..af87981ca 100644
--- a/pkg/local_object_storage/shard/list.go
+++ b/pkg/local_object_storage/shard/list.go
@@ -106,6 +106,12 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) {
         return SelectRes{}, ErrDegradedMode
     }
 
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        return SelectRes{}, err
+    }
+    defer release()
+
     lst, err := s.metaBase.Containers(ctx)
     if err != nil {
         return res, fmt.Errorf("list stored containers: %w", err)
@@ -145,6 +151,12 @@ func (s *Shard) ListContainers(ctx context.Context, _ ListContainersPrm) (ListCo
         return ListContainersRes{}, ErrDegradedMode
     }
 
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        return ListContainersRes{}, err
+    }
+    defer release()
+
     containers, err := s.metaBase.Containers(ctx)
     if err != nil {
         return ListContainersRes{}, fmt.Errorf("get list of containers: %w", err)
@@ -173,6 +185,12 @@ func (s *Shard) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (List
         return ListWithCursorRes{}, ErrDegradedMode
     }
 
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        return ListWithCursorRes{}, err
+    }
+    defer release()
+
     var metaPrm meta.ListPrm
     metaPrm.SetCount(prm.count)
     metaPrm.SetCursor(prm.cursor)
@@ -202,9 +220,15 @@ func (s *Shard) IterateOverContainers(ctx context.Context, prm IterateOverContai
         return ErrDegradedMode
     }
 
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        return err
+    }
+    defer release()
+
     var metaPrm meta.IterateOverContainersPrm
     metaPrm.Handler = prm.Handler
-    err := s.metaBase.IterateOverContainers(ctx, metaPrm)
+    err = s.metaBase.IterateOverContainers(ctx, metaPrm)
     if err != nil {
         return fmt.Errorf("iterate over containers: %w", err)
     }
@@ -227,11 +251,17 @@ func (s *Shard) IterateOverObjectsInContainer(ctx context.Context, prm IterateOv
         return ErrDegradedMode
     }
 
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        return err
+    }
+    defer release()
+
     var metaPrm meta.IterateOverObjectsInContainerPrm
     metaPrm.ContainerID = prm.ContainerID
     metaPrm.ObjectType = prm.ObjectType
     metaPrm.Handler = prm.Handler
-    err := s.metaBase.IterateOverObjectsInContainer(ctx, metaPrm)
+    err = s.metaBase.IterateOverObjectsInContainer(ctx, metaPrm)
     if err != nil {
         return fmt.Errorf("iterate over objects: %w", err)
     }
@@ -251,6 +281,12 @@ func (s *Shard) CountAliveObjectsInContainer(ctx context.Context, prm CountAlive
         return 0, ErrDegradedMode
     }
 
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        return 0, err
+    }
+    defer release()
+
     var metaPrm meta.CountAliveObjectsInContainerPrm
     metaPrm.ObjectType = prm.ObjectType
     metaPrm.ContainerID = prm.ContainerID
diff --git a/pkg/local_object_storage/shard/lock.go b/pkg/local_object_storage/shard/lock.go
index 31ca16aa1..9c392fdac 100644
--- a/pkg/local_object_storage/shard/lock.go
+++ b/pkg/local_object_storage/shard/lock.go
@@ -38,7 +38,13 @@ func (s *Shard) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []
         return ErrDegradedMode
     }
 
-    err := s.metaBase.Lock(ctx, idCnr, locker, locked)
+    release, err := s.opsLimiter.WriteRequest(ctx)
+    if err != nil {
+        return err
+    }
+    defer release()
+
+    err = s.metaBase.Lock(ctx, idCnr, locker, locked)
     if err != nil {
         return fmt.Errorf("metabase lock: %w", err)
     }
@@ -61,6 +67,12 @@ func (s *Shard) IsLocked(ctx context.Context, addr oid.Address) (bool, error) {
         return false, ErrDegradedMode
     }
 
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        return false, err
+    }
+    defer release()
+
     var prm meta.IsLockedPrm
     prm.SetAddress(addr)
 
@@ -86,5 +98,12 @@ func (s *Shard) GetLocks(ctx context.Context, addr oid.Address) ([]oid.ID, error
     if m.NoMetabase() {
         return nil, ErrDegradedMode
     }
+
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        return nil, err
+    }
+    defer release()
+
     return s.metaBase.GetLocks(ctx, addr)
 }
diff --git a/pkg/local_object_storage/shard/put.go b/pkg/local_object_storage/shard/put.go
index 3f23111af..f8cb00a31 100644
--- a/pkg/local_object_storage/shard/put.go
+++ b/pkg/local_object_storage/shard/put.go
@@ -67,6 +67,12 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
 
     var res common.PutRes
 
+    release, err := s.opsLimiter.WriteRequest(ctx)
+    if err != nil {
+        return PutRes{}, err
+    }
+    defer release()
+
     // exist check are not performed there, these checks should be executed
     // ahead of `Put` by storage engine
     tryCache := s.hasWriteCache() && !m.NoMetabase()
diff --git a/pkg/local_object_storage/shard/range.go b/pkg/local_object_storage/shard/range.go
index 701268820..443689104 100644
--- a/pkg/local_object_storage/shard/range.go
+++ b/pkg/local_object_storage/shard/range.go
@@ -131,6 +131,12 @@ func (s *Shard) GetRange(ctx context.Context, prm RngPrm) (RngRes, error) {
         return obj, nil
     }
 
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        return RngRes{}, err
+    }
+    defer release()
+
     skipMeta := prm.skipMeta || s.info.Mode.NoMetabase()
     obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc)
 
diff --git a/pkg/local_object_storage/shard/rebuild.go b/pkg/local_object_storage/shard/rebuild.go
index 0593f5894..9fe1bbe8c 100644
--- a/pkg/local_object_storage/shard/rebuild.go
+++ b/pkg/local_object_storage/shard/rebuild.go
@@ -106,6 +106,7 @@ func runRebuild(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *lo
     }
     log.Info(ctx, logs.BlobstoreRebuildStarted)
     ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String())
+    // TODO: use shard limiter
     if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, limiter, fillPercent); err != nil {
         log.Warn(ctx, logs.FailedToRebuildBlobstore, zap.Error(err))
     } else {
diff --git a/pkg/local_object_storage/shard/select.go b/pkg/local_object_storage/shard/select.go
index c7c7e11c2..fbc751e26 100644
--- a/pkg/local_object_storage/shard/select.go
+++ b/pkg/local_object_storage/shard/select.go
@@ -60,6 +60,12 @@ func (s *Shard) Select(ctx context.Context, prm SelectPrm) (SelectRes, error) {
         return SelectRes{}, ErrDegradedMode
     }
 
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        return SelectRes{}, err
+    }
+    defer release()
+
     var selectPrm meta.SelectPrm
     selectPrm.SetFilters(prm.filters)
     selectPrm.SetContainerID(prm.cnr)
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" @@ -98,6 +99,8 @@ type cfg struct { reportErrorFunc func(ctx context.Context, selfID string, message string, err error) containerInfo container.InfoProvider + + opsLimiter qos.Limiter } func defaultCfg() *cfg { @@ -109,6 +112,7 @@ func defaultCfg() *cfg { zeroSizeContainersCallback: func(context.Context, []cid.ID) {}, zeroCountContainersCallback: func(context.Context, []cid.ID) {}, metricsWriter: noopMetrics{}, + opsLimiter: qos.NewNoopLimiter(), } } @@ -368,6 +372,12 @@ func WithContainerInfoProvider(containerInfo container.InfoProvider) Option { } } +func WithLimiter(l qos.Limiter) Option { + return func(c *cfg) { + c.opsLimiter = l + } +} + func (s *Shard) fillInfo() { s.cfg.info.MetaBaseInfo = s.metaBase.DumpInfo() s.cfg.info.BlobStorInfo = s.blobStor.DumpInfo() diff --git a/pkg/local_object_storage/shard/tree.go b/pkg/local_object_storage/shard/tree.go index 01a014cec..e9cd5f8c1 100644 --- a/pkg/local_object_storage/shard/tree.go +++ b/pkg/local_object_storage/shard/tree.go @@ -43,6 +43,11 @@ func (s *Shard) TreeMove(ctx context.Context, d pilorama.CIDDescriptor, treeID s if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return nil, err + } + defer release() return s.pilorama.TreeMove(ctx, d, treeID, m) } @@ -75,6 +80,11 @@ func (s *Shard) TreeAddByPath(ctx context.Context, d pilorama.CIDDescriptor, tre if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return nil, err + } + defer release() return s.pilorama.TreeAddByPath(ctx, d, treeID, attr, path, meta) } @@ -103,6 +113,11 @@ func (s *Shard) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m * if s.info.Mode.NoMetabase() { return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() return s.pilorama.TreeApply(ctx, cnr, treeID, m, backgroundSync) } @@ -130,6 +145,11 @@ func (s *Shard) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string if s.info.Mode.NoMetabase() { return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() return s.pilorama.TreeApplyBatch(ctx, cnr, treeID, m) } @@ -157,6 +177,11 @@ func (s *Shard) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() return s.pilorama.TreeGetByPath(ctx, cid, treeID, attr, path, latest) } @@ -182,6 +207,11 @@ func (s *Shard) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, n if s.info.Mode.NoMetabase() { return pilorama.Meta{}, 0, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return pilorama.Meta{}, 0, err + } + defer release() return s.pilorama.TreeGetMeta(ctx, cid, treeID, nodeID) } @@ -207,6 +237,11 @@ func (s *Shard) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID strin if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() return s.pilorama.TreeGetChildren(ctx, cid, treeID, nodeID) } @@ -231,6 +266,11 @@ func (s *Shard) TreeSortedByFilename(ctx context.Context, 
diff --git a/pkg/local_object_storage/shard/tree.go b/pkg/local_object_storage/shard/tree.go
index 01a014cec..e9cd5f8c1 100644
--- a/pkg/local_object_storage/shard/tree.go
+++ b/pkg/local_object_storage/shard/tree.go
@@ -43,6 +43,11 @@ func (s *Shard) TreeMove(ctx context.Context, d pilorama.CIDDescriptor, treeID s
     if s.info.Mode.NoMetabase() {
         return nil, ErrDegradedMode
     }
+    release, err := s.opsLimiter.WriteRequest(ctx)
+    if err != nil {
+        return nil, err
+    }
+    defer release()
     return s.pilorama.TreeMove(ctx, d, treeID, m)
 }
 
@@ -75,6 +80,11 @@ func (s *Shard) TreeAddByPath(ctx context.Context, d pilorama.CIDDescriptor, tre
     if s.info.Mode.NoMetabase() {
         return nil, ErrDegradedMode
     }
+    release, err := s.opsLimiter.WriteRequest(ctx)
+    if err != nil {
+        return nil, err
+    }
+    defer release()
     return s.pilorama.TreeAddByPath(ctx, d, treeID, attr, path, meta)
 }
 
@@ -103,6 +113,11 @@ func (s *Shard) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *
     if s.info.Mode.NoMetabase() {
         return ErrDegradedMode
     }
+    release, err := s.opsLimiter.WriteRequest(ctx)
+    if err != nil {
+        return err
+    }
+    defer release()
     return s.pilorama.TreeApply(ctx, cnr, treeID, m, backgroundSync)
 }
 
@@ -130,6 +145,11 @@ func (s *Shard) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string
     if s.info.Mode.NoMetabase() {
         return ErrDegradedMode
     }
+    release, err := s.opsLimiter.WriteRequest(ctx)
+    if err != nil {
+        return err
+    }
+    defer release()
     return s.pilorama.TreeApplyBatch(ctx, cnr, treeID, m)
 }
 
@@ -157,6 +177,11 @@ func (s *Shard) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string,
     if s.info.Mode.NoMetabase() {
         return nil, ErrDegradedMode
     }
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        return nil, err
+    }
+    defer release()
     return s.pilorama.TreeGetByPath(ctx, cid, treeID, attr, path, latest)
 }
 
@@ -182,6 +207,11 @@ func (s *Shard) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, n
     if s.info.Mode.NoMetabase() {
         return pilorama.Meta{}, 0, ErrDegradedMode
     }
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        return pilorama.Meta{}, 0, err
+    }
+    defer release()
     return s.pilorama.TreeGetMeta(ctx, cid, treeID, nodeID)
 }
 
@@ -207,6 +237,11 @@ func (s *Shard) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID strin
     if s.info.Mode.NoMetabase() {
         return nil, ErrDegradedMode
     }
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        return nil, err
+    }
+    defer release()
     return s.pilorama.TreeGetChildren(ctx, cid, treeID, nodeID)
 }
 
@@ -231,6 +266,11 @@ func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID
     if s.info.Mode.NoMetabase() {
         return nil, last, ErrDegradedMode
     }
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        return nil, last, err
+    }
+    defer release()
     return s.pilorama.TreeSortedByFilename(ctx, cid, treeID, nodeID, last, count)
 }
 
@@ -256,6 +296,11 @@ func (s *Shard) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string,
     if s.info.Mode.NoMetabase() {
         return pilorama.Move{}, ErrDegradedMode
     }
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        return pilorama.Move{}, err
+    }
+    defer release()
     return s.pilorama.TreeGetOpLog(ctx, cid, treeID, height)
 }
 
@@ -280,6 +325,11 @@ func (s *Shard) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string) erro
     if s.info.Mode.NoMetabase() {
         return ErrDegradedMode
     }
+    release, err := s.opsLimiter.WriteRequest(ctx)
+    if err != nil {
+        return err
+    }
+    defer release()
     return s.pilorama.TreeDrop(ctx, cid, treeID)
 }
 
@@ -303,6 +353,11 @@ func (s *Shard) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, error) {
     if s.info.Mode.NoMetabase() {
         return nil, ErrDegradedMode
     }
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        return nil, err
+    }
+    defer release()
     return s.pilorama.TreeList(ctx, cid)
 }
 
@@ -326,6 +381,11 @@ func (s *Shard) TreeHeight(ctx context.Context, cid cidSDK.ID, treeID string) (u
     if s.pilorama == nil {
         return 0, ErrPiloramaDisabled
     }
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        return 0, err
+    }
+    defer release()
     return s.pilorama.TreeHeight(ctx, cid, treeID)
 }
 
@@ -350,6 +410,11 @@ func (s *Shard) TreeExists(ctx context.Context, cid cidSDK.ID, treeID string) (b
     if s.info.Mode.NoMetabase() {
         return false, ErrDegradedMode
     }
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        return false, err
+    }
+    defer release()
     return s.pilorama.TreeExists(ctx, cid, treeID)
 }
 
@@ -378,6 +443,11 @@ func (s *Shard) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, tre
     if s.info.Mode.NoMetabase() {
         return ErrDegradedMode
     }
+    release, err := s.opsLimiter.WriteRequest(ctx)
+    if err != nil {
+        return err
+    }
+    defer release()
     return s.pilorama.TreeUpdateLastSyncHeight(ctx, cid, treeID, height)
 }
 
@@ -402,6 +472,11 @@ func (s *Shard) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID st
     if s.info.Mode.NoMetabase() {
         return 0, ErrDegradedMode
     }
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        return 0, err
+    }
+    defer release()
     return s.pilorama.TreeLastSyncHeight(ctx, cid, treeID)
 }
 
@@ -423,6 +498,11 @@ func (s *Shard) TreeListTrees(ctx context.Context, prm pilorama.TreeListTreesPrm
     if s.info.Mode.NoMetabase() {
         return nil, ErrDegradedMode
     }
+    release, err := s.opsLimiter.ReadRequest(ctx)
+    if err != nil {
+        return nil, err
+    }
+    defer release()
     return s.pilorama.TreeListTrees(ctx, prm)
 }
 
@@ -452,5 +532,10 @@ func (s *Shard) TreeApplyStream(ctx context.Context, cnr cidSDK.ID, treeID strin
     if s.info.Mode.NoMetabase() {
         return ErrDegradedMode
     }
+    release, err := s.opsLimiter.WriteRequest(ctx)
+    if err != nil {
+        return err
+    }
+    defer release()
     return s.pilorama.TreeApplyStream(ctx, cnr, treeID, source)
 }
diff --git a/pkg/local_object_storage/shard/writecache.go b/pkg/local_object_storage/shard/writecache.go
index f655e477a..9edb89df8 100644
--- a/pkg/local_object_storage/shard/writecache.go
+++ b/pkg/local_object_storage/shard/writecache.go
@@ -67,6 +67,12 @@ func (s *Shard) FlushWriteCache(ctx context.Context, p FlushWriteCachePrm) error
         return ErrDegradedMode
     }
 
+    release, err := s.opsLimiter.WriteRequest(ctx)
+    if err != nil {
+        return err
+    }
+    defer release()
+
     return s.writeCache.Flush(ctx, p.ignoreErrors, p.seal)
 }
 
@@ -124,6 +130,13 @@ func (s *Shard) SealWriteCache(ctx context.Context, p SealWriteCachePrm) error {
             close(started)
             defer cleanup()
 
+            release, err := s.opsLimiter.WriteRequest(ctx)
+            if err != nil {
+                s.log.Warn(ctx, logs.FailedToSealWritecacheAsync, zap.Error(err))
+                return
+            }
+            defer release()
+
             s.log.Info(ctx, logs.StartedWritecacheSealAsync)
             if err := s.writeCache.Seal(ctx, prm); err != nil {
                 s.log.Warn(ctx, logs.FailedToSealWritecacheAsync, zap.Error(err))
@@ -138,5 +151,11 @@
             return nil
         }
     }
+    release, err := s.opsLimiter.WriteRequest(ctx)
+    if err != nil {
+        return err
+    }
+    defer release()
+
     return s.writeCache.Seal(ctx, prm)
 }
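A cheap way to exercise the new gating in tests is a stub limiter injected via shard.WithLimiter. The double below is hypothetical and assumes the three-method contract sketched earlier (the real interface may require additional methods):

```go
package shard_test

import (
	"context"
	"errors"
	"sync/atomic"

	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
)

// stubLimiter counts acquisitions and can be switched to reject every request,
// letting a test assert that shard operations fail fast with the limiter error.
type stubLimiter struct {
	reject bool
	reads  atomic.Int64
	writes atomic.Int64
}

func (l *stubLimiter) ReadRequest(context.Context) (qos.ReleaseFunc, error) {
	if l.reject {
		return nil, errors.New("read limit exceeded")
	}
	l.reads.Add(1)
	return func() {}, nil
}

func (l *stubLimiter) WriteRequest(context.Context) (qos.ReleaseFunc, error) {
	if l.reject {
		return nil, errors.New("write limit exceeded")
	}
	l.writes.Add(1)
	return func() {}, nil
}

func (l *stubLimiter) Close() {}
```

A shard built with shard.WithLimiter(&stubLimiter{reject: true}) should then see Get, Put, Head and the other metabase-touching operations above return the injected error before reaching the metabase.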