forked from TrueCloudLab/frostfs-node

[#1437] node: Use ctx for logging

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>

parent c16dae8b4d
commit 6db46257c0

157 changed files with 764 additions and 713 deletions
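The pattern behind the diff below: every logger method gains a context.Context as its first argument, so the logger can attach the trace ID from the context itself, instead of each call site appending zap.String("trace_id", tracingPkg.GetTraceID(ctx)) by hand. A minimal sketch of such a wrapper, in the spirit of this change — the package name, wrapper type, and trace-ID helper here are illustrative assumptions, not the repository's actual API:

// Package loggingsketch is a minimal, illustrative take on the pattern this
// commit adopts: logger methods accept ctx so trace IDs are attached centrally.
package loggingsketch

import (
    "context"

    "go.uber.org/zap"
)

// traceIDKey is a stand-in context key; frostfs-node derives the trace ID
// from its tracing package rather than from a raw context value.
type traceIDKey struct{}

func traceID(ctx context.Context) string {
    id, _ := ctx.Value(traceIDKey{}).(string)
    return id
}

// Logger wraps *zap.Logger. Every method takes ctx first, mirroring the
// e.log.Warn(ctx, ...) / e.log.Info(ctx, ...) call sites in the diff.
type Logger struct {
    z *zap.Logger
}

func (l *Logger) withTrace(ctx context.Context, fields []zap.Field) []zap.Field {
    if id := traceID(ctx); id != "" {
        // Appended once here instead of at every call site.
        fields = append(fields, zap.String("trace_id", id))
    }
    return fields
}

func (l *Logger) Info(ctx context.Context, msg string, fields ...zap.Field) {
    l.z.Info(msg, l.withTrace(ctx, fields)...)
}

func (l *Logger) Warn(ctx context.Context, msg string, fields ...zap.Field) {
    l.z.Warn(msg, l.withTrace(ctx, fields)...)
}

One payoff of this shape shows up in the cancellation checks below: the select blocks stay one-liners, and the trace ID travels with ctx for free.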
@@ -90,7 +90,7 @@ func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, e
 		if !prm.forceRemoval {
 			locked, err := e.IsLocked(ctx, prm.addrs[i])
 			if err != nil {
-				e.log.Warn(logs.EngineRemovingAnObjectWithoutFullLockingCheck,
+				e.log.Warn(ctx, logs.EngineRemovingAnObjectWithoutFullLockingCheck,
 					zap.Error(err),
 					zap.Stringer("addr", prm.addrs[i]),
 					zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
@@ -264,7 +264,7 @@ func (e *StorageEngine) processExpiredLocks(ctx context.Context, epoch uint64, l
 
 		select {
 		case <-ctx.Done():
-			e.log.Info(logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(ctx.Err()))
+			e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(ctx.Err()))
 			return true
 		default:
 			return false
@@ -278,7 +278,7 @@ func (e *StorageEngine) processDeletedLocks(ctx context.Context, lockers []oid.A
 
 		select {
 		case <-ctx.Done():
-			e.log.Info(logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(ctx.Err()))
+			e.log.Info(ctx, logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(ctx.Err()))
 			return true
 		default:
 			return false
@@ -305,7 +305,7 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid
 	e.iterateOverUnsortedShards(func(sh hashedShard) bool {
 		select {
 		case <-ctx.Done():
-			e.log.Info(logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err()))
+			e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err()))
 			failed = true
 			return true
 		default:
@@ -316,7 +316,7 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid
 		prm.SetContainerID(id)
 		s, err := sh.ContainerSize(prm)
 		if err != nil {
-			e.log.Warn(logs.EngineFailedToGetContainerSize, zap.Stringer("container_id", id), zap.Error(err))
+			e.log.Warn(ctx, logs.EngineFailedToGetContainerSize, zap.Stringer("container_id", id), zap.Error(err))
 			failed = true
 			return true
 		}
@@ -338,7 +338,7 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid
 	e.iterateOverUnsortedShards(func(sh hashedShard) bool {
 		select {
 		case <-ctx.Done():
-			e.log.Info(logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err()))
+			e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err()))
 			failed = true
 			return true
 		default:
@@ -346,7 +346,7 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid
 
 		for id := range idMap {
 			if err := sh.DeleteContainerSize(ctx, id); err != nil {
-				e.log.Warn(logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err))
+				e.log.Warn(ctx, logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err))
 				failed = true
 				return true
 			}
@@ -383,7 +383,7 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci
 	e.iterateOverUnsortedShards(func(sh hashedShard) bool {
 		select {
 		case <-ctx.Done():
-			e.log.Info(logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err()))
+			e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err()))
 			failed = true
 			return true
 		default:
@@ -394,7 +394,7 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci
 		prm.ContainerID = id
 		s, err := sh.ContainerCount(ctx, prm)
 		if err != nil {
-			e.log.Warn(logs.EngineFailedToGetContainerCounters, zap.Stringer("container_id", id), zap.Error(err))
+			e.log.Warn(ctx, logs.EngineFailedToGetContainerCounters, zap.Stringer("container_id", id), zap.Error(err))
 			failed = true
 			return true
 		}
@@ -416,7 +416,7 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci
 	e.iterateOverUnsortedShards(func(sh hashedShard) bool {
 		select {
 		case <-ctx.Done():
-			e.log.Info(logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err()))
+			e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err()))
 			failed = true
 			return true
 		default:
@@ -424,7 +424,7 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci
 
		for id := range idMap {
 			if err := sh.DeleteContainerCount(ctx, id); err != nil {
-				e.log.Warn(logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err))
+				e.log.Warn(ctx, logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err))
 				failed = true
 				return true
 			}
@@ -449,7 +449,7 @@ func (e *StorageEngine) selectNonExistentIDs(ctx context.Context, ids []cid.ID)
 	for _, id := range ids {
 		isAvailable, err := cs.IsContainerAvailable(ctx, id)
 		if err != nil {
-			e.log.Warn(logs.EngineFailedToCheckContainerAvailability, zap.Stringer("container_id", id), zap.Error(err))
+			e.log.Warn(ctx, logs.EngineFailedToCheckContainerAvailability, zap.Stringer("container_id", id), zap.Error(err))
 			return nil, err
 		}
 		if isAvailable {