forked from TrueCloudLab/frostfs-node
[#1437] ir: Fix contextcheck linters
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
parent 16598553d9
commit 6921a89061
27 changed files with 165 additions and 157 deletions
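The change is mechanical and applied uniformly across the inner-ring processors: each event handler now receives a context.Context from its caller and forwards it, instead of fabricating a fresh context.Background() at every call site. As a rough sketch of the pattern (the processor, logger, and message names here are illustrative only, not code from the repository), the contextcheck linter stops reporting once a handler inherits its context:

package main

import (
	"context"
	"fmt"
)

// logger mimics the shape of the project's zap-backed logger:
// the first argument of every logging call is a context.
type logger struct{}

func (logger) Info(ctx context.Context, msg string) { fmt.Println(msg) }

type processor struct{ log logger }

// Before: the handler fabricates its own context, so cancellation and
// tracing data from the caller are lost; contextcheck flags this once
// the surrounding call chain carries a ctx.
func (p *processor) handleTickBefore(_ any) {
	p.log.Info(context.Background(), "tick")
}

// After: the caller's context is accepted and propagated, which is the
// shape every handler in the diff below is rewritten into.
func (p *processor) handleTickAfter(ctx context.Context, _ any) {
	p.log.Info(ctx, "tick")
}

func main() {
	p := &processor{}
	p.handleTickBefore(nil)
	p.handleTickAfter(context.Background(), nil)
}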
@@ -12,13 +12,13 @@ import (
 	"go.uber.org/zap"
 )
 
-func (np *Processor) HandleNewEpochTick(ev event.Event) {
+func (np *Processor) HandleNewEpochTick(ctx context.Context, ev event.Event) {
 	_ = ev.(timerEvent.NewEpochTick)
-	np.log.Info(context.Background(), logs.NetmapTick, zap.String("type", "epoch"))
+	np.log.Info(ctx, logs.NetmapTick, zap.String("type", "epoch"))
 
 	// send an event to the worker pool
 
-	err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch_tick", np.processNewEpochTick)
+	err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch_tick", func() bool { return np.processNewEpochTick(ctx) })
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		np.log.Warn(context.Background(), logs.NetmapNetmapWorkerPoolDrained,
+		np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
@@ -26,28 +26,28 @@ func (np *Processor) HandleNewEpochTick(ev event.Event) {
 	}
 }
 
-func (np *Processor) handleNewEpoch(ev event.Event) {
+func (np *Processor) handleNewEpoch(ctx context.Context, ev event.Event) {
 	epochEvent := ev.(netmapEvent.NewEpoch)
-	np.log.Info(context.Background(), logs.Notification,
+	np.log.Info(ctx, logs.Notification,
 		zap.String("type", "new epoch"),
 		zap.Uint64("value", epochEvent.EpochNumber()))
 
 	// send an event to the worker pool
 
 	err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch", func() bool {
-		return np.processNewEpoch(epochEvent)
+		return np.processNewEpoch(ctx, epochEvent)
 	})
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		np.log.Warn(context.Background(), logs.NetmapNetmapWorkerPoolDrained,
+		np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
 			zap.Int("capacity", np.pool.Cap()))
 	}
 }
 
-func (np *Processor) handleAddPeer(ev event.Event) {
+func (np *Processor) handleAddPeer(ctx context.Context, ev event.Event) {
 	newPeer := ev.(netmapEvent.AddPeer)
 
-	np.log.Info(context.Background(), logs.Notification,
+	np.log.Info(ctx, logs.Notification,
 		zap.String("type", "add peer"),
 	)
 
@@ -58,14 +58,14 @@ func (np *Processor) handleAddPeer(ev event.Event) {
 	})
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		np.log.Warn(context.Background(), logs.NetmapNetmapWorkerPoolDrained,
+		np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
 			zap.Int("capacity", np.pool.Cap()))
 	}
 }
 
-func (np *Processor) handleUpdateState(ev event.Event) {
+func (np *Processor) handleUpdateState(ctx context.Context, ev event.Event) {
 	updPeer := ev.(netmapEvent.UpdatePeer)
-	np.log.Info(context.Background(), logs.Notification,
+	np.log.Info(ctx, logs.Notification,
 		zap.String("type", "update peer state"),
 		zap.String("key", hex.EncodeToString(updPeer.PublicKey().Bytes())))
 
@@ -76,21 +76,21 @@ func (np *Processor) handleUpdateState(ev event.Event) {
 	})
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		np.log.Warn(context.Background(), logs.NetmapNetmapWorkerPoolDrained,
+		np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
 			zap.Int("capacity", np.pool.Cap()))
 	}
 }
 
-func (np *Processor) handleCleanupTick(ev event.Event) {
+func (np *Processor) handleCleanupTick(ctx context.Context, ev event.Event) {
 	if !np.netmapSnapshot.enabled {
-		np.log.Debug(context.Background(), logs.NetmapNetmapCleanUpRoutineIsDisabled518)
+		np.log.Debug(ctx, logs.NetmapNetmapCleanUpRoutineIsDisabled518)
 
 		return
 	}
 
 	cleanup := ev.(netmapCleanupTick)
 
-	np.log.Info(context.Background(), logs.NetmapTick, zap.String("type", "netmap cleaner"))
+	np.log.Info(ctx, logs.NetmapTick, zap.String("type", "netmap cleaner"))
 
 	// send event to the worker pool
 	err := processors.SubmitEvent(np.pool, np.metrics, "netmap_cleanup_tick", func() bool {
@@ -98,7 +98,7 @@ func (np *Processor) handleCleanupTick(ev event.Event) {
 	})
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		np.log.Warn(context.Background(), logs.NetmapNetmapWorkerPoolDrained,
+		np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
 			zap.Int("capacity", np.pool.Cap()))
 	}
 }
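One detail worth noting in the diff above: processors.SubmitEvent hands the worker pool a func() bool task, so a handler that now needs a context wraps its processing function in a closure that captures ctx (for example func() bool { return np.processNewEpochTick(ctx) }). A minimal sketch of that adapter shape, with the pool and task names invented for illustration:

package main

import (
	"context"
	"fmt"
)

// pool stands in for the processors' worker pool; the only assumption
// carried over from the diff is that submitted tasks have the shape
// func() bool.
type pool struct{ tasks chan func() bool }

func (p *pool) Submit(task func() bool) { p.tasks <- task }

// processTick plays the role of processNewEpochTick: it now takes a
// context, so it can no longer be submitted to the pool directly.
func processTick(ctx context.Context) bool {
	return ctx.Err() == nil
}

func main() {
	p := &pool{tasks: make(chan func() bool, 1)}
	ctx := context.Background()

	// The closure captures ctx and adapts processTick to the pool's
	// func() bool task type, mirroring the SubmitEvent call in the diff.
	p.Submit(func() bool { return processTick(ctx) })

	fmt.Println((<-p.tasks)()) // true
}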