frostfs-node/pkg/innerring/processors/netmap/handlers.go

package netmap

import (
	"context"
	"encoding/hex"

	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors"
	timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
	netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"

	"go.uber.org/zap"
)
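
// HandleNewEpochTick handles a new epoch timer tick: it asserts the event type,
// logs the tick and submits processNewEpochTick to the worker pool. If the pool
// is drained, the handler only logs a warning.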
func (np *Processor) HandleNewEpochTick(ctx context.Context, ev event.Event) {
	_ = ev.(timerEvent.NewEpochTick)
	np.log.Info(ctx, logs.NetmapTick, zap.String("type", "epoch"))

	// send an event to the worker pool
	err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch_tick", func() bool { return np.processNewEpochTick(ctx) })
	if err != nil {
		// the system can be moved into a controlled degradation stage
		np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
			zap.Int("capacity", np.pool.Cap()))
	}
}
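
// handleNewEpoch handles a netmap NewEpoch notification: it logs the new epoch
// number and submits processNewEpoch to the worker pool.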
func (np *Processor) handleNewEpoch(ctx context.Context, ev event.Event) {
	epochEvent := ev.(netmapEvent.NewEpoch)
	np.log.Info(ctx, logs.Notification,
		zap.String("type", "new epoch"),
		zap.Uint64("value", epochEvent.EpochNumber()))

	// send an event to the worker pool
	err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch", func() bool {
		return np.processNewEpoch(ctx, epochEvent)
	})
	if err != nil {
		// the system can be moved into a controlled degradation stage
		np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
			zap.Int("capacity", np.pool.Cap()))
	}
}
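
// handleAddPeer handles a netmap AddPeer notification: it logs the event and
// submits processAddPeer to the worker pool.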
func (np *Processor) handleAddPeer(ctx context.Context, ev event.Event) {
	newPeer := ev.(netmapEvent.AddPeer)
	np.log.Info(ctx, logs.Notification,
		zap.String("type", "add peer"),
	)

	// send an event to the worker pool
	err := processors.SubmitEvent(np.pool, np.metrics, "netmap_add_peer", func() bool {
		return np.processAddPeer(newPeer)
	})
	if err != nil {
		// the system can be moved into a controlled degradation stage
		np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
			zap.Int("capacity", np.pool.Cap()))
	}
}
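
// handleUpdateState handles a netmap UpdatePeer notification: it logs the
// peer's public key and submits processUpdatePeer to the worker pool.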
func (np *Processor) handleUpdateState(ctx context.Context, ev event.Event) {
	updPeer := ev.(netmapEvent.UpdatePeer)
	np.log.Info(ctx, logs.Notification,
		zap.String("type", "update peer state"),
		zap.String("key", hex.EncodeToString(updPeer.PublicKey().Bytes())))

	// send an event to the worker pool
	err := processors.SubmitEvent(np.pool, np.metrics, "netmap_update_peer", func() bool {
		return np.processUpdatePeer(updPeer)
	})
	if err != nil {
		// the system can be moved into a controlled degradation stage
		np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
			zap.Int("capacity", np.pool.Cap()))
	}
}
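
// handleCleanupTick handles a netmap cleanup tick: if the cleanup routine is
// enabled, it submits processNetmapCleanupTick to the worker pool.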
func (np *Processor) handleCleanupTick(ctx context.Context, ev event.Event) {
	if !np.netmapSnapshot.enabled {
		np.log.Debug(ctx, logs.NetmapNetmapCleanUpRoutineIsDisabled518)

		return
	}

	cleanup := ev.(netmapCleanupTick)
	np.log.Info(ctx, logs.NetmapTick, zap.String("type", "netmap cleaner"))

	// send an event to the worker pool
	err := processors.SubmitEvent(np.pool, np.metrics, "netmap_cleanup_tick", func() bool {
		return np.processNetmapCleanupTick(cleanup)
	})
	if err != nil {
		// the system can be moved into a controlled degradation stage
		np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
			zap.Int("capacity", np.pool.Cap()))
	}
}