forked from TrueCloudLab/frostfs-node
104 lines
3.3 KiB
Go
package netmap
|
|
|
|
import (
|
|
"context"
|
|
"encoding/hex"
|
|
|
|
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
|
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors"
|
|
timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
|
|
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
|
|
netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
|
|
"go.uber.org/zap"
|
|
)
|
|
|
|
func (np *Processor) HandleNewEpochTick(ctx context.Context, ev event.Event) {
|
|
_ = ev.(timerEvent.NewEpochTick)
|
|
np.log.Info(ctx, logs.NetmapTick, zap.String("type", "epoch"))
|
|
|
|
// send an event to the worker pool
|
|
|
|
err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch_tick", func() bool { return np.processNewEpochTick(ctx) })
|
|
if err != nil {
|
|
// there system can be moved into controlled degradation stage
|
|
np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
|
|
zap.Int("capacity", np.pool.Cap()))
|
|
}
|
|
}
|
|
|
|
func (np *Processor) handleNewEpoch(ctx context.Context, ev event.Event) {
|
|
epochEvent := ev.(netmapEvent.NewEpoch)
|
|
np.log.Info(ctx, logs.Notification,
|
|
zap.String("type", "new epoch"),
|
|
zap.Uint64("value", epochEvent.EpochNumber()))
|
|
|
|
// send an event to the worker pool
|
|
|
|
err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch", func() bool {
|
|
return np.processNewEpoch(ctx, epochEvent)
|
|
})
|
|
if err != nil {
|
|
// there system can be moved into controlled degradation stage
|
|
np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
|
|
zap.Int("capacity", np.pool.Cap()))
|
|
}
|
|
}
|
|
|
|
func (np *Processor) handleAddPeer(ctx context.Context, ev event.Event) {
|
|
newPeer := ev.(netmapEvent.AddPeer)
|
|
|
|
np.log.Info(ctx, logs.Notification,
|
|
zap.String("type", "add peer"),
|
|
)
|
|
|
|
// send an event to the worker pool
|
|
|
|
err := processors.SubmitEvent(np.pool, np.metrics, "netmap_add_peer", func() bool {
|
|
return np.processAddPeer(ctx, newPeer)
|
|
})
|
|
if err != nil {
|
|
// there system can be moved into controlled degradation stage
|
|
np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
|
|
zap.Int("capacity", np.pool.Cap()))
|
|
}
|
|
}
|
|
|
|
func (np *Processor) handleUpdateState(ctx context.Context, ev event.Event) {
|
|
updPeer := ev.(netmapEvent.UpdatePeer)
|
|
np.log.Info(ctx, logs.Notification,
|
|
zap.String("type", "update peer state"),
|
|
zap.String("key", hex.EncodeToString(updPeer.PublicKey().Bytes())))
|
|
|
|
// send event to the worker pool
|
|
|
|
err := processors.SubmitEvent(np.pool, np.metrics, "netmap_update_peer", func() bool {
|
|
return np.processUpdatePeer(ctx, updPeer)
|
|
})
|
|
if err != nil {
|
|
// there system can be moved into controlled degradation stage
|
|
np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
|
|
zap.Int("capacity", np.pool.Cap()))
|
|
}
|
|
}
|
|
|
|
func (np *Processor) handleCleanupTick(ctx context.Context, ev event.Event) {
|
|
if !np.netmapSnapshot.enabled {
|
|
np.log.Debug(ctx, logs.NetmapNetmapCleanUpRoutineIsDisabled518)
|
|
|
|
return
|
|
}
|
|
|
|
cleanup := ev.(netmapCleanupTick)
|
|
|
|
np.log.Info(ctx, logs.NetmapTick, zap.String("type", "netmap cleaner"))
|
|
|
|
// send event to the worker pool
|
|
err := processors.SubmitEvent(np.pool, np.metrics, "netmap_cleanup_tick", func() bool {
|
|
return np.processNetmapCleanupTick(ctx, cleanup)
|
|
})
|
|
if err != nil {
|
|
// there system can be moved into controlled degradation stage
|
|
np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
|
|
zap.Int("capacity", np.pool.Cap()))
|
|
}
|
|
}
|