forked from TrueCloudLab/frostfs-node

[#240] logs: Fix log consts

Drop duplicate entities. Format entities.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>

parent 4b790be5f2
commit bc8ecb89ff

24 changed files with 187 additions and 190 deletions
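The whole change follows one pattern: duplicated, auto-numbered log constants are collapsed into shared, consistently cased names, while the message strings themselves stay the same. A minimal, self-contained Go sketch of that pattern, using only identifiers that appear in this diff (the version string stands in for misc.Version, and the constant is declared locally here instead of in internal/logs):

package main

import "go.uber.org/zap"

// Sketch only: in the repository this constant lives in internal/logs and
// replaces the duplicates FrostfsirApplicationStarted607 and
// FrostfsnodeApplicationStarted618.
const CommonApplicationStarted = "application started"

func main() {
	log, _ := zap.NewDevelopment()
	// Call sites swap only the identifier; the zap fields are untouched.
	log.Info(CommonApplicationStarted, zap.String("version", "v0.0.0"))
}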
@@ -81,13 +81,13 @@ func main() {
 err = innerRing.Start(ctx, intErr)
 exitErr(err)

-log.Info(logs.FrostfsirApplicationStarted607,
+log.Info(logs.CommonApplicationStarted,
 zap.String("version", misc.Version))

 select {
 case <-ctx.Done():
 case err := <-intErr:
-log.Info(logs.FrostfsirInternalError608, zap.String("msg", err.Error()))
+log.Info(logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
 }

 innerRing.Stop()
@@ -99,14 +99,14 @@ func main() {
 go func() {
 err := srv.Shutdown()
 if err != nil {
-log.Debug(logs.FrostfsirCouldNotShutdownHTTPServer609,
+log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
 zap.String("error", err.Error()),
 )
 }
 }()
 }

-log.Info(logs.FrostfsirApplicationStopped610)
+log.Info(logs.FrostFSIRApplicationStopped)
 }

 func initHTTPServers(cfg *viper.Viper, log *logger.Logger) []*httputil.Server {
@@ -343,13 +343,13 @@ type internals struct {
 func (c *cfg) startMaintenance() {
 c.isMaintenance.Store(true)
 c.cfgNetmap.state.setControlNetmapStatus(control.NetmapStatus_MAINTENANCE)
-c.log.Info(logs.FrostfsnodeStartedLocalNodesMaintenance624)
+c.log.Info(logs.FrostFSNodeStartedLocalNodesMaintenance)
 }

 // stops node's maintenance.
 func (c *internals) stopMaintenance() {
 c.isMaintenance.Store(false)
-c.log.Info(logs.FrostfsnodeStoppedLocalNodesMaintenance625)
+c.log.Info(logs.FrostFSNodeStoppedLocalNodesMaintenance)
 }

 // IsMaintenance checks if storage node is under maintenance.
@@ -882,10 +882,10 @@ func initLocalStorage(c *cfg) {
 for _, optsWithMeta := range c.shardOpts() {
 id, err := ls.AddShard(append(optsWithMeta.shOpts, shard.WithTombstoneSource(tombstoneSource))...)
 if err != nil {
-c.log.Error(logs.FrostfsnodeFailedToAttachShardToEngine626, zap.Error(err))
+c.log.Error(logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err))
 } else {
 shardsAttached++
-c.log.Info(logs.FrostfsnodeShardAttachedToEngine627, zap.Stringer("id", id))
+c.log.Info(logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id))
 }
 }
 if shardsAttached == 0 {
@@ -895,15 +895,15 @@ func initLocalStorage(c *cfg) {
 c.cfgObject.cfgLocalStorage.localStorage = ls

 c.onShutdown(func() {
-c.log.Info(logs.FrostfsnodeClosingComponentsOfTheStorageEngine628)
+c.log.Info(logs.FrostFSNodeClosingComponentsOfTheStorageEngine)

 err := ls.Close()
 if err != nil {
-c.log.Info(logs.FrostfsnodeStorageEngineClosingFailure629,
+c.log.Info(logs.FrostFSNodeStorageEngineClosingFailure,
 zap.String("error", err.Error()),
 )
 } else {
-c.log.Info(logs.FrostfsnodeAllComponentsOfTheStorageEngineClosedSuccessfully630)
+c.log.Info(logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
 }
 })
 }
@@ -977,11 +977,11 @@ func (c *cfg) bootstrap() error {
 // switch to online except when under maintenance
 st := c.cfgNetmap.state.controlNetmapStatus()
 if st == control.NetmapStatus_MAINTENANCE {
-c.log.Info(logs.FrostfsnodeBootstrappingWithTheMaintenanceState631)
+c.log.Info(logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
 return c.bootstrapWithState((*netmap.NodeInfo).SetMaintenance)
 }

-c.log.Info(logs.FrostfsnodeBootstrappingWithOnlineState632,
+c.log.Info(logs.FrostFSNodeBootstrappingWithOnlineState,
 zap.Stringer("previous", st),
 )

@@ -1016,32 +1016,32 @@ func (c *cfg) signalWatcher(ctx context.Context) {
 case syscall.SIGHUP:
 c.reloadConfig(ctx)
 case syscall.SIGTERM, syscall.SIGINT:
-c.log.Info(logs.FrostfsnodeTerminationSignalHasBeenReceivedStopping633)
+c.log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
 // TODO (@acid-ant): #49 need to cover case when stuck at the middle(node health UNDEFINED or STARTING)

 c.shutdown()

-c.log.Info(logs.FrostfsnodeTerminationSignalProcessingIsComplete634)
+c.log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
 return
 }
 case err := <-c.internalErr: // internal application error
-c.log.Warn(logs.FrostfsnodeInternalApplicationError635,
+c.log.Warn(logs.FrostFSNodeInternalApplicationError,
 zap.String("message", err.Error()))

 c.shutdown()

-c.log.Info(logs.FrostfsnodeInternalErrorProcessingIsComplete636)
+c.log.Info(logs.FrostFSNodeInternalErrorProcessingIsComplete)
 return
 }
 }
 }

 func (c *cfg) reloadConfig(ctx context.Context) {
-c.log.Info(logs.FrostfsnodeSIGHUPHasBeenReceivedRereadingConfiguration637)
+c.log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)

 err := c.readConfig(c.appCfg)
 if err != nil {
-c.log.Error(logs.FrostfsnodeConfigurationReading638, zap.Error(err))
+c.log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
 return
 }

@@ -1053,7 +1053,7 @@ func (c *cfg) reloadConfig(ctx context.Context) {

 logPrm, err := c.loggerPrm()
 if err != nil {
-c.log.Error(logs.FrostfsnodeLoggerConfigurationPreparation639, zap.Error(err))
+c.log.Error(logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err))
 return
 }

@@ -1061,7 +1061,7 @@ func (c *cfg) reloadConfig(ctx context.Context) {
 components = append(components, dCmp{"tracing", func() error {
 updated, err := tracing.Setup(ctx, *tracingconfig.ToTracingConfig(c.appCfg))
 if updated {
-c.log.Info(logs.FrostfsnodeTracingConfigationUpdated640)
+c.log.Info(logs.FrostFSNodeTracingConfigationUpdated)
 }
 return err
 }})
@@ -1086,20 +1086,20 @@ func (c *cfg) reloadConfig(ctx context.Context) {

 err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg)
 if err != nil {
-c.log.Error(logs.FrostfsnodeStorageEngineConfigurationUpdate641, zap.Error(err))
+c.log.Error(logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err))
 return
 }

 for _, component := range components {
 err = component.reloadFunc()
 if err != nil {
-c.log.Error(logs.FrostfsnodeUpdatedConfigurationApplying642,
+c.log.Error(logs.FrostFSNodeUpdatedConfigurationApplying,
 zap.String("component", component.name),
 zap.Error(err))
 }
 }

-c.log.Info(logs.FrostfsnodeConfigurationHasBeenReloadedSuccessfully643)
+c.log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
 }

 func (c *cfg) shutdown() {
@@ -137,13 +137,13 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
 } else {
 // unlike removal, we expect successful receive of the container
 // after successful creation, so logging can be useful
-c.log.Error(logs.FrostfsnodeReadNewlyCreatedContainerAfterTheNotification644,
+c.log.Error(logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification,
 zap.Stringer("id", ev.ID),
 zap.Error(err),
 )
 }

-c.log.Debug(logs.FrostfsnodeContainerCreationEventsReceipt645,
+c.log.Debug(logs.FrostFSNodeContainerCreationEventsReceipt,
 zap.Stringer("id", ev.ID),
 )
 })
@@ -162,7 +162,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c

 cachedContainerStorage.handleRemoval(ev.ID)

-c.log.Debug(logs.FrostfsnodeContainerRemovalEventsReceipt646,
+c.log.Debug(logs.FrostFSNodeContainerRemovalEventsReceipt,
 zap.Stringer("id", ev.ID),
 )
 })
@@ -296,7 +296,7 @@ type morphLoadWriter struct {
 }

 func (w *morphLoadWriter) Put(a containerSDK.SizeEstimation) error {
-w.log.Debug(logs.FrostfsnodeSaveUsedSpaceAnnouncementInContract647,
+w.log.Debug(logs.FrostFSNodeSaveUsedSpaceAnnouncementInContract,
 zap.Uint64("epoch", a.Epoch()),
 zap.Stringer("cid", a.Container()),
 zap.Uint64("size", a.Value()),
@@ -459,7 +459,7 @@ func (d *localStorageLoad) Iterate(f loadcontroller.UsedSpaceFilter, h loadcontr
 for i := range idList {
 sz, err := engine.ContainerSize(d.engine, idList[i])
 if err != nil {
-d.log.Debug(logs.FrostfsnodeFailedToCalculateContainerSizeInStorageEngine648,
+d.log.Debug(logs.FrostFSNodeFailedToCalculateContainerSizeInStorageEngine,
 zap.Stringer("cid", idList[i]),
 zap.String("error", err.Error()),
 )
@@ -467,7 +467,7 @@ func (d *localStorageLoad) Iterate(f loadcontroller.UsedSpaceFilter, h loadcontr
 continue
 }

-d.log.Debug(logs.FrostfsnodeContainerSizeInStorageEngineCalculatedSuccessfully649,
+d.log.Debug(logs.FrostFSNodeContainerSizeInStorageEngineCalculatedSuccessfully,
 zap.Uint64("size", sz),
 zap.Stringer("cid", idList[i]),
 )
@@ -53,7 +53,7 @@ func initControlService(c *cfg) {

 lis, err := net.Listen("tcp", endpoint)
 if err != nil {
-c.log.Error(logs.FrostfsnodeCantListenGRPCEndpointControl680, zap.Error(err))
+c.log.Error(logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err))
 return
 }

@@ -34,7 +34,7 @@ func initGRPC(c *cfg) {
 if tlsCfg != nil {
 cert, err := tls.LoadX509KeyPair(tlsCfg.CertificateFile(), tlsCfg.KeyFile())
 if err != nil {
-c.log.Error(logs.FrostfsnodeCouldNotReadCertificateFromFile611, zap.Error(err))
+c.log.Error(logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err))
 return
 }

@@ -64,7 +64,7 @@ func initGRPC(c *cfg) {

 lis, err := net.Listen("tcp", sc.Endpoint())
 if err != nil {
-c.log.Error(logs.FrostfsnodeCantListenGRPCEndpoint612, zap.Error(err))
+c.log.Error(logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
 return
 }

@@ -94,14 +94,14 @@ func serveGRPC(c *cfg) {

 go func() {
 defer func() {
-c.log.Info(logs.FrostfsnodeStopListeningGRPCEndpoint613,
+c.log.Info(logs.FrostFSNodeStopListeningGRPCEndpoint,
 zap.String("endpoint", lis.Addr().String()),
 )

 c.wg.Done()
 }()

-c.log.Info(logs.FrostfsnodeStartListeningGRPCEndpoint614,
+c.log.Info(logs.FrostFSNodeStartListeningGRPCEndpoint,
 zap.String("endpoint", lis.Addr().String()),
 )

@@ -115,7 +115,7 @@ func serveGRPC(c *cfg) {
 func stopGRPC(name string, s *grpc.Server, l *logger.Logger) {
 l = &logger.Logger{Logger: l.With(zap.String("name", name))}

-l.Info(logs.FrostfsnodeStoppingGRPCServer615)
+l.Info(logs.FrostFSNodeStoppingGRPCServer)

 // GracefulStop() may freeze forever, see #1270
 done := make(chan struct{})
@@ -127,9 +127,9 @@ func stopGRPC(name string, s *grpc.Server, l *logger.Logger) {
 select {
 case <-done:
 case <-time.After(1 * time.Minute):
-l.Info(logs.FrostfsnodeGRPCCannotShutdownGracefullyForcingStop616)
+l.Info(logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop)
 s.Stop()
 }

-l.Info(logs.FrostfsnodeGRPCServerStoppedSuccessfully617)
+l.Info(logs.FrostFSNodeGRPCServerStoppedSuccessfully)
 }
@@ -143,14 +143,14 @@ func bootUp(ctx context.Context, c *cfg) {
 }

 func wait(c *cfg, cancel func()) {
-c.log.Info(logs.FrostfsnodeApplicationStarted618,
+c.log.Info(logs.CommonApplicationStarted,
 zap.String("version", misc.Version))

 <-c.done // graceful shutdown

 cancel()

-c.log.Debug(logs.FrostfsnodeWaitingForAllProcessesToStop619)
+c.log.Debug(logs.FrostFSNodeWaitingForAllProcessesToStop)

 c.wg.Wait()
 }
@@ -50,7 +50,7 @@ func initMorphComponents(ctx context.Context, c *cfg) {
 client.WithSwitchInterval(morphconfig.SwitchInterval(c.appCfg)),
 )
 if err != nil {
-c.log.Info(logs.FrostfsnodeFailedToCreateNeoRPCClient661,
+c.log.Info(logs.FrostFSNodeFailedToCreateNeoRPCClient,
 zap.Any("endpoints", addresses),
 zap.String("error", err.Error()),
 )
@@ -59,12 +59,12 @@ func initMorphComponents(ctx context.Context, c *cfg) {
 }

 c.onShutdown(func() {
-c.log.Info(logs.FrostfsnodeClosingMorphComponents662)
+c.log.Info(logs.FrostFSNodeClosingMorphComponents)
 cli.Close()
 })

 if err := cli.SetGroupSignerScope(); err != nil {
-c.log.Info(logs.FrostfsnodeFailedToSetGroupSignerScopeContinueWithGlobal663, zap.Error(err))
+c.log.Info(logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err))
 }

 c.cfgMorph.client = cli
@@ -81,7 +81,7 @@ func initMorphComponents(ctx context.Context, c *cfg) {
 fatalOnErr(err)
 }

-c.log.Info(logs.FrostfsnodeNotarySupport664,
+c.log.Info(logs.FrostFSNodeNotarySupport,
 zap.Bool("sidechain_enabled", c.cfgMorph.notaryEnabled),
 )

@@ -96,7 +96,7 @@ func initMorphComponents(ctx context.Context, c *cfg) {
 msPerBlock, err := c.cfgMorph.client.MsPerBlock()
 fatalOnErr(err)
 c.cfgMorph.cacheTTL = time.Duration(msPerBlock) * time.Millisecond
-c.log.Debug(logs.FrostfsnodeMorphcache_ttlFetchedFromNetwork665, zap.Duration("value", c.cfgMorph.cacheTTL))
+c.log.Debug(logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL))
 }

 if c.cfgMorph.cacheTTL < 0 {
@@ -123,7 +123,7 @@ func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) {
 // non-error deposit with an empty TX hash means
 // that the deposit has already been made; no
 // need to wait it.
-c.log.Info(logs.FrostfsnodeNotaryDepositHasAlreadyBeenMade666)
+c.log.Info(logs.FrostFSNodeNotaryDepositHasAlreadyBeenMade)
 return
 }

@@ -191,7 +191,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
 fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey)
 if err != nil {
 fromSideChainBlock = 0
-c.log.Warn(logs.FrostfsnodeCantGetLastProcessedSideChainBlockNumber667, zap.String("error", err.Error()))
+c.log.Warn(logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
 }

 subs, err = subscriber.New(ctx, &subscriber.Params{
@@ -216,7 +216,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
 setNetmapNotificationParser(c, newEpochNotification, func(src *state.ContainedNotificationEvent) (event.Event, error) {
 res, err := netmapEvent.ParseNewEpoch(src)
 if err == nil {
-c.log.Info(logs.FrostfsnodeNewEpochEventFromSidechain668,
+c.log.Info(logs.FrostFSNodeNewEpochEventFromSidechain,
 zap.Uint64("number", res.(netmapEvent.NewEpoch).EpochNumber()),
 )
 }
@@ -227,11 +227,11 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
 registerNotificationHandlers(c.cfgContainer.scriptHash, lis, c.cfgContainer.parsers, c.cfgContainer.subscribers)

 registerBlockHandler(lis, func(block *block.Block) {
-c.log.Debug(logs.FrostfsnodeNewBlock669, zap.Uint32("index", block.Index))
+c.log.Debug(logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index))

 err = c.persistate.SetUInt32(persistateSideChainLastBlockKey, block.Index)
 if err != nil {
-c.log.Warn(logs.FrostfsnodeCantUpdatePersistentState670,
+c.log.Warn(logs.FrostFSNodeCantUpdatePersistentState,
 zap.String("chain", "side"),
 zap.Uint32("block_index", block.Index))
 }
@@ -194,7 +194,7 @@ func addNewEpochNotificationHandlers(c *cfg) {
 if (n-c.cfgNetmap.startEpoch)%reBootstrapInterval == 0 {
 err := c.bootstrap()
 if err != nil {
-c.log.Warn(logs.FrostfsnodeCantSendRebootstrapTx671, zap.Error(err))
+c.log.Warn(logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err))
 }
 }
 })
@@ -204,7 +204,7 @@ func addNewEpochNotificationHandlers(c *cfg) {

 ni, err := c.netmapLocalNodeState(e)
 if err != nil {
-c.log.Error(logs.FrostfsnodeCouldNotUpdateNodeStateOnNewEpoch672,
+c.log.Error(logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
 zap.Uint64("epoch", e),
 zap.String("error", err.Error()),
 )
@@ -219,7 +219,7 @@ func addNewEpochNotificationHandlers(c *cfg) {
 addNewEpochAsyncNotificationHandler(c, func(ev event.Event) {
 _, err := makeNotaryDeposit(c)
 if err != nil {
-c.log.Error(logs.FrostfsnodeCouldNotMakeNotaryDeposit673,
+c.log.Error(logs.FrostFSNodeCouldNotMakeNotaryDeposit,
 zap.String("error", err.Error()),
 )
 }
@@ -299,7 +299,7 @@ func initNetmapState(c *cfg) {
 }
 }

-c.log.Info(logs.FrostfsnodeInitialNetworkState674,
+c.log.Info(logs.FrostFSNodeInitialNetworkState,
 zap.Uint64("epoch", epoch),
 zap.String("state", stateWord),
 )
@@ -29,7 +29,7 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler

 listRes, err := n.e.ListContainers(engine.ListContainersPrm{})
 if err != nil {
-log.Error(logs.FrostfsnodeNotificatorCouldNotListContainers650, zap.Error(err))
+log.Error(logs.FrostFSNodeNotificatorCouldNotListContainers, zap.Error(err))
 return
 }

@@ -44,7 +44,7 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler

 selectRes, err := n.e.Select(selectPrm)
 if err != nil {
-log.Error(logs.FrostfsnodeNotificatorCouldNotSelectObjectsFromContainer651,
+log.Error(logs.FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer,
 zap.Stringer("cid", c),
 zap.Error(err),
 )
@@ -54,7 +54,7 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler
 for _, a := range selectRes.AddressList() {
 err = n.processAddress(ctx, a, handler)
 if err != nil {
-log.Error(logs.FrostfsnodeNotificatorCouldNotProcessObject652,
+log.Error(logs.FrostFSNodeNotificatorCouldNotProcessObject,
 zap.Stringer("address", a),
 zap.Error(err),
 )
@@ -63,7 +63,7 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler
 }
 }

-log.Debug(logs.FrostfsnodeNotificatorFinishedProcessingObjectNotifications653)
+log.Debug(logs.FrostFSNodeNotificatorFinishedProcessingObjectNotifications)
 }

 func (n *notificationSource) processAddress(
@@ -102,7 +102,7 @@ type notificationWriter struct {

 func (n notificationWriter) Notify(topic string, address oid.Address) {
 if err := n.w.Notify(topic, address); err != nil {
-n.l.Warn(logs.FrostfsnodeCouldNotWriteObjectNotification654,
+n.l.Warn(logs.FrostFSNodeCouldNotWriteObjectNotification,
 zap.Stringer("address", address),
 zap.String("topic", topic),
 zap.Error(err),
@@ -63,7 +63,7 @@ type objectSvc struct {
 func (c *cfg) MaxObjectSize() uint64 {
 sz, err := c.cfgNetmap.wrapper.MaxObjectSize()
 if err != nil {
-c.log.Error(logs.FrostfsnodeCouldNotGetMaxObjectSizeValue655,
+c.log.Error(logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
 zap.String("error", err.Error()),
 )
 }
@@ -260,7 +260,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *reputati

 _, err := ls.Inhume(ctx, inhumePrm)
 if err != nil {
-c.log.Warn(logs.FrostfsnodeCouldNotInhumeMarkRedundantCopyAsGarbage656,
+c.log.Warn(logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
 zap.String("error", err.Error()),
 )
 }
@@ -601,7 +601,7 @@ func (c *reputationClientConstructor) Get(info coreclient.NodeInfo) (coreclient.
 }
 }
 } else {
-c.log.Warn(logs.FrostfsnodeCouldNotGetLatestNetworkMapToOverloadTheClient658,
+c.log.Warn(logs.FrostFSNodeCouldNotGetLatestNetworkMapToOverloadTheClient,
 zap.String("error", err.Error()),
 )
 }
@@ -106,7 +106,7 @@ func addReputationReportHandler(ctx context.Context, c *cfg) {
 addNewEpochAsyncNotificationHandler(
 c,
 func(ev event.Event) {
-c.log.Debug(logs.FrostfsnodeStartReportingReputationOnNewEpochEvent620)
+c.log.Debug(logs.FrostFSNodeStartReportingReputationOnNewEpochEvent)

 var reportPrm localtrustcontroller.ReportPrm

@@ -128,13 +128,13 @@ func addEigenTrustEpochHandler(ctx context.Context, c *cfg, eigenTrustController

 duration, err := c.cfgNetmap.wrapper.EpochDuration()
 if err != nil {
-log.Debug(logs.FrostfsnodeCouldNotFetchEpochDuration621, zap.Error(err))
+log.Debug(logs.FrostFSNodeCouldNotFetchEpochDuration, zap.Error(err))
 return
 }

 iterations, err := c.cfgNetmap.wrapper.EigenTrustIterations()
 if err != nil {
-log.Debug(logs.FrostfsnodeCouldNotFetchIterationNumber622, zap.Error(err))
+log.Debug(logs.FrostFSNodeCouldNotFetchIterationNumber, zap.Error(err))
 return
 }

@@ -146,7 +146,7 @@ func addEigenTrustEpochHandler(ctx context.Context, c *cfg, eigenTrustController
 )
 })
 if err != nil {
-log.Debug(logs.FrostfsnodeCouldNotCreateFixedEpochTimer623, zap.Error(err))
+log.Debug(logs.FrostFSNodeCouldNotCreateFixedEpochTimer, zap.Error(err))
 return
 }

@@ -72,16 +72,16 @@ func NewRemoteTrustProvider(prm RemoteProviderPrm) *RemoteTrustProvider {
 }

 func (rtp *RemoteTrustProvider) InitRemote(srv reputationcommon.ServerInfo) (reputationcommon.WriterProvider, error) {
-rtp.log.Debug(logs.CommonInitializingRemoteWriterProvider681)
+rtp.log.Debug(logs.CommonInitializingRemoteWriterProvider)

 if srv == nil {
-rtp.log.Debug(logs.CommonRouteHasReachedDeadendProvider682)
+rtp.log.Debug(logs.CommonRouteHasReachedDeadendProvider)
 return rtp.deadEndProvider, nil
 }

 if rtp.netmapKeys.IsLocalKey(srv.PublicKey()) {
 // if local => return no-op writer
-rtp.log.Debug(logs.CommonInitializingNoopWriterProvider683)
+rtp.log.Debug(logs.CommonInitializingNoopWriterProvider)
 return trustcontroller.SimpleWriterProvider(new(NopReputationWriter)), nil
 }

@@ -32,7 +32,7 @@ type ConsumerTrustWriter struct {
 }

 func (w *ConsumerTrustWriter) Write(_ context.Context, t reputation.Trust) error {
-w.log.Debug(logs.IntermediateWritingReceivedConsumersTrusts684,
+w.log.Debug(logs.IntermediateWritingReceivedConsumersTrusts,
 zap.Uint64("epoch", w.iterInfo.Epoch()),
 zap.Uint32("iteration", w.iterInfo.I()),
 zap.Stringer("trusting_peer", t.TrustingPeer()),
@@ -72,7 +72,7 @@ type FinalWriter struct {
 }

 func (fw FinalWriter) WriteIntermediateTrust(t eigentrust.IterationTrust) error {
-fw.l.Debug(logs.IntermediateStartWritingGlobalTrustsToContract685)
+fw.l.Debug(logs.IntermediateStartWritingGlobalTrustsToContract)

 args := repClient.PutPrm{}

@@ -28,7 +28,7 @@ type DaughterTrustWriter struct {
 }

 func (w *DaughterTrustWriter) Write(_ context.Context, t reputation.Trust) error {
-w.log.Debug(logs.IntermediateWritingReceivedDaughtersTrusts689,
+w.log.Debug(logs.IntermediateWritingReceivedDaughtersTrusts,
 zap.Uint64("epoch", w.ep.Epoch()),
 zap.Stringer("trusting_peer", t.TrustingPeer()),
 zap.Stringer("trusted_peer", t.Peer()),
@@ -93,7 +93,7 @@ func (rtp *RemoteTrustWriter) Write(ctx context.Context, t reputation.Trust) err
 epoch := rtp.iterInfo.Epoch()
 i := rtp.iterInfo.I()

-rtp.log.Debug(logs.IntermediateAnnouncingTrust690,
+rtp.log.Debug(logs.IntermediateAnnouncingTrust,
 zap.Uint64("epoch", epoch),
 zap.Uint32("iteration", i),
 zap.Stringer("trusting_peer", t.TrustingPeer()),
@@ -97,7 +97,7 @@ func (rtp *RemoteTrustWriter) Write(_ context.Context, t reputation.Trust) error
 func (rtp *RemoteTrustWriter) Close(ctx context.Context) error {
 epoch := rtp.ep.Epoch()

-rtp.log.Debug(logs.LocalAnnouncingTrusts691,
+rtp.log.Debug(logs.LocalAnnouncingTrusts,
 zap.Uint64("epoch", epoch),
 )

@@ -28,7 +28,7 @@ type TrustStorage struct {
 func (s *TrustStorage) InitIterator(ep reputationcommon.EpochProvider) (trustcontroller.Iterator, error) {
 epoch := ep.Epoch()

-s.Log.Debug(logs.LocalInitializingIteratorOverTrusts692,
+s.Log.Debug(logs.LocalInitializingIteratorOverTrusts,
 zap.Uint64("epoch", epoch),
 )

@@ -15,7 +15,7 @@ func initTracing(ctx context.Context, c *cfg) {

 _, err := tracing.Setup(ctx, *conf)
 if err != nil {
-c.log.Error(logs.FrostfsnodeFailedInitTracing659, zap.Error(err))
+c.log.Error(logs.FrostFSNodeFailedInitTracing, zap.Error(err))
 }

 c.closers = append(c.closers, closer{
@@ -25,7 +25,7 @@ func initTracing(ctx context.Context, c *cfg) {
 defer cancel()
 err := tracing.Shutdown(ctx) //cfg context cancels before close
 if err != nil {
-c.log.Error(logs.FrostfsnodeFailedShutdownTracing660, zap.Error(err))
+c.log.Error(logs.FrostFSNodeFailedShutdownTracing, zap.Error(err))
 }
 },
 })
@@ -38,7 +38,7 @@ func (c cnrSource) List() ([]cid.ID, error) {
 func initTreeService(c *cfg) {
 treeConfig := treeconfig.Tree(c.appCfg)
 if !treeConfig.Enabled() {
-c.log.Info(logs.FrostfsnodeTreeServiceIsNotEnabledSkipInitialization675)
+c.log.Info(logs.FrostFSNodeTreeServiceIsNotEnabledSkipInitialization)
 return
 }

@@ -69,7 +69,7 @@ func initTreeService(c *cfg) {
 addNewEpochNotificationHandler(c, func(_ event.Event) {
 err := c.treeService.SynchronizeAll()
 if err != nil {
-c.log.Error(logs.FrostfsnodeCouldNotSynchronizeTreeService676, zap.Error(err))
+c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
 }
 })
 } else {
@@ -80,7 +80,7 @@ func initTreeService(c *cfg) {
 for range tick.C {
 err := c.treeService.SynchronizeAll()
 if err != nil {
-c.log.Error(logs.FrostfsnodeCouldNotSynchronizeTreeService677, zap.Error(err))
+c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
 if errors.Is(err, tree.ErrShuttingDown) {
 return
 }
@@ -93,11 +93,11 @@ func initTreeService(c *cfg) {
 ev := e.(containerEvent.DeleteSuccess)

 // This is executed asynchronously, so we don't care about the operation taking some time.
-c.log.Debug(logs.FrostfsnodeRemovingAllTreesForContainer678, zap.Stringer("cid", ev.ID))
+c.log.Debug(logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID))
 err := c.treeService.DropTree(context.Background(), ev.ID, "")
 if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
 // Ignore pilorama.ErrTreeNotFound but other errors, including shard.ErrReadOnly, should be logged.
-c.log.Error(logs.FrostfsnodeContainerRemovalEventReceivedButTreesWerentRemoved679,
+c.log.Error(logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved,
 zap.Stringer("cid", ev.ID),
 zap.String("error", err.Error()))
 }
@@ -597,101 +597,98 @@ const (
 AuditCouldNotGetContainerNodes593 = "could not get container nodes" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go
 AuditEmptyListOfContainerNodes594 = "empty list of container nodes" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
 AuditNoneOfTheContainerNodesPassedTheAudit595 = "none of the container nodes passed the audit" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
-AuditCouldNotGetSGInfo596 = "could not get SG info" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go
-AuditZeroSumSGSize597 = "zero sum SG size" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
-AuditCouldNotResolvePublicKeyOfTheStorageNode598 = "could not resolve public key of the storage node" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go
-AuditCalculatingStorageNodeSalaryForAuditGASe12599 = "calculating storage node salary for audit (GASe-12)" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
-AuditCouldNotParsePublicKeyOfTheInnerRingNode600 = "could not parse public key of the inner ring node" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go
-BasicCantGetBasicIncomeRate601 = "can't get basic income rate" // Error in ../node/pkg/innerring/processors/settlement/basic/collect.go
-BasicCantFetchContainerSizeEstimations602 = "can't fetch container size estimations" // Error in ../node/pkg/innerring/processors/settlement/basic/collect.go
-BasicCantFetchContainerInfo603 = "can't fetch container info" // Warn in ../node/pkg/innerring/processors/settlement/basic/collect.go
-BasicCantFetchContainerInfo604 = "can't fetch container info" // Debug in ../node/pkg/innerring/processors/settlement/basic/collect.go
-BasicCantFetchBalanceOfBankingAccount605 = "can't fetch balance of banking account" // Error in ../node/pkg/innerring/processors/settlement/basic/distribute.go
-BasicCantTransformPublicKeyToOwnerId606 = "can't transform public key to owner id" // Warn in ../node/pkg/innerring/processors/settlement/basic/distribute.go
-FrostfsirApplicationStarted607 = "application started" // Info in ../node/cmd/frostfs-ir/main.go
-FrostfsirInternalError608 = "internal error" // Info in ../node/cmd/frostfs-ir/main.go
-FrostfsirCouldNotShutdownHTTPServer609 = "could not shutdown HTTP server" // Debug in ../node/cmd/frostfs-ir/main.go
-FrostfsirApplicationStopped610 = "application stopped" // Info in ../node/cmd/frostfs-ir/main.go
-FrostfsnodeCouldNotReadCertificateFromFile611 = "could not read certificate from file" // Error in ../node/cmd/frostfs-node/grpc.go
-FrostfsnodeCantListenGRPCEndpoint612 = "can't listen gRPC endpoint" // Error in ../node/cmd/frostfs-node/grpc.go
-FrostfsnodeStopListeningGRPCEndpoint613 = "stop listening gRPC endpoint" // Info in ../node/cmd/frostfs-node/grpc.go
-FrostfsnodeStartListeningGRPCEndpoint614 = "start listening gRPC endpoint" // Info in ../node/cmd/frostfs-node/grpc.go
-FrostfsnodeStoppingGRPCServer615 = "stopping gRPC server..." // Info in ../node/cmd/frostfs-node/grpc.go
-FrostfsnodeGRPCCannotShutdownGracefullyForcingStop616 = "gRPC cannot shutdown gracefully, forcing stop" // Info in ../node/cmd/frostfs-node/grpc.go
-FrostfsnodeGRPCServerStoppedSuccessfully617 = "gRPC server stopped successfully" // Info in ../node/cmd/frostfs-node/grpc.go
-FrostfsnodeApplicationStarted618 = "application started" // Info in ../node/cmd/frostfs-node/main.go
-FrostfsnodeWaitingForAllProcessesToStop619 = "waiting for all processes to stop" // Debug in ../node/cmd/frostfs-node/main.go
-FrostfsnodeStartReportingReputationOnNewEpochEvent620 = "start reporting reputation on new epoch event" // Debug in ../node/cmd/frostfs-node/reputation.go
-FrostfsnodeCouldNotFetchEpochDuration621 = "could not fetch epoch duration" // Debug in ../node/cmd/frostfs-node/reputation.go
-FrostfsnodeCouldNotFetchIterationNumber622 = "could not fetch iteration number" // Debug in ../node/cmd/frostfs-node/reputation.go
-FrostfsnodeCouldNotCreateFixedEpochTimer623 = "could not create fixed epoch timer" // Debug in ../node/cmd/frostfs-node/reputation.go
-FrostfsnodeStartedLocalNodesMaintenance624 = "started local node's maintenance" // Info in ../node/cmd/frostfs-node/config.go
-FrostfsnodeStoppedLocalNodesMaintenance625 = "stopped local node's maintenance" // Info in ../node/cmd/frostfs-node/config.go
-FrostfsnodeFailedToAttachShardToEngine626 = "failed to attach shard to engine" // Error in ../node/cmd/frostfs-node/config.go
-FrostfsnodeShardAttachedToEngine627 = "shard attached to engine" // Info in ../node/cmd/frostfs-node/config.go
-FrostfsnodeClosingComponentsOfTheStorageEngine628 = "closing components of the storage engine..." // Info in ../node/cmd/frostfs-node/config.go
-FrostfsnodeStorageEngineClosingFailure629 = "storage engine closing failure" // Info in ../node/cmd/frostfs-node/config.go
-FrostfsnodeAllComponentsOfTheStorageEngineClosedSuccessfully630 = "all components of the storage engine closed successfully" // Info in ../node/cmd/frostfs-node/config.go
-FrostfsnodeBootstrappingWithTheMaintenanceState631 = "bootstrapping with the maintenance state" // Info in ../node/cmd/frostfs-node/config.go
-FrostfsnodeBootstrappingWithOnlineState632 = "bootstrapping with online state" // Info in ../node/cmd/frostfs-node/config.go
-FrostfsnodeTerminationSignalHasBeenReceivedStopping633 = "termination signal has been received, stopping..." // Info in ../node/cmd/frostfs-node/config.go
-FrostfsnodeTerminationSignalProcessingIsComplete634 = "termination signal processing is complete" // Info in ../node/cmd/frostfs-node/config.go
-FrostfsnodeInternalApplicationError635 = "internal application error" // Warn in ../node/cmd/frostfs-node/config.go
-FrostfsnodeInternalErrorProcessingIsComplete636 = "internal error processing is complete" // Info in ../node/cmd/frostfs-node/config.go
-FrostfsnodeSIGHUPHasBeenReceivedRereadingConfiguration637 = "SIGHUP has been received, rereading configuration..." // Info in ../node/cmd/frostfs-node/config.go
-FrostfsnodeConfigurationReading638 = "configuration reading" // Error in ../node/cmd/frostfs-node/config.go
-FrostfsnodeLoggerConfigurationPreparation639 = "logger configuration preparation" // Error in ../node/cmd/frostfs-node/config.go
-FrostfsnodeTracingConfigationUpdated640 = "tracing configation updated" // Info in ../node/cmd/frostfs-node/config.go
-FrostfsnodeStorageEngineConfigurationUpdate641 = "storage engine configuration update" // Error in ../node/cmd/frostfs-node/config.go
-FrostfsnodeUpdatedConfigurationApplying642 = "updated configuration applying" // Error in ../node/cmd/frostfs-node/config.go
-FrostfsnodeConfigurationHasBeenReloadedSuccessfully643 = "configuration has been reloaded successfully" // Info in ../node/cmd/frostfs-node/config.go
-FrostfsnodeReadNewlyCreatedContainerAfterTheNotification644 = "read newly created container after the notification" // Error in ../node/cmd/frostfs-node/container.go
-FrostfsnodeContainerCreationEventsReceipt645 = "container creation event's receipt" // Debug in ../node/cmd/frostfs-node/container.go
-FrostfsnodeContainerRemovalEventsReceipt646 = "container removal event's receipt" // Debug in ../node/cmd/frostfs-node/container.go
-FrostfsnodeSaveUsedSpaceAnnouncementInContract647 = "save used space announcement in contract" // Debug in ../node/cmd/frostfs-node/container.go
-FrostfsnodeFailedToCalculateContainerSizeInStorageEngine648 = "failed to calculate container size in storage engine" // Debug in ../node/cmd/frostfs-node/container.go
-FrostfsnodeContainerSizeInStorageEngineCalculatedSuccessfully649 = "container size in storage engine calculated successfully" // Debug in ../node/cmd/frostfs-node/container.go
-FrostfsnodeNotificatorCouldNotListContainers650 = "notificator: could not list containers" // Error in ../node/cmd/frostfs-node/notificator.go
-FrostfsnodeNotificatorCouldNotSelectObjectsFromContainer651 = "notificator: could not select objects from container" // Error in ../node/cmd/frostfs-node/notificator.go
-FrostfsnodeNotificatorCouldNotProcessObject652 = "notificator: could not process object" // Error in ../node/cmd/frostfs-node/notificator.go
-FrostfsnodeNotificatorFinishedProcessingObjectNotifications653 = "notificator: finished processing object notifications" // Debug in ../node/cmd/frostfs-node/notificator.go
-FrostfsnodeCouldNotWriteObjectNotification654 = "could not write object notification" // Warn in ../node/cmd/frostfs-node/notificator.go
-FrostfsnodeCouldNotGetMaxObjectSizeValue655 = "could not get max object size value" // Error in ../node/cmd/frostfs-node/object.go
-FrostfsnodeCouldNotInhumeMarkRedundantCopyAsGarbage656 = "could not inhume mark redundant copy as garbage" // Warn in ../node/cmd/frostfs-node/object.go
-FrostfsnodeWritingLocalReputationValues657 = "writing local reputation values" // Debug in ../node/cmd/frostfs-node/object.go
-FrostfsnodeCouldNotGetLatestNetworkMapToOverloadTheClient658 = "could not get latest network map to overload the client" // Warn in ../node/cmd/frostfs-node/object.go
-FrostfsnodeFailedInitTracing659 = "failed init tracing" // Error in ../node/cmd/frostfs-node/tracing.go
-FrostfsnodeFailedShutdownTracing660 = "failed shutdown tracing" // Error in ../node/cmd/frostfs-node/tracing.go
-FrostfsnodeFailedToCreateNeoRPCClient661 = "failed to create neo RPC client" // Info in ../node/cmd/frostfs-node/morph.go
-FrostfsnodeClosingMorphComponents662 = "closing morph components..." // Info in ../node/cmd/frostfs-node/morph.go
-FrostfsnodeFailedToSetGroupSignerScopeContinueWithGlobal663 = "failed to set group signer scope, continue with Global" // Info in ../node/cmd/frostfs-node/morph.go
-FrostfsnodeNotarySupport664 = "notary support" // Info in ../node/cmd/frostfs-node/morph.go
-FrostfsnodeMorphcache_ttlFetchedFromNetwork665 = "morph.cache_ttl fetched from network" // Debug in ../node/cmd/frostfs-node/morph.go
-FrostfsnodeNotaryDepositHasAlreadyBeenMade666 = "notary deposit has already been made" // Info in ../node/cmd/frostfs-node/morph.go
-FrostfsnodeCantGetLastProcessedSideChainBlockNumber667 = "can't get last processed side chain block number" // Warn in ../node/cmd/frostfs-node/morph.go
-FrostfsnodeNewEpochEventFromSidechain668 = "new epoch event from sidechain" // Info in ../node/cmd/frostfs-node/morph.go
-FrostfsnodeNewBlock669 = "new block" // Debug in ../node/cmd/frostfs-node/morph.go
-FrostfsnodeCantUpdatePersistentState670 = "can't update persistent state" // Warn in ../node/cmd/frostfs-node/morph.go
-FrostfsnodeCantSendRebootstrapTx671 = "can't send re-bootstrap tx" // Warn in ../node/cmd/frostfs-node/netmap.go
-FrostfsnodeCouldNotUpdateNodeStateOnNewEpoch672 = "could not update node state on new epoch" // Error in ../node/cmd/frostfs-node/netmap.go
-FrostfsnodeCouldNotMakeNotaryDeposit673 = "could not make notary deposit" // Error in ../node/cmd/frostfs-node/netmap.go
-FrostfsnodeInitialNetworkState674 = "initial network state" // Info in ../node/cmd/frostfs-node/netmap.go
-FrostfsnodeTreeServiceIsNotEnabledSkipInitialization675 = "tree service is not enabled, skip initialization" // Info in ../node/cmd/frostfs-node/tree.go
-FrostfsnodeCouldNotSynchronizeTreeService676 = "could not synchronize Tree Service" // Error in ../node/cmd/frostfs-node/tree.go
-FrostfsnodeCouldNotSynchronizeTreeService677 = "could not synchronize Tree Service" // Error in ../node/cmd/frostfs-node/tree.go
-FrostfsnodeRemovingAllTreesForContainer678 = "removing all trees for container" // Debug in ../node/cmd/frostfs-node/tree.go
-FrostfsnodeContainerRemovalEventReceivedButTreesWerentRemoved679 = "container removal event received, but trees weren't removed" // Error in ../node/cmd/frostfs-node/tree.go
-FrostfsnodeCantListenGRPCEndpointControl680 = "can't listen gRPC endpoint (control)" // Error in ../node/cmd/frostfs-node/control.go
-CommonInitializingRemoteWriterProvider681 = "initializing remote writer provider" // Debug in ../node/cmd/frostfs-node/reputation/common/remote.go
-CommonRouteHasReachedDeadendProvider682 = "route has reached dead-end provider" // Debug in ../node/cmd/frostfs-node/reputation/common/remote.go
-CommonInitializingNoopWriterProvider683 = "initializing no-op writer provider" // Debug in ../node/cmd/frostfs-node/reputation/common/remote.go
-IntermediateWritingReceivedConsumersTrusts684 = "writing received consumer's trusts" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/consumers.go
-IntermediateStartWritingGlobalTrustsToContract685 = "start writing global trusts to contract" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/contract.go
-IntermediateFailedToSignGlobalTrust686 = "failed to sign global trust" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/contract.go
-IntermediateFailedToWriteGlobalTrustToContract687 = "failed to write global trust to contract" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/contract.go
-IntermediateSentGlobalTrustToContract688 = "sent global trust to contract" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/contract.go
-IntermediateWritingReceivedDaughtersTrusts689 = "writing received daughter's trusts" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/daughters.go
-IntermediateAnnouncingTrust690 = "announcing trust" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/remote.go
-LocalAnnouncingTrusts691 = "announcing trusts" // Debug in ../node/cmd/frostfs-node/reputation/local/remote.go
-LocalInitializingIteratorOverTrusts692 = "initializing iterator over trusts" // Debug in ../node/cmd/frostfs-node/reputation/local/storage.go
+AuditCouldNotGetSGInfo = "could not get storage group info" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+AuditZeroSumSGSize = "zero sum storage group size" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+AuditCouldNotResolvePublicKeyOfTheStorageNode = "could not resolve public key of the storage node" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+AuditCalculatingStorageNodeSalaryForAudit = "calculating storage node salary for audit (GASe-12)" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+AuditCouldNotParsePublicKeyOfTheInnerRingNode = "could not parse public key of the inner ring node" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+BasicCantGetBasicIncomeRate = "can't get basic income rate" // Error in ../node/pkg/innerring/processors/settlement/basic/collect.go
+BasicCantFetchContainerSizeEstimations = "can't fetch container size estimations" // Error in ../node/pkg/innerring/processors/settlement/basic/collect.go
+BasicCantFetchContainerInfo = "can't fetch container info" // Warn in ../node/pkg/innerring/processors/settlement/basic/collect.go
+BasicCantFetchBalanceOfBankingAccount = "can't fetch balance of banking account" // Error in ../node/pkg/innerring/processors/settlement/basic/distribute.go
+BasicCantTransformPublicKeyToOwnerID = "can't transform public key to owner id" // Warn in ../node/pkg/innerring/processors/settlement/basic/distribute.go
+FrostFSIRInternalError = "internal error" // Info in ../node/cmd/frostfs-ir/main.go
+FrostFSIRCouldNotShutdownHTTPServer = "could not shutdown HTTP server" // Debug in ../node/cmd/frostfs-ir/main.go
+FrostFSIRApplicationStopped = "application stopped" // Info in ../node/cmd/frostfs-ir/main.go
+FrostFSNodeCouldNotReadCertificateFromFile = "could not read certificate from file" // Error in ../node/cmd/frostfs-node/grpc.go
+FrostFSNodeCantListenGRPCEndpoint = "can't listen gRPC endpoint" // Error in ../node/cmd/frostfs-node/grpc.go
+FrostFSNodeStopListeningGRPCEndpoint = "stop listening gRPC endpoint" // Info in ../node/cmd/frostfs-node/grpc.go
+FrostFSNodeStartListeningGRPCEndpoint = "start listening gRPC endpoint" // Info in ../node/cmd/frostfs-node/grpc.go
+FrostFSNodeStoppingGRPCServer = "stopping gRPC server..." // Info in ../node/cmd/frostfs-node/grpc.go
+FrostFSNodeGRPCCannotShutdownGracefullyForcingStop = "gRPC cannot shutdown gracefully, forcing stop" // Info in ../node/cmd/frostfs-node/grpc.go
+FrostFSNodeGRPCServerStoppedSuccessfully = "gRPC server stopped successfully" // Info in ../node/cmd/frostfs-node/grpc.go
+FrostFSNodeWaitingForAllProcessesToStop = "waiting for all processes to stop" // Debug in ../node/cmd/frostfs-node/main.go
+FrostFSNodeStartReportingReputationOnNewEpochEvent = "start reporting reputation on new epoch event" // Debug in ../node/cmd/frostfs-node/reputation.go
+FrostFSNodeCouldNotFetchEpochDuration = "could not fetch epoch duration" // Debug in ../node/cmd/frostfs-node/reputation.go
+FrostFSNodeCouldNotFetchIterationNumber = "could not fetch iteration number" // Debug in ../node/cmd/frostfs-node/reputation.go
+FrostFSNodeCouldNotCreateFixedEpochTimer = "could not create fixed epoch timer" // Debug in ../node/cmd/frostfs-node/reputation.go
+FrostFSNodeStartedLocalNodesMaintenance = "started local node's maintenance" // Info in ../node/cmd/frostfs-node/config.go
+FrostFSNodeStoppedLocalNodesMaintenance = "stopped local node's maintenance" // Info in ../node/cmd/frostfs-node/config.go
+FrostFSNodeFailedToAttachShardToEngine = "failed to attach shard to engine" // Error in ../node/cmd/frostfs-node/config.go
+FrostFSNodeShardAttachedToEngine = "shard attached to engine" // Info in ../node/cmd/frostfs-node/config.go
+FrostFSNodeClosingComponentsOfTheStorageEngine = "closing components of the storage engine..." // Info in ../node/cmd/frostfs-node/config.go
+FrostFSNodeStorageEngineClosingFailure = "storage engine closing failure" // Info in ../node/cmd/frostfs-node/config.go
+FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully = "all components of the storage engine closed successfully" // Info in ../node/cmd/frostfs-node/config.go
+FrostFSNodeBootstrappingWithTheMaintenanceState = "bootstrapping with the maintenance state" // Info in ../node/cmd/frostfs-node/config.go
+FrostFSNodeBootstrappingWithOnlineState = "bootstrapping with online state" // Info in ../node/cmd/frostfs-node/config.go
+FrostFSNodeTerminationSignalHasBeenReceivedStopping = "termination signal has been received, stopping..." // Info in ../node/cmd/frostfs-node/config.go
+FrostFSNodeTerminationSignalProcessingIsComplete = "termination signal processing is complete" // Info in ../node/cmd/frostfs-node/config.go
+FrostFSNodeInternalApplicationError = "internal application error" // Warn in ../node/cmd/frostfs-node/config.go
+FrostFSNodeInternalErrorProcessingIsComplete = "internal error processing is complete" // Info in ../node/cmd/frostfs-node/config.go
+FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration = "SIGHUP has been received, rereading configuration..." // Info in ../node/cmd/frostfs-node/config.go
+FrostFSNodeConfigurationReading = "configuration reading" // Error in ../node/cmd/frostfs-node/config.go
+FrostFSNodeLoggerConfigurationPreparation = "logger configuration preparation" // Error in ../node/cmd/frostfs-node/config.go
+FrostFSNodeTracingConfigationUpdated = "tracing configation updated" // Info in ../node/cmd/frostfs-node/config.go
+FrostFSNodeStorageEngineConfigurationUpdate = "storage engine configuration update" // Error in ../node/cmd/frostfs-node/config.go
+FrostFSNodeUpdatedConfigurationApplying = "updated configuration applying" // Error in ../node/cmd/frostfs-node/config.go
+FrostFSNodeConfigurationHasBeenReloadedSuccessfully = "configuration has been reloaded successfully" // Info in ../node/cmd/frostfs-node/config.go
+FrostFSNodeReadNewlyCreatedContainerAfterTheNotification = "read newly created container after the notification" // Error in ../node/cmd/frostfs-node/container.go
+FrostFSNodeContainerCreationEventsReceipt = "container creation event's receipt" // Debug in ../node/cmd/frostfs-node/container.go
+FrostFSNodeContainerRemovalEventsReceipt = "container removal event's receipt" // Debug in ../node/cmd/frostfs-node/container.go
+FrostFSNodeSaveUsedSpaceAnnouncementInContract = "save used space announcement in contract" // Debug in ../node/cmd/frostfs-node/container.go
+FrostFSNodeFailedToCalculateContainerSizeInStorageEngine = "failed to calculate container size in storage engine" // Debug in ../node/cmd/frostfs-node/container.go
+FrostFSNodeContainerSizeInStorageEngineCalculatedSuccessfully = "container size in storage engine calculated successfully" // Debug in ../node/cmd/frostfs-node/container.go
+FrostFSNodeNotificatorCouldNotListContainers = "notificator: could not list containers" // Error in ../node/cmd/frostfs-node/notificator.go
+FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer = "notificator: could not select objects from container" // Error in ../node/cmd/frostfs-node/notificator.go
+FrostFSNodeNotificatorCouldNotProcessObject = "notificator: could not process object" // Error in ../node/cmd/frostfs-node/notificator.go
+FrostFSNodeNotificatorFinishedProcessingObjectNotifications = "notificator: finished processing object notifications" // Debug in ../node/cmd/frostfs-node/notificator.go
+FrostFSNodeCouldNotWriteObjectNotification = "could not write object notification" // Warn in ../node/cmd/frostfs-node/notificator.go
+FrostFSNodeCouldNotGetMaxObjectSizeValue = "could not get max object size value" // Error in ../node/cmd/frostfs-node/object.go
+FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage = "could not inhume mark redundant copy as garbage" // Warn in ../node/cmd/frostfs-node/object.go
+FrostFSNodeWritingLocalReputationValues = "writing local reputation values" // Debug in ../node/cmd/frostfs-node/object.go
+FrostFSNodeCouldNotGetLatestNetworkMapToOverloadTheClient = "could not get latest network map to overload the client" // Warn in ../node/cmd/frostfs-node/object.go
+FrostFSNodeFailedInitTracing = "failed init tracing" // Error in ../node/cmd/frostfs-node/tracing.go
+FrostFSNodeFailedShutdownTracing = "failed shutdown tracing" // Error in ../node/cmd/frostfs-node/tracing.go
+FrostFSNodeFailedToCreateNeoRPCClient = "failed to create neo RPC client" // Info in ../node/cmd/frostfs-node/morph.go
+FrostFSNodeClosingMorphComponents = "closing morph components..." // Info in ../node/cmd/frostfs-node/morph.go
+FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal = "failed to set group signer scope, continue with Global" // Info in ../node/cmd/frostfs-node/morph.go
+FrostFSNodeNotarySupport = "notary support" // Info in ../node/cmd/frostfs-node/morph.go
+FrostFSNodeMorphcacheTTLFetchedFromNetwork = "morph.cache_ttl fetched from network" // Debug in ../node/cmd/frostfs-node/morph.go
+FrostFSNodeNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made" // Info in ../node/cmd/frostfs-node/morph.go
+FrostFSNodeCantGetLastProcessedSideChainBlockNumber = "can't get last processed side chain block number" // Warn in ../node/cmd/frostfs-node/morph.go
+FrostFSNodeNewEpochEventFromSidechain = "new epoch event from sidechain" // Info in ../node/cmd/frostfs-node/morph.go
+FrostFSNodeNewBlock = "new block" // Debug in ../node/cmd/frostfs-node/morph.go
+FrostFSNodeCantUpdatePersistentState = "can't update persistent state" // Warn in ../node/cmd/frostfs-node/morph.go
+FrostFSNodeCantSendRebootstrapTx = "can't send re-bootstrap tx" // Warn in ../node/cmd/frostfs-node/netmap.go
+FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch = "could not update node state on new epoch" // Error in ../node/cmd/frostfs-node/netmap.go
+FrostFSNodeCouldNotMakeNotaryDeposit = "could not make notary deposit" // Error in ../node/cmd/frostfs-node/netmap.go
+FrostFSNodeInitialNetworkState = "initial network state" // Info in ../node/cmd/frostfs-node/netmap.go
+FrostFSNodeTreeServiceIsNotEnabledSkipInitialization = "tree service is not enabled, skip initialization" // Info in ../node/cmd/frostfs-node/tree.go
+FrostFSNodeCouldNotSynchronizeTreeService = "could not synchronize Tree Service" // Error in ../node/cmd/frostfs-node/tree.go
+FrostFSNodeRemovingAllTreesForContainer = "removing all trees for container" // Debug in ../node/cmd/frostfs-node/tree.go
+FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved = "container removal event received, but trees weren't removed" // Error in ../node/cmd/frostfs-node/tree.go
+FrostFSNodeCantListenGRPCEndpointControl = "can't listen gRPC endpoint (control)" // Error in ../node/cmd/frostfs-node/control.go
+CommonApplicationStarted = "application started" // Info in ../node/cmd/frostfs-ir/main.go
+CommonInitializingRemoteWriterProvider = "initializing remote writer provider" // Debug in ../node/cmd/frostfs-node/reputation/common/remote.go
+CommonRouteHasReachedDeadendProvider = "route has reached dead-end provider" // Debug in ../node/cmd/frostfs-node/reputation/common/remote.go
+CommonInitializingNoopWriterProvider = "initializing no-op writer provider" // Debug in ../node/cmd/frostfs-node/reputation/common/remote.go
+IntermediateWritingReceivedConsumersTrusts = "writing received consumer's trusts" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/consumers.go
+IntermediateStartWritingGlobalTrustsToContract = "start writing global trusts to contract" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/contract.go
+IntermediateFailedToSignGlobalTrust = "failed to sign global trust" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/contract.go
+IntermediateFailedToWriteGlobalTrustToContract = "failed to write global trust to contract" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/contract.go
+IntermediateSentGlobalTrustToContract = "sent global trust to contract" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/contract.go
+IntermediateWritingReceivedDaughtersTrusts = "writing received daughter's trusts" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/daughters.go
+IntermediateAnnouncingTrust = "announcing trust" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/remote.go
+LocalAnnouncingTrusts = "announcing trusts" // Debug in ../node/cmd/frostfs-node/reputation/local/remote.go
+LocalInitializingIteratorOverTrusts = "initializing iterator over trusts" // Debug in ../node/cmd/frostfs-node/reputation/local/storage.go
 )
@@ -225,7 +225,7 @@ func (c *Calculator) sumSGSizes(ctx *singleResultCtx) bool {

 sgInfo, err := c.prm.SGStorage.SGInfo(addr)
 if err != nil {
-ctx.log.Error(logs.AuditCouldNotGetSGInfo596,
+ctx.log.Error(logs.AuditCouldNotGetSGInfo,
 zap.String("id", id.String()),
 zap.String("error", err.Error()),
 )
@@ -245,7 +245,7 @@ func (c *Calculator) sumSGSizes(ctx *singleResultCtx) bool {
 }

 if sumPassSGSize == 0 {
-ctx.log.Debug(logs.AuditZeroSumSGSize597)
+ctx.log.Debug(logs.AuditZeroSumSGSize)
 return false
 }

@@ -261,7 +261,7 @@ func (c *Calculator) fillTransferTable(ctx *singleResultCtx) bool {
 for k, info := range ctx.passNodes {
 ownerID, err := c.prm.AccountStorage.ResolveKey(info)
 if err != nil {
-ctx.log.Error(logs.AuditCouldNotResolvePublicKeyOfTheStorageNode598,
+ctx.log.Error(logs.AuditCouldNotResolvePublicKeyOfTheStorageNode,
 zap.String("error", err.Error()),
 zap.String("key", k),
 )
@@ -271,7 +271,7 @@ func (c *Calculator) fillTransferTable(ctx *singleResultCtx) bool {

 price := info.Price()

-ctx.log.Debug(logs.AuditCalculatingStorageNodeSalaryForAuditGASe12599,
+ctx.log.Debug(logs.AuditCalculatingStorageNodeSalaryForAudit,
 zap.Stringer("sum SG size", ctx.sumSGSize),
 zap.Stringer("price", price),
 )
@@ -293,7 +293,7 @@ func (c *Calculator) fillTransferTable(ctx *singleResultCtx) bool {
 // add txs to pay inner ring node for audit result
 auditIR, err := ownerFromKey(ctx.auditResult.AuditorKey())
 if err != nil {
-ctx.log.Error(logs.AuditCouldNotParsePublicKeyOfTheInnerRingNode600,
+ctx.log.Error(logs.AuditCouldNotParsePublicKeyOfTheInnerRingNode,
 zap.String("error", err.Error()),
 zap.String("key", hex.EncodeToString(ctx.auditResult.AuditorKey())),
 )
@@ -21,7 +21,7 @@ func (inc *IncomeSettlementContext) Collect() {

 cachedRate, err := inc.rate.BasicRate()
 if err != nil {
-inc.log.Error(logs.BasicCantGetBasicIncomeRate601,
+inc.log.Error(logs.BasicCantGetBasicIncomeRate,
 zap.String("error", err.Error()))

 return
@@ -34,7 +34,7 @@ func (inc *IncomeSettlementContext) Collect() {

 cnrEstimations, err := inc.estimations.Estimations(inc.epoch)
 if err != nil {
-inc.log.Error(logs.BasicCantFetchContainerSizeEstimations602,
+inc.log.Error(logs.BasicCantFetchContainerSizeEstimations,
 zap.Uint64("epoch", inc.epoch),
 zap.String("error", err.Error()))

@@ -46,7 +46,7 @@ func (inc *IncomeSettlementContext) Collect() {
 for i := range cnrEstimations {
 owner, err := inc.container.ContainerInfo(cnrEstimations[i].ContainerID)
 if err != nil {
-inc.log.Warn(logs.BasicCantFetchContainerInfo603,
+inc.log.Warn(logs.BasicCantFetchContainerInfo,
 zap.Uint64("epoch", inc.epoch),
 zap.Stringer("container_id", cnrEstimations[i].ContainerID),
 zap.String("error", err.Error()))
@@ -56,7 +56,7 @@ func (inc *IncomeSettlementContext) Collect() {

 cnrNodes, err := inc.placement.ContainerNodes(inc.epoch, cnrEstimations[i].ContainerID)
 if err != nil {
-inc.log.Debug(logs.BasicCantFetchContainerInfo604,
+inc.log.Debug(logs.BasicCantFetchContainerInfo,
 zap.Uint64("epoch", inc.epoch),
 zap.Stringer("container_id", cnrEstimations[i].ContainerID),
 zap.String("error", err.Error()))
@@ -21,7 +21,7 @@ func (inc *IncomeSettlementContext) Distribute() {

 bankBalance, err := inc.balances.Balance(inc.bankOwner)
 if err != nil {
-inc.log.Error(logs.BasicCantFetchBalanceOfBankingAccount605,
+inc.log.Error(logs.BasicCantFetchBalanceOfBankingAccount,
 zap.String("error", err.Error()))

 return
@@ -32,7 +32,7 @@ func (inc *IncomeSettlementContext) Distribute() {
 inc.distributeTable.Iterate(func(key []byte, n *big.Int) {
 nodeOwner, err := inc.accounts.ResolveKey(nodeInfoWrapper(key))
 if err != nil {
-inc.log.Warn(logs.BasicCantTransformPublicKeyToOwnerId606,
+inc.log.Warn(logs.BasicCantTransformPublicKeyToOwnerID,
 zap.String("public_key", hex.EncodeToString(key)),
 zap.String("error", err.Error()))
