From 0e31c12e63d1082cca5775b661104c5b47825326 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 12 Apr 2023 17:35:10 +0300 Subject: [PATCH] [#240] logs: Move log messages to constants Drop duplicate entities. Format entities. Signed-off-by: Dmitrii Stepanov Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-ir/main.go | 9 +- cmd/frostfs-node/config.go | 41 +- cmd/frostfs-node/container.go | 13 +- cmd/frostfs-node/control.go | 3 +- cmd/frostfs-node/grpc.go | 15 +- cmd/frostfs-node/main.go | 5 +- cmd/frostfs-node/morph.go | 21 +- cmd/frostfs-node/netmap.go | 9 +- cmd/frostfs-node/notificator.go | 11 +- cmd/frostfs-node/object.go | 7 +- cmd/frostfs-node/reputation.go | 9 +- cmd/frostfs-node/reputation/common/remote.go | 7 +- .../reputation/intermediate/consumers.go | 3 +- .../reputation/intermediate/contract.go | 3 +- .../reputation/intermediate/daughters.go | 3 +- .../reputation/intermediate/remote.go | 3 +- cmd/frostfs-node/reputation/local/remote.go | 3 +- cmd/frostfs-node/reputation/local/storage.go | 3 +- cmd/frostfs-node/tracing.go | 5 +- cmd/frostfs-node/tree.go | 11 +- internal/logs/logs.go | 643 ++++++++++++++++++ pkg/innerring/blocktimer.go | 5 +- pkg/innerring/initialization.go | 13 +- pkg/innerring/innerring.go | 13 +- pkg/innerring/notary.go | 7 +- pkg/innerring/processors/alphabet/handlers.go | 5 +- .../processors/alphabet/process_emit.go | 19 +- .../processors/alphabet/processor.go | 3 +- pkg/innerring/processors/audit/handlers.go | 5 +- pkg/innerring/processors/audit/process.go | 21 +- pkg/innerring/processors/audit/scheduler.go | 3 +- pkg/innerring/processors/balance/handlers.go | 5 +- .../processors/balance/process_assets.go | 5 +- pkg/innerring/processors/balance/processor.go | 3 +- .../processors/container/handlers.go | 13 +- .../processors/container/process_container.go | 13 +- .../processors/container/process_eacl.go | 7 +- .../processors/container/processor.go | 3 +- pkg/innerring/processors/frostfs/handlers.go | 25 +- .../processors/frostfs/process_assets.go | 23 +- .../processors/frostfs/process_bind.go | 7 +- .../processors/frostfs/process_config.go | 5 +- pkg/innerring/processors/frostfs/processor.go | 3 +- .../processors/governance/handlers.go | 5 +- .../processors/governance/process_update.go | 29 +- pkg/innerring/processors/netmap/handlers.go | 27 +- .../processors/netmap/process_cleanup.go | 11 +- .../processors/netmap/process_epoch.go | 17 +- .../processors/netmap/process_peers.go | 35 +- pkg/innerring/processors/netmap/processor.go | 3 +- .../processors/reputation/handlers.go | 5 +- .../processors/reputation/process_put.go | 11 +- .../processors/reputation/processor.go | 3 +- .../processors/settlement/audit/calculate.go | 47 +- .../processors/settlement/basic/collect.go | 9 +- .../processors/settlement/basic/distribute.go | 5 +- pkg/innerring/processors/settlement/calls.go | 29 +- .../processors/settlement/handlers.go | 9 +- .../processors/settlement/processor.go | 3 +- pkg/innerring/rpc.go | 5 +- pkg/innerring/settlement.go | 5 +- pkg/innerring/state.go | 13 +- pkg/innerring/subnet.go | 23 +- .../blobovnicza/control.go | 13 +- .../blobovnicza/delete.go | 3 +- .../blobstor/blobovniczatree/blobovnicza.go | 11 +- .../blobstor/blobovniczatree/control.go | 11 +- .../blobstor/blobovniczatree/delete.go | 7 +- .../blobstor/blobovniczatree/exists.go | 3 +- .../blobstor/blobovniczatree/get.go | 7 +- .../blobstor/blobovniczatree/get_range.go | 7 +- .../blobstor/blobovniczatree/put.go | 15 +- pkg/local_object_storage/blobstor/control.go | 9 +- 
pkg/local_object_storage/blobstor/exists.go | 3 +- pkg/local_object_storage/blobstor/iterate.go | 3 +- pkg/local_object_storage/engine/control.go | 15 +- pkg/local_object_storage/engine/delete.go | 5 +- pkg/local_object_storage/engine/engine.go | 11 +- pkg/local_object_storage/engine/evacuate.go | 7 +- pkg/local_object_storage/engine/inhume.go | 7 +- pkg/local_object_storage/engine/put.go | 5 +- .../engine/remove_copies.go | 9 +- pkg/local_object_storage/engine/shards.go | 5 +- pkg/local_object_storage/metabase/control.go | 7 +- pkg/local_object_storage/metabase/select.go | 17 +- pkg/local_object_storage/shard/control.go | 15 +- pkg/local_object_storage/shard/delete.go | 7 +- pkg/local_object_storage/shard/gc.go | 43 +- pkg/local_object_storage/shard/get.go | 7 +- pkg/local_object_storage/shard/inhume.go | 3 +- pkg/local_object_storage/shard/list.go | 3 +- pkg/local_object_storage/shard/mode.go | 5 +- pkg/local_object_storage/shard/move.go | 3 +- pkg/local_object_storage/shard/put.go | 3 +- pkg/local_object_storage/shard/shard.go | 7 +- pkg/local_object_storage/writecache/flush.go | 3 +- pkg/local_object_storage/writecache/init.go | 9 +- pkg/local_object_storage/writecache/mode.go | 3 +- .../writecache/storage.go | 7 +- pkg/morph/client/client.go | 11 +- pkg/morph/client/multi.go | 15 +- pkg/morph/client/notary.go | 9 +- pkg/morph/client/notifications.go | 7 +- pkg/morph/event/listener.go | 69 +- pkg/morph/event/utils.go | 3 +- pkg/morph/subscriber/subscriber.go | 15 +- pkg/services/audit/auditor/context.go | 7 +- pkg/services/audit/auditor/pdp.go | 5 +- pkg/services/audit/auditor/pop.go | 5 +- pkg/services/audit/auditor/por.go | 11 +- pkg/services/audit/taskmanager/listen.go | 13 +- .../announcement/load/controller/calls.go | 33 +- .../announcement/load/route/calls.go | 9 +- pkg/services/notificator/nats/service.go | 7 +- pkg/services/notificator/service.go | 5 +- pkg/services/object/acl/v2/classifier.go | 5 +- pkg/services/object/delete/container.go | 4 +- pkg/services/object/delete/delete.go | 7 +- pkg/services/object/delete/exec.go | 21 +- pkg/services/object/delete/local.go | 13 +- pkg/services/object/get/assemble.go | 11 +- pkg/services/object/get/container.go | 13 +- pkg/services/object/get/exec.go | 11 +- pkg/services/object/get/get.go | 13 +- pkg/services/object/get/local.go | 3 +- pkg/services/object/get/remote.go | 5 +- pkg/services/object/put/distributed.go | 3 +- pkg/services/object/search/container.go | 17 +- pkg/services/object/search/exec.go | 7 +- pkg/services/object/search/local.go | 3 +- pkg/services/object/search/search.go | 7 +- pkg/services/object/util/log.go | 5 +- .../object_manager/tombstone/checker.go | 3 +- pkg/services/policer/check.go | 17 +- pkg/services/policer/process.go | 9 +- pkg/services/replicator/process.go | 9 +- pkg/services/reputation/common/managers.go | 3 +- .../reputation/common/router/calls.go | 9 +- .../reputation/eigentrust/calculator/calls.go | 37 +- .../reputation/eigentrust/controller/calls.go | 5 +- .../reputation/eigentrust/routes/calls.go | 3 +- .../reputation/local/controller/calls.go | 19 +- pkg/services/reputation/local/routes/calls.go | 3 +- pkg/services/session/executor.go | 3 +- .../session/storage/persistent/storage.go | 7 +- pkg/services/tree/redirect.go | 3 +- pkg/services/tree/replicator.go | 9 +- pkg/services/tree/signature.go | 3 +- pkg/services/tree/sync.go | 29 +- 149 files changed, 1481 insertions(+), 687 deletions(-) create mode 100644 internal/logs/logs.go diff --git a/cmd/frostfs-ir/main.go b/cmd/frostfs-ir/main.go index 
e4386a083..5db1db6b6 100644 --- a/cmd/frostfs-ir/main.go +++ b/cmd/frostfs-ir/main.go @@ -9,6 +9,7 @@ import ( "os/signal" "syscall" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/misc" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http" @@ -80,13 +81,13 @@ func main() { err = innerRing.Start(ctx, intErr) exitErr(err) - log.Info("application started", + log.Info(logs.CommonApplicationStarted, zap.String("version", misc.Version)) select { case <-ctx.Done(): case err := <-intErr: - log.Info("internal error", zap.String("msg", err.Error())) + log.Info(logs.FrostFSIRInternalError, zap.String("msg", err.Error())) } innerRing.Stop() @@ -98,14 +99,14 @@ func main() { go func() { err := srv.Shutdown() if err != nil { - log.Debug("could not shutdown HTTP server", + log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer, zap.String("error", err.Error()), ) } }() } - log.Info("application stopped") + log.Info(logs.FrostFSIRApplicationStopped) } func initHTTPServers(cfg *viper.Viper, log *logger.Logger) []*httputil.Server { diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index d81e47b17..d110665f5 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -29,6 +29,7 @@ import ( objectconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/object" replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator" tracingconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" @@ -342,13 +343,13 @@ type internals struct { func (c *cfg) startMaintenance() { c.isMaintenance.Store(true) c.cfgNetmap.state.setControlNetmapStatus(control.NetmapStatus_MAINTENANCE) - c.log.Info("started local node's maintenance") + c.log.Info(logs.FrostFSNodeStartedLocalNodesMaintenance) } // stops node's maintenance. func (c *internals) stopMaintenance() { c.isMaintenance.Store(false) - c.log.Info("stopped local node's maintenance") + c.log.Info(logs.FrostFSNodeStoppedLocalNodesMaintenance) } // IsMaintenance checks if storage node is under maintenance. @@ -881,10 +882,10 @@ func initLocalStorage(c *cfg) { for _, optsWithMeta := range c.shardOpts() { id, err := ls.AddShard(append(optsWithMeta.shOpts, shard.WithTombstoneSource(tombstoneSource))...) 
if err != nil { - c.log.Error("failed to attach shard to engine", zap.Error(err)) + c.log.Error(logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err)) } else { shardsAttached++ - c.log.Info("shard attached to engine", zap.Stringer("id", id)) + c.log.Info(logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id)) } } if shardsAttached == 0 { @@ -894,15 +895,15 @@ func initLocalStorage(c *cfg) { c.cfgObject.cfgLocalStorage.localStorage = ls c.onShutdown(func() { - c.log.Info("closing components of the storage engine...") + c.log.Info(logs.FrostFSNodeClosingComponentsOfTheStorageEngine) err := ls.Close() if err != nil { - c.log.Info("storage engine closing failure", + c.log.Info(logs.FrostFSNodeStorageEngineClosingFailure, zap.String("error", err.Error()), ) } else { - c.log.Info("all components of the storage engine closed successfully") + c.log.Info(logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully) } }) } @@ -976,11 +977,11 @@ func (c *cfg) bootstrap() error { // switch to online except when under maintenance st := c.cfgNetmap.state.controlNetmapStatus() if st == control.NetmapStatus_MAINTENANCE { - c.log.Info("bootstrapping with the maintenance state") + c.log.Info(logs.FrostFSNodeBootstrappingWithTheMaintenanceState) return c.bootstrapWithState((*netmap.NodeInfo).SetMaintenance) } - c.log.Info("bootstrapping with online state", + c.log.Info(logs.FrostFSNodeBootstrappingWithOnlineState, zap.Stringer("previous", st), ) @@ -1015,32 +1016,32 @@ func (c *cfg) signalWatcher(ctx context.Context) { case syscall.SIGHUP: c.reloadConfig(ctx) case syscall.SIGTERM, syscall.SIGINT: - c.log.Info("termination signal has been received, stopping...") + c.log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping) // TODO (@acid-ant): #49 need to cover case when stuck at the middle(node health UNDEFINED or STARTING) c.shutdown() - c.log.Info("termination signal processing is complete") + c.log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete) return } case err := <-c.internalErr: // internal application error - c.log.Warn("internal application error", + c.log.Warn(logs.FrostFSNodeInternalApplicationError, zap.String("message", err.Error())) c.shutdown() - c.log.Info("internal error processing is complete") + c.log.Info(logs.FrostFSNodeInternalErrorProcessingIsComplete) return } } } func (c *cfg) reloadConfig(ctx context.Context) { - c.log.Info("SIGHUP has been received, rereading configuration...") + c.log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration) err := c.readConfig(c.appCfg) if err != nil { - c.log.Error("configuration reading", zap.Error(err)) + c.log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err)) return } @@ -1052,7 +1053,7 @@ func (c *cfg) reloadConfig(ctx context.Context) { logPrm, err := c.loggerPrm() if err != nil { - c.log.Error("logger configuration preparation", zap.Error(err)) + c.log.Error(logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err)) return } @@ -1060,7 +1061,7 @@ func (c *cfg) reloadConfig(ctx context.Context) { components = append(components, dCmp{"tracing", func() error { updated, err := tracing.Setup(ctx, *tracingconfig.ToTracingConfig(c.appCfg)) if updated { - c.log.Info("tracing configation updated") + c.log.Info(logs.FrostFSNodeTracingConfigationUpdated) } return err }}) @@ -1085,20 +1086,20 @@ func (c *cfg) reloadConfig(ctx context.Context) { err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg) if err != nil { - c.log.Error("storage engine configuration update", 
zap.Error(err)) + c.log.Error(logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err)) return } for _, component := range components { err = component.reloadFunc() if err != nil { - c.log.Error("updated configuration applying", + c.log.Error(logs.FrostFSNodeUpdatedConfigurationApplying, zap.String("component", component.name), zap.Error(err)) } } - c.log.Info("configuration has been reloaded successfully") + c.log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully) } func (c *cfg) shutdown() { diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go index 6c864431d..d5d8601e3 100644 --- a/cmd/frostfs-node/container.go +++ b/cmd/frostfs-node/container.go @@ -11,6 +11,7 @@ import ( containerV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container" containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" @@ -136,13 +137,13 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c } else { // unlike removal, we expect successful receive of the container // after successful creation, so logging can be useful - c.log.Error("read newly created container after the notification", + c.log.Error(logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification, zap.Stringer("id", ev.ID), zap.Error(err), ) } - c.log.Debug("container creation event's receipt", + c.log.Debug(logs.FrostFSNodeContainerCreationEventsReceipt, zap.Stringer("id", ev.ID), ) }) @@ -161,7 +162,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c cachedContainerStorage.handleRemoval(ev.ID) - c.log.Debug("container removal event's receipt", + c.log.Debug(logs.FrostFSNodeContainerRemovalEventsReceipt, zap.Stringer("id", ev.ID), ) }) @@ -295,7 +296,7 @@ type morphLoadWriter struct { } func (w *morphLoadWriter) Put(a containerSDK.SizeEstimation) error { - w.log.Debug("save used space announcement in contract", + w.log.Debug(logs.FrostFSNodeSaveUsedSpaceAnnouncementInContract, zap.Uint64("epoch", a.Epoch()), zap.Stringer("cid", a.Container()), zap.Uint64("size", a.Value()), @@ -458,7 +459,7 @@ func (d *localStorageLoad) Iterate(f loadcontroller.UsedSpaceFilter, h loadcontr for i := range idList { sz, err := engine.ContainerSize(d.engine, idList[i]) if err != nil { - d.log.Debug("failed to calculate container size in storage engine", + d.log.Debug(logs.FrostFSNodeFailedToCalculateContainerSizeInStorageEngine, zap.Stringer("cid", idList[i]), zap.String("error", err.Error()), ) @@ -466,7 +467,7 @@ func (d *localStorageLoad) Iterate(f loadcontroller.UsedSpaceFilter, h loadcontr continue } - d.log.Debug("container size in storage engine calculated successfully", + d.log.Debug(logs.FrostFSNodeContainerSizeInStorageEngineCalculatedSuccessfully, zap.Uint64("size", sz), zap.Stringer("cid", idList[i]), ) diff --git a/cmd/frostfs-node/control.go b/cmd/frostfs-node/control.go index 5492f585f..f4b068419 100644 --- a/cmd/frostfs-node/control.go +++ b/cmd/frostfs-node/control.go @@ -5,6 +5,7 @@ import ( "net" controlconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/control" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" controlSvc 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" @@ -52,7 +53,7 @@ func initControlService(c *cfg) { lis, err := net.Listen("tcp", endpoint) if err != nil { - c.log.Error("can't listen gRPC endpoint (control)", zap.Error(err)) + c.log.Error(logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err)) return } diff --git a/cmd/frostfs-node/grpc.go b/cmd/frostfs-node/grpc.go index f3943f3ff..b0a587782 100644 --- a/cmd/frostfs-node/grpc.go +++ b/cmd/frostfs-node/grpc.go @@ -9,6 +9,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" grpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "go.uber.org/zap" "google.golang.org/grpc" @@ -33,7 +34,7 @@ func initGRPC(c *cfg) { if tlsCfg != nil { cert, err := tls.LoadX509KeyPair(tlsCfg.CertificateFile(), tlsCfg.KeyFile()) if err != nil { - c.log.Error("could not read certificate from file", zap.Error(err)) + c.log.Error(logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err)) return } @@ -63,7 +64,7 @@ func initGRPC(c *cfg) { lis, err := net.Listen("tcp", sc.Endpoint()) if err != nil { - c.log.Error("can't listen gRPC endpoint", zap.Error(err)) + c.log.Error(logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err)) return } @@ -93,14 +94,14 @@ func serveGRPC(c *cfg) { go func() { defer func() { - c.log.Info("stop listening gRPC endpoint", + c.log.Info(logs.FrostFSNodeStopListeningGRPCEndpoint, zap.String("endpoint", lis.Addr().String()), ) c.wg.Done() }() - c.log.Info("start listening gRPC endpoint", + c.log.Info(logs.FrostFSNodeStartListeningGRPCEndpoint, zap.String("endpoint", lis.Addr().String()), ) @@ -114,7 +115,7 @@ func serveGRPC(c *cfg) { func stopGRPC(name string, s *grpc.Server, l *logger.Logger) { l = &logger.Logger{Logger: l.With(zap.String("name", name))} - l.Info("stopping gRPC server...") + l.Info(logs.FrostFSNodeStoppingGRPCServer) // GracefulStop() may freeze forever, see #1270 done := make(chan struct{}) @@ -126,9 +127,9 @@ func stopGRPC(name string, s *grpc.Server, l *logger.Logger) { select { case <-done: case <-time.After(1 * time.Minute): - l.Info("gRPC cannot shutdown gracefully, forcing stop") + l.Info(logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop) s.Stop() } - l.Info("gRPC server stopped successfully") + l.Info(logs.FrostFSNodeGRPCServerStoppedSuccessfully) } diff --git a/cmd/frostfs-node/main.go b/cmd/frostfs-node/main.go index a97ad3879..786843b0b 100644 --- a/cmd/frostfs-node/main.go +++ b/cmd/frostfs-node/main.go @@ -8,6 +8,7 @@ import ( "os" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/misc" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" "go.uber.org/zap" @@ -142,14 +143,14 @@ func bootUp(ctx context.Context, c *cfg) { } func wait(c *cfg, cancel func()) { - c.log.Info("application started", + c.log.Info(logs.CommonApplicationStarted, zap.String("version", misc.Version)) <-c.done // graceful shutdown cancel() - c.log.Debug("waiting for all processes to stop") + c.log.Debug(logs.FrostFSNodeWaitingForAllProcessesToStop) c.wg.Wait() } diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go index 2db865ca3..72378d8f3 100644 --- a/cmd/frostfs-node/morph.go +++ b/cmd/frostfs-node/morph.go @@ -7,6 
+7,7 @@ import ( "time" morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" @@ -49,7 +50,7 @@ func initMorphComponents(ctx context.Context, c *cfg) { client.WithSwitchInterval(morphconfig.SwitchInterval(c.appCfg)), ) if err != nil { - c.log.Info("failed to create neo RPC client", + c.log.Info(logs.FrostFSNodeFailedToCreateNeoRPCClient, zap.Any("endpoints", addresses), zap.String("error", err.Error()), ) @@ -58,12 +59,12 @@ func initMorphComponents(ctx context.Context, c *cfg) { } c.onShutdown(func() { - c.log.Info("closing morph components...") + c.log.Info(logs.FrostFSNodeClosingMorphComponents) cli.Close() }) if err := cli.SetGroupSignerScope(); err != nil { - c.log.Info("failed to set group signer scope, continue with Global", zap.Error(err)) + c.log.Info(logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err)) } c.cfgMorph.client = cli @@ -80,7 +81,7 @@ func initMorphComponents(ctx context.Context, c *cfg) { fatalOnErr(err) } - c.log.Info("notary support", + c.log.Info(logs.FrostFSNodeNotarySupport, zap.Bool("sidechain_enabled", c.cfgMorph.notaryEnabled), ) @@ -95,7 +96,7 @@ func initMorphComponents(ctx context.Context, c *cfg) { msPerBlock, err := c.cfgMorph.client.MsPerBlock() fatalOnErr(err) c.cfgMorph.cacheTTL = time.Duration(msPerBlock) * time.Millisecond - c.log.Debug("morph.cache_ttl fetched from network", zap.Duration("value", c.cfgMorph.cacheTTL)) + c.log.Debug(logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL)) } if c.cfgMorph.cacheTTL < 0 { @@ -122,7 +123,7 @@ func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) { // non-error deposit with an empty TX hash means // that the deposit has already been made; no // need to wait it. 
- c.log.Info("notary deposit has already been made") + c.log.Info(logs.FrostFSNodeNotaryDepositHasAlreadyBeenMade) return } @@ -190,7 +191,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey) if err != nil { fromSideChainBlock = 0 - c.log.Warn("can't get last processed side chain block number", zap.String("error", err.Error())) + c.log.Warn(logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error())) } subs, err = subscriber.New(ctx, &subscriber.Params{ @@ -215,7 +216,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { setNetmapNotificationParser(c, newEpochNotification, func(src *state.ContainedNotificationEvent) (event.Event, error) { res, err := netmapEvent.ParseNewEpoch(src) if err == nil { - c.log.Info("new epoch event from sidechain", + c.log.Info(logs.FrostFSNodeNewEpochEventFromSidechain, zap.Uint64("number", res.(netmapEvent.NewEpoch).EpochNumber()), ) } @@ -226,11 +227,11 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { registerNotificationHandlers(c.cfgContainer.scriptHash, lis, c.cfgContainer.parsers, c.cfgContainer.subscribers) registerBlockHandler(lis, func(block *block.Block) { - c.log.Debug("new block", zap.Uint32("index", block.Index)) + c.log.Debug(logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index)) err = c.persistate.SetUInt32(persistateSideChainLastBlockKey, block.Index) if err != nil { - c.log.Warn("can't update persistent state", + c.log.Warn(logs.FrostFSNodeCantUpdatePersistentState, zap.String("chain", "side"), zap.Uint32("block_index", block.Index)) } diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go index d9b1c9208..76cceeb6d 100644 --- a/cmd/frostfs-node/netmap.go +++ b/cmd/frostfs-node/netmap.go @@ -8,6 +8,7 @@ import ( netmapGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap/grpc" nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics" nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" @@ -193,7 +194,7 @@ func addNewEpochNotificationHandlers(c *cfg) { if (n-c.cfgNetmap.startEpoch)%reBootstrapInterval == 0 { err := c.bootstrap() if err != nil { - c.log.Warn("can't send re-bootstrap tx", zap.Error(err)) + c.log.Warn(logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err)) } } }) @@ -203,7 +204,7 @@ func addNewEpochNotificationHandlers(c *cfg) { ni, err := c.netmapLocalNodeState(e) if err != nil { - c.log.Error("could not update node state on new epoch", + c.log.Error(logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch, zap.Uint64("epoch", e), zap.String("error", err.Error()), ) @@ -218,7 +219,7 @@ func addNewEpochNotificationHandlers(c *cfg) { addNewEpochAsyncNotificationHandler(c, func(ev event.Event) { _, err := makeNotaryDeposit(c) if err != nil { - c.log.Error("could not make notary deposit", + c.log.Error(logs.FrostFSNodeCouldNotMakeNotaryDeposit, zap.String("error", err.Error()), ) } @@ -298,7 +299,7 @@ func initNetmapState(c *cfg) { } } - c.log.Info("initial network state", + c.log.Info(logs.FrostFSNodeInitialNetworkState, zap.Uint64("epoch", epoch), zap.String("state", stateWord), ) diff --git a/cmd/frostfs-node/notificator.go b/cmd/frostfs-node/notificator.go index 4a310e5b0..9c90e052c 100644 --- a/cmd/frostfs-node/notificator.go +++ 
b/cmd/frostfs-node/notificator.go @@ -6,6 +6,7 @@ import ( "fmt" nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" @@ -28,7 +29,7 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler listRes, err := n.e.ListContainers(engine.ListContainersPrm{}) if err != nil { - log.Error("notificator: could not list containers", zap.Error(err)) + log.Error(logs.FrostFSNodeNotificatorCouldNotListContainers, zap.Error(err)) return } @@ -43,7 +44,7 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler selectRes, err := n.e.Select(selectPrm) if err != nil { - log.Error("notificator: could not select objects from container", + log.Error(logs.FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer, zap.Stringer("cid", c), zap.Error(err), ) @@ -53,7 +54,7 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler for _, a := range selectRes.AddressList() { err = n.processAddress(ctx, a, handler) if err != nil { - log.Error("notificator: could not process object", + log.Error(logs.FrostFSNodeNotificatorCouldNotProcessObject, zap.Stringer("address", a), zap.Error(err), ) @@ -62,7 +63,7 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler } } - log.Debug("notificator: finished processing object notifications") + log.Debug(logs.FrostFSNodeNotificatorFinishedProcessingObjectNotifications) } func (n *notificationSource) processAddress( @@ -101,7 +102,7 @@ type notificationWriter struct { func (n notificationWriter) Notify(topic string, address oid.Address) { if err := n.w.Notify(topic, address); err != nil { - n.l.Warn("could not write object notification", + n.l.Warn(logs.FrostFSNodeCouldNotWriteObjectNotification, zap.Stringer("address", address), zap.String("topic", topic), zap.Error(err), diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index ff4335ff9..8f5a83eb0 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -11,6 +11,7 @@ import ( metricsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/metrics" policerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/policer" replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" @@ -62,7 +63,7 @@ type objectSvc struct { func (c *cfg) MaxObjectSize() uint64 { sz, err := c.cfgNetmap.wrapper.MaxObjectSize() if err != nil { - c.log.Error("could not get max object size value", + c.log.Error(logs.FrostFSNodeCouldNotGetMaxObjectSizeValue, zap.String("error", err.Error()), ) } @@ -259,7 +260,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *reputati _, err := ls.Inhume(ctx, inhumePrm) if err != nil { - c.log.Warn("could not inhume mark redundant copy as garbage", + c.log.Warn(logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage, zap.String("error", err.Error()), ) } @@ -600,7 +601,7 @@ func (c *reputationClientConstructor) Get(info 
coreclient.NodeInfo) (coreclient. } } } else { - c.log.Warn("could not get latest network map to overload the client", + c.log.Warn(logs.FrostFSNodeCouldNotGetLatestNetworkMapToOverloadTheClient, zap.String("error", err.Error()), ) } diff --git a/cmd/frostfs-node/reputation.go b/cmd/frostfs-node/reputation.go index a96bd066e..b3acf7eb0 100644 --- a/cmd/frostfs-node/reputation.go +++ b/cmd/frostfs-node/reputation.go @@ -11,6 +11,7 @@ import ( intermediatereputation "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/intermediate" localreputation "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/local" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/ticker" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" @@ -105,7 +106,7 @@ func addReputationReportHandler(ctx context.Context, c *cfg) { addNewEpochAsyncNotificationHandler( c, func(ev event.Event) { - c.log.Debug("start reporting reputation on new epoch event") + c.log.Debug(logs.FrostFSNodeStartReportingReputationOnNewEpochEvent) var reportPrm localtrustcontroller.ReportPrm @@ -127,13 +128,13 @@ func addEigenTrustEpochHandler(ctx context.Context, c *cfg, eigenTrustController duration, err := c.cfgNetmap.wrapper.EpochDuration() if err != nil { - log.Debug("could not fetch epoch duration", zap.Error(err)) + log.Debug(logs.FrostFSNodeCouldNotFetchEpochDuration, zap.Error(err)) return } iterations, err := c.cfgNetmap.wrapper.EigenTrustIterations() if err != nil { - log.Debug("could not fetch iteration number", zap.Error(err)) + log.Debug(logs.FrostFSNodeCouldNotFetchIterationNumber, zap.Error(err)) return } @@ -145,7 +146,7 @@ func addEigenTrustEpochHandler(ctx context.Context, c *cfg, eigenTrustController ) }) if err != nil { - log.Debug("could not create fixed epoch timer", zap.Error(err)) + log.Debug(logs.FrostFSNodeCouldNotCreateFixedEpochTimer, zap.Error(err)) return } diff --git a/cmd/frostfs-node/reputation/common/remote.go b/cmd/frostfs-node/reputation/common/remote.go index cd0a024a9..f1982301f 100644 --- a/cmd/frostfs-node/reputation/common/remote.go +++ b/cmd/frostfs-node/reputation/common/remote.go @@ -3,6 +3,7 @@ package common import ( "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common" @@ -71,16 +72,16 @@ func NewRemoteTrustProvider(prm RemoteProviderPrm) *RemoteTrustProvider { } func (rtp *RemoteTrustProvider) InitRemote(srv reputationcommon.ServerInfo) (reputationcommon.WriterProvider, error) { - rtp.log.Debug("initializing remote writer provider") + rtp.log.Debug(logs.CommonInitializingRemoteWriterProvider) if srv == nil { - rtp.log.Debug("route has reached dead-end provider") + rtp.log.Debug(logs.CommonRouteHasReachedDeadendProvider) return rtp.deadEndProvider, nil } if rtp.netmapKeys.IsLocalKey(srv.PublicKey()) { // if local => return no-op writer - rtp.log.Debug("initializing no-op writer provider") + rtp.log.Debug(logs.CommonInitializingNoopWriterProvider) return trustcontroller.SimpleWriterProvider(new(NopReputationWriter)), nil } diff --git a/cmd/frostfs-node/reputation/intermediate/consumers.go 
b/cmd/frostfs-node/reputation/intermediate/consumers.go index 33eab605b..02cdb2a2b 100644 --- a/cmd/frostfs-node/reputation/intermediate/consumers.go +++ b/cmd/frostfs-node/reputation/intermediate/consumers.go @@ -3,6 +3,7 @@ package intermediate import ( "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation" reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust" @@ -31,7 +32,7 @@ type ConsumerTrustWriter struct { } func (w *ConsumerTrustWriter) Write(_ context.Context, t reputation.Trust) error { - w.log.Debug("writing received consumer's trusts", + w.log.Debug(logs.IntermediateWritingReceivedConsumersTrusts, zap.Uint64("epoch", w.iterInfo.Epoch()), zap.Uint32("iteration", w.iterInfo.I()), zap.Stringer("trusting_peer", t.TrustingPeer()), diff --git a/cmd/frostfs-node/reputation/intermediate/contract.go b/cmd/frostfs-node/reputation/intermediate/contract.go index 6303b1219..2d83598bc 100644 --- a/cmd/frostfs-node/reputation/intermediate/contract.go +++ b/cmd/frostfs-node/reputation/intermediate/contract.go @@ -4,6 +4,7 @@ import ( "crypto/ecdsa" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust" eigentrustcalc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/calculator" @@ -71,7 +72,7 @@ type FinalWriter struct { } func (fw FinalWriter) WriteIntermediateTrust(t eigentrust.IterationTrust) error { - fw.l.Debug("start writing global trusts to contract") + fw.l.Debug(logs.IntermediateStartWritingGlobalTrustsToContract) args := repClient.PutPrm{} diff --git a/cmd/frostfs-node/reputation/intermediate/daughters.go b/cmd/frostfs-node/reputation/intermediate/daughters.go index d72eead43..30237537c 100644 --- a/cmd/frostfs-node/reputation/intermediate/daughters.go +++ b/cmd/frostfs-node/reputation/intermediate/daughters.go @@ -3,6 +3,7 @@ package intermediate import ( "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation" reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/storage/daughters" @@ -27,7 +28,7 @@ type DaughterTrustWriter struct { } func (w *DaughterTrustWriter) Write(_ context.Context, t reputation.Trust) error { - w.log.Debug("writing received daughter's trusts", + w.log.Debug(logs.IntermediateWritingReceivedDaughtersTrusts, zap.Uint64("epoch", w.ep.Epoch()), zap.Stringer("trusting_peer", t.TrustingPeer()), zap.Stringer("trusted_peer", t.Peer()), diff --git a/cmd/frostfs-node/reputation/intermediate/remote.go b/cmd/frostfs-node/reputation/intermediate/remote.go index b1a218b94..8087463b5 100644 --- a/cmd/frostfs-node/reputation/intermediate/remote.go +++ b/cmd/frostfs-node/reputation/intermediate/remote.go @@ -6,6 +6,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/common" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/internal/client" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation" reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common" @@ -92,7 +93,7 @@ func (rtp *RemoteTrustWriter) Write(ctx context.Context, t reputation.Trust) err epoch := rtp.iterInfo.Epoch() i := rtp.iterInfo.I() - rtp.log.Debug("announcing trust", + rtp.log.Debug(logs.IntermediateAnnouncingTrust, zap.Uint64("epoch", epoch), zap.Uint32("iteration", i), zap.Stringer("trusting_peer", t.TrustingPeer()), diff --git a/cmd/frostfs-node/reputation/local/remote.go b/cmd/frostfs-node/reputation/local/remote.go index 3c929a9ca..6197c6d69 100644 --- a/cmd/frostfs-node/reputation/local/remote.go +++ b/cmd/frostfs-node/reputation/local/remote.go @@ -6,6 +6,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/common" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/internal/client" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation" reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common" @@ -96,7 +97,7 @@ func (rtp *RemoteTrustWriter) Write(_ context.Context, t reputation.Trust) error func (rtp *RemoteTrustWriter) Close(ctx context.Context) error { epoch := rtp.ep.Epoch() - rtp.log.Debug("announcing trusts", + rtp.log.Debug(logs.LocalAnnouncingTrusts, zap.Uint64("epoch", epoch), ) diff --git a/cmd/frostfs-node/reputation/local/storage.go b/cmd/frostfs-node/reputation/local/storage.go index 861151871..a0dc3d4ce 100644 --- a/cmd/frostfs-node/reputation/local/storage.go +++ b/cmd/frostfs-node/reputation/local/storage.go @@ -4,6 +4,7 @@ import ( "bytes" "errors" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation" reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common" @@ -27,7 +28,7 @@ type TrustStorage struct { func (s *TrustStorage) InitIterator(ep reputationcommon.EpochProvider) (trustcontroller.Iterator, error) { epoch := ep.Epoch() - s.Log.Debug("initializing iterator over trusts", + s.Log.Debug(logs.LocalInitializingIteratorOverTrusts, zap.Uint64("epoch", epoch), ) diff --git a/cmd/frostfs-node/tracing.go b/cmd/frostfs-node/tracing.go index bbdb71c64..d963ba866 100644 --- a/cmd/frostfs-node/tracing.go +++ b/cmd/frostfs-node/tracing.go @@ -6,6 +6,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" tracingconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "go.uber.org/zap" ) @@ -14,7 +15,7 @@ func initTracing(ctx context.Context, c *cfg) { _, err := tracing.Setup(ctx, *conf) if err != nil { - c.log.Error("failed init tracing", zap.Error(err)) + c.log.Error(logs.FrostFSNodeFailedInitTracing, zap.Error(err)) } c.closers = append(c.closers, closer{ @@ -24,7 +25,7 @@ func initTracing(ctx context.Context, c *cfg) { defer cancel() err := tracing.Shutdown(ctx) //cfg context cancels before close if err != nil { - c.log.Error("failed shutdown tracing", zap.Error(err)) + c.log.Error(logs.FrostFSNodeFailedShutdownTracing, zap.Error(err)) } }, }) diff --git a/cmd/frostfs-node/tree.go b/cmd/frostfs-node/tree.go index 93a364471..b4f43acac 100644 --- 
a/cmd/frostfs-node/tree.go +++ b/cmd/frostfs-node/tree.go @@ -6,6 +6,7 @@ import ( "time" treeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tree" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" containerClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" @@ -37,7 +38,7 @@ func (c cnrSource) List() ([]cid.ID, error) { func initTreeService(c *cfg) { treeConfig := treeconfig.Tree(c.appCfg) if !treeConfig.Enabled() { - c.log.Info("tree service is not enabled, skip initialization") + c.log.Info(logs.FrostFSNodeTreeServiceIsNotEnabledSkipInitialization) return } @@ -68,7 +69,7 @@ func initTreeService(c *cfg) { addNewEpochNotificationHandler(c, func(_ event.Event) { err := c.treeService.SynchronizeAll() if err != nil { - c.log.Error("could not synchronize Tree Service", zap.Error(err)) + c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err)) } }) } else { @@ -79,7 +80,7 @@ func initTreeService(c *cfg) { for range tick.C { err := c.treeService.SynchronizeAll() if err != nil { - c.log.Error("could not synchronize Tree Service", zap.Error(err)) + c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err)) if errors.Is(err, tree.ErrShuttingDown) { return } @@ -92,11 +93,11 @@ func initTreeService(c *cfg) { ev := e.(containerEvent.DeleteSuccess) // This is executed asynchronously, so we don't care about the operation taking some time. - c.log.Debug("removing all trees for container", zap.Stringer("cid", ev.ID)) + c.log.Debug(logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID)) err := c.treeService.DropTree(context.Background(), ev.ID, "") if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) { // Ignore pilorama.ErrTreeNotFound but other errors, including shard.ErrReadOnly, should be logged. 
- c.log.Error("container removal event received, but trees weren't removed", + c.log.Error(logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved, zap.Stringer("cid", ev.ID), zap.String("error", err.Error())) } diff --git a/internal/logs/logs.go b/internal/logs/logs.go new file mode 100644 index 000000000..46ed8e867 --- /dev/null +++ b/internal/logs/logs.go @@ -0,0 +1,643 @@ +package logs + +const ( + InnerringAmountCanNotBeRepresentedAsAnInt64 = "amount can not be represented as an int64" // Error in ../node/pkg/innerring/settlement.go + InnerringCantGetUsedSpaceEstimation = "can't get used space estimation" // Warn in ../node/pkg/innerring/settlement.go + InnerringSubnetCreationQueueFailure = "subnet creation queue failure" // Error in ../node/pkg/innerring/subnet.go + InnerringDiscardSubnetCreation = "discard subnet creation" // Info in ../node/pkg/innerring/subnet.go + InnerringApproveSubnetCreation = "approve subnet creation" // Error in ../node/pkg/innerring/subnet.go + InnerringSubnetRemovalHandlingFailure = "subnet removal handling failure" // Error in ../node/pkg/innerring/subnet.go + InnerringGettingNetmapCandidates = "getting netmap candidates" // Error in ../node/pkg/innerring/subnet.go + InnerringUnmarshallingRemovedSubnetID = "unmarshalling removed subnet ID" // Error in ../node/pkg/innerring/subnet.go + InnerringIteratingNodesSubnets = "iterating node's subnets" // Error in ../node/pkg/innerring/subnet.go + InnerringRemovingNodeFromNetmapCandidates = "removing node from netmap candidates" // Debug in ../node/pkg/innerring/subnet.go + InnerringRemovingNodeFromCandidates = "removing node from candidates" // Error in ../node/pkg/innerring/subnet.go + InnerringRemovingSubnetFromTheNode = "removing subnet from the node" // Debug in ../node/pkg/innerring/subnet.go + InnerringUpdatingSubnetInfo = "updating subnet info" // Error in ../node/pkg/innerring/subnet.go + InnerringNonalphabetModeDoNotStopContainerEstimations = "non-alphabet mode, do not stop container estimations" // Debug in ../node/pkg/innerring/blocktimer.go + InnerringCantStopEpochEstimation = "can't stop epoch estimation" // Warn in ../node/pkg/innerring/blocktimer.go + InnerringCantMakeNotaryDepositInMainChain = "can't make notary deposit in main chain" // Error in ../node/pkg/innerring/notary.go + InnerringCantMakeNotaryDepositInSideChain = "can't make notary deposit in side chain" // Error in ../node/pkg/innerring/notary.go + InnerringNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made" // Info in ../node/pkg/innerring/notary.go + InnerringCantGetInnerRingIndex = "can't get inner ring index" // Error in ../node/pkg/innerring/state.go + InnerringCantGetInnerRingSize = "can't get inner ring size" // Error in ../node/pkg/innerring/state.go + InnerringCantGetAlphabetIndex = "can't get alphabet index" // Error in ../node/pkg/innerring/state.go + InnerringIgnoreValidatorVoteNodeNotInAlphabetRange = "ignore validator vote: node not in alphabet range" // Info in ../node/pkg/innerring/state.go + InnerringIgnoreValidatorVoteEmptyValidatorsList = "ignore validator vote: empty validators list" // Info in ../node/pkg/innerring/state.go + InnerringCantInvokeVoteMethodInAlphabetContract = "can't invoke vote method in alphabet contract" // Warn in ../node/pkg/innerring/state.go + InnerringCantGetLastProcessedMainChainBlockNumber = "can't get last processed main chain block number" // Warn in ../node/pkg/innerring/initialization.go + InnerringNotarySupport = "notary support" // Info in 
../node/pkg/innerring/initialization.go + InnerringAlphabetKeysSyncIsDisabled = "alphabet keys sync is disabled" // Debug in ../node/pkg/innerring/initialization.go + InnerringNoControlServerEndpointSpecified = "no Control server endpoint specified, service is disabled" // Info in ../node/pkg/innerring/initialization.go + InnerringCantGetLastProcessedSideChainBlockNumber = "can't get last processed side chain block number" // Warn in ../node/pkg/innerring/initialization.go + InnerringFailedToSetGroupSignerScope = "failed to set group signer scope, continue with Global" // Info in ../node/pkg/innerring/initialization.go + InnerringCantVoteForPreparedValidators = "can't vote for prepared validators" // Warn in ../node/pkg/innerring/innerring.go + InnerringNewBlock = "new block" // Debug in ../node/pkg/innerring/innerring.go + InnerringCantUpdatePersistentState = "can't update persistent state" // Warn in ../node/pkg/innerring/innerring.go + InnerringCloserError = "closer error" // Warn in ../node/pkg/innerring/innerring.go + InnerringReadConfigFromBlockchain = "read config from blockchain" // Debug in ../node/pkg/innerring/innerring.go + InnerringCantSetupRemoteConnection = "can't setup remote connection" // Warn in ../node/pkg/innerring/rpc.go + InnerringCantGetStorageGroupObject = "can't get storage group object" // Warn in ../node/pkg/innerring/rpc.go + NotificatorNotificatorStartProcessingObjectNotifications = "notificator: start processing object notifications" // Debug in ../node/pkg/services/notificator/service.go + NotificatorNotificatorProcessingObjectNotification = "notificator: processing object notification" // Debug in ../node/pkg/services/notificator/service.go + PolicerCouldNotGetContainer = "could not get container" // Error in ../node/pkg/services/policer/check.go + PolicerCouldNotInhumeObjectWithMissingContainer = "could not inhume object with missing container" // Error in ../node/pkg/services/policer/check.go + PolicerCouldNotBuildPlacementVectorForObject = "could not build placement vector for object" // Error in ../node/pkg/services/policer/check.go + PolicerRedundantLocalObjectCopyDetected = "redundant local object copy detected" // Info in ../node/pkg/services/policer/check.go + PolicerReceiveObjectHeaderToCheckPolicyCompliance = "receive object header to check policy compliance" // Error in ../node/pkg/services/policer/check.go + PolicerConsiderNodeUnderMaintenanceAsOK = "consider node under maintenance as OK" // Debug in ../node/pkg/services/policer/check.go + PolicerShortageOfObjectCopiesDetected = "shortage of object copies detected" // Debug in ../node/pkg/services/policer/check.go + PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance = "some of the copies are stored on nodes under maintenance, save local copy" // Debug in ../node/pkg/services/policer/check.go + PolicerRoutineStopped = "routine stopped" // Info in ../node/pkg/services/policer/process.go + PolicerFailureAtObjectSelectForReplication = "failure at object select for replication" // Warn in ../node/pkg/services/policer/process.go + PolicerPoolSubmission = "pool submission" // Warn in ../node/pkg/services/policer/process.go + PolicerTuneReplicationCapacity = "tune replication capacity" // Debug in ../node/pkg/services/policer/process.go + ReplicatorFinishWork = "finish work" // Debug in ../node/pkg/services/replicator/process.go + ReplicatorCouldNotGetObjectFromLocalStorage = "could not get object from local storage" // Error in ../node/pkg/services/replicator/process.go + 
ReplicatorCouldNotReplicateObject = "could not replicate object" // Error in ../node/pkg/services/replicator/process.go + ReplicatorObjectSuccessfullyReplicated = "object successfully replicated" // Debug in ../node/pkg/services/replicator/process.go + SessionServingRequest = "serving request..." // Debug in ../node/pkg/services/session/executor.go + TreeRedirectingTreeServiceQuery = "redirecting tree service query" // Debug in ../node/pkg/services/tree/redirect.go + TreeBearerPresentedButNotAllowedByACL = "bearer presented but not allowed by ACL" // Debug in ../node/pkg/services/tree/signature.go + TreeCouldNotGetLastSynchronizedHeightForATree = "could not get last synchronized height for a tree" // Warn in ../node/pkg/services/tree/sync.go + TreeCouldNotUpdateLastSynchronizedHeightForATree = "could not update last synchronized height for a tree" // Warn in ../node/pkg/services/tree/sync.go + TreeSynchronizeTree = "synchronize tree" // Debug in ../node/pkg/services/tree/sync.go + TreeFailedToRunTreeSynchronizationOverAllNodes = "failed to run tree synchronization over all nodes" // Warn in ../node/pkg/services/tree/sync.go + TreeSyncingTrees = "syncing trees..." // Debug in ../node/pkg/services/tree/sync.go + TreeCouldNotFetchContainers = "could not fetch containers" // Error in ../node/pkg/services/tree/sync.go + TreeTreesHaveBeenSynchronized = "trees have been synchronized" // Debug in ../node/pkg/services/tree/sync.go + TreeSyncingContainerTrees = "syncing container trees..." // Debug in ../node/pkg/services/tree/sync.go + TreeCouldNotSyncTrees = "could not sync trees" // Error in ../node/pkg/services/tree/sync.go + TreeContainerTreesHaveBeenSynced = "container trees have been synced" // Debug in ../node/pkg/services/tree/sync.go + TreeCouldNotQueryTreesForSynchronization = "could not query trees for synchronization" // Error in ../node/pkg/services/tree/sync.go + TreeRemovingRedundantTrees = "removing redundant trees..." 
// Debug in ../node/pkg/services/tree/sync.go + TreeCouldNotRemoveRedundantTree = "could not remove redundant tree" // Error in ../node/pkg/services/tree/sync.go + TreeCouldNotCalculateContainerNodes = "could not calculate container nodes" // Error in ../node/pkg/services/tree/sync.go + TreeFailedToApplyReplicatedOperation = "failed to apply replicated operation" // Error in ../node/pkg/services/tree/replicator.go + TreeDoNotSendUpdateToTheNode = "do not send update to the node" // Debug in ../node/pkg/services/tree/replicator.go + TreeFailedToSentUpdateToTheNode = "failed to sent update to the node" // Warn in ../node/pkg/services/tree/replicator.go + TreeErrorDuringReplication = "error during replication" // Error in ../node/pkg/services/tree/replicator.go + PersistentCouldNotGetSessionFromPersistentStorage = "could not get session from persistent storage" // Error in ../node/pkg/services/session/storage/persistent/storage.go + PersistentCouldNotDeleteSToken = "could not delete token" // Error in ../node/pkg/services/session/storage/persistent/storage.go + PersistentCouldNotCleanUpExpiredTokens = "could not clean up expired tokens" // Error in ../node/pkg/services/session/storage/persistent/storage.go + CommonStartBuildingManagers = "start building managers" // Debug in ../node/pkg/services/reputation/common/managers.go + ControllerReportIsAlreadyStarted = "report is already started" // Debug in ../node/pkg/services/reputation/local/controller/calls.go + ControllerStartingToReportLocalTrustValues = "starting to report local trust values" // Debug in ../node/pkg/services/reputation/local/controller/calls.go + ControllerCouldNotInitializeIteratorOverLocalTrustValues = "could not initialize iterator over local trust values" // Debug in ../node/pkg/services/reputation/local/controller/calls.go + ControllerCouldNotInitializeLocalTrustTarget = "could not initialize local trust target" // Debug in ../node/pkg/services/reputation/local/controller/calls.go + ControllerIteratorOverLocalTrustFailed = "iterator over local trust failed" // Debug in ../node/pkg/services/reputation/local/controller/calls.go + ControllerCouldNotFinishWritingLocalTrustValues = "could not finish writing local trust values" // Debug in ../node/pkg/services/reputation/local/controller/calls.go + ControllerReportingSuccessfullyFinished = "reporting successfully finished" // Debug in ../node/pkg/services/reputation/local/controller/calls.go + ControllerReportingSuccessfullyInterrupted = "reporting successfully interrupted" // Debug in ../node/pkg/services/reputation/local/controller/calls.go + ControllerReportingIsNotStartedOrAlreadyInterrupted = "reporting is not started or already interrupted" // Debug in ../node/pkg/services/reputation/local/controller/calls.go + RoutesBuildingNextStageForLocalTrustRoute = "building next stage for local trust route" // Debug in ../node/pkg/services/reputation/local/routes/calls.go + CalculatorFailedToGetAlphaParam = "failed to get alpha param" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go + CalculatorConsumersTrustIteratorsInitFailure = "consumers trust iterator's init failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go + CalculatorWorkerPoolSubmitFailure = "worker pool submit failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go + CalculatorIterateDaughtersConsumersFailed = "iterate daughter's consumers failed" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go + 
CalculatorGetInitialTrustFailure = "get initial trust failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go + CalculatorDaughterTrustIteratorsInitFailure = "daughter trust iterator's init failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go + CalculatorIterateOverDaughtersTrustsFailure = "iterate over daughter's trusts failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go + CalculatorInitWriterFailure = "init writer failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go + CalculatorWriteFinalResultFailure = "write final result failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go + CalculatorWriteValueFailure = "write value failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go + CalculatorIterateDaughterTrustsFailure = "iterate daughter trusts failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go + CalculatorCouldNotCloseWriter = "could not close writer" // Error in ../node/pkg/services/reputation/eigentrust/calculator/calls.go + CalculatorAllDaughtersTrustIteratorsInitFailure = "all daughters trust iterator's init failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go + CalculatorIterateOverAllDaughtersFailure = "iterate over all daughters failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go + ControllerCouldNotGetEigenTrustIterationNumber = "could not get EigenTrust iteration number" // Error in ../node/pkg/services/reputation/eigentrust/controller/calls.go + ControllerIterationSubmitFailure = "iteration submit failure" // Debug in ../node/pkg/services/reputation/eigentrust/controller/calls.go + RoutesBuildingNextStageForTrustRoute = "building next stage for trust route" // Debug in ../node/pkg/services/reputation/eigentrust/routes/calls.go + RouterCouldNotInitializeWriterProvider = "could not initialize writer provider" // Debug in ../node/pkg/services/reputation/common/router/calls.go + RouterCouldNotInitializeWriter = "could not initialize writer" // Debug in ../node/pkg/services/reputation/common/router/calls.go + RouterCouldNotWriteTheValue = "could not write the value" // Debug in ../node/pkg/services/reputation/common/router/calls.go + RouterCouldNotCloseRemoteServerWriter = "could not close remote server writer" // Debug in ../node/pkg/services/reputation/common/router/calls.go + TombstoneCouldNotGetTheTombstoneTheSource = "tombstone getter: could not get the tombstone the source" // Warn in ../node/pkg/services/object_manager/tombstone/checker.go + TombstoneCouldNotParseTombstoneExpirationEpoch = "tombstone getter: could not parse tombstone expiration epoch" // Warn in ../node/pkg/services/object_manager/tombstone/checker.go + DeleteRequestIsNotRolledOverToTheContainer = "request is not rolled over to the container" // Debug in ../node/pkg/services/object/delete/container.go + DeleteServingRequest = "serving request..." 
+	DeleteOperationFinishedSuccessfully = "operation finished successfully" // Debug in ../node/pkg/services/object/delete/delete.go
+	DeleteOperationFinishedWithError = "operation finished with error" // Debug in ../node/pkg/services/object/delete/delete.go
+	DeleteCouldNotComposeSplitInfo = "could not compose split info" // Debug in ../node/pkg/services/object/delete/exec.go
+	DeleteNoSplitInfoObjectIsPHY = "no split info, object is PHY" // Debug in ../node/pkg/services/object/delete/exec.go
+	DeleteAssemblingChain = "assembling chain..." // Debug in ../node/pkg/services/object/delete/exec.go
+	DeleteCouldNotGetPreviousSplitElement = "could not get previous split element" // Debug in ../node/pkg/services/object/delete/exec.go
+	DeleteCollectingChildren = "collecting children..." // Debug in ../node/pkg/services/object/delete/exec.go
+	DeleteCouldNotCollectObjectChildren = "could not collect object children" // Debug in ../node/pkg/services/object/delete/exec.go
+	DeleteSupplementBySplitID = "supplement by split ID" // Debug in ../node/pkg/services/object/delete/exec.go
+	DeleteCouldNotSearchForSplitChainMembers = "could not search for split chain members" // Debug in ../node/pkg/services/object/delete/exec.go
+	DeleteCouldNotMarshalTombstoneStructure = "could not marshal tombstone structure" // Debug in ../node/pkg/services/object/delete/exec.go
+	DeleteCouldNotSaveTheTombstone = "could not save the tombstone" // Debug in ../node/pkg/services/object/delete/exec.go
+	DeleteFormingTombstoneStructure = "forming tombstone structure..." // Debug in ../node/pkg/services/object/delete/local.go
+	DeleteTombstoneStructureSuccessfullyFormedSaving = "tombstone structure successfully formed, saving..." // Debug in ../node/pkg/services/object/delete/local.go
+	DeleteCouldNotReadTombstoneLifetimeConfig = "could not read tombstone lifetime config" // Debug in ../node/pkg/services/object/delete/local.go
+	DeleteFormingSplitInfo = "forming split info..." // Debug in ../node/pkg/services/object/delete/local.go
+	DeleteSplitInfoSuccessfullyFormedCollectingMembers = "split info successfully formed, collecting members..." // Debug in ../node/pkg/services/object/delete/local.go
+	DeleteMembersSuccessfullyCollected = "members successfully collected" // Debug in ../node/pkg/services/object/delete/local.go
+	GetProcessingNode = "processing node..." // Debug in ../node/pkg/services/object/get/remote.go
+	GetRemoteCallFailed = "remote call failed" // Debug in ../node/pkg/services/object/get/remote.go
+	GetCanNotAssembleTheObject = "can not assemble the object" // Debug in ../node/pkg/services/object/get/assemble.go
+	GetTryingToAssembleTheObject = "trying to assemble the object..." // Debug in ../node/pkg/services/object/get/assemble.go
+	GetAssemblingSplittedObject = "assembling splitted object..." // Debug in ../node/pkg/services/object/get/assemble.go
+	GetAssemblingSplittedObjectCompleted = "assembling splitted object completed" // Debug in ../node/pkg/services/object/get/assemble.go
+	GetFailedToAssembleSplittedObject = "failed to assemble splitted object" // Warn in ../node/pkg/services/object/get/assemble.go
+	GetCouldNotGetCurrentEpochNumber = "could not get current epoch number" // Debug in ../node/pkg/services/object/get/exec.go
+	GetCouldNotGenerateContainerTraverser = "could not generate container traverser" // Debug in ../node/pkg/services/object/get/exec.go
+	GetCouldNotConstructRemoteNodeClient = "could not construct remote node client" // Debug in ../node/pkg/services/object/get/exec.go
+	GetCouldNotWriteHeader = "could not write header" // Debug in ../node/pkg/services/object/get/exec.go
+	GetCouldNotWritePayloadChunk = "could not write payload chunk" // Debug in ../node/pkg/services/object/get/exec.go
+	GetLocalGetFailed = "local get failed" // Debug in ../node/pkg/services/object/get/local.go
+	GetReturnResultDirectly = "return result directly" // Debug in ../node/pkg/services/object/get/container.go
+	GetTryingToExecuteInContainer = "trying to execute in container..." // Debug in ../node/pkg/services/object/get/container.go
+	GetProcessEpoch = "process epoch" // Debug in ../node/pkg/services/object/get/container.go
+	GetNoMoreNodesAbortPlacementIteration = "no more nodes, abort placement iteration" // Debug in ../node/pkg/services/object/get/container.go
+	GetInterruptPlacementIterationByContext = "interrupt placement iteration by context" // Debug in ../node/pkg/services/object/get/container.go
+	GetCompletingTheOperation = "completing the operation" // Debug in ../node/pkg/services/object/get/container.go
+	GetServingRequest = "serving request..." // Debug in ../node/pkg/services/object/get/get.go
+	GetOperationFinishedSuccessfully = "operation finished successfully" // Debug in ../node/pkg/services/object/get/get.go
+	GetRequestedObjectWasMarkedAsRemoved = "requested object was marked as removed" // Debug in ../node/pkg/services/object/get/get.go
+	GetRequestedObjectIsVirtual = "requested object is virtual" // Debug in ../node/pkg/services/object/get/get.go
+	GetRequestedRangeIsOutOfObjectBounds = "requested range is out of object bounds" // Debug in ../node/pkg/services/object/get/get.go
+	GetOperationFinishedWithError = "operation finished with error" // Debug in ../node/pkg/services/object/get/get.go
+	PutAdditionalContainerBroadcastFailure = "additional container broadcast failure" // Error in ../node/pkg/services/object/put/distributed.go
+	SearchReturnResultDirectly = "return result directly" // Debug in ../node/pkg/services/object/search/container.go
+	SearchTryingToExecuteInContainer = "trying to execute in container..." // Debug in ../node/pkg/services/object/search/container.go
+	SearchProcessEpoch = "process epoch" // Debug in ../node/pkg/services/object/search/container.go
+	SearchNoMoreNodesAbortPlacementIteration = "no more nodes, abort placement iteration" // Debug in ../node/pkg/services/object/search/container.go
+	SearchInterruptPlacementIterationByContext = "interrupt placement iteration by context" // Debug in ../node/pkg/services/object/search/container.go
+	SearchProcessingNode = "processing node..." // Debug in ../node/pkg/services/object/search/container.go
+	SearchCouldNotConstructRemoteNodeClient = "could not construct remote node client" // Debug in ../node/pkg/services/object/search/container.go
+	SearchRemoteOperationFailed = "remote operation failed" // Debug in ../node/pkg/services/object/search/container.go
+	SearchCouldNotGetCurrentEpochNumber = "could not get current epoch number" // Debug in ../node/pkg/services/object/search/exec.go
+	SearchCouldNotGenerateContainerTraverser = "could not generate container traverser" // Debug in ../node/pkg/services/object/search/exec.go
+	SearchCouldNotWriteObjectIdentifiers = "could not write object identifiers" // Debug in ../node/pkg/services/object/search/exec.go
+	SearchLocalOperationFailed = "local operation failed" // Debug in ../node/pkg/services/object/search/local.go
+	SearchServingRequest = "serving request..." // Debug in ../node/pkg/services/object/search/search.go
+	SearchOperationFinishedWithError = "operation finished with error" // Debug in ../node/pkg/services/object/search/search.go
+	SearchOperationFinishedSuccessfully = "operation finished successfully" // Debug in ../node/pkg/services/object/search/search.go
+	UtilObjectServiceError = "object service error" // Error in ../node/pkg/services/object/util/log.go
+	UtilCouldNotPushTaskToWorkerPool = "could not push task to worker pool" // Error in ../node/pkg/services/object/util/log.go
+	V2CantCheckIfRequestFromInnerRing = "can't check if request from inner ring" // Debug in ../node/pkg/services/object/acl/v2/classifier.go
+	V2CantCheckIfRequestFromContainerNode = "can't check if request from container node" // Debug in ../node/pkg/services/object/acl/v2/classifier.go
+	NatsNatsConnectionWasLost = "nats: connection was lost" // Error in ../node/pkg/services/notificator/nats/service.go
+	NatsNatsReconnectedToTheServer = "nats: reconnected to the server" // Warn in ../node/pkg/services/notificator/nats/service.go
+	NatsNatsClosingConnectionAsTheContextIsDone = "nats: closing connection as the context is done" // Info in ../node/pkg/services/notificator/nats/service.go
+	ControllerStartingToAnnounceTheValuesOfTheMetrics = "starting to announce the values of the metrics" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
+	ControllerCouldNotInitializeIteratorOverLocallyCollectedMetrics = "could not initialize iterator over locally collected metrics" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
+	ControllerCouldNotInitializeAnnouncementAccumulator = "could not initialize announcement accumulator" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
+	ControllerIteratorOverLocallyCollectedMetricsAborted = "iterator over locally collected metrics aborted" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
+	ControllerCouldNotFinishWritingLocalAnnouncements = "could not finish writing local announcements" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
+	ControllerTrustAnnouncementSuccessfullyFinished = "trust announcement successfully finished" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
+	ControllerAnnouncementIsAlreadyStarted = "announcement is already started" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
+	ControllerAnnouncementSuccessfullyInterrupted = "announcement successfully interrupted" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
+	ControllerAnnouncementIsNotStartedOrAlreadyInterrupted = "announcement is not started or already interrupted" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
+	ControllerCouldNotInitializeIteratorOverLocallyAccumulatedAnnouncements = "could not initialize iterator over locally accumulated announcements" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
+	ControllerCouldNotInitializeResultTarget = "could not initialize result target" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
+	ControllerIteratorOverLocalAnnouncementsAborted = "iterator over local announcements aborted" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
+	ControllerCouldNotFinishWritingLoadEstimations = "could not finish writing load estimations" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
+	RouteCouldNotInitializeWriterProvider = "could not initialize writer provider" // Debug in ../node/pkg/services/container/announcement/load/route/calls.go
+	RouteCouldNotInitializeWriter = "could not initialize writer" // Debug in ../node/pkg/services/container/announcement/load/route/calls.go
+	RouteCouldNotPutTheValue = "could not put the value" // Debug in ../node/pkg/services/container/announcement/load/route/calls.go
+	RouteCouldNotCloseRemoteServerWriter = "could not close remote server writer" // Debug in ../node/pkg/services/container/announcement/load/route/calls.go
+	AuditorCouldNotGetObjectHeaderFromCandidate = "could not get object header from candidate" // Debug in ../node/pkg/services/audit/auditor/pop.go
+	AuditorCouldNotBuildPlacementForObject = "could not build placement for object" // Debug in ../node/pkg/services/audit/auditor/pop.go
+	AuditorCantHeadObject = "can't head object" // Debug in ../node/pkg/services/audit/auditor/por.go
+	AuditorCantConcatenateTzHash = "can't concatenate tz hash" // Debug in ../node/pkg/services/audit/auditor/por.go
+	AuditorStorageGroupSizeCheckFailed = "storage group size check failed" // Debug in ../node/pkg/services/audit/auditor/por.go
+	AuditorStorageGroupTzHashCheckFailed = "storage group tz hash check failed" // Debug in ../node/pkg/services/audit/auditor/por.go
+	AuditorCantBuildPlacementForStorageGroupMember = "can't build placement for storage group member" // Info in ../node/pkg/services/audit/auditor/por.go
+	AuditorAuditContextIsDone = "audit context is done" // Debug in ../node/pkg/services/audit/auditor/context.go
+	AuditorWritingAuditReport = "writing audit report..." // Debug in ../node/pkg/services/audit/auditor/context.go
+	AuditorCouldNotWriteAuditReport = "could not write audit report" // Error in ../node/pkg/services/audit/auditor/context.go
+	AuditorSleepBeforeGetRangeHash = "sleep before get range hash" // Debug in ../node/pkg/services/audit/auditor/pdp.go
+	AuditorCouldNotGetPayloadRangeHash = "could not get payload range hash" // Debug in ../node/pkg/services/audit/auditor/pdp.go
+	TaskmanagerProcessRoutine = "process routine" // Info in ../node/pkg/services/audit/taskmanager/listen.go
+	TaskmanagerStopListenerByContext = "stop listener by context" // Warn in ../node/pkg/services/audit/taskmanager/listen.go
+	TaskmanagerQueueChannelIsClosed = "queue channel is closed" // Warn in ../node/pkg/services/audit/taskmanager/listen.go
+	TaskmanagerCouldNotGeneratePDPWorkerPool = "could not generate PDP worker pool" // Error in ../node/pkg/services/audit/taskmanager/listen.go
+	TaskmanagerCouldNotGeneratePoRWorkerPool = "could not generate PoR worker pool" // Error in ../node/pkg/services/audit/taskmanager/listen.go
+	TaskmanagerCouldNotSubmitAuditTask = "could not submit audit task" // Warn in ../node/pkg/services/audit/taskmanager/listen.go
+	ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch = "could not restore block subscription after RPC switch" // Error in ../node/pkg/morph/client/notifications.go
+	ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch = "could not restore notification subscription after RPC switch" // Error in ../node/pkg/morph/client/notifications.go
+	ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch = "could not restore notary notification subscription after RPC switch" // Error in ../node/pkg/morph/client/notifications.go
+	ClientCouldNotEstablishConnectionToTheSwitchedRPCNode = "could not establish connection to the switched RPC node" // Warn in ../node/pkg/morph/client/multi.go
+	ClientConnectionToTheNewRPCNodeHasBeenEstablished = "connection to the new RPC node has been established" // Info in ../node/pkg/morph/client/multi.go
+	ClientSwitchingToTheNextRPCNode = "switching to the next RPC node" // Warn in ../node/pkg/morph/client/multi.go
+	ClientCouldNotEstablishConnectionToAnyRPCNode = "could not establish connection to any RPC node" // Error in ../node/pkg/morph/client/multi.go
+	ClientCouldNotCreateClientToTheHigherPriorityNode = "could not create client to the higher priority node" // Warn in ../node/pkg/morph/client/multi.go
+	ClientSwitchedToTheHigherPriorityRPC = "switched to the higher priority RPC" // Info in ../node/pkg/morph/client/multi.go
+	ClientCouldNotRestoreSideChainSubscriptionsUsingNode = "could not restore side chain subscriptions using node" // Warn in ../node/pkg/morph/client/multi.go
+	ClientNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made" // Info in ../node/pkg/morph/client/notary.go
+	ClientNotaryDepositInvoke = "notary deposit invoke" // Info in ../node/pkg/morph/client/notary.go
+	ClientNotaryRequestWithPreparedMainTXInvoked = "notary request with prepared main TX invoked" // Debug in ../node/pkg/morph/client/notary.go
+	ClientNotaryRequestInvoked = "notary request invoked" // Debug in ../node/pkg/morph/client/notary.go
+	ClientNeoClientInvoke = "neo client invoke" // Debug in ../node/pkg/morph/client/client.go
+	ClientNativeGasTransferInvoke = "native gas transfer invoke" // Debug in ../node/pkg/morph/client/client.go
+	ClientBatchGasTransferInvoke = "batch gas transfer invoke" // Debug in ../node/pkg/morph/client/client.go
+	ClientCantGetBlockchainHeight = "can't get blockchain height" // Error in ../node/pkg/morph/client/client.go
+	ClientCantGetBlockchainHeight243 = "can't get blockchain height" // Error in ../node/pkg/morph/client/client.go
+	EventCouldNotSubmitHandlerToWorkerPool = "could not Submit handler to worker pool" // Warn in ../node/pkg/morph/event/utils.go
+	EventCouldNotStartListenToEvents = "could not start listen to events" // Error in ../node/pkg/morph/event/listener.go
+	EventStopEventListenerByError = "stop event listener by error" // Error in ../node/pkg/morph/event/listener.go
+	EventStopEventListenerByContext = "stop event listener by context" // Info in ../node/pkg/morph/event/listener.go
+	EventStopEventListenerByNotificationChannel = "stop event listener by notification channel" // Warn in ../node/pkg/morph/event/listener.go
+	EventNilNotificationEventWasCaught = "nil notification event was caught" // Warn in ../node/pkg/morph/event/listener.go
+	EventStopEventListenerByNotaryChannel = "stop event listener by notary channel" // Warn in ../node/pkg/morph/event/listener.go
+	EventNilNotaryEventWasCaught = "nil notary event was caught" // Warn in ../node/pkg/morph/event/listener.go
+	EventStopEventListenerByBlockChannel = "stop event listener by block channel" // Warn in ../node/pkg/morph/event/listener.go
+	EventNilBlockWasCaught = "nil block was caught" // Warn in ../node/pkg/morph/event/listener.go
+	EventListenerWorkerPoolDrained = "listener worker pool drained" // Warn in ../node/pkg/morph/event/listener.go
+	EventEventParserNotSet = "event parser not set" // Debug in ../node/pkg/morph/event/listener.go
+	EventCouldNotParseNotificationEvent = "could not parse notification event" // Warn in ../node/pkg/morph/event/listener.go
+	EventNotificationHandlersForParsedNotificationEventWereNotRegistered = "notification handlers for parsed notification event were not registered" // Info in ../node/pkg/morph/event/listener.go
+	EventSkipExpiredMainTXNotaryEvent = "skip expired main TX notary event" // Warn in ../node/pkg/morph/event/listener.go
+	EventCouldNotPrepareAndValidateNotaryEvent = "could not prepare and validate notary event" // Warn in ../node/pkg/morph/event/listener.go
+	EventNotaryParserNotSet = "notary parser not set" // Debug in ../node/pkg/morph/event/listener.go
+	EventCouldNotParseNotaryEvent = "could not parse notary event" // Warn in ../node/pkg/morph/event/listener.go
+	EventNotaryHandlersForParsedNotificationEventWereNotRegistered = "notary handlers for parsed notification event were not registered" // Info in ../node/pkg/morph/event/listener.go
+	EventIgnoreNilEventParser = "ignore nil event parser" // Info in ../node/pkg/morph/event/listener.go
+	EventListenerHasBeenAlreadyStartedIgnoreParser = "listener has been already started, ignore parser" // Warn in ../node/pkg/morph/event/listener.go
+	EventRegisteredNewEventParser = "registered new event parser" // Debug in ../node/pkg/morph/event/listener.go
+	EventIgnoreNilEventHandler = "ignore nil event handler" // Warn in ../node/pkg/morph/event/listener.go
+	EventIgnoreHandlerOfEventWoParser = "ignore handler of event w/o parser" // Warn in ../node/pkg/morph/event/listener.go
+	EventRegisteredNewEventHandler = "registered new event handler" // Debug in ../node/pkg/morph/event/listener.go
+	EventIgnoreNilNotaryEventParser = "ignore nil notary event parser" // Info in ../node/pkg/morph/event/listener.go
+	EventListenerHasBeenAlreadyStartedIgnoreNotaryParser = "listener has been already started, ignore notary parser" // Warn in ../node/pkg/morph/event/listener.go
+	EventIgnoreNilNotaryEventHandler = "ignore nil notary event handler" // Warn in ../node/pkg/morph/event/listener.go
+	EventIgnoreHandlerOfNotaryEventWoParser = "ignore handler of notary event w/o parser" // Warn in ../node/pkg/morph/event/listener.go
+	EventIgnoreNilBlockHandler = "ignore nil block handler" // Warn in ../node/pkg/morph/event/listener.go
+	SubscriberUnsubscribeForNotification = "unsubscribe for notification" // Error in ../node/pkg/morph/subscriber/subscriber.go
+	SubscriberRemoteNotificationChannelHasBeenClosed = "remote notification channel has been closed" // Warn in ../node/pkg/morph/subscriber/subscriber.go
+	SubscriberCantCastNotifyEventValueToTheNotifyStruct = "can't cast notify event value to the notify struct" // Error in ../node/pkg/morph/subscriber/subscriber.go
+	SubscriberNewNotificationEventFromSidechain = "new notification event from sidechain" // Debug in ../node/pkg/morph/subscriber/subscriber.go
+	SubscriberCantCastBlockEventValueToBlock = "can't cast block event value to block" // Error in ../node/pkg/morph/subscriber/subscriber.go
+	SubscriberCantCastNotifyEventValueToTheNotaryRequestStruct = "can't cast notify event value to the notary request struct" // Error in ../node/pkg/morph/subscriber/subscriber.go
+	SubscriberUnsupportedNotificationFromTheChain = "unsupported notification from the chain" // Debug in ../node/pkg/morph/subscriber/subscriber.go
+	BlobovniczaCreatingDirectoryForBoltDB = "creating directory for BoltDB" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
+	BlobovniczaOpeningBoltDB = "opening BoltDB" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
+	BlobovniczaInitializing = "initializing..." // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
+	BlobovniczaAlreadyInitialized = "already initialized" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
+	BlobovniczaCreatingBucketForSizeRange = "creating bucket for size range" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
+	BlobovniczaClosingBoltDB = "closing BoltDB" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
+	BlobovniczaObjectWasRemovedFromBucket = "object was removed from bucket" // Debug in ../node/pkg/local_object_storage/blobovnicza/delete.go
+	BlobstorOpening = "opening..." // Debug in ../node/pkg/local_object_storage/blobstor/control.go
+	BlobstorInitializing = "initializing..." // Debug in ../node/pkg/local_object_storage/blobstor/control.go
+	BlobstorClosing = "closing..." // Debug in ../node/pkg/local_object_storage/blobstor/control.go
+	BlobstorCouldntCloseStorage = "couldn't close storage" // Info in ../node/pkg/local_object_storage/blobstor/control.go
+	BlobstorErrorOccurredDuringObjectExistenceChecking = "error occurred during object existence checking" // Warn in ../node/pkg/local_object_storage/blobstor/exists.go
+	BlobstorErrorOccurredDuringTheIteration = "error occurred during the iteration" // Warn in ../node/pkg/local_object_storage/blobstor/iterate.go
+	EngineShardHasBeenRemoved = "shard has been removed" // Info in ../node/pkg/local_object_storage/engine/shards.go
+	EngineCouldNotCloseRemovedShard = "could not close removed shard" // Error in ../node/pkg/local_object_storage/engine/shards.go
+	EngineCouldNotOpenShardClosingAndSkipping = "could not open shard, closing and skipping" // Error in ../node/pkg/local_object_storage/engine/control.go
+	EngineCouldNotClosePartiallyInitializedShard = "could not close partially initialized shard" // Error in ../node/pkg/local_object_storage/engine/control.go
+	EngineCouldNotInitializeShardClosingAndSkipping = "could not initialize shard, closing and skipping" // Error in ../node/pkg/local_object_storage/engine/control.go
+	EngineCouldNotCloseShard = "could not close shard" // Debug in ../node/pkg/local_object_storage/engine/control.go
+	EngineCouldNotReloadAShard = "could not reload a shard" // Error in ../node/pkg/local_object_storage/engine/control.go
+	EngineAddedNewShard = "added new shard" // Info in ../node/pkg/local_object_storage/engine/control.go
+	EngineCouldNotMarkObjectForShardRelocation = "could not mark object for shard relocation" // Warn in ../node/pkg/local_object_storage/engine/put.go
+	EngineCouldNotPutObjectToShard = "could not put object to shard" // Warn in ../node/pkg/local_object_storage/engine/put.go
+	EngineErrorDuringSearchingForObjectChildren = "error during searching for object children" // Warn in ../node/pkg/local_object_storage/engine/delete.go
+	EngineCouldNotInhumeObjectInShard = "could not inhume object in shard" // Debug in ../node/pkg/local_object_storage/engine/delete.go
+	EngineStartingRemovalOfLocallyredundantCopies = "starting removal of locally-redundant copies" // Info in ../node/pkg/local_object_storage/engine/remove_copies.go
+	EngineStartedDuplicatesRemovalRoutine = "started duplicates removal routine" // Debug in ../node/pkg/local_object_storage/engine/remove_copies.go
+	EngineFinishedRemovalOfLocallyredundantCopies = "finished removal of locally-redundant copies" // Error in ../node/pkg/local_object_storage/engine/remove_copies.go
+	EngineRemovingAnObjectWithoutFullLockingCheck = "removing an object without full locking check" // Warn in ../node/pkg/local_object_storage/engine/inhume.go
+	EngineInterruptProcessingTheExpiredLocks = "interrupt processing the expired locks" // Info in ../node/pkg/local_object_storage/engine/inhume.go
+	EngineInterruptProcessingTheDeletedLocks = "interrupt processing the deleted locks" // Info in ../node/pkg/local_object_storage/engine/inhume.go
+	EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly = "failed to move shard in degraded-read-only mode, moving to read-only" // Error in ../node/pkg/local_object_storage/engine/engine.go
+	EngineFailedToMoveShardInReadonlyMode = "failed to move shard in read-only mode" // Error in ../node/pkg/local_object_storage/engine/engine.go
+	EngineShardIsMovedInReadonlyModeDueToErrorThreshold = "shard is moved in read-only mode due to error threshold" // Info in ../node/pkg/local_object_storage/engine/engine.go
+	EngineShardIsMovedInDegradedModeDueToErrorThreshold = "shard is moved in degraded mode due to error threshold" // Info in ../node/pkg/local_object_storage/engine/engine.go
+	EngineModeChangeIsInProgressIgnoringSetmodeRequest = "mode change is in progress, ignoring set-mode request" // Debug in ../node/pkg/local_object_storage/engine/engine.go
+	EngineStartedShardsEvacuation = "started shards evacuation" // Info in ../node/pkg/local_object_storage/engine/evacuate.go
+	EngineFinishedShardsEvacuation = "finished shards evacuation" // Info in ../node/pkg/local_object_storage/engine/evacuate.go
+	EngineObjectIsMovedToAnotherShard = "object is moved to another shard" // Debug in ../node/pkg/local_object_storage/engine/evacuate.go
+	MetabaseMissingMatcher = "missing matcher" // Debug in ../node/pkg/local_object_storage/metabase/select.go
+	MetabaseErrorInFKBTSelection = "error in FKBT selection" // Debug in ../node/pkg/local_object_storage/metabase/select.go
+	MetabaseCantDecodeListBucketLeaf = "can't decode list bucket leaf" // Debug in ../node/pkg/local_object_storage/metabase/select.go
+	MetabaseUnknownOperation = "unknown operation" // Debug in ../node/pkg/local_object_storage/metabase/select.go
+	MetabaseCantIterateOverTheBucket = "can't iterate over the bucket" // Debug in ../node/pkg/local_object_storage/metabase/select.go
+	MetabaseCouldNotIterateOverTheBuckets = "could not iterate over the buckets" // Debug in ../node/pkg/local_object_storage/metabase/select.go
+	MetabaseCreatedDirectoryForMetabase = "created directory for Metabase" // Debug in ../node/pkg/local_object_storage/metabase/control.go
+	MetabaseOpenedBoltDBInstanceForMetabase = "opened boltDB instance for Metabase" // Debug in ../node/pkg/local_object_storage/metabase/control.go
+	MetabaseCheckingMetabaseVersion = "checking metabase version" // Debug in ../node/pkg/local_object_storage/metabase/control.go
+	ShardCantSelectAllObjects = "can't select all objects" // Debug in ../node/pkg/local_object_storage/shard/list.go
+	ShardSettingShardMode = "setting shard mode" // Info in ../node/pkg/local_object_storage/shard/mode.go
+	ShardShardModeSetSuccessfully = "shard mode set successfully" // Info in ../node/pkg/local_object_storage/shard/mode.go
+	ShardCouldNotMarkObjectForShardRelocationInMetabase = "could not mark object for shard relocation in metabase" // Debug in ../node/pkg/local_object_storage/shard/move.go
+	ShardCantDeleteObjectFromWriteCache = "can't delete object from write cache" // Warn in ../node/pkg/local_object_storage/shard/delete.go
+	ShardCantGetStorageIDFromMetabase = "can't get storage ID from metabase" // Debug in ../node/pkg/local_object_storage/shard/delete.go
+	ShardCantRemoveObjectFromBlobStor = "can't remove object from blobStor" // Debug in ../node/pkg/local_object_storage/shard/delete.go
+	ShardFetchingObjectWithoutMeta = "fetching object without meta" // Warn in ../node/pkg/local_object_storage/shard/get.go
+	ShardObjectIsMissingInWritecache = "object is missing in write-cache" // Debug in ../node/pkg/local_object_storage/shard/get.go
+	ShardFailedToFetchObjectFromWritecache = "failed to fetch object from write-cache" // Error in ../node/pkg/local_object_storage/shard/get.go
+	ShardCantPutObjectToTheWritecacheTryingBlobstor = "can't put object to the write-cache, trying blobstor" // Debug in ../node/pkg/local_object_storage/shard/put.go
+	ShardMetaObjectCounterRead = "meta: object counter read" // Warn in ../node/pkg/local_object_storage/shard/shard.go
+	ShardMetaCantReadContainerList = "meta: can't read container list" // Warn in ../node/pkg/local_object_storage/shard/shard.go
+	ShardMetaCantReadContainerSize = "meta: can't read container size" // Warn in ../node/pkg/local_object_storage/shard/shard.go
+	ShardMetabaseFailureSwitchingMode = "metabase failure, switching mode" // Error in ../node/pkg/local_object_storage/shard/control.go
+	ShardCantMoveShardToReadonlySwitchMode = "can't move shard to readonly, switch mode" // Error in ../node/pkg/local_object_storage/shard/control.go
+	ShardCouldNotUnmarshalObject = "could not unmarshal object" // Warn in ../node/pkg/local_object_storage/shard/control.go
+	ShardCouldNotCloseShardComponent = "could not close shard component" // Error in ../node/pkg/local_object_storage/shard/control.go
+	ShardCantOpenMetabaseMoveToADegradedMode = "can't open metabase, move to a degraded mode" // Error in ../node/pkg/local_object_storage/shard/control.go
+	ShardCantInitializeMetabaseMoveToADegradedreadonlyMode = "can't initialize metabase, move to a degraded-read-only mode" // Error in ../node/pkg/local_object_storage/shard/control.go
+	ShardTryingToRestoreReadwriteMode = "trying to restore read-write mode" // Info in ../node/pkg/local_object_storage/shard/control.go
+	ShardStopEventListenerByClosedChannel = "stop event listener by closed channel" // Warn in ../node/pkg/local_object_storage/shard/gc.go
+	ShardCouldNotSubmitGCJobToWorkerPool = "could not submit GC job to worker pool" // Warn in ../node/pkg/local_object_storage/shard/gc.go
+	ShardGCIsStopped = "GC is stopped" // Debug in ../node/pkg/local_object_storage/shard/gc.go
+	ShardWaitingForGCWorkersToStop = "waiting for GC workers to stop..." // Info in ../node/pkg/local_object_storage/shard/gc.go
+	ShardIteratorOverMetabaseGraveyardFailed = "iterator over metabase graveyard failed" // Warn in ../node/pkg/local_object_storage/shard/gc.go
+	ShardCouldNotDeleteTheObjects = "could not delete the objects" // Warn in ../node/pkg/local_object_storage/shard/gc.go
+	ShardIteratorOverExpiredObjectsFailed = "iterator over expired objects failed" // Warn in ../node/pkg/local_object_storage/shard/gc.go
+	ShardCouldNotInhumeTheObjects = "could not inhume the objects" // Warn in ../node/pkg/local_object_storage/shard/gc.go
+	ShardStartedExpiredTombstonesHandling = "started expired tombstones handling" // Debug in ../node/pkg/local_object_storage/shard/gc.go
+	ShardIteratingTombstones = "iterating tombstones" // Debug in ../node/pkg/local_object_storage/shard/gc.go
+	ShardShardIsInADegradedModeSkipCollectingExpiredTombstones = "shard is in a degraded mode, skip collecting expired tombstones" // Debug in ../node/pkg/local_object_storage/shard/gc.go
+	ShardIteratorOverGraveyardFailed = "iterator over graveyard failed" // Error in ../node/pkg/local_object_storage/shard/gc.go
+	ShardHandlingExpiredTombstonesBatch = "handling expired tombstones batch" // Debug in ../node/pkg/local_object_storage/shard/gc.go
+	ShardFinishedExpiredTombstonesHandling = "finished expired tombstones handling" // Debug in ../node/pkg/local_object_storage/shard/gc.go
+	ShardIteratorOverExpiredLocksFailed = "iterator over expired locks failed" // Warn in ../node/pkg/local_object_storage/shard/gc.go
+	ShardCouldNotMarkTombstonesAsGarbage = "could not mark tombstones as garbage" // Warn in ../node/pkg/local_object_storage/shard/gc.go
+	ShardCouldNotDropExpiredGraveRecords = "could not drop expired grave records" // Warn in ../node/pkg/local_object_storage/shard/gc.go
+	ShardFailureToUnlockObjects = "failure to unlock objects" // Warn in ../node/pkg/local_object_storage/shard/gc.go
+	ShardFailureToMarkLockersAsGarbage = "failure to mark lockers as garbage" // Warn in ../node/pkg/local_object_storage/shard/gc.go
+	ShardFailureToGetExpiredUnlockedObjects = "failure to get expired unlocked objects" // Warn in ../node/pkg/local_object_storage/shard/gc.go
+	ShardCouldNotMarkObjectToDeleteInMetabase = "could not mark object to delete in metabase" // Debug in ../node/pkg/local_object_storage/shard/inhume.go
+	WritecacheTriedToFlushItemsFromWritecache = "tried to flush items from write-cache" // Debug in ../node/pkg/local_object_storage/writecache/flush.go
+	WritecacheWaitingForChannelsToFlush = "waiting for channels to flush" // Info in ../node/pkg/local_object_storage/writecache/mode.go
+	WritecacheFillingFlushMarksForObjectsInFSTree = "filling flush marks for objects in FSTree" // Info in ../node/pkg/local_object_storage/writecache/init.go
+	WritecacheFinishedUpdatingFSTreeFlushMarks = "finished updating FSTree flush marks" // Info in ../node/pkg/local_object_storage/writecache/init.go
+	WritecacheFillingFlushMarksForObjectsInDatabase = "filling flush marks for objects in database" // Info in ../node/pkg/local_object_storage/writecache/init.go
+	WritecacheFinishedUpdatingFlushMarks = "finished updating flush marks" // Info in ../node/pkg/local_object_storage/writecache/init.go
+	WritecacheCantRemoveObjectsFromTheDatabase = "can't remove objects from the database" // Error in ../node/pkg/local_object_storage/writecache/storage.go
+	WritecacheCantParseAddress = "can't parse address" // Error in ../node/pkg/local_object_storage/writecache/storage.go
+	WritecacheCantRemoveObjectFromWritecache = "can't remove object from write-cache" // Error in ../node/pkg/local_object_storage/writecache/storage.go
+	BlobovniczatreeCouldNotGetObjectFromLevel = "could not get object from level" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
+	BlobovniczatreeCouldNotReadPayloadRangeFromOpenedBlobovnicza = "could not read payload range from opened blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
+	BlobovniczatreeCouldNotReadPayloadRangeFromActiveBlobovnicza = "could not read payload range from active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
+	BlobovniczatreeCouldNotCloseBlobovnicza = "could not close Blobovnicza" // Error in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
+	BlobovniczatreeBlobovniczaSuccessfullyClosedOnEvict = "blobovnicza successfully closed on evict" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
+	BlobovniczatreeUpdatingActiveBlobovnicza = "updating active blobovnicza..." // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
+	BlobovniczatreeActiveBlobovniczaSuccessfullyUpdated = "active blobovnicza successfully updated" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
+	BlobovniczatreeBlobovniczaSuccessfullyActivated = "blobovnicza successfully activated" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
+	BlobovniczatreeCouldNotRemoveObjectFromLevel = "could not remove object from level" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
+	BlobovniczatreeCouldNotRemoveObjectFromOpenedBlobovnicza = "could not remove object from opened blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
+	BlobovniczatreeCouldNotRemoveObjectFromActiveBlobovnicza = "could not remove object from active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
+	BlobovniczatreeCouldNotGetActiveBlobovnicza = "could not get active blobovnicza" // reportError in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
+	BlobovniczatreeBlobovniczaOverflowed = "blobovnicza overflowed" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
+	BlobovniczatreeCouldNotUpdateActiveBlobovnicza = "could not update active blobovnicza" // reportError in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
+	BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza = "could not put object to active blobovnicza" // reportError in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
+	BlobovniczatreeCouldNotReadObjectFromOpenedBlobovnicza = "could not read object from opened blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get.go
+	BlobovniczatreeCouldNotGetObjectFromActiveBlobovnicza = "could not get object from active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get.go
+	BlobovniczatreeInitializingBlobovniczas = "initializing Blobovnicza's" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
+	BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization = "read-only mode, skip blobovniczas initialization..." // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
+	BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing = "blobovnicza successfully initialized, closing..." // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
+	BlobovniczatreeCouldNotCloseActiveBlobovnicza = "could not close active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
+	AlphabetTick = "tick" // Info in ../node/pkg/innerring/processors/alphabet/handlers.go
+	AlphabetAlphabetProcessorWorkerPoolDrained = "alphabet processor worker pool drained" // Warn in ../node/pkg/innerring/processors/alphabet/handlers.go
+	AlphabetNonAlphabetModeIgnoreGasEmissionEvent = "non alphabet mode, ignore gas emission event" // Info in ../node/pkg/innerring/processors/alphabet/process_emit.go
+	AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent = "node is out of alphabet range, ignore gas emission event" // Debug in ../node/pkg/innerring/processors/alphabet/process_emit.go
+	AlphabetCantInvokeAlphabetEmitMethod = "can't invoke alphabet emit method" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
+	AlphabetStorageNodeEmissionIsOff = "storage node emission is off" // Info in ../node/pkg/innerring/processors/alphabet/process_emit.go
+	AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes = "can't get netmap snapshot to emit gas to storage nodes" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
+	AlphabetGasEmission = "gas emission" // Debug in ../node/pkg/innerring/processors/alphabet/process_emit.go
+	AlphabetCantParseNodePublicKey = "can't parse node public key" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
+	AlphabetCantTransferGas = "can't transfer gas" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
+	AlphabetCantTransferGasToWallet = "can't transfer gas to wallet" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
+	AlphabetAlphabetWorkerPool = "alphabet worker pool" // Debug in ../node/pkg/innerring/processors/alphabet/processor.go
+	AuditContainerListingFinished = "container listing finished" // Debug in ../node/pkg/innerring/processors/audit/scheduler.go
+	AuditNewRoundOfAudit = "new round of audit" // Info in ../node/pkg/innerring/processors/audit/handlers.go
+	AuditPreviousRoundOfAuditPrepareHasntFinishedYet = "previous round of audit prepare hasn't finished yet" // Warn in ../node/pkg/innerring/processors/audit/handlers.go
+	AuditSomeTasksFromPreviousEpochAreSkipped = "some tasks from previous epoch are skipped" // Info in ../node/pkg/innerring/processors/audit/process.go
+	AuditContainerSelectionFailure = "container selection failure" // Error in ../node/pkg/innerring/processors/audit/process.go
+	AuditSelectContainersForAudit = "select containers for audit" // Info in ../node/pkg/innerring/processors/audit/process.go
+	AuditCantFetchNetworkMap = "can't fetch network map" // Error in ../node/pkg/innerring/processors/audit/process.go
+	AuditCantGetContainerInfoIgnore = "can't get container info, ignore" // Error in ../node/pkg/innerring/processors/audit/process.go
+	AuditCantBuildPlacementForContainerIgnore = "can't build placement for container, ignore" // Info in ../node/pkg/innerring/processors/audit/process.go
+	AuditSelectStorageGroupsForAudit = "select storage groups for audit" // Info in ../node/pkg/innerring/processors/audit/process.go
+	AuditFilterExpiredStorageGroupsForAudit = "filter expired storage groups for audit" // Info in ../node/pkg/innerring/processors/audit/process.go
+	AuditParseClientNodeInfo = "parse client node info" // Warn in ../node/pkg/innerring/processors/audit/process.go
+	AuditErrorInStorageGroupSearch = "error in storage group search" // Warn in ../node/pkg/innerring/processors/audit/process.go
+	AuditCouldNotGetStorageGroupObjectForAuditSkipping = "could not get storage group object for audit, skipping" // Error in ../node/pkg/innerring/processors/audit/process.go
+	BalanceNotification = "notification" // Info in ../node/pkg/innerring/processors/balance/handlers.go
+	BalanceBalanceWorkerPoolDrained = "balance worker pool drained" // Warn in ../node/pkg/innerring/processors/balance/handlers.go
+	BalanceNonAlphabetModeIgnoreBalanceLock = "non alphabet mode, ignore balance lock" // Info in ../node/pkg/innerring/processors/balance/process_assets.go
+	BalanceCantSendLockAssetTx = "can't send lock asset tx" // Error in ../node/pkg/innerring/processors/balance/process_assets.go
+	BalanceBalanceWorkerPool = "balance worker pool" // Debug in ../node/pkg/innerring/processors/balance/processor.go
+	ContainerContainerWorkerPool = "container worker pool" // Debug in ../node/pkg/innerring/processors/container/processor.go
+	ContainerNotification = "notification" // Info in ../node/pkg/innerring/processors/container/handlers.go
+	ContainerContainerProcessorWorkerPoolDrained = "container processor worker pool drained" // Warn in ../node/pkg/innerring/processors/container/handlers.go
+	ContainerNonAlphabetModeIgnoreContainerPut = "non alphabet mode, ignore container put" // Info in ../node/pkg/innerring/processors/container/process_container.go
+	ContainerPutContainerCheckFailed = "put container check failed" // Error in ../node/pkg/innerring/processors/container/process_container.go
+	ContainerCouldNotApprovePutContainer = "could not approve put container" // Error in ../node/pkg/innerring/processors/container/process_container.go
+	ContainerNonAlphabetModeIgnoreContainerDelete = "non alphabet mode, ignore container delete" // Info in ../node/pkg/innerring/processors/container/process_container.go
+	ContainerDeleteContainerCheckFailed = "delete container check failed" // Error in ../node/pkg/innerring/processors/container/process_container.go
+	ContainerCouldNotApproveDeleteContainer = "could not approve delete container" // Error in ../node/pkg/innerring/processors/container/process_container.go
+	ContainerNonAlphabetModeIgnoreSetEACL = "non alphabet mode, ignore set EACL" // Info in ../node/pkg/innerring/processors/container/process_eacl.go
+	ContainerSetEACLCheckFailed = "set EACL check failed" // Error in ../node/pkg/innerring/processors/container/process_eacl.go
+	ContainerCouldNotApproveSetEACL = "could not approve set EACL" // Error in ../node/pkg/innerring/processors/container/process_eacl.go
+	FrostFSNonAlphabetModeIgnoreBind = "non alphabet mode, ignore bind" // Info in ../node/pkg/innerring/processors/frostfs/process_bind.go
+	FrostFSInvalidManageKeyEvent = "invalid manage key event" // Error in ../node/pkg/innerring/processors/frostfs/process_bind.go
+	FrostFSCouldNotDecodeScriptHashFromBytes = "could not decode script hash from bytes" // Error in ../node/pkg/innerring/processors/frostfs/process_bind.go
+	FrostFSNonAlphabetModeIgnoreConfig = "non alphabet mode, ignore config" // Info in ../node/pkg/innerring/processors/frostfs/process_config.go
+	FrostFSCantRelaySetConfigEvent = "can't relay set config event" // Error in ../node/pkg/innerring/processors/frostfs/process_config.go
+	FrostFSFrostfsWorkerPool = "frostfs worker pool" // Debug in ../node/pkg/innerring/processors/frostfs/processor.go
+	FrostFSNotification = "notification" // Info in ../node/pkg/innerring/processors/frostfs/handlers.go
+	FrostFSFrostfsProcessorWorkerPoolDrained = "frostfs processor worker pool drained" // Warn in ../node/pkg/innerring/processors/frostfs/handlers.go
+	FrostFSNonAlphabetModeIgnoreDeposit = "non alphabet mode, ignore deposit" // Info in ../node/pkg/innerring/processors/frostfs/process_assets.go
+	FrostFSCantTransferAssetsToBalanceContract = "can't transfer assets to balance contract" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
+	FrostFSDoubleMintEmissionDeclined = "double mint emission declined" // Warn in ../node/pkg/innerring/processors/frostfs/process_assets.go
+	FrostFSCantGetGasBalanceOfTheNode = "can't get gas balance of the node" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
+	FrostFSGasBalanceThresholdHasBeenReached = "gas balance threshold has been reached" // Warn in ../node/pkg/innerring/processors/frostfs/process_assets.go
+	FrostFSCantTransferNativeGasToReceiver = "can't transfer native gas to receiver" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
+	FrostFSNonAlphabetModeIgnoreWithdraw = "non alphabet mode, ignore withdraw" // Info in ../node/pkg/innerring/processors/frostfs/process_assets.go
+	FrostFSCantCreateLockAccount = "can't create lock account" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
+	FrostFSCantLockAssetsForWithdraw = "can't lock assets for withdraw" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
+	FrostFSNonAlphabetModeIgnoreCheque = "non alphabet mode, ignore cheque" // Info in ../node/pkg/innerring/processors/frostfs/process_assets.go
+	FrostFSCantTransferAssetsToFedContract = "can't transfer assets to fed contract" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
+	GovernanceNewEvent = "new event" // Info in ../node/pkg/innerring/processors/governance/handlers.go
+	GovernanceGovernanceWorkerPoolDrained = "governance worker pool drained" // Warn in ../node/pkg/innerring/processors/governance/handlers.go
+	GovernanceNonAlphabetModeIgnoreAlphabetSync = "non alphabet mode, ignore alphabet sync" // Info in ../node/pkg/innerring/processors/governance/process_update.go
+	GovernanceCantFetchAlphabetListFromMainNet = "can't fetch alphabet list from main net" // Error in ../node/pkg/innerring/processors/governance/process_update.go
+	GovernanceCantFetchAlphabetListFromSideChain = "can't fetch alphabet list from side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go
+	GovernanceCantMergeAlphabetListsFromMainNetAndSideChain = "can't merge alphabet lists from main net and side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go
+	GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged = "no governance update, alphabet list has not been changed" // Info in ../node/pkg/innerring/processors/governance/process_update.go
+	GovernanceAlphabetListHasBeenChangedStartingUpdate = "alphabet list has been changed, starting update" // Info in ../node/pkg/innerring/processors/governance/process_update.go
+	GovernanceCantVoteForSideChainCommittee = "can't vote for side chain committee" // Error in ../node/pkg/innerring/processors/governance/process_update.go
+	GovernanceFinishedAlphabetListUpdate = "finished alphabet list update" // Info in ../node/pkg/innerring/processors/governance/process_update.go
+	GovernanceCantFetchInnerRingListFromSideChain = "can't fetch inner ring list from side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go
+	GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys = "can't create new inner ring list with new alphabet keys" // Error in ../node/pkg/innerring/processors/governance/process_update.go
+	GovernanceUpdateOfTheInnerRingList = "update of the inner ring list" // Info in ../node/pkg/innerring/processors/governance/process_update.go
+	GovernanceCantUpdateInnerRingListWithNewAlphabetKeys = "can't update inner ring list with new alphabet keys" // Error in ../node/pkg/innerring/processors/governance/process_update.go
+	GovernanceCantUpdateListOfNotaryNodesInSideChain = "can't update list of notary nodes in side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go
+	GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract = "can't update list of alphabet nodes in frostfs contract" // Error in ../node/pkg/innerring/processors/governance/process_update.go
+	NetmapNetmapWorkerPool = "netmap worker pool" // Debug in ../node/pkg/innerring/processors/netmap/processor.go
+	NetmapTick = "tick" // Info in ../node/pkg/innerring/processors/netmap/handlers.go
+	NetmapNetmapWorkerPoolDrained = "netmap worker pool drained" // Warn in ../node/pkg/innerring/processors/netmap/handlers.go
+	NetmapNotification = "notification" // Info in ../node/pkg/innerring/processors/netmap/handlers.go
+	NetmapNetmapCleanUpRoutineIsDisabled518 = "netmap clean up routine is disabled" // Debug in ../node/pkg/innerring/processors/netmap/handlers.go
+	NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick = "non alphabet mode, ignore new netmap cleanup tick" // Info in ../node/pkg/innerring/processors/netmap/process_cleanup.go
+	NetmapCantDecodePublicKeyOfNetmapNode = "can't decode public key of netmap node" // Warn in ../node/pkg/innerring/processors/netmap/process_cleanup.go
+	NetmapVoteToRemoveNodeFromNetmap = "vote to remove node from netmap" // Info in ../node/pkg/innerring/processors/netmap/process_cleanup.go
+	NetmapCantInvokeNetmapUpdateState = "can't invoke netmap.UpdateState" // Error in ../node/pkg/innerring/processors/netmap/process_cleanup.go
+	NetmapCantIterateOnNetmapCleanerCache = "can't iterate on netmap cleaner cache" // Warn in ../node/pkg/innerring/processors/netmap/process_cleanup.go
+	NetmapCantGetEpochDuration = "can't get epoch duration" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
+	NetmapCantGetTransactionHeight = "can't get transaction height" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
+	NetmapCantResetEpochTimer = "can't reset epoch timer" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
+	NetmapCantGetNetmapSnapshotToPerformCleanup = "can't get netmap snapshot to perform cleanup" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
+	NetmapCantStartContainerSizeEstimation = "can't start container size estimation" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
+	NetmapNonAlphabetModeIgnoreNewEpochTick = "non alphabet mode, ignore new epoch tick" // Info in ../node/pkg/innerring/processors/netmap/process_epoch.go
+	NetmapNextEpoch = "next epoch" // Debug in ../node/pkg/innerring/processors/netmap/process_epoch.go
+	NetmapCantInvokeNetmapNewEpoch = "can't invoke netmap.NewEpoch" // Error in ../node/pkg/innerring/processors/netmap/process_epoch.go
+	NetmapNonAlphabetModeIgnoreNewPeerNotification = "non alphabet mode, ignore new peer notification" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
+	NetmapNonhaltNotaryTransaction = "non-halt notary transaction" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
+	NetmapCantParseNetworkMapCandidate = "can't parse network map candidate" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
+	NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate = "could not verify and update information about network map candidate" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
+	NetmapApprovingNetworkMapCandidate = "approving network map candidate" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
+	NetmapCantInvokeNetmapAddPeer = "can't invoke netmap.AddPeer" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
+	NetmapNonAlphabetModeIgnoreUpdatePeerNotification = "non alphabet mode, ignore update peer notification" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
+	NetmapPreventSwitchingNodeToMaintenanceState = "prevent switching node to maintenance state" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
+	NetmapCantInvokeNetmapUpdatePeer = "can't invoke netmap.UpdatePeer" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
+	NetmapNonAlphabetModeIgnoreRemoveNodeFromSubnetNotification = "non alphabet mode, ignore remove node from subnet notification" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
+	NetmapCouldNotGetNetworkMapCandidates = "could not get network map candidates" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
+	NetmapCouldNotUnmarshalSubnetId = "could not unmarshal subnet id" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
+	NetmapGotZeroSubnetInRemoveNodeNotification = "got zero subnet in remove node notification" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
+	NetmapCouldNotIterateOverSubnetworksOfTheNode = "could not iterate over subnetworks of the node" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
+	NetmapCouldNotInvokeNetmapUpdateState = "could not invoke netmap.UpdateState" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
+	NetmapCouldNotInvokeNetmapAddPeer = "could not invoke netmap.AddPeer" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
+	ReputationNotification = "notification" // Info in ../node/pkg/innerring/processors/reputation/handlers.go
+	ReputationReputationWorkerPoolDrained = "reputation worker pool drained" // Warn in ../node/pkg/innerring/processors/reputation/handlers.go
+	ReputationNonAlphabetModeIgnoreReputationPutNotification = "non alphabet mode, ignore reputation put notification" // Info in ../node/pkg/innerring/processors/reputation/process_put.go
+	ReputationIgnoreReputationValue = "ignore reputation value" // Info in ../node/pkg/innerring/processors/reputation/process_put.go
+	ReputationCantSendApprovalTxForReputationValue = "can't send approval tx for reputation value" // Warn in ../node/pkg/innerring/processors/reputation/process_put.go
+	ReputationReputationWorkerPool = "reputation worker pool" // Debug in ../node/pkg/innerring/processors/reputation/processor.go
+	SettlementNonAlphabetModeIgnoreAuditPayments = "non alphabet mode, ignore audit payments" // Info in ../node/pkg/innerring/processors/settlement/calls.go
+	SettlementNewAuditSettlementEvent = "new audit settlement event" // Info in ../node/pkg/innerring/processors/settlement/calls.go
+	SettlementIgnoreGenesisEpoch = "ignore genesis epoch" // Debug in ../node/pkg/innerring/processors/settlement/calls.go
+	SettlementCouldNotAddHandlerOfAuditEventToQueue = "could not add handler of AuditEvent to queue" // Warn in ../node/pkg/innerring/processors/settlement/calls.go
+	SettlementAuditEventHandlingSuccessfullyScheduled = "AuditEvent handling successfully scheduled" // Debug in ../node/pkg/innerring/processors/settlement/calls.go
+	SettlementNonAlphabetModeIgnoreIncomeCollectionEvent = "non alphabet mode, ignore income collection event" // Info in ../node/pkg/innerring/processors/settlement/calls.go
+	SettlementStartBasicIncomeCollection = "start basic income collection" // Info in ../node/pkg/innerring/processors/settlement/calls.go
+	SettlementIncomeContextAlreadyExists = "income context already exists" // Error in ../node/pkg/innerring/processors/settlement/calls.go
+	SettlementCantCreateIncomeContext = "can't create income context" // Error in ../node/pkg/innerring/processors/settlement/calls.go
+	SettlementCouldNotAddHandlerOfBasicIncomeCollectionToQueue = "could not add handler of basic income collection to queue" // Warn in ../node/pkg/innerring/processors/settlement/calls.go
+	SettlementNonAlphabetModeIgnoreIncomeDistributionEvent = "non alphabet mode, ignore income distribution event" // Info in ../node/pkg/innerring/processors/settlement/calls.go
+	SettlementStartBasicIncomeDistribution = "start basic income distribution" // Info in ../node/pkg/innerring/processors/settlement/calls.go
+	SettlementIncomeContextDistributionDoesNotExists = "income context distribution does not exists" // Warn in ../node/pkg/innerring/processors/settlement/calls.go
+	SettlementCouldNotAddHandlerOfBasicIncomeDistributionToQueue = "could not add handler of basic income distribution to queue" // Warn in ../node/pkg/innerring/processors/settlement/calls.go
+	SettlementProcessAuditSettlements = "process audit settlements" // Info in ../node/pkg/innerring/processors/settlement/handlers.go
+	SettlementAuditProcessingFinished = "audit processing finished" // Info in ../node/pkg/innerring/processors/settlement/handlers.go
+	SettlementWorkerPoolForSettlementProcessorSuccessfullyInitialized = "worker pool for settlement processor successfully initialized" // Debug in ../node/pkg/innerring/processors/settlement/processor.go
+	AuditSettlementsAreIgnoredForZeroEpoch = "settlements are ignored for zero epoch" // Info in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+	AuditCalculateAuditSettlements = "calculate audit settlements" // Info in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+	AuditGettingResultsForThePreviousEpoch = "getting results for the previous epoch" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+	AuditCouldNotCollectAuditResults = "could not collect audit results" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+	AuditNoAuditResultsInPreviousEpoch = "no audit results in previous epoch" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+	AuditCantFetchAuditFeeFromNetworkConfig = "can't fetch audit fee from network config" // Warn in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+	AuditProcessingAuditResults = "processing audit results" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+	AuditProcessingTransfers = "processing transfers" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+	AuditReadingInformationAboutTheContainer = "reading information about the container" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go + AuditBuildingPlacement = "building placement" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go + AuditCollectingPassedNodes = "collecting passed nodes" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go + AuditCalculatingSumOfTheSizesOfAllStorageGroups = "calculating sum of the sizes of all storage groups" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go + AuditFillingTransferTable = "filling transfer table" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go + AuditMissingContainerInAuditResult = "missing container in audit result" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go + AuditCouldNotGetContainerInfo = "could not get container info" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go + AuditCouldNotGetContainerNodes = "could not get container nodes" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go + AuditEmptyListOfContainerNodes = "empty list of container nodes" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go + AuditNoneOfTheContainerNodesPassedTheAudit = "none of the container nodes passed the audit" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go + AuditCouldNotGetSGInfo = "could not get storage group info" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go + AuditZeroSumSGSize = "zero sum storage group size" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go + AuditCouldNotResolvePublicKeyOfTheStorageNode = "could not resolve public key of the storage node" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go + AuditCalculatingStorageNodeSalaryForAudit = "calculating storage node salary for audit (GASe-12)" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go + AuditCouldNotParsePublicKeyOfTheInnerRingNode = "could not parse public key of the inner ring node" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go + BasicCantGetBasicIncomeRate = "can't get basic income rate" // Error in ../node/pkg/innerring/processors/settlement/basic/collect.go + BasicCantFetchContainerSizeEstimations = "can't fetch container size estimations" // Error in ../node/pkg/innerring/processors/settlement/basic/collect.go + BasicCantFetchContainerInfo = "can't fetch container info" // Warn in ../node/pkg/innerring/processors/settlement/basic/collect.go + BasicCantFetchBalanceOfBankingAccount = "can't fetch balance of banking account" // Error in ../node/pkg/innerring/processors/settlement/basic/distribute.go + BasicCantTransformPublicKeyToOwnerID = "can't transform public key to owner id" // Warn in ../node/pkg/innerring/processors/settlement/basic/distribute.go + FrostFSIRInternalError = "internal error" // Info in ../node/cmd/frostfs-ir/main.go + FrostFSIRCouldNotShutdownHTTPServer = "could not shutdown HTTP server" // Debug in ../node/cmd/frostfs-ir/main.go + FrostFSIRApplicationStopped = "application stopped" // Info in ../node/cmd/frostfs-ir/main.go + FrostFSNodeCouldNotReadCertificateFromFile = "could not read certificate from file" // Error in ../node/cmd/frostfs-node/grpc.go + FrostFSNodeCantListenGRPCEndpoint = "can't listen gRPC endpoint" // Error in ../node/cmd/frostfs-node/grpc.go + FrostFSNodeStopListeningGRPCEndpoint = "stop listening gRPC endpoint" // Info in 
../node/cmd/frostfs-node/grpc.go + FrostFSNodeStartListeningGRPCEndpoint = "start listening gRPC endpoint" // Info in ../node/cmd/frostfs-node/grpc.go + FrostFSNodeStoppingGRPCServer = "stopping gRPC server..." // Info in ../node/cmd/frostfs-node/grpc.go + FrostFSNodeGRPCCannotShutdownGracefullyForcingStop = "gRPC cannot shutdown gracefully, forcing stop" // Info in ../node/cmd/frostfs-node/grpc.go + FrostFSNodeGRPCServerStoppedSuccessfully = "gRPC server stopped successfully" // Info in ../node/cmd/frostfs-node/grpc.go + FrostFSNodeWaitingForAllProcessesToStop = "waiting for all processes to stop" // Debug in ../node/cmd/frostfs-node/main.go + FrostFSNodeStartReportingReputationOnNewEpochEvent = "start reporting reputation on new epoch event" // Debug in ../node/cmd/frostfs-node/reputation.go + FrostFSNodeCouldNotFetchEpochDuration = "could not fetch epoch duration" // Debug in ../node/cmd/frostfs-node/reputation.go + FrostFSNodeCouldNotFetchIterationNumber = "could not fetch iteration number" // Debug in ../node/cmd/frostfs-node/reputation.go + FrostFSNodeCouldNotCreateFixedEpochTimer = "could not create fixed epoch timer" // Debug in ../node/cmd/frostfs-node/reputation.go + FrostFSNodeStartedLocalNodesMaintenance = "started local node's maintenance" // Info in ../node/cmd/frostfs-node/config.go + FrostFSNodeStoppedLocalNodesMaintenance = "stopped local node's maintenance" // Info in ../node/cmd/frostfs-node/config.go + FrostFSNodeFailedToAttachShardToEngine = "failed to attach shard to engine" // Error in ../node/cmd/frostfs-node/config.go + FrostFSNodeShardAttachedToEngine = "shard attached to engine" // Info in ../node/cmd/frostfs-node/config.go + FrostFSNodeClosingComponentsOfTheStorageEngine = "closing components of the storage engine..." // Info in ../node/cmd/frostfs-node/config.go + FrostFSNodeStorageEngineClosingFailure = "storage engine closing failure" // Info in ../node/cmd/frostfs-node/config.go + FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully = "all components of the storage engine closed successfully" // Info in ../node/cmd/frostfs-node/config.go + FrostFSNodeBootstrappingWithTheMaintenanceState = "bootstrapping with the maintenance state" // Info in ../node/cmd/frostfs-node/config.go + FrostFSNodeBootstrappingWithOnlineState = "bootstrapping with online state" // Info in ../node/cmd/frostfs-node/config.go + FrostFSNodeTerminationSignalHasBeenReceivedStopping = "termination signal has been received, stopping..." // Info in ../node/cmd/frostfs-node/config.go + FrostFSNodeTerminationSignalProcessingIsComplete = "termination signal processing is complete" // Info in ../node/cmd/frostfs-node/config.go + FrostFSNodeInternalApplicationError = "internal application error" // Warn in ../node/cmd/frostfs-node/config.go + FrostFSNodeInternalErrorProcessingIsComplete = "internal error processing is complete" // Info in ../node/cmd/frostfs-node/config.go + FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration = "SIGHUP has been received, rereading configuration..." 
// Info in ../node/cmd/frostfs-node/config.go + FrostFSNodeConfigurationReading = "configuration reading" // Error in ../node/cmd/frostfs-node/config.go + FrostFSNodeLoggerConfigurationPreparation = "logger configuration preparation" // Error in ../node/cmd/frostfs-node/config.go + FrostFSNodeTracingConfigationUpdated = "tracing configation updated" // Info in ../node/cmd/frostfs-node/config.go + FrostFSNodeStorageEngineConfigurationUpdate = "storage engine configuration update" // Error in ../node/cmd/frostfs-node/config.go + FrostFSNodeUpdatedConfigurationApplying = "updated configuration applying" // Error in ../node/cmd/frostfs-node/config.go + FrostFSNodeConfigurationHasBeenReloadedSuccessfully = "configuration has been reloaded successfully" // Info in ../node/cmd/frostfs-node/config.go + FrostFSNodeReadNewlyCreatedContainerAfterTheNotification = "read newly created container after the notification" // Error in ../node/cmd/frostfs-node/container.go + FrostFSNodeContainerCreationEventsReceipt = "container creation event's receipt" // Debug in ../node/cmd/frostfs-node/container.go + FrostFSNodeContainerRemovalEventsReceipt = "container removal event's receipt" // Debug in ../node/cmd/frostfs-node/container.go + FrostFSNodeSaveUsedSpaceAnnouncementInContract = "save used space announcement in contract" // Debug in ../node/cmd/frostfs-node/container.go + FrostFSNodeFailedToCalculateContainerSizeInStorageEngine = "failed to calculate container size in storage engine" // Debug in ../node/cmd/frostfs-node/container.go + FrostFSNodeContainerSizeInStorageEngineCalculatedSuccessfully = "container size in storage engine calculated successfully" // Debug in ../node/cmd/frostfs-node/container.go + FrostFSNodeNotificatorCouldNotListContainers = "notificator: could not list containers" // Error in ../node/cmd/frostfs-node/notificator.go + FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer = "notificator: could not select objects from container" // Error in ../node/cmd/frostfs-node/notificator.go + FrostFSNodeNotificatorCouldNotProcessObject = "notificator: could not process object" // Error in ../node/cmd/frostfs-node/notificator.go + FrostFSNodeNotificatorFinishedProcessingObjectNotifications = "notificator: finished processing object notifications" // Debug in ../node/cmd/frostfs-node/notificator.go + FrostFSNodeCouldNotWriteObjectNotification = "could not write object notification" // Warn in ../node/cmd/frostfs-node/notificator.go + FrostFSNodeCouldNotGetMaxObjectSizeValue = "could not get max object size value" // Error in ../node/cmd/frostfs-node/object.go + FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage = "could not inhume mark redundant copy as garbage" // Warn in ../node/cmd/frostfs-node/object.go + FrostFSNodeWritingLocalReputationValues = "writing local reputation values" // Debug in ../node/cmd/frostfs-node/object.go + FrostFSNodeCouldNotGetLatestNetworkMapToOverloadTheClient = "could not get latest network map to overload the client" // Warn in ../node/cmd/frostfs-node/object.go + FrostFSNodeFailedInitTracing = "failed init tracing" // Error in ../node/cmd/frostfs-node/tracing.go + FrostFSNodeFailedShutdownTracing = "failed shutdown tracing" // Error in ../node/cmd/frostfs-node/tracing.go + FrostFSNodeFailedToCreateNeoRPCClient = "failed to create neo RPC client" // Info in ../node/cmd/frostfs-node/morph.go + FrostFSNodeClosingMorphComponents = "closing morph components..." 
// Info in ../node/cmd/frostfs-node/morph.go + FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal = "failed to set group signer scope, continue with Global" // Info in ../node/cmd/frostfs-node/morph.go + FrostFSNodeNotarySupport = "notary support" // Info in ../node/cmd/frostfs-node/morph.go + FrostFSNodeMorphcacheTTLFetchedFromNetwork = "morph.cache_ttl fetched from network" // Debug in ../node/cmd/frostfs-node/morph.go + FrostFSNodeNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made" // Info in ../node/cmd/frostfs-node/morph.go + FrostFSNodeCantGetLastProcessedSideChainBlockNumber = "can't get last processed side chain block number" // Warn in ../node/cmd/frostfs-node/morph.go + FrostFSNodeNewEpochEventFromSidechain = "new epoch event from sidechain" // Info in ../node/cmd/frostfs-node/morph.go + FrostFSNodeNewBlock = "new block" // Debug in ../node/cmd/frostfs-node/morph.go + FrostFSNodeCantUpdatePersistentState = "can't update persistent state" // Warn in ../node/cmd/frostfs-node/morph.go + FrostFSNodeCantSendRebootstrapTx = "can't send re-bootstrap tx" // Warn in ../node/cmd/frostfs-node/netmap.go + FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch = "could not update node state on new epoch" // Error in ../node/cmd/frostfs-node/netmap.go + FrostFSNodeCouldNotMakeNotaryDeposit = "could not make notary deposit" // Error in ../node/cmd/frostfs-node/netmap.go + FrostFSNodeInitialNetworkState = "initial network state" // Info in ../node/cmd/frostfs-node/netmap.go + FrostFSNodeTreeServiceIsNotEnabledSkipInitialization = "tree service is not enabled, skip initialization" // Info in ../node/cmd/frostfs-node/tree.go + FrostFSNodeCouldNotSynchronizeTreeService = "could not synchronize Tree Service" // Error in ../node/cmd/frostfs-node/tree.go + FrostFSNodeRemovingAllTreesForContainer = "removing all trees for container" // Debug in ../node/cmd/frostfs-node/tree.go + FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved = "container removal event received, but trees weren't removed" // Error in ../node/cmd/frostfs-node/tree.go + FrostFSNodeCantListenGRPCEndpointControl = "can't listen gRPC endpoint (control)" // Error in ../node/cmd/frostfs-node/control.go + CommonApplicationStarted = "application started" // Info in ../node/cmd/frostfs-ir/main.go + CommonInitializingRemoteWriterProvider = "initializing remote writer provider" // Debug in ../node/cmd/frostfs-node/reputation/common/remote.go + CommonRouteHasReachedDeadendProvider = "route has reached dead-end provider" // Debug in ../node/cmd/frostfs-node/reputation/common/remote.go + CommonInitializingNoopWriterProvider = "initializing no-op writer provider" // Debug in ../node/cmd/frostfs-node/reputation/common/remote.go + IntermediateWritingReceivedConsumersTrusts = "writing received consumer's trusts" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/consumers.go + IntermediateStartWritingGlobalTrustsToContract = "start writing global trusts to contract" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/contract.go + IntermediateFailedToSignGlobalTrust = "failed to sign global trust" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/contract.go + IntermediateFailedToWriteGlobalTrustToContract = "failed to write global trust to contract" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/contract.go + IntermediateSentGlobalTrustToContract = "sent global trust to contract" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/contract.go + 
IntermediateWritingReceivedDaughtersTrusts = "writing received daughter's trusts" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/daughters.go + IntermediateAnnouncingTrust = "announcing trust" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/remote.go + LocalAnnouncingTrusts = "announcing trusts" // Debug in ../node/cmd/frostfs-node/reputation/local/remote.go + LocalInitializingIteratorOverTrusts = "initializing iterator over trusts" // Debug in ../node/cmd/frostfs-node/reputation/local/storage.go +) diff --git a/pkg/innerring/blocktimer.go b/pkg/innerring/blocktimer.go index eb74e44d4..94e262099 100644 --- a/pkg/innerring/blocktimer.go +++ b/pkg/innerring/blocktimer.go @@ -3,6 +3,7 @@ package innerring import ( "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement" timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers" @@ -98,7 +99,7 @@ func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer { args.stopEstimationDDiv, func() { if !args.alphabetState.IsAlphabet() { - args.l.Debug("non-alphabet mode, do not stop container estimations") + args.l.Debug(logs.InnerringNonalphabetModeDoNotStopContainerEstimations) return } @@ -112,7 +113,7 @@ func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer { err := args.cnrWrapper.StopEstimation(prm) if err != nil { - args.l.Warn("can't stop epoch estimation", + args.l.Warn(logs.InnerringCantStopEpochEstimation, zap.Uint64("epoch", epochN), zap.String("error", err.Error())) } diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go index 8db6328a2..2f5e89e39 100644 --- a/pkg/innerring/initialization.go +++ b/pkg/innerring/initialization.go @@ -6,6 +6,7 @@ import ( "fmt" "net" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/audit" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/balance" @@ -129,7 +130,7 @@ func (s *Server) initMainnet(ctx context.Context, cfg *viper.Viper, morphChain * fromMainChainBlock, err := s.persistate.UInt32(persistateMainChainLastBlockKey) if err != nil { fromMainChainBlock = 0 - s.log.Warn("can't get last processed main chain block number", zap.String("error", err.Error())) + s.log.Warn(logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.String("error", err.Error())) } mainnetChain.from = fromMainChainBlock @@ -177,7 +178,7 @@ func (s *Server) initNotaryConfig(cfg *viper.Viper) { !s.withoutMainNet && s.mainnetClient.ProbeNotary(), // if mainnet disabled then notary flag must be disabled too ) - s.log.Info("notary support", + s.log.Info(logs.InnerringNotarySupport, zap.Bool("sidechain_enabled", !s.sideNotaryConfig.disabled), zap.Bool("mainchain_enabled", !s.mainNotaryConfig.disabled), ) @@ -275,7 +276,7 @@ func (s *Server) createAlphaSync(cfg *viper.Viper, frostfsCli *frostfsClient.Cli if s.withoutMainNet || cfg.GetBool("governance.disable") { alphaSync = func(event.Event) { - s.log.Debug("alphabet keys sync is disabled") + s.log.Debug(logs.InnerringAlphabetKeysSyncIsDisabled) } } else { // create governance processor @@ -496,7 +497,7 @@ func (s *Server) initReputationProcessor(cfg *viper.Viper, sidechainFee fixedn.F func (s *Server) initGRPCServer(cfg *viper.Viper) error { controlSvcEndpoint := 
cfg.GetString("control.grpc.endpoint") if controlSvcEndpoint == "" { - s.log.Info("no Control server endpoint specified, service is disabled") + s.log.Info(logs.InnerringNoControlServerEndpointSpecified) return nil } @@ -692,7 +693,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<- fromSideChainBlock, err := s.persistate.UInt32(persistateSideChainLastBlockKey) if err != nil { fromSideChainBlock = 0 - s.log.Warn("can't get last processed side chain block number", zap.String("error", err.Error())) + s.log.Warn(logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error())) } morphChain := &chainParams{ @@ -715,7 +716,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<- return nil, err } if err := s.morphClient.SetGroupSignerScope(); err != nil { - morphChain.log.Info("failed to set group signer scope, continue with Global", zap.Error(err)) + morphChain.log.Info(logs.InnerringFailedToSetGroupSignerScope, zap.Error(err)) } return morphChain, nil diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go index a91d2fd0d..b6c5ae2ac 100644 --- a/pkg/innerring/innerring.go +++ b/pkg/innerring/innerring.go @@ -6,6 +6,7 @@ import ( "fmt" "io" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/config" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap" @@ -168,7 +169,7 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) { err = s.voteForSidechainValidator(prm) if err != nil { // we don't stop inner ring execution on this error - s.log.Warn("can't vote for prepared validators", + s.log.Warn(logs.InnerringCantVoteForPreparedValidators, zap.String("error", err.Error())) } @@ -210,13 +211,13 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) { func (s *Server) registerMorphNewBlockEventHandler() { s.morphListener.RegisterBlockHandler(func(b *block.Block) { - s.log.Debug("new block", + s.log.Debug(logs.InnerringNewBlock, zap.Uint32("index", b.Index), ) err := s.persistate.SetUInt32(persistateSideChainLastBlockKey, b.Index) if err != nil { - s.log.Warn("can't update persistent state", + s.log.Warn(logs.InnerringCantUpdatePersistentState, zap.String("chain", "side"), zap.Uint32("block_index", b.Index)) } @@ -230,7 +231,7 @@ func (s *Server) registerMainnetNewBlockEventHandler() { s.mainnetListener.RegisterBlockHandler(func(b *block.Block) { err := s.persistate.SetUInt32(persistateMainChainLastBlockKey, b.Index) if err != nil { - s.log.Warn("can't update persistent state", + s.log.Warn(logs.InnerringCantUpdatePersistentState, zap.String("chain", "main"), zap.Uint32("block_index", b.Index)) } @@ -302,7 +303,7 @@ func (s *Server) Stop() { for _, c := range s.closers { if err := c(); err != nil { - s.log.Warn("closer error", + s.log.Warn(logs.InnerringCloserError, zap.String("error", err.Error()), ) } @@ -547,7 +548,7 @@ func (s *Server) initConfigFromBlockchain() error { return err } - s.log.Debug("read config from blockchain", + s.log.Debug(logs.InnerringReadConfigFromBlockchain, zap.Bool("active", s.IsActive()), zap.Bool("alphabet", s.IsAlphabet()), zap.Uint64("epoch", epoch), diff --git a/pkg/innerring/notary.go b/pkg/innerring/notary.go index 50353b574..30916cb99 100644 --- a/pkg/innerring/notary.go +++ b/pkg/innerring/notary.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + 
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/util" @@ -52,14 +53,14 @@ func (s *Server) notaryHandler(_ event.Event) { if !s.mainNotaryConfig.disabled { _, err := s.depositMainNotary() if err != nil { - s.log.Error("can't make notary deposit in main chain", zap.Error(err)) + s.log.Error(logs.InnerringCantMakeNotaryDepositInMainChain, zap.Error(err)) } } if !s.sideNotaryConfig.disabled { _, err := s.depositSideNotary() if err != nil { - s.log.Error("can't make notary deposit in side chain", zap.Error(err)) + s.log.Error(logs.InnerringCantMakeNotaryDepositInSideChain, zap.Error(err)) } } } @@ -82,7 +83,7 @@ func (s *Server) initNotary(ctx context.Context, deposit depositor, await awaite // non-error deposit with an empty TX hash means // that the deposit has already been made; no // need to wait it. - s.log.Info("notary deposit has already been made") + s.log.Info(logs.InnerringNotaryDepositHasAlreadyBeenMade) return nil } diff --git a/pkg/innerring/processors/alphabet/handlers.go b/pkg/innerring/processors/alphabet/handlers.go index 9d61aa812..c0668a4f9 100644 --- a/pkg/innerring/processors/alphabet/handlers.go +++ b/pkg/innerring/processors/alphabet/handlers.go @@ -1,6 +1,7 @@ package alphabet import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "go.uber.org/zap" @@ -8,14 +9,14 @@ import ( func (ap *Processor) HandleGasEmission(ev event.Event) { _ = ev.(timers.NewAlphabetEmitTick) - ap.log.Info("tick", zap.String("type", "alphabet gas emit")) + ap.log.Info(logs.AlphabetTick, zap.String("type", "alphabet gas emit")) // send event to the worker pool err := ap.pool.Submit(func() { ap.processEmit() }) if err != nil { // there system can be moved into controlled degradation stage - ap.log.Warn("alphabet processor worker pool drained", + ap.log.Warn(logs.AlphabetAlphabetProcessorWorkerPoolDrained, zap.Int("capacity", ap.pool.Cap())) } } diff --git a/pkg/innerring/processors/alphabet/process_emit.go b/pkg/innerring/processors/alphabet/process_emit.go index 90c484b88..b8d65dbc5 100644 --- a/pkg/innerring/processors/alphabet/process_emit.go +++ b/pkg/innerring/processors/alphabet/process_emit.go @@ -3,6 +3,7 @@ package alphabet import ( "crypto/elliptic" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" @@ -14,14 +15,14 @@ const emitMethod = "emit" func (ap *Processor) processEmit() { index := ap.irList.AlphabetIndex() if index < 0 { - ap.log.Info("non alphabet mode, ignore gas emission event") + ap.log.Info(logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent) return } contract, ok := ap.alphabetContracts.GetByIndex(index) if !ok { - ap.log.Debug("node is out of alphabet range, ignore gas emission event", + ap.log.Debug(logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent, zap.Int("index", index)) return @@ -30,20 +31,20 @@ func (ap *Processor) processEmit() { // there is no signature collecting, so we don't need extra fee err := ap.morphClient.Invoke(contract, 0, emitMethod) if err != nil { - ap.log.Warn("can't invoke alphabet emit method", zap.String("error", err.Error())) + 
ap.log.Warn(logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error())) return } if ap.storageEmission == 0 { - ap.log.Info("storage node emission is off") + ap.log.Info(logs.AlphabetStorageNodeEmissionIsOff) return } networkMap, err := ap.netmapClient.NetMap() if err != nil { - ap.log.Warn("can't get netmap snapshot to emit gas to storage nodes", + ap.log.Warn(logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes, zap.String("error", err.Error())) return @@ -53,7 +54,7 @@ func (ap *Processor) processEmit() { nmLen := len(nmNodes) extraLen := len(ap.parsedWallets) - ap.log.Debug("gas emission", + ap.log.Debug(logs.AlphabetGasEmission, zap.Int("network_map", nmLen), zap.Int("extra_wallets", extraLen)) @@ -74,7 +75,7 @@ func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerN key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256()) if err != nil { - ap.log.Warn("can't parse node public key", + ap.log.Warn(logs.AlphabetCantParseNodePublicKey, zap.String("error", err.Error())) continue @@ -82,7 +83,7 @@ func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerN err = ap.morphClient.TransferGas(key.GetScriptHash(), gasPerNode) if err != nil { - ap.log.Warn("can't transfer gas", + ap.log.Warn(logs.AlphabetCantTransferGas, zap.String("receiver", key.Address()), zap.Int64("amount", int64(gasPerNode)), zap.String("error", err.Error()), @@ -99,7 +100,7 @@ func (ap *Processor) transferGasToExtraNodes(extraLen int, gasPerNode fixedn.Fix for i, addr := range ap.parsedWallets { receiversLog[i] = addr.StringLE() } - ap.log.Warn("can't transfer gas to wallet", + ap.log.Warn(logs.AlphabetCantTransferGasToWallet, zap.Strings("receivers", receiversLog), zap.Int64("amount", int64(gasPerNode)), zap.String("error", err.Error()), diff --git a/pkg/innerring/processors/alphabet/processor.go b/pkg/innerring/processors/alphabet/processor.go index 980158132..79b61f14f 100644 --- a/pkg/innerring/processors/alphabet/processor.go +++ b/pkg/innerring/processors/alphabet/processor.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" @@ -67,7 +68,7 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/alphabet: global state is not set") } - p.Log.Debug("alphabet worker pool", zap.Int("size", p.PoolSize)) + p.Log.Debug(logs.AlphabetAlphabetWorkerPool, zap.Int("size", p.PoolSize)) pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { diff --git a/pkg/innerring/processors/audit/handlers.go b/pkg/innerring/processors/audit/handlers.go index 8b2354bb8..06c656fa2 100644 --- a/pkg/innerring/processors/audit/handlers.go +++ b/pkg/innerring/processors/audit/handlers.go @@ -1,6 +1,7 @@ package audit import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "go.uber.org/zap" ) @@ -10,12 +11,12 @@ func (ap *Processor) handleNewAuditRound(ev event.Event) { epoch := auditEvent.Epoch() - ap.log.Info("new round of audit", zap.Uint64("epoch", epoch)) + ap.log.Info(logs.AuditNewRoundOfAudit, zap.Uint64("epoch", epoch)) // send an event to the worker pool err := ap.pool.Submit(func() { ap.processStartAudit(epoch) }) if err != nil { - ap.log.Warn("previous round of audit prepare hasn't finished yet") + 
ap.log.Warn(logs.AuditPreviousRoundOfAuditPrepareHasntFinishedYet) } } diff --git a/pkg/innerring/processors/audit/process.go b/pkg/innerring/processors/audit/process.go index 656927816..000279f01 100644 --- a/pkg/innerring/processors/audit/process.go +++ b/pkg/innerring/processors/audit/process.go @@ -4,6 +4,7 @@ import ( "context" "crypto/sha256" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/storagegroup" @@ -24,23 +25,23 @@ func (ap *Processor) processStartAudit(epoch uint64) { skipped := ap.taskManager.Reset() if skipped > 0 { - ap.log.Info("some tasks from previous epoch are skipped", + ap.log.Info(logs.AuditSomeTasksFromPreviousEpochAreSkipped, zap.Int("amount", skipped), ) } containers, err := ap.selectContainersToAudit(epoch) if err != nil { - log.Error("container selection failure", zap.String("error", err.Error())) + log.Error(logs.AuditContainerSelectionFailure, zap.String("error", err.Error())) return } - log.Info("select containers for audit", zap.Int("amount", len(containers))) + log.Info(logs.AuditSelectContainersForAudit, zap.Int("amount", len(containers))) nm, err := ap.netmapClient.GetNetMap(0) if err != nil { - ap.log.Error("can't fetch network map", + ap.log.Error(logs.AuditCantFetchNetworkMap, zap.String("error", err.Error())) return @@ -64,7 +65,7 @@ func (ap *Processor) startAuditTasksOnContainers(cancelChannel <-chan struct{}, for i := range containers { cnr, err := cntClient.Get(ap.containerClient, containers[i]) // get container structure if err != nil { - log.Error("can't get container info, ignore", + log.Error(logs.AuditCantGetContainerInfoIgnore, zap.Stringer("cid", containers[i]), zap.String("error", err.Error())) @@ -76,7 +77,7 @@ func (ap *Processor) startAuditTasksOnContainers(cancelChannel <-chan struct{}, // find all container nodes for current epoch nodes, err := nm.ContainerNodes(cnr.Value.PlacementPolicy(), pivot) if err != nil { - log.Info("can't build placement for container, ignore", + log.Info(logs.AuditCantBuildPlacementForContainerIgnore, zap.Stringer("cid", containers[i]), zap.String("error", err.Error())) @@ -92,13 +93,13 @@ func (ap *Processor) startAuditTasksOnContainers(cancelChannel <-chan struct{}, // search storage groups storageGroupsIDs := ap.findStorageGroups(containers[i], n) - log.Info("select storage groups for audit", + log.Info(logs.AuditSelectStorageGroupsForAudit, zap.Stringer("cid", containers[i]), zap.Int("amount", len(storageGroupsIDs))) // filter expired storage groups storageGroups := ap.filterExpiredSG(containers[i], storageGroupsIDs, nodes, *nm) - log.Info("filter expired storage groups for audit", + log.Info(logs.AuditFilterExpiredStorageGroupsForAudit, zap.Stringer("cid", containers[i]), zap.Int("amount", len(storageGroups))) @@ -146,7 +147,7 @@ func (ap *Processor) findStorageGroups(cnr cid.ID, shuffled netmapcore.Nodes) [] err := clientcore.NodeInfoFromRawNetmapElement(&info, netmapcore.Node(shuffled[i])) if err != nil { - log.Warn("parse client node info", zap.String("error", err.Error())) + log.Warn(logs.AuditParseClientNodeInfo, zap.String("error", err.Error())) continue } @@ -162,7 +163,7 @@ func (ap *Processor) findStorageGroups(cnr cid.ID, shuffled netmapcore.Nodes) [] cancel() if err != nil { - log.Warn("error in storage group search", zap.String("error", err.Error())) + 
log.Warn(logs.AuditErrorInStorageGroupSearch, zap.String("error", err.Error())) continue } diff --git a/pkg/innerring/processors/audit/scheduler.go b/pkg/innerring/processors/audit/scheduler.go index e1a521bad..fbc5fa927 100644 --- a/pkg/innerring/processors/audit/scheduler.go +++ b/pkg/innerring/processors/audit/scheduler.go @@ -6,6 +6,7 @@ import ( "sort" "strings" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "go.uber.org/zap" ) @@ -20,7 +21,7 @@ func (ap *Processor) selectContainersToAudit(epoch uint64) ([]cid.ID, error) { // consider getting extra information about container complexity from // audit contract there - ap.log.Debug("container listing finished", + ap.log.Debug(logs.AuditContainerListingFinished, zap.Int("total amount", len(containers)), ) diff --git a/pkg/innerring/processors/balance/handlers.go b/pkg/innerring/processors/balance/handlers.go index 4c5a2ddc6..3360af916 100644 --- a/pkg/innerring/processors/balance/handlers.go +++ b/pkg/innerring/processors/balance/handlers.go @@ -3,6 +3,7 @@ package balance import ( "encoding/hex" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance" "go.uber.org/zap" @@ -10,7 +11,7 @@ import ( func (bp *Processor) handleLock(ev event.Event) { lock := ev.(balanceEvent.Lock) - bp.log.Info("notification", + bp.log.Info(logs.BalanceNotification, zap.String("type", "lock"), zap.String("value", hex.EncodeToString(lock.ID()))) @@ -19,7 +20,7 @@ func (bp *Processor) handleLock(ev event.Event) { err := bp.pool.Submit(func() { bp.processLock(&lock) }) if err != nil { // there system can be moved into controlled degradation stage - bp.log.Warn("balance worker pool drained", + bp.log.Warn(logs.BalanceBalanceWorkerPoolDrained, zap.Int("capacity", bp.pool.Cap())) } } diff --git a/pkg/innerring/processors/balance/process_assets.go b/pkg/innerring/processors/balance/process_assets.go index 754dda34a..3f86a3cb7 100644 --- a/pkg/innerring/processors/balance/process_assets.go +++ b/pkg/innerring/processors/balance/process_assets.go @@ -1,6 +1,7 @@ package balance import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" frostfsContract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs" balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance" "go.uber.org/zap" @@ -10,7 +11,7 @@ import ( // back to the withdraw issuer. 
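Every hunk in this change applies the same substitution: a string literal passed to the zap logger becomes a named constant declared once in internal/logs. A minimal, self-contained sketch of the pattern (the package layout, constant name, and message below are illustrative placeholders, not identifiers from this patch):

package main

import "go.uber.org/zap"

// In the patch itself the constants live in internal/logs/logs.go, grouped by
// subsystem prefix (Settlement..., Audit..., FrostFSNode..., and so on), with a
// trailing comment recording the log level and the file that emits the message.
const exampleNonAlphabetModeIgnoreEvent = "non alphabet mode, ignore event" // Info

func main() {
	log, _ := zap.NewDevelopment()
	defer log.Sync()

	// Before: the literal string was written at every call site.
	// After: each call site references the shared constant, so the message
	// text exists in exactly one place.
	log.Info(exampleNonAlphabetModeIgnoreEvent, zap.String("type", "example"))
}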
func (bp *Processor) processLock(lock *balanceEvent.Lock) { if !bp.alphabetState.IsAlphabet() { - bp.log.Info("non alphabet mode, ignore balance lock") + bp.log.Info(logs.BalanceNonAlphabetModeIgnoreBalanceLock) return } @@ -24,6 +25,6 @@ func (bp *Processor) processLock(lock *balanceEvent.Lock) { err := bp.frostfsClient.Cheque(prm) if err != nil { - bp.log.Error("can't send lock asset tx", zap.Error(err)) + bp.log.Error(logs.BalanceCantSendLockAssetTx, zap.Error(err)) } } diff --git a/pkg/innerring/processors/balance/processor.go b/pkg/innerring/processors/balance/processor.go index 2527b7ec3..370d06f44 100644 --- a/pkg/innerring/processors/balance/processor.go +++ b/pkg/innerring/processors/balance/processor.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance" @@ -60,7 +61,7 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/balance: balance precision converter is not set") } - p.Log.Debug("balance worker pool", zap.Int("size", p.PoolSize)) + p.Log.Debug(logs.BalanceBalanceWorkerPool, zap.Int("size", p.PoolSize)) pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { diff --git a/pkg/innerring/processors/container/handlers.go b/pkg/innerring/processors/container/handlers.go index f9f8b5841..3d1946b4f 100644 --- a/pkg/innerring/processors/container/handlers.go +++ b/pkg/innerring/processors/container/handlers.go @@ -3,6 +3,7 @@ package container import ( "crypto/sha256" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" containerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container" "github.com/mr-tron/base58" @@ -13,7 +14,7 @@ func (cp *Processor) handlePut(ev event.Event) { put := ev.(putEvent) id := sha256.Sum256(put.Container()) - cp.log.Info("notification", + cp.log.Info(logs.ContainerNotification, zap.String("type", "container put"), zap.String("id", base58.Encode(id[:]))) @@ -22,14 +23,14 @@ func (cp *Processor) handlePut(ev event.Event) { err := cp.pool.Submit(func() { cp.processContainerPut(put) }) if err != nil { // there system can be moved into controlled degradation stage - cp.log.Warn("container processor worker pool drained", + cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained, zap.Int("capacity", cp.pool.Cap())) } } func (cp *Processor) handleDelete(ev event.Event) { del := ev.(containerEvent.Delete) - cp.log.Info("notification", + cp.log.Info(logs.ContainerNotification, zap.String("type", "container delete"), zap.String("id", base58.Encode(del.ContainerID()))) @@ -38,7 +39,7 @@ func (cp *Processor) handleDelete(ev event.Event) { err := cp.pool.Submit(func() { cp.processContainerDelete(&del) }) if err != nil { // there system can be moved into controlled degradation stage - cp.log.Warn("container processor worker pool drained", + cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained, zap.Int("capacity", cp.pool.Cap())) } } @@ -46,7 +47,7 @@ func (cp *Processor) handleDelete(ev event.Event) { func (cp *Processor) handleSetEACL(ev event.Event) { e := ev.(containerEvent.SetEACL) - cp.log.Info("notification", + cp.log.Info(logs.ContainerNotification, zap.String("type", "set EACL"), ) @@ -57,7 +58,7 @@ func (cp *Processor) handleSetEACL(ev 
event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - cp.log.Warn("container processor worker pool drained", + cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained, zap.Int("capacity", cp.pool.Cap())) } } diff --git a/pkg/innerring/processors/container/process_container.go b/pkg/innerring/processors/container/process_container.go index 8b244aa5d..5ebe58375 100644 --- a/pkg/innerring/processors/container/process_container.go +++ b/pkg/innerring/processors/container/process_container.go @@ -3,6 +3,7 @@ package container import ( "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" morphsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/subnet" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" @@ -35,7 +36,7 @@ type putContainerContext struct { // and sending approve tx back to the morph. func (cp *Processor) processContainerPut(put putEvent) { if !cp.alphabetState.IsAlphabet() { - cp.log.Info("non alphabet mode, ignore container put") + cp.log.Info(logs.ContainerNonAlphabetModeIgnoreContainerPut) return } @@ -45,7 +46,7 @@ func (cp *Processor) processContainerPut(put putEvent) { err := cp.checkPutContainer(ctx) if err != nil { - cp.log.Error("put container check failed", + cp.log.Error(logs.ContainerPutContainerCheckFailed, zap.String("error", err.Error()), ) @@ -119,7 +120,7 @@ func (cp *Processor) approvePutContainer(ctx *putContainerContext) { err = cp.cnrClient.Put(prm) } if err != nil { - cp.log.Error("could not approve put container", + cp.log.Error(logs.ContainerCouldNotApprovePutContainer, zap.String("error", err.Error()), ) } @@ -129,13 +130,13 @@ func (cp *Processor) approvePutContainer(ctx *putContainerContext) { // and sending approve tx back to morph. 
func (cp *Processor) processContainerDelete(e *containerEvent.Delete) { if !cp.alphabetState.IsAlphabet() { - cp.log.Info("non alphabet mode, ignore container delete") + cp.log.Info(logs.ContainerNonAlphabetModeIgnoreContainerDelete) return } err := cp.checkDeleteContainer(e) if err != nil { - cp.log.Error("delete container check failed", + cp.log.Error(logs.ContainerDeleteContainerCheckFailed, zap.String("error", err.Error()), ) @@ -194,7 +195,7 @@ func (cp *Processor) approveDeleteContainer(e *containerEvent.Delete) { err = cp.cnrClient.Delete(prm) } if err != nil { - cp.log.Error("could not approve delete container", + cp.log.Error(logs.ContainerCouldNotApproveDeleteContainer, zap.String("error", err.Error()), ) } diff --git a/pkg/innerring/processors/container/process_eacl.go b/pkg/innerring/processors/container/process_eacl.go index e8bbb5db6..fce75c678 100644 --- a/pkg/innerring/processors/container/process_eacl.go +++ b/pkg/innerring/processors/container/process_eacl.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" @@ -13,13 +14,13 @@ import ( func (cp *Processor) processSetEACL(e container.SetEACL) { if !cp.alphabetState.IsAlphabet() { - cp.log.Info("non alphabet mode, ignore set EACL") + cp.log.Info(logs.ContainerNonAlphabetModeIgnoreSetEACL) return } err := cp.checkSetEACL(e) if err != nil { - cp.log.Error("set EACL check failed", + cp.log.Error(logs.ContainerSetEACLCheckFailed, zap.String("error", err.Error()), ) @@ -91,7 +92,7 @@ func (cp *Processor) approveSetEACL(e container.SetEACL) { err = cp.cnrClient.PutEACL(prm) } if err != nil { - cp.log.Error("could not approve set EACL", + cp.log.Error(logs.ContainerCouldNotApproveSetEACL, zap.String("error", err.Error()), ) } diff --git a/pkg/innerring/processors/container/processor.go b/pkg/innerring/processors/container/processor.go index ae0d28729..123ba77b8 100644 --- a/pkg/innerring/processors/container/processor.go +++ b/pkg/innerring/processors/container/processor.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid" morphsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/subnet" @@ -88,7 +89,7 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/container: subnet client is not set") } - p.Log.Debug("container worker pool", zap.Int("size", p.PoolSize)) + p.Log.Debug(logs.ContainerContainerWorkerPool, zap.Int("size", p.PoolSize)) pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { diff --git a/pkg/innerring/processors/frostfs/handlers.go b/pkg/innerring/processors/frostfs/handlers.go index bc0dbec7f..4eff15abe 100644 --- a/pkg/innerring/processors/frostfs/handlers.go +++ b/pkg/innerring/processors/frostfs/handlers.go @@ -3,6 +3,7 @@ package frostfs import ( "encoding/hex" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs" "github.com/nspcc-dev/neo-go/pkg/util/slice" @@ -11,7 +12,7 @@ import ( func (np *Processor) handleDeposit(ev event.Event) { deposit := 
ev.(frostfsEvent.Deposit) - np.log.Info("notification", + np.log.Info(logs.FrostFSNotification, zap.String("type", "deposit"), zap.String("id", hex.EncodeToString(slice.CopyReverse(deposit.ID())))) @@ -20,14 +21,14 @@ func (np *Processor) handleDeposit(ev event.Event) { err := np.pool.Submit(func() { np.processDeposit(&deposit) }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn("frostfs processor worker pool drained", + np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } func (np *Processor) handleWithdraw(ev event.Event) { withdraw := ev.(frostfsEvent.Withdraw) - np.log.Info("notification", + np.log.Info(logs.FrostFSNotification, zap.String("type", "withdraw"), zap.String("id", hex.EncodeToString(slice.CopyReverse(withdraw.ID())))) @@ -36,14 +37,14 @@ func (np *Processor) handleWithdraw(ev event.Event) { err := np.pool.Submit(func() { np.processWithdraw(&withdraw) }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn("frostfs processor worker pool drained", + np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } func (np *Processor) handleCheque(ev event.Event) { cheque := ev.(frostfsEvent.Cheque) - np.log.Info("notification", + np.log.Info(logs.FrostFSNotification, zap.String("type", "cheque"), zap.String("id", hex.EncodeToString(cheque.ID()))) @@ -52,14 +53,14 @@ func (np *Processor) handleCheque(ev event.Event) { err := np.pool.Submit(func() { np.processCheque(&cheque) }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn("frostfs processor worker pool drained", + np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } func (np *Processor) handleConfig(ev event.Event) { cfg := ev.(frostfsEvent.Config) - np.log.Info("notification", + np.log.Info(logs.FrostFSNotification, zap.String("type", "set config"), zap.String("key", hex.EncodeToString(cfg.Key())), zap.String("value", hex.EncodeToString(cfg.Value()))) @@ -69,14 +70,14 @@ func (np *Processor) handleConfig(ev event.Event) { err := np.pool.Submit(func() { np.processConfig(&cfg) }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn("frostfs processor worker pool drained", + np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } func (np *Processor) handleBind(ev event.Event) { e := ev.(frostfsEvent.Bind) - np.log.Info("notification", + np.log.Info(logs.FrostFSNotification, zap.String("type", "bind"), ) @@ -85,14 +86,14 @@ func (np *Processor) handleBind(ev event.Event) { err := np.pool.Submit(func() { np.processBind(e) }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn("frostfs processor worker pool drained", + np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } func (np *Processor) handleUnbind(ev event.Event) { e := ev.(frostfsEvent.Unbind) - np.log.Info("notification", + np.log.Info(logs.FrostFSNotification, zap.String("type", "unbind"), ) @@ -101,7 +102,7 @@ func (np *Processor) handleUnbind(ev event.Event) { err := np.pool.Submit(func() { np.processBind(e) }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn("frostfs processor worker pool drained", + np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } diff 
--git a/pkg/innerring/processors/frostfs/process_assets.go b/pkg/innerring/processors/frostfs/process_assets.go index b28efaa33..e066975f7 100644 --- a/pkg/innerring/processors/frostfs/process_assets.go +++ b/pkg/innerring/processors/frostfs/process_assets.go @@ -1,6 +1,7 @@ package frostfs import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance" frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs" "github.com/nspcc-dev/neo-go/pkg/util" @@ -16,7 +17,7 @@ const ( // gas in the sidechain. func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) { if !np.alphabetState.IsAlphabet() { - np.log.Info("non alphabet mode, ignore deposit") + np.log.Info(logs.FrostFSNonAlphabetModeIgnoreDeposit) return } @@ -29,7 +30,7 @@ func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) { // send transferX to a balance contract err := np.balanceClient.Mint(prm) if err != nil { - np.log.Error("can't transfer assets to balance contract", zap.Error(err)) + np.log.Error(logs.FrostFSCantTransferAssetsToBalanceContract, zap.Error(err)) } curEpoch := np.epochState.EpochCounter() @@ -43,7 +44,7 @@ func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) { val, ok := np.mintEmitCache.Get(receiver.String()) if ok && val+np.mintEmitThreshold >= curEpoch { - np.log.Warn("double mint emission declined", + np.log.Warn(logs.FrostFSDoubleMintEmissionDeclined, zap.String("receiver", receiver.String()), zap.Uint64("last_emission", val), zap.Uint64("current_epoch", curEpoch)) @@ -55,12 +56,12 @@ func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) { // before gas transfer check if the balance is greater than the threshold balance, err := np.morphClient.GasBalance() if err != nil { - np.log.Error("can't get gas balance of the node", zap.Error(err)) + np.log.Error(logs.FrostFSCantGetGasBalanceOfTheNode, zap.Error(err)) return } if balance < np.gasBalanceThreshold { - np.log.Warn("gas balance threshold has been reached", + np.log.Warn(logs.FrostFSGasBalanceThresholdHasBeenReached, zap.Int64("balance", balance), zap.Int64("threshold", np.gasBalanceThreshold)) @@ -69,7 +70,7 @@ func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) { err = np.morphClient.TransferGas(receiver, np.mintEmitValue) if err != nil { - np.log.Error("can't transfer native gas to receiver", + np.log.Error(logs.FrostFSCantTransferNativeGasToReceiver, zap.String("error", err.Error())) return @@ -81,14 +82,14 @@ func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) { // Process withdraw event by locking assets in the balance account. 
func (np *Processor) processWithdraw(withdraw *frostfsEvent.Withdraw) { if !np.alphabetState.IsAlphabet() { - np.log.Info("non alphabet mode, ignore withdraw") + np.log.Info(logs.FrostFSNonAlphabetModeIgnoreWithdraw) return } // create lock account lock, err := util.Uint160DecodeBytesBE(withdraw.ID()[:util.Uint160Size]) if err != nil { - np.log.Error("can't create lock account", zap.Error(err)) + np.log.Error(logs.FrostFSCantCreateLockAccount, zap.Error(err)) return } @@ -104,7 +105,7 @@ func (np *Processor) processWithdraw(withdraw *frostfsEvent.Withdraw) { err = np.balanceClient.Lock(prm) if err != nil { - np.log.Error("can't lock assets for withdraw", zap.Error(err)) + np.log.Error(logs.FrostFSCantLockAssetsForWithdraw, zap.Error(err)) } } @@ -112,7 +113,7 @@ func (np *Processor) processWithdraw(withdraw *frostfsEvent.Withdraw) { // the reserve account. func (np *Processor) processCheque(cheque *frostfsEvent.Cheque) { if !np.alphabetState.IsAlphabet() { - np.log.Info("non alphabet mode, ignore cheque") + np.log.Info(logs.FrostFSNonAlphabetModeIgnoreCheque) return } @@ -124,6 +125,6 @@ func (np *Processor) processCheque(cheque *frostfsEvent.Cheque) { err := np.balanceClient.Burn(prm) if err != nil { - np.log.Error("can't transfer assets to fed contract", zap.Error(err)) + np.log.Error(logs.FrostFSCantTransferAssetsToFedContract, zap.Error(err)) } } diff --git a/pkg/innerring/processors/frostfs/process_bind.go b/pkg/innerring/processors/frostfs/process_bind.go index 0abce5827..c5f8a930e 100644 --- a/pkg/innerring/processors/frostfs/process_bind.go +++ b/pkg/innerring/processors/frostfs/process_bind.go @@ -4,6 +4,7 @@ import ( "crypto/elliptic" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" @@ -20,7 +21,7 @@ type bindCommon interface { func (np *Processor) processBind(e bindCommon) { if !np.alphabetState.IsAlphabet() { - np.log.Info("non alphabet mode, ignore bind") + np.log.Info(logs.FrostFSNonAlphabetModeIgnoreBind) return } @@ -32,7 +33,7 @@ func (np *Processor) processBind(e bindCommon) { err := np.checkBindCommon(c) if err != nil { - np.log.Error("invalid manage key event", + np.log.Error(logs.FrostFSInvalidManageKeyEvent, zap.Bool("bind", c.bind), zap.String("error", err.Error()), ) @@ -77,7 +78,7 @@ func (np *Processor) approveBindCommon(e *bindCommonContext) { u160, err := util.Uint160DecodeBytesBE(scriptHash) if err != nil { - np.log.Error("could not decode script hash from bytes", + np.log.Error(logs.FrostFSCouldNotDecodeScriptHashFromBytes, zap.String("error", err.Error()), ) diff --git a/pkg/innerring/processors/frostfs/process_config.go b/pkg/innerring/processors/frostfs/process_config.go index ecc90332f..471edb9b7 100644 --- a/pkg/innerring/processors/frostfs/process_config.go +++ b/pkg/innerring/processors/frostfs/process_config.go @@ -1,6 +1,7 @@ package frostfs import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs" "go.uber.org/zap" @@ -10,7 +11,7 @@ import ( // the sidechain. 
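The hunks above record errors in two coexisting styles, and the refactoring changes only the message argument, never the fields. A short sketch of the difference between the two styles, using made-up messages rather than constants from this patch:

package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	log, _ := zap.NewDevelopment()
	defer log.Sync()

	err := errors.New("example failure")

	// Style seen in most processors here: the error text is rendered
	// explicitly as a string field keyed "error".
	log.Warn("can't relay example event", zap.String("error", err.Error()))

	// Equivalent zap idiom used in other hunks: zap.Error(err) also produces
	// a field keyed "error", but carries the error value as a typed field.
	log.Error("can't relay example event", zap.Error(err))
}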
func (np *Processor) processConfig(config *frostfsEvent.Config) { if !np.alphabetState.IsAlphabet() { - np.log.Info("non alphabet mode, ignore config") + np.log.Info(logs.FrostFSNonAlphabetModeIgnoreConfig) return } @@ -23,6 +24,6 @@ func (np *Processor) processConfig(config *frostfsEvent.Config) { err := np.netmapClient.SetConfig(prm) if err != nil { - np.log.Error("can't relay set config event", zap.Error(err)) + np.log.Error(logs.FrostFSCantRelaySetConfigEvent, zap.Error(err)) } } diff --git a/pkg/innerring/processors/frostfs/processor.go b/pkg/innerring/processors/frostfs/processor.go index e9504cdb4..4d5bdee78 100644 --- a/pkg/innerring/processors/frostfs/processor.go +++ b/pkg/innerring/processors/frostfs/processor.go @@ -5,6 +5,7 @@ import ( "fmt" "sync" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid" @@ -98,7 +99,7 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/frostfs: balance precision converter is not set") } - p.Log.Debug("frostfs worker pool", zap.Int("size", p.PoolSize)) + p.Log.Debug(logs.FrostFSFrostfsWorkerPool, zap.Int("size", p.PoolSize)) pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { diff --git a/pkg/innerring/processors/governance/handlers.go b/pkg/innerring/processors/governance/handlers.go index bfa88d3f0..727acc21a 100644 --- a/pkg/innerring/processors/governance/handlers.go +++ b/pkg/innerring/processors/governance/handlers.go @@ -1,6 +1,7 @@ package governance import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/rolemanagement" "github.com/nspcc-dev/neo-go/pkg/core/native" @@ -30,14 +31,14 @@ func (gp *Processor) HandleAlphabetSync(e event.Event) { return } - gp.log.Info("new event", zap.String("type", typ)) + gp.log.Info(logs.GovernanceNewEvent, zap.String("type", typ)) // send event to the worker pool err := gp.pool.Submit(func() { gp.processAlphabetSync(hash) }) if err != nil { // there system can be moved into controlled degradation stage - gp.log.Warn("governance worker pool drained", + gp.log.Warn(logs.GovernanceGovernanceWorkerPoolDrained, zap.Int("capacity", gp.pool.Cap())) } } diff --git a/pkg/innerring/processors/governance/process_update.go b/pkg/innerring/processors/governance/process_update.go index 3504e7a53..629d8741e 100644 --- a/pkg/innerring/processors/governance/process_update.go +++ b/pkg/innerring/processors/governance/process_update.go @@ -6,6 +6,7 @@ import ( "sort" "strings" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs" nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" @@ -20,37 +21,37 @@ const ( func (gp *Processor) processAlphabetSync(txHash util.Uint256) { if !gp.alphabetState.IsAlphabet() { - gp.log.Info("non alphabet mode, ignore alphabet sync") + gp.log.Info(logs.GovernanceNonAlphabetModeIgnoreAlphabetSync) return } mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList() if err != nil { - gp.log.Error("can't fetch alphabet list from main net", + gp.log.Error(logs.GovernanceCantFetchAlphabetListFromMainNet, zap.String("error", 
err.Error())) return } sidechainAlphabet, err := gp.morphClient.Committee() if err != nil { - gp.log.Error("can't fetch alphabet list from side chain", + gp.log.Error(logs.GovernanceCantFetchAlphabetListFromSideChain, zap.String("error", err.Error())) return } newAlphabet, err := newAlphabetList(sidechainAlphabet, mainnetAlphabet) if err != nil { - gp.log.Error("can't merge alphabet lists from main net and side chain", + gp.log.Error(logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain, zap.String("error", err.Error())) return } if newAlphabet == nil { - gp.log.Info("no governance update, alphabet list has not been changed") + gp.log.Info(logs.GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged) return } - gp.log.Info("alphabet list has been changed, starting update", + gp.log.Info(logs.GovernanceAlphabetListHasBeenChangedStartingUpdate, zap.String("side_chain_alphabet", prettyKeys(sidechainAlphabet)), zap.String("new_alphabet", prettyKeys(newAlphabet)), ) @@ -63,7 +64,7 @@ func (gp *Processor) processAlphabetSync(txHash util.Uint256) { // 1. Vote to sidechain committee via alphabet contracts. err = gp.voter.VoteForSidechainValidator(votePrm) if err != nil { - gp.log.Error("can't vote for side chain committee", + gp.log.Error(logs.GovernanceCantVoteForSideChainCommittee, zap.String("error", err.Error())) } @@ -76,7 +77,7 @@ func (gp *Processor) processAlphabetSync(txHash util.Uint256) { // 4. Update FrostFS contract in the mainnet. gp.updateFrostFSContractInMainnet(newAlphabet) - gp.log.Info("finished alphabet list update") + gp.log.Info(logs.GovernanceFinishedAlphabetListUpdate) } func prettyKeys(keys keys.PublicKeys) string { @@ -94,21 +95,21 @@ func prettyKeys(keys keys.PublicKeys) string { func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) { innerRing, err := gp.irFetcher.InnerRingKeys() if err != nil { - gp.log.Error("can't fetch inner ring list from side chain", + gp.log.Error(logs.GovernanceCantFetchInnerRingListFromSideChain, zap.String("error", err.Error())) return } newInnerRing, err := updateInnerRing(innerRing, sidechainAlphabet, newAlphabet) if err != nil { - gp.log.Error("can't create new inner ring list with new alphabet keys", + gp.log.Error(logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys, zap.String("error", err.Error())) return } sort.Sort(newInnerRing) - gp.log.Info("update of the inner ring list", + gp.log.Info(logs.GovernanceUpdateOfTheInnerRingList, zap.String("before", prettyKeys(innerRing)), zap.String("after", prettyKeys(newInnerRing)), ) @@ -130,7 +131,7 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAl } if err != nil { - gp.log.Error("can't update inner ring list with new alphabet keys", + gp.log.Error(logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys, zap.String("error", err.Error())) } } @@ -147,7 +148,7 @@ func (gp *Processor) updateNotaryRoleInSidechain(newAlphabet keys.PublicKeys, tx err := gp.morphClient.UpdateNotaryList(updPrm) if err != nil { - gp.log.Error("can't update list of notary nodes in side chain", + gp.log.Error(logs.GovernanceCantUpdateListOfNotaryNodesInSideChain, zap.String("error", err.Error())) } } @@ -167,7 +168,7 @@ func (gp *Processor) updateFrostFSContractInMainnet(newAlphabet keys.PublicKeys) err := gp.frostfsClient.AlphabetUpdate(prm) if err != nil { - gp.log.Error("can't update list of alphabet nodes in frostfs contract", + 
gp.log.Error(logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract, zap.String("error", err.Error())) } } diff --git a/pkg/innerring/processors/netmap/handlers.go b/pkg/innerring/processors/netmap/handlers.go index 54e4ea3ab..76b27c891 100644 --- a/pkg/innerring/processors/netmap/handlers.go +++ b/pkg/innerring/processors/netmap/handlers.go @@ -3,6 +3,7 @@ package netmap import ( "encoding/hex" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" @@ -12,21 +13,21 @@ import ( func (np *Processor) HandleNewEpochTick(ev event.Event) { _ = ev.(timerEvent.NewEpochTick) - np.log.Info("tick", zap.String("type", "epoch")) + np.log.Info(logs.NetmapTick, zap.String("type", "epoch")) // send an event to the worker pool err := np.pool.Submit(func() { np.processNewEpochTick() }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn("netmap worker pool drained", + np.log.Warn(logs.NetmapNetmapWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } func (np *Processor) handleNewEpoch(ev event.Event) { epochEvent := ev.(netmapEvent.NewEpoch) - np.log.Info("notification", + np.log.Info(logs.NetmapNotification, zap.String("type", "new epoch"), zap.Uint64("value", epochEvent.EpochNumber())) @@ -37,7 +38,7 @@ func (np *Processor) handleNewEpoch(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn("netmap worker pool drained", + np.log.Warn(logs.NetmapNetmapWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } @@ -45,7 +46,7 @@ func (np *Processor) handleNewEpoch(ev event.Event) { func (np *Processor) handleAddPeer(ev event.Event) { newPeer := ev.(netmapEvent.AddPeer) - np.log.Info("notification", + np.log.Info(logs.NetmapNotification, zap.String("type", "add peer"), ) @@ -56,14 +57,14 @@ func (np *Processor) handleAddPeer(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn("netmap worker pool drained", + np.log.Warn(logs.NetmapNetmapWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } func (np *Processor) handleUpdateState(ev event.Event) { updPeer := ev.(netmapEvent.UpdatePeer) - np.log.Info("notification", + np.log.Info(logs.NetmapNotification, zap.String("type", "update peer state"), zap.String("key", hex.EncodeToString(updPeer.PublicKey().Bytes()))) @@ -74,21 +75,21 @@ func (np *Processor) handleUpdateState(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn("netmap worker pool drained", + np.log.Warn(logs.NetmapNetmapWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } func (np *Processor) handleCleanupTick(ev event.Event) { if !np.netmapSnapshot.enabled { - np.log.Debug("netmap clean up routine is disabled") + np.log.Debug(logs.NetmapNetmapCleanUpRoutineIsDisabled518) return } cleanup := ev.(netmapCleanupTick) - np.log.Info("tick", zap.String("type", "netmap cleaner")) + np.log.Info(logs.NetmapTick, zap.String("type", "netmap cleaner")) // send event to the worker pool err := np.pool.Submit(func() { @@ -96,7 +97,7 @@ func (np *Processor) handleCleanupTick(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn("netmap worker pool drained", + 
np.log.Warn(logs.NetmapNetmapWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } @@ -104,7 +105,7 @@ func (np *Processor) handleCleanupTick(ev event.Event) { func (np *Processor) handleRemoveNode(ev event.Event) { removeNode := ev.(subnetevents.RemoveNode) - np.log.Info("notification", + np.log.Info(logs.NetmapNotification, zap.String("type", "remove node from subnet"), zap.String("subnetID", hex.EncodeToString(removeNode.SubnetworkID())), zap.String("key", hex.EncodeToString(removeNode.Node())), @@ -115,7 +116,7 @@ func (np *Processor) handleRemoveNode(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn("netmap worker pool drained", + np.log.Warn(logs.NetmapNetmapWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } diff --git a/pkg/innerring/processors/netmap/process_cleanup.go b/pkg/innerring/processors/netmap/process_cleanup.go index e4425ef17..d50c69c78 100644 --- a/pkg/innerring/processors/netmap/process_cleanup.go +++ b/pkg/innerring/processors/netmap/process_cleanup.go @@ -2,6 +2,7 @@ package netmap import ( v2netmap "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "go.uber.org/zap" @@ -9,7 +10,7 @@ import ( func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) { if !np.alphabetState.IsAlphabet() { - np.log.Info("non alphabet mode, ignore new netmap cleanup tick") + np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick) return } @@ -17,13 +18,13 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) { err := np.netmapSnapshot.forEachRemoveCandidate(ev.epoch, func(s string) error { key, err := keys.NewPublicKeyFromString(s) if err != nil { - np.log.Warn("can't decode public key of netmap node", + np.log.Warn(logs.NetmapCantDecodePublicKeyOfNetmapNode, zap.String("key", s)) return nil } - np.log.Info("vote to remove node from netmap", zap.String("key", s)) + np.log.Info(logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", s)) // In notary environments we call UpdateStateIR method instead of UpdateState. // It differs from UpdateState only by name, so we can do this in the same form. 
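For reference while reading these hunks: every logs.* identifier introduced by this patch presumably resolves to a plain string constant in the new internal/logs/logs.go, grouped by subsystem prefix (FrostFS*, Governance*, Netmap*, Reputation*, Settlement*, Audit*, Basic*, Innerring*, Blobovnicza*, Blobovniczatree*, Blobstor*, Engine*, ...). A minimal sketch of the netmap group is shown below; the identifier names are taken from the hunks themselves, while the values are inferred from the literals they replace and may not match the generated file exactly.

package logs

// Sketch only: values are inferred from the removed string literals,
// not copied from the actual internal/logs/logs.go.
const (
	NetmapTick                                      = "tick"
	NetmapNotification                              = "notification"
	NetmapNetmapWorkerPoolDrained                   = "netmap worker pool drained"
	NetmapNetmapCleanUpRoutineIsDisabled518         = "netmap clean up routine is disabled"
	NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick = "non alphabet mode, ignore new netmap cleanup tick"
	NetmapCantDecodePublicKeyOfNetmapNode           = "can't decode public key of netmap node"
	NetmapVoteToRemoveNodeFromNetmap                = "vote to remove node from netmap"
)

Keeping the constant values identical to the old literals would leave the emitted log lines unchanged; the call sites simply share one definition per message instead of repeating the string.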
@@ -48,13 +49,13 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) { ) } if err != nil { - np.log.Error("can't invoke netmap.UpdateState", zap.Error(err)) + np.log.Error(logs.NetmapCantInvokeNetmapUpdateState, zap.Error(err)) } return nil }) if err != nil { - np.log.Warn("can't iterate on netmap cleaner cache", + np.log.Warn(logs.NetmapCantIterateOnNetmapCleanerCache, zap.String("error", err.Error())) } } diff --git a/pkg/innerring/processors/netmap/process_epoch.go b/pkg/innerring/processors/netmap/process_epoch.go index ffcddc497..ebf128f82 100644 --- a/pkg/innerring/processors/netmap/process_epoch.go +++ b/pkg/innerring/processors/netmap/process_epoch.go @@ -1,6 +1,7 @@ package netmap import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/audit" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement" @@ -16,7 +17,7 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) { epochDuration, err := np.netmapClient.EpochDuration() if err != nil { - np.log.Warn("can't get epoch duration", + np.log.Warn(logs.NetmapCantGetEpochDuration, zap.String("error", err.Error())) } else { np.epochState.SetEpochDuration(epochDuration) @@ -26,20 +27,20 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) { h, err := np.netmapClient.Morph().TxHeight(ev.TxHash()) if err != nil { - np.log.Warn("can't get transaction height", + np.log.Warn(logs.NetmapCantGetTransactionHeight, zap.String("hash", ev.TxHash().StringLE()), zap.String("error", err.Error())) } if err := np.epochTimer.ResetEpochTimer(h); err != nil { - np.log.Warn("can't reset epoch timer", + np.log.Warn(logs.NetmapCantResetEpochTimer, zap.String("error", err.Error())) } // get new netmap snapshot networkMap, err := np.netmapClient.NetMap() if err != nil { - np.log.Warn("can't get netmap snapshot to perform cleanup", + np.log.Warn(logs.NetmapCantGetNetmapSnapshotToPerformCleanup, zap.String("error", err.Error())) return @@ -54,7 +55,7 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) { err = np.containerWrp.StartEstimation(prm) if err != nil { - np.log.Warn("can't start container size estimation", + np.log.Warn(logs.NetmapCantStartContainerSizeEstimation, zap.Uint64("epoch", epoch), zap.String("error", err.Error())) } @@ -71,15 +72,15 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) { // Process new epoch tick by invoking new epoch method in network map contract. 
func (np *Processor) processNewEpochTick() { if !np.alphabetState.IsAlphabet() { - np.log.Info("non alphabet mode, ignore new epoch tick") + np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewEpochTick) return } nextEpoch := np.epochState.EpochCounter() + 1 - np.log.Debug("next epoch", zap.Uint64("value", nextEpoch)) + np.log.Debug(logs.NetmapNextEpoch, zap.Uint64("value", nextEpoch)) err := np.netmapClient.NewEpoch(nextEpoch) if err != nil { - np.log.Error("can't invoke netmap.NewEpoch", zap.Error(err)) + np.log.Error(logs.NetmapCantInvokeNetmapNewEpoch, zap.Error(err)) } } diff --git a/pkg/innerring/processors/netmap/process_peers.go b/pkg/innerring/processors/netmap/process_peers.go index 3734bae01..ffaad3b4e 100644 --- a/pkg/innerring/processors/netmap/process_peers.go +++ b/pkg/innerring/processors/netmap/process_peers.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/hex" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" subnetEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/subnet" @@ -16,7 +17,7 @@ import ( // local epoch timer. func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) { if !np.alphabetState.IsAlphabet() { - np.log.Info("non alphabet mode, ignore new peer notification") + np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewPeerNotification) return } @@ -25,7 +26,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) { tx := originalRequest.MainTransaction ok, err := np.netmapClient.Morph().IsValidScript(tx.Script, tx.Signers) if err != nil || !ok { - np.log.Warn("non-halt notary transaction", + np.log.Warn(logs.NetmapNonhaltNotaryTransaction, zap.String("method", "netmap.AddPeer"), zap.String("hash", tx.Hash().StringLE()), zap.Error(err)) @@ -37,14 +38,14 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) { var nodeInfo netmap.NodeInfo if err := nodeInfo.Unmarshal(ev.Node()); err != nil { // it will be nice to have tx id at event structure to log it - np.log.Warn("can't parse network map candidate") + np.log.Warn(logs.NetmapCantParseNetworkMapCandidate) return } // validate and update node info err := np.nodeValidator.VerifyAndUpdate(&nodeInfo) if err != nil { - np.log.Warn("could not verify and update information about network map candidate", + np.log.Warn(logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate, zap.String("error", err.Error()), ) @@ -62,7 +63,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) { updated := np.netmapSnapshot.touch(keyString, np.epochState.EpochCounter(), nodeInfoBinary) if updated { - np.log.Info("approving network map candidate", + np.log.Info(logs.NetmapApprovingNetworkMapCandidate, zap.String("key", keyString)) prm := netmapclient.AddPeerPrm{} @@ -89,7 +90,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) { } if err != nil { - np.log.Error("can't invoke netmap.AddPeer", zap.Error(err)) + np.log.Error(logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err)) } } } @@ -97,7 +98,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) { // Process update peer notification by sending approval tx to the smart contract. 
func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) { if !np.alphabetState.IsAlphabet() { - np.log.Info("non alphabet mode, ignore update peer notification") + np.log.Info(logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification) return } @@ -110,7 +111,7 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) { if ev.Maintenance() { err = np.nodeStateSettings.MaintenanceModeAllowed() if err != nil { - np.log.Info("prevent switching node to maintenance state", + np.log.Info(logs.NetmapPreventSwitchingNodeToMaintenanceState, zap.Error(err), ) @@ -135,19 +136,19 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) { err = np.netmapClient.UpdatePeerState(prm) } if err != nil { - np.log.Error("can't invoke netmap.UpdatePeer", zap.Error(err)) + np.log.Error(logs.NetmapCantInvokeNetmapUpdatePeer, zap.Error(err)) } } func (np *Processor) processRemoveSubnetNode(ev subnetEvent.RemoveNode) { if !np.alphabetState.IsAlphabet() { - np.log.Info("non alphabet mode, ignore remove node from subnet notification") + np.log.Info(logs.NetmapNonAlphabetModeIgnoreRemoveNodeFromSubnetNotification) return } candidates, err := np.netmapClient.GetCandidates() if err != nil { - np.log.Warn("could not get network map candidates", + np.log.Warn(logs.NetmapCouldNotGetNetworkMapCandidates, zap.Error(err), ) return @@ -158,14 +159,14 @@ func (np *Processor) processRemoveSubnetNode(ev subnetEvent.RemoveNode) { err = subnetToRemoveFrom.Unmarshal(rawSubnet) if err != nil { - np.log.Warn("could not unmarshal subnet id", + np.log.Warn(logs.NetmapCouldNotUnmarshalSubnetId, zap.Error(err), ) return } if subnetid.IsZero(subnetToRemoveFrom) { - np.log.Warn("got zero subnet in remove node notification") + np.log.Warn(logs.NetmapGotZeroSubnetInRemoveNodeNotification) return } @@ -182,8 +183,8 @@ func (np *Processor) processRemoveSubnetNode(ev subnetEvent.RemoveNode) { return nil }) if err != nil { - np.log.Warn("could not iterate over subnetworks of the node", zap.Error(err)) - np.log.Info("vote to remove node from netmap", zap.String("key", hex.EncodeToString(ev.Node()))) + np.log.Warn(logs.NetmapCouldNotIterateOverSubnetworksOfTheNode, zap.Error(err)) + np.log.Info(logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", hex.EncodeToString(ev.Node()))) prm := netmapclient.UpdatePeerPrm{} prm.SetKey(ev.Node()) @@ -191,7 +192,7 @@ func (np *Processor) processRemoveSubnetNode(ev subnetEvent.RemoveNode) { err = np.netmapClient.UpdatePeerState(prm) if err != nil { - np.log.Error("could not invoke netmap.UpdateState", zap.Error(err)) + np.log.Error(logs.NetmapCouldNotInvokeNetmapUpdateState, zap.Error(err)) return } } else { @@ -201,7 +202,7 @@ func (np *Processor) processRemoveSubnetNode(ev subnetEvent.RemoveNode) { err = np.netmapClient.AddPeer(prm) if err != nil { - np.log.Error("could not invoke netmap.AddPeer", zap.Error(err)) + np.log.Error(logs.NetmapCouldNotInvokeNetmapAddPeer, zap.Error(err)) return } } diff --git a/pkg/innerring/processors/netmap/processor.go b/pkg/innerring/processors/netmap/processor.go index de145d48c..85a123ef3 100644 --- a/pkg/innerring/processors/netmap/processor.go +++ b/pkg/innerring/processors/netmap/processor.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" @@ 
-142,7 +143,7 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/netmap: node state settings is not set") } - p.Log.Debug("netmap worker pool", zap.Int("size", p.PoolSize)) + p.Log.Debug(logs.NetmapNetmapWorkerPool, zap.Int("size", p.PoolSize)) pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { diff --git a/pkg/innerring/processors/reputation/handlers.go b/pkg/innerring/processors/reputation/handlers.go index 36c9579e5..30e3e9503 100644 --- a/pkg/innerring/processors/reputation/handlers.go +++ b/pkg/innerring/processors/reputation/handlers.go @@ -3,6 +3,7 @@ package reputation import ( "encoding/hex" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" reputationEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/reputation" "go.uber.org/zap" @@ -13,7 +14,7 @@ func (rp *Processor) handlePutReputation(ev event.Event) { peerID := put.PeerID() // FIXME: #1147 do not use `ToV2` method outside frostfs-api-go library - rp.log.Info("notification", + rp.log.Info(logs.ReputationNotification, zap.String("type", "reputation put"), zap.String("peer_id", hex.EncodeToString(peerID.PublicKey()))) @@ -22,7 +23,7 @@ func (rp *Processor) handlePutReputation(ev event.Event) { err := rp.pool.Submit(func() { rp.processPut(&put) }) if err != nil { // there system can be moved into controlled degradation stage - rp.log.Warn("reputation worker pool drained", + rp.log.Warn(logs.ReputationReputationWorkerPoolDrained, zap.Int("capacity", rp.pool.Cap())) } } diff --git a/pkg/innerring/processors/reputation/process_put.go b/pkg/innerring/processors/reputation/process_put.go index 31e93763b..f8814dd06 100644 --- a/pkg/innerring/processors/reputation/process_put.go +++ b/pkg/innerring/processors/reputation/process_put.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation" reputationEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/reputation" apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation" @@ -16,7 +17,7 @@ var errWrongManager = errors.New("got manager that is incorrect for peer") func (rp *Processor) processPut(e *reputationEvent.Put) { if !rp.alphabetState.IsAlphabet() { - rp.log.Info("non alphabet mode, ignore reputation put notification") + rp.log.Info(logs.ReputationNonAlphabetModeIgnoreReputationPutNotification) return } @@ -27,7 +28,7 @@ func (rp *Processor) processPut(e *reputationEvent.Put) { // check if epoch is valid currentEpoch := rp.epochState.EpochCounter() if epoch >= currentEpoch { - rp.log.Info("ignore reputation value", + rp.log.Info(logs.ReputationIgnoreReputationValue, zap.String("reason", "invalid epoch number"), zap.Uint64("trust_epoch", epoch), zap.Uint64("local_epoch", currentEpoch)) @@ -37,7 +38,7 @@ func (rp *Processor) processPut(e *reputationEvent.Put) { // check signature if !value.VerifySignature() { - rp.log.Info("ignore reputation value", + rp.log.Info(logs.ReputationIgnoreReputationValue, zap.String("reason", "invalid signature"), ) @@ -46,7 +47,7 @@ func (rp *Processor) processPut(e *reputationEvent.Put) { // check if manager is correct if err := rp.checkManagers(epoch, value.Manager(), id); err != nil { - rp.log.Info("ignore reputation value", + rp.log.Info(logs.ReputationIgnoreReputationValue, zap.String("reason", "wrong manager"), zap.String("error", err.Error())) @@ -91,7 +92,7 @@ 
func (rp *Processor) approvePutReputation(e *reputationEvent.Put) { } if err != nil { // FIXME: #1147 do not use `ToV2` method outside frostfs-api-go library - rp.log.Warn("can't send approval tx for reputation value", + rp.log.Warn(logs.ReputationCantSendApprovalTxForReputationValue, zap.String("peer_id", hex.EncodeToString(id.PublicKey())), zap.String("error", err.Error())) } diff --git a/pkg/innerring/processors/reputation/processor.go b/pkg/innerring/processors/reputation/processor.go index 990358257..a248fa75f 100644 --- a/pkg/innerring/processors/reputation/processor.go +++ b/pkg/innerring/processors/reputation/processor.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" reputationEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/reputation" @@ -71,7 +72,7 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/reputation: manager builder is not set") } - p.Log.Debug("reputation worker pool", zap.Int("size", p.PoolSize)) + p.Log.Debug(logs.ReputationReputationWorkerPool, zap.Int("size", p.PoolSize)) pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { diff --git a/pkg/innerring/processors/settlement/audit/calculate.go b/pkg/innerring/processors/settlement/audit/calculate.go index d819865d8..75b8c56a4 100644 --- a/pkg/innerring/processors/settlement/audit/calculate.go +++ b/pkg/innerring/processors/settlement/audit/calculate.go @@ -7,6 +7,7 @@ import ( "encoding/hex" "math/big" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/audit" @@ -58,32 +59,32 @@ func (c *Calculator) Calculate(p *CalculatePrm) { )} if p.Epoch == 0 { - log.Info("settlements are ignored for zero epoch") + log.Info(logs.AuditSettlementsAreIgnoredForZeroEpoch) return } - log.Info("calculate audit settlements") + log.Info(logs.AuditCalculateAuditSettlements) - log.Debug("getting results for the previous epoch") + log.Debug(logs.AuditGettingResultsForThePreviousEpoch) prevEpoch := p.Epoch - 1 auditResults, err := c.prm.ResultStorage.AuditResultsForEpoch(prevEpoch) if err != nil { - log.Error("could not collect audit results") + log.Error(logs.AuditCouldNotCollectAuditResults) return } else if len(auditResults) == 0 { - log.Debug("no audit results in previous epoch") + log.Debug(logs.AuditNoAuditResultsInPreviousEpoch) return } auditFee, err := c.prm.AuditFeeFetcher.AuditFee() if err != nil { - log.Warn("can't fetch audit fee from network config", + log.Warn(logs.AuditCantFetchAuditFeeFromNetworkConfig, zap.String("error", err.Error())) auditFee = 0 } - log.Debug("processing audit results", + log.Debug(logs.AuditProcessingAuditResults, zap.Int("number", len(auditResults)), ) @@ -98,7 +99,7 @@ func (c *Calculator) Calculate(p *CalculatePrm) { }) } - log.Debug("processing transfers") + log.Debug(logs.AuditProcessingTransfers) common.TransferAssets(c.prm.Exchanger, table, common.AuditSettlementDetails(prevEpoch)) } @@ -109,35 +110,35 @@ func (c *Calculator) processResult(ctx *singleResultCtx) { zap.Uint64("audit epoch", ctx.auditResult.Epoch()), )} - ctx.log.Debug("reading information about the container") + 
ctx.log.Debug(logs.AuditReadingInformationAboutTheContainer) ok := c.readContainerInfo(ctx) if !ok { return } - ctx.log.Debug("building placement") + ctx.log.Debug(logs.AuditBuildingPlacement) ok = c.buildPlacement(ctx) if !ok { return } - ctx.log.Debug("collecting passed nodes") + ctx.log.Debug(logs.AuditCollectingPassedNodes) ok = c.collectPassNodes(ctx) if !ok { return } - ctx.log.Debug("calculating sum of the sizes of all storage groups") + ctx.log.Debug(logs.AuditCalculatingSumOfTheSizesOfAllStorageGroups) ok = c.sumSGSizes(ctx) if !ok { return } - ctx.log.Debug("filling transfer table") + ctx.log.Debug(logs.AuditFillingTransferTable) c.fillTransferTable(ctx) } @@ -145,7 +146,7 @@ func (c *Calculator) processResult(ctx *singleResultCtx) { func (c *Calculator) readContainerInfo(ctx *singleResultCtx) bool { cnr, ok := ctx.auditResult.Container() if !ok { - ctx.log.Error("missing container in audit result") + ctx.log.Error(logs.AuditMissingContainerInAuditResult) return false } @@ -153,7 +154,7 @@ func (c *Calculator) readContainerInfo(ctx *singleResultCtx) bool { ctx.cnrInfo, err = c.prm.ContainerStorage.ContainerInfo(cnr) if err != nil { - ctx.log.Error("could not get container info", + ctx.log.Error(logs.AuditCouldNotGetContainerInfo, zap.String("error", err.Error()), ) } @@ -166,14 +167,14 @@ func (c *Calculator) buildPlacement(ctx *singleResultCtx) bool { ctx.cnrNodes, err = c.prm.PlacementCalculator.ContainerNodes(ctx.auditEpoch(), ctx.containerID()) if err != nil { - ctx.log.Error("could not get container nodes", + ctx.log.Error(logs.AuditCouldNotGetContainerNodes, zap.String("error", err.Error()), ) } empty := len(ctx.cnrNodes) == 0 if empty { - ctx.log.Debug("empty list of container nodes") + ctx.log.Debug(logs.AuditEmptyListOfContainerNodes) } return err == nil && !empty @@ -206,7 +207,7 @@ func (c *Calculator) collectPassNodes(ctx *singleResultCtx) bool { empty := len(ctx.passNodes) == 0 if empty { - ctx.log.Debug("none of the container nodes passed the audit") + ctx.log.Debug(logs.AuditNoneOfTheContainerNodesPassedTheAudit) } return !empty @@ -224,7 +225,7 @@ func (c *Calculator) sumSGSizes(ctx *singleResultCtx) bool { sgInfo, err := c.prm.SGStorage.SGInfo(addr) if err != nil { - ctx.log.Error("could not get SG info", + ctx.log.Error(logs.AuditCouldNotGetSGInfo, zap.String("id", id.String()), zap.String("error", err.Error()), ) @@ -244,7 +245,7 @@ func (c *Calculator) sumSGSizes(ctx *singleResultCtx) bool { } if sumPassSGSize == 0 { - ctx.log.Debug("zero sum SG size") + ctx.log.Debug(logs.AuditZeroSumSGSize) return false } @@ -260,7 +261,7 @@ func (c *Calculator) fillTransferTable(ctx *singleResultCtx) bool { for k, info := range ctx.passNodes { ownerID, err := c.prm.AccountStorage.ResolveKey(info) if err != nil { - ctx.log.Error("could not resolve public key of the storage node", + ctx.log.Error(logs.AuditCouldNotResolvePublicKeyOfTheStorageNode, zap.String("error", err.Error()), zap.String("key", k), ) @@ -270,7 +271,7 @@ func (c *Calculator) fillTransferTable(ctx *singleResultCtx) bool { price := info.Price() - ctx.log.Debug("calculating storage node salary for audit (GASe-12)", + ctx.log.Debug(logs.AuditCalculatingStorageNodeSalaryForAudit, zap.Stringer("sum SG size", ctx.sumSGSize), zap.Stringer("price", price), ) @@ -292,7 +293,7 @@ func (c *Calculator) fillTransferTable(ctx *singleResultCtx) bool { // add txs to pay inner ring node for audit result auditIR, err := ownerFromKey(ctx.auditResult.AuditorKey()) if err != nil { - ctx.log.Error("could not parse public key of 
the inner ring node", + ctx.log.Error(logs.AuditCouldNotParsePublicKeyOfTheInnerRingNode, zap.String("error", err.Error()), zap.String("key", hex.EncodeToString(ctx.auditResult.AuditorKey())), ) diff --git a/pkg/innerring/processors/settlement/basic/collect.go b/pkg/innerring/processors/settlement/basic/collect.go index ee7354c4f..024769c06 100644 --- a/pkg/innerring/processors/settlement/basic/collect.go +++ b/pkg/innerring/processors/settlement/basic/collect.go @@ -3,6 +3,7 @@ package basic import ( "math/big" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/common" cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" "go.uber.org/zap" @@ -20,7 +21,7 @@ func (inc *IncomeSettlementContext) Collect() { cachedRate, err := inc.rate.BasicRate() if err != nil { - inc.log.Error("can't get basic income rate", + inc.log.Error(logs.BasicCantGetBasicIncomeRate, zap.String("error", err.Error())) return @@ -33,7 +34,7 @@ func (inc *IncomeSettlementContext) Collect() { cnrEstimations, err := inc.estimations.Estimations(inc.epoch) if err != nil { - inc.log.Error("can't fetch container size estimations", + inc.log.Error(logs.BasicCantFetchContainerSizeEstimations, zap.Uint64("epoch", inc.epoch), zap.String("error", err.Error())) @@ -45,7 +46,7 @@ func (inc *IncomeSettlementContext) Collect() { for i := range cnrEstimations { owner, err := inc.container.ContainerInfo(cnrEstimations[i].ContainerID) if err != nil { - inc.log.Warn("can't fetch container info", + inc.log.Warn(logs.BasicCantFetchContainerInfo, zap.Uint64("epoch", inc.epoch), zap.Stringer("container_id", cnrEstimations[i].ContainerID), zap.String("error", err.Error())) @@ -55,7 +56,7 @@ func (inc *IncomeSettlementContext) Collect() { cnrNodes, err := inc.placement.ContainerNodes(inc.epoch, cnrEstimations[i].ContainerID) if err != nil { - inc.log.Debug("can't fetch container info", + inc.log.Debug(logs.BasicCantFetchContainerInfo, zap.Uint64("epoch", inc.epoch), zap.Stringer("container_id", cnrEstimations[i].ContainerID), zap.String("error", err.Error())) diff --git a/pkg/innerring/processors/settlement/basic/distribute.go b/pkg/innerring/processors/settlement/basic/distribute.go index e085f1e22..44a8ccea3 100644 --- a/pkg/innerring/processors/settlement/basic/distribute.go +++ b/pkg/innerring/processors/settlement/basic/distribute.go @@ -4,6 +4,7 @@ import ( "encoding/hex" "math/big" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/common" "go.uber.org/zap" ) @@ -20,7 +21,7 @@ func (inc *IncomeSettlementContext) Distribute() { bankBalance, err := inc.balances.Balance(inc.bankOwner) if err != nil { - inc.log.Error("can't fetch balance of banking account", + inc.log.Error(logs.BasicCantFetchBalanceOfBankingAccount, zap.String("error", err.Error())) return @@ -31,7 +32,7 @@ func (inc *IncomeSettlementContext) Distribute() { inc.distributeTable.Iterate(func(key []byte, n *big.Int) { nodeOwner, err := inc.accounts.ResolveKey(nodeInfoWrapper(key)) if err != nil { - inc.log.Warn("can't transform public key to owner id", + inc.log.Warn(logs.BasicCantTransformPublicKeyToOwnerID, zap.String("public_key", hex.EncodeToString(key)), zap.String("error", err.Error())) diff --git a/pkg/innerring/processors/settlement/calls.go b/pkg/innerring/processors/settlement/calls.go index 33191662b..2687ad206 100644 --- 
a/pkg/innerring/processors/settlement/calls.go +++ b/pkg/innerring/processors/settlement/calls.go @@ -1,6 +1,7 @@ package settlement import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "go.uber.org/zap" @@ -14,7 +15,7 @@ func (p *Processor) HandleAuditEvent(e event.Event) { epoch := ev.Epoch() if !p.state.IsAlphabet() { - p.log.Info("non alphabet mode, ignore audit payments") + p.log.Info(logs.SettlementNonAlphabetModeIgnoreAuditPayments) return } @@ -23,10 +24,10 @@ func (p *Processor) HandleAuditEvent(e event.Event) { zap.Uint64("epoch", epoch), )} - log.Info("new audit settlement event") + log.Info(logs.SettlementNewAuditSettlementEvent) if epoch == 0 { - log.Debug("ignore genesis epoch") + log.Debug(logs.SettlementIgnoreGenesisEpoch) return } @@ -38,14 +39,14 @@ func (p *Processor) HandleAuditEvent(e event.Event) { err := p.pool.Submit(handler.handle) if err != nil { - log.Warn("could not add handler of AuditEvent to queue", + log.Warn(logs.SettlementCouldNotAddHandlerOfAuditEventToQueue, zap.String("error", err.Error()), ) return } - log.Debug("AuditEvent handling successfully scheduled") + log.Debug(logs.SettlementAuditEventHandlingSuccessfullyScheduled) } func (p *Processor) HandleIncomeCollectionEvent(e event.Event) { @@ -53,19 +54,19 @@ func (p *Processor) HandleIncomeCollectionEvent(e event.Event) { epoch := ev.Epoch() if !p.state.IsAlphabet() { - p.log.Info("non alphabet mode, ignore income collection event") + p.log.Info(logs.SettlementNonAlphabetModeIgnoreIncomeCollectionEvent) return } - p.log.Info("start basic income collection", + p.log.Info(logs.SettlementStartBasicIncomeCollection, zap.Uint64("epoch", epoch)) p.contextMu.Lock() defer p.contextMu.Unlock() if _, ok := p.incomeContexts[epoch]; ok { - p.log.Error("income context already exists", + p.log.Error(logs.SettlementIncomeContextAlreadyExists, zap.Uint64("epoch", epoch)) return @@ -73,7 +74,7 @@ func (p *Processor) HandleIncomeCollectionEvent(e event.Event) { incomeCtx, err := p.basicIncome.CreateContext(epoch) if err != nil { - p.log.Error("can't create income context", + p.log.Error(logs.SettlementCantCreateIncomeContext, zap.String("error", err.Error())) return @@ -85,7 +86,7 @@ func (p *Processor) HandleIncomeCollectionEvent(e event.Event) { incomeCtx.Collect() }) if err != nil { - p.log.Warn("could not add handler of basic income collection to queue", + p.log.Warn(logs.SettlementCouldNotAddHandlerOfBasicIncomeCollectionToQueue, zap.String("error", err.Error()), ) @@ -98,12 +99,12 @@ func (p *Processor) HandleIncomeDistributionEvent(e event.Event) { epoch := ev.Epoch() if !p.state.IsAlphabet() { - p.log.Info("non alphabet mode, ignore income distribution event") + p.log.Info(logs.SettlementNonAlphabetModeIgnoreIncomeDistributionEvent) return } - p.log.Info("start basic income distribution", + p.log.Info(logs.SettlementStartBasicIncomeDistribution, zap.Uint64("epoch", epoch)) p.contextMu.Lock() @@ -113,7 +114,7 @@ func (p *Processor) HandleIncomeDistributionEvent(e event.Event) { delete(p.incomeContexts, epoch) if !ok { - p.log.Warn("income context distribution does not exists", + p.log.Warn(logs.SettlementIncomeContextDistributionDoesNotExists, zap.Uint64("epoch", epoch)) return @@ -123,7 +124,7 @@ func (p *Processor) HandleIncomeDistributionEvent(e event.Event) { incomeCtx.Distribute() }) if err != nil { - p.log.Warn("could not add handler of basic income distribution to 
queue", + p.log.Warn(logs.SettlementCouldNotAddHandlerOfBasicIncomeDistributionToQueue, zap.String("error", err.Error()), ) diff --git a/pkg/innerring/processors/settlement/handlers.go b/pkg/innerring/processors/settlement/handlers.go index f73b61983..e69d829eb 100644 --- a/pkg/innerring/processors/settlement/handlers.go +++ b/pkg/innerring/processors/settlement/handlers.go @@ -1,6 +1,9 @@ package settlement -import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" +import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" +) type auditEventHandler struct { log *logger.Logger @@ -11,9 +14,9 @@ type auditEventHandler struct { } func (p *auditEventHandler) handle() { - p.log.Info("process audit settlements") + p.log.Info(logs.SettlementProcessAuditSettlements) p.proc.ProcessAuditSettlements(p.epoch) - p.log.Info("audit processing finished") + p.log.Info(logs.SettlementAuditProcessingFinished) } diff --git a/pkg/innerring/processors/settlement/processor.go b/pkg/innerring/processors/settlement/processor.go index e86666d5c..1870a0351 100644 --- a/pkg/innerring/processors/settlement/processor.go +++ b/pkg/innerring/processors/settlement/processor.go @@ -4,6 +4,7 @@ import ( "fmt" "sync" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/basic" nodeutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" @@ -63,7 +64,7 @@ func New(prm Prm, opts ...Option) *Processor { panic(fmt.Errorf("could not create worker pool: %w", err)) } - o.log.Debug("worker pool for settlement processor successfully initialized", + o.log.Debug(logs.SettlementWorkerPoolForSettlementProcessorSuccessfullyInitialized, zap.Int("capacity", o.poolSize), ) diff --git a/pkg/innerring/rpc.go b/pkg/innerring/rpc.go index 8e96deb7b..9a89c4fcb 100644 --- a/pkg/innerring/rpc.go +++ b/pkg/innerring/rpc.go @@ -6,6 +6,7 @@ import ( "fmt" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" storagegroup2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/storagegroup" @@ -94,7 +95,7 @@ func (c *ClientCache) getSG(ctx context.Context, addr oid.Address, nm *netmap.Ne cli, err := c.getWrappedClient(info) if err != nil { - c.log.Warn("can't setup remote connection", + c.log.Warn(logs.InnerringCantSetupRemoteConnection, zap.String("error", err.Error())) continue @@ -109,7 +110,7 @@ func (c *ClientCache) getSG(ctx context.Context, addr oid.Address, nm *netmap.Ne cancel() if err != nil { - c.log.Warn("can't get storage group object", + c.log.Warn(logs.InnerringCantGetStorageGroupObject, zap.String("error", err.Error())) continue diff --git a/pkg/innerring/settlement.go b/pkg/innerring/settlement.go index 08e7a9f4d..90255f5c1 100644 --- a/pkg/innerring/settlement.go +++ b/pkg/innerring/settlement.go @@ -9,6 +9,7 @@ import ( "fmt" "math/big" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/audit" @@ -223,7 +224,7 @@ func (s settlementDeps) Transfer(sender, recipient user.ID, amount *big.Int, det ) if !amount.IsInt64() { - s.log.Error("amount 
can not be represented as an int64") + s.log.Error(logs.InnerringAmountCanNotBeRepresentedAsAnInt64) return } @@ -262,7 +263,7 @@ func (b basicIncomeSettlementDeps) Estimations(epoch uint64) ([]*containerClient for i := range estimationIDs { estimation, err := b.cnrClient.GetUsedSpaceEstimations(estimationIDs[i]) if err != nil { - b.log.Warn("can't get used space estimation", + b.log.Warn(logs.InnerringCantGetUsedSpaceEstimation, zap.String("estimation_id", hex.EncodeToString(estimationIDs[i])), zap.String("error", err.Error())) diff --git a/pkg/innerring/state.go b/pkg/innerring/state.go index 903d9c876..e3bf7886e 100644 --- a/pkg/innerring/state.go +++ b/pkg/innerring/state.go @@ -4,6 +4,7 @@ import ( "fmt" "sort" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance" auditClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/audit" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit" @@ -61,7 +62,7 @@ func (s *Server) IsAlphabet() bool { func (s *Server) InnerRingIndex() int { index, err := s.statusIndex.InnerRingIndex() if err != nil { - s.log.Error("can't get inner ring index", zap.String("error", err.Error())) + s.log.Error(logs.InnerringCantGetInnerRingIndex, zap.String("error", err.Error())) return -1 } @@ -73,7 +74,7 @@ func (s *Server) InnerRingIndex() int { func (s *Server) InnerRingSize() int { size, err := s.statusIndex.InnerRingSize() if err != nil { - s.log.Error("can't get inner ring size", zap.String("error", err.Error())) + s.log.Error(logs.InnerringCantGetInnerRingSize, zap.String("error", err.Error())) return 0 } @@ -85,7 +86,7 @@ func (s *Server) InnerRingSize() int { func (s *Server) AlphabetIndex() int { index, err := s.statusIndex.AlphabetIndex() if err != nil { - s.log.Error("can't get alphabet index", zap.String("error", err.Error())) + s.log.Error(logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error())) return -1 } @@ -97,13 +98,13 @@ func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) erro index := s.InnerRingIndex() if s.contracts.alphabet.indexOutOfRange(index) { - s.log.Info("ignore validator vote: node not in alphabet range") + s.log.Info(logs.InnerringIgnoreValidatorVoteNodeNotInAlphabetRange) return nil } if len(validators) == 0 { - s.log.Info("ignore validator vote: empty validators list") + s.log.Info(logs.InnerringIgnoreValidatorVoteEmptyValidatorsList) return nil } @@ -128,7 +129,7 @@ func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) erro s.contracts.alphabet.iterate(func(letter GlagoliticLetter, contract util.Uint160) { err := s.morphClient.NotaryInvoke(contract, s.feeConfig.SideChainFee(), nonce, vubP, voteMethod, epoch, validators) if err != nil { - s.log.Warn("can't invoke vote method in alphabet contract", + s.log.Warn(logs.InnerringCantInvokeVoteMethodInAlphabetContract, zap.Int8("alphabet_index", int8(letter)), zap.Uint64("epoch", epoch), zap.String("error", err.Error())) diff --git a/pkg/innerring/subnet.go b/pkg/innerring/subnet.go index 5375029d4..03108aac2 100644 --- a/pkg/innerring/subnet.go +++ b/pkg/innerring/subnet.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" irsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/subnet" netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" morphsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/subnet" @@ 
-173,7 +174,7 @@ func (s *Server) catchSubnetCreation(e event.Event) { s.handleSubnetCreation(e) }) if err != nil { - s.log.Error("subnet creation queue failure", + s.log.Error(logs.InnerringSubnetCreationQueueFailure, zap.String("error", err.Error()), ) } @@ -225,7 +226,7 @@ func (s *Server) handleSubnetCreation(e event.Event) { ev: putEv, }) if err != nil { - s.log.Info("discard subnet creation", + s.log.Info(logs.InnerringDiscardSubnetCreation, zap.String("reason", err.Error()), ) @@ -251,7 +252,7 @@ func (s *Server) handleSubnetCreation(e event.Event) { } if err != nil { - s.log.Error("approve subnet creation", + s.log.Error(logs.InnerringApproveSubnetCreation, zap.Bool("notary", isNotary), zap.String("error", err.Error()), ) @@ -266,7 +267,7 @@ func (s *Server) catchSubnetRemoval(e event.Event) { s.handleSubnetRemoval(e) }) if err != nil { - s.log.Error("subnet removal handling failure", + s.log.Error(logs.InnerringSubnetRemovalHandlingFailure, zap.String("error", err.Error()), ) } @@ -280,7 +281,7 @@ func (s *Server) handleSubnetRemoval(e event.Event) { candidates, err := s.netmapClient.GetCandidates() if err != nil { - s.log.Error("getting netmap candidates", + s.log.Error(logs.InnerringGettingNetmapCandidates, zap.Error(err), ) @@ -290,7 +291,7 @@ func (s *Server) handleSubnetRemoval(e event.Event) { var removedID subnetid.ID err = removedID.Unmarshal(delEv.ID()) if err != nil { - s.log.Error("unmarshalling removed subnet ID", + s.log.Error(logs.InnerringUnmarshallingRemovedSubnetID, zap.String("error", err.Error()), ) @@ -318,8 +319,8 @@ func (s *Server) processCandidate(txHash neogoutil.Uint256, removedID subnetid.I return nil }) if err != nil { - log.Error("iterating node's subnets", zap.Error(err)) - log.Debug("removing node from netmap candidates") + log.Error(logs.InnerringIteratingNodesSubnets, zap.Error(err)) + log.Debug(logs.InnerringRemovingNodeFromNetmapCandidates) var updateStatePrm netmapclient.UpdatePeerPrm updateStatePrm.SetKey(c.PublicKey()) @@ -327,7 +328,7 @@ func (s *Server) processCandidate(txHash neogoutil.Uint256, removedID subnetid.I err = s.netmapClient.UpdatePeerState(updateStatePrm) if err != nil { - log.Error("removing node from candidates", + log.Error(logs.InnerringRemovingNodeFromCandidates, zap.Error(err), ) } @@ -338,7 +339,7 @@ func (s *Server) processCandidate(txHash neogoutil.Uint256, removedID subnetid.I // remove subnet from node's information // if it contains removed subnet if removeSubnet { - log.Debug("removing subnet from the node") + log.Debug(logs.InnerringRemovingSubnetFromTheNode) var addPeerPrm netmapclient.AddPeerPrm addPeerPrm.SetNodeInfo(c) @@ -346,7 +347,7 @@ func (s *Server) processCandidate(txHash neogoutil.Uint256, removedID subnetid.I err = s.netmapClient.AddPeer(addPeerPrm) if err != nil { - log.Error("updating subnet info", + log.Error(logs.InnerringUpdatingSubnetInfo, zap.Error(err), ) } diff --git a/pkg/local_object_storage/blobovnicza/control.go b/pkg/local_object_storage/blobovnicza/control.go index 3912deac0..84274528a 100644 --- a/pkg/local_object_storage/blobovnicza/control.go +++ b/pkg/local_object_storage/blobovnicza/control.go @@ -5,6 +5,7 @@ import ( "os" "path/filepath" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "go.etcd.io/bbolt" "go.uber.org/zap" @@ -14,7 +15,7 @@ import ( // // If the database file does not exist, it will be created automatically. 
func (b *Blobovnicza) Open() error { - b.log.Debug("creating directory for BoltDB", + b.log.Debug(logs.BlobovniczaCreatingDirectoryForBoltDB, zap.String("path", b.path), zap.Bool("ro", b.boltOptions.ReadOnly), ) @@ -28,7 +29,7 @@ func (b *Blobovnicza) Open() error { } } - b.log.Debug("opening BoltDB", + b.log.Debug(logs.BlobovniczaOpeningBoltDB, zap.String("path", b.path), zap.Stringer("permissions", b.perm), ) @@ -44,13 +45,13 @@ func (b *Blobovnicza) Open() error { // // Should not be called in read-only configuration. func (b *Blobovnicza) Init() error { - b.log.Debug("initializing...", + b.log.Debug(logs.BlobovniczaInitializing, zap.Uint64("object size limit", b.objSizeLimit), zap.Uint64("storage size limit", b.fullSizeLimit), ) if size := b.filled.Load(); size != 0 { - b.log.Debug("already initialized", zap.Uint64("size", size)) + b.log.Debug(logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size)) return nil } @@ -59,7 +60,7 @@ func (b *Blobovnicza) Init() error { // create size range bucket rangeStr := stringifyBounds(lower, upper) - b.log.Debug("creating bucket for size range", + b.log.Debug(logs.BlobovniczaCreatingBucketForSizeRange, zap.String("range", rangeStr)) _, err := tx.CreateBucketIfNotExists(key) @@ -86,7 +87,7 @@ func (b *Blobovnicza) Init() error { // Close releases all internal database resources. func (b *Blobovnicza) Close() error { - b.log.Debug("closing BoltDB", + b.log.Debug(logs.BlobovniczaClosingBoltDB, zap.String("path", b.path), ) diff --git a/pkg/local_object_storage/blobovnicza/delete.go b/pkg/local_object_storage/blobovnicza/delete.go index 1f885bd8e..6ce6f349c 100644 --- a/pkg/local_object_storage/blobovnicza/delete.go +++ b/pkg/local_object_storage/blobovnicza/delete.go @@ -1,6 +1,7 @@ package blobovnicza import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.etcd.io/bbolt" @@ -51,7 +52,7 @@ func (b *Blobovnicza) Delete(prm DeletePrm) (DeleteRes, error) { err := buck.Delete(addrKey) if err == nil { - b.log.Debug("object was removed from bucket", + b.log.Debug(logs.BlobovniczaObjectWasRemovedFromBucket, zap.String("binary size", stringifyByteSize(sz)), zap.String("range", stringifyBounds(lower, upper)), ) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go index c628c96be..af976f977 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go @@ -7,6 +7,7 @@ import ( "strconv" "sync" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" @@ -104,12 +105,12 @@ func NewBlobovniczaTree(opts ...Option) (blz *Blobovniczas) { // it from opened cache. 
return } else if err := value.Close(); err != nil { - blz.log.Error("could not close Blobovnicza", + blz.log.Error(logs.BlobovniczatreeCouldNotCloseBlobovnicza, zap.String("id", p), zap.String("error", err.Error()), ) } else { - blz.log.Debug("blobovnicza successfully closed on evict", + blz.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyClosedOnEvict, zap.String("id", p), ) } @@ -141,11 +142,11 @@ func (b *Blobovniczas) getActivated(lvlPath string) (blobovniczaWithIndex, error // // if current active blobovnicza's index is not old, it remains unchanged. func (b *Blobovniczas) updateActive(lvlPath string, old *uint64) error { - b.log.Debug("updating active blobovnicza...", zap.String("path", lvlPath)) + b.log.Debug(logs.BlobovniczatreeUpdatingActiveBlobovnicza, zap.String("path", lvlPath)) _, err := b.updateAndGet(lvlPath, old) - b.log.Debug("active blobovnicza successfully updated", zap.String("path", lvlPath)) + b.log.Debug(logs.BlobovniczatreeActiveBlobovniczaSuccessfullyUpdated, zap.String("path", lvlPath)) return err } @@ -201,7 +202,7 @@ func (b *Blobovniczas) updateAndGet(lvlPath string, old *uint64) (blobovniczaWit } b.lruMtx.Unlock() - b.log.Debug("blobovnicza successfully activated", + b.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyActivated, zap.String("path", activePath)) return active, nil diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control.go b/pkg/local_object_storage/blobstor/blobovniczatree/control.go index e7e890e50..0240c7a97 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/control.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/control.go @@ -4,6 +4,7 @@ import ( "fmt" "path/filepath" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" "go.uber.org/zap" ) @@ -18,10 +19,10 @@ func (b *Blobovniczas) Open(readOnly bool) error { // // Should be called exactly once. 
func (b *Blobovniczas) Init() error { - b.log.Debug("initializing Blobovnicza's") + b.log.Debug(logs.BlobovniczatreeInitializingBlobovniczas) if b.readOnly { - b.log.Debug("read-only mode, skip blobovniczas initialization...") + b.log.Debug(logs.BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization) return nil } @@ -36,7 +37,7 @@ func (b *Blobovniczas) Init() error { return true, fmt.Errorf("could not initialize blobovnicza structure %s: %w", p, err) } - b.log.Debug("blobovnicza successfully initialized, closing...", zap.String("id", p)) + b.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p)) return false, nil }) } @@ -49,7 +50,7 @@ func (b *Blobovniczas) Close() error { for p, v := range b.active { if err := v.blz.Close(); err != nil { - b.log.Debug("could not close active blobovnicza", + b.log.Debug(logs.BlobovniczatreeCouldNotCloseActiveBlobovnicza, zap.String("path", p), zap.String("error", err.Error()), ) @@ -59,7 +60,7 @@ func (b *Blobovniczas) Close() error { for _, k := range b.opened.Keys() { blz, _ := b.opened.Get(k) if err := blz.Close(); err != nil { - b.log.Debug("could not close active blobovnicza", + b.log.Debug(logs.BlobovniczatreeCouldNotCloseActiveBlobovnicza, zap.String("path", k), zap.String("error", err.Error()), ) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go index 7e14d6d8d..202807653 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go @@ -3,6 +3,7 @@ package blobovniczatree import ( "path/filepath" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" @@ -44,7 +45,7 @@ func (b *Blobovniczas) Delete(prm common.DeletePrm) (res common.DeleteRes, err e res, err = b.deleteObjectFromLevel(bPrm, p, !ok, prm) if err != nil { if !blobovnicza.IsErrNotFound(err) { - b.log.Debug("could not remove object from level", + b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromLevel, zap.String("level", p), zap.String("error", err.Error()), ) @@ -83,7 +84,7 @@ func (b *Blobovniczas) deleteObjectFromLevel(prm blobovnicza.DeletePrm, blzPath if res, err := b.deleteObject(v, prm, dp); err == nil { return res, err } else if !blobovnicza.IsErrNotFound(err) { - b.log.Debug("could not remove object from opened blobovnicza", + b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromOpenedBlobovnicza, zap.String("path", blzPath), zap.String("error", err.Error()), ) @@ -102,7 +103,7 @@ func (b *Blobovniczas) deleteObjectFromLevel(prm blobovnicza.DeletePrm, blzPath if res, err := b.deleteObject(active.blz, prm, dp); err == nil { return res, err } else if !blobovnicza.IsErrNotFound(err) { - b.log.Debug("could not remove object from active blobovnicza", + b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromActiveBlobovnicza, zap.String("path", blzPath), zap.String("error", err.Error()), ) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go index 748843ee9..9d9fd4cba 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go @@ -6,6 +6,7 @@ import ( 
"path/filepath" "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "go.opentelemetry.io/otel/attribute" @@ -47,7 +48,7 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common _, err := b.getObjectFromLevel(ctx, gPrm, p, !ok) if err != nil { if !blobovnicza.IsErrNotFound(err) { - b.log.Debug("could not get object from level", + b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel, zap.String("level", p), zap.String("error", err.Error())) } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get.go b/pkg/local_object_storage/blobstor/blobovniczatree/get.go index bb84db086..0b8ccb64f 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/get.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/get.go @@ -7,6 +7,7 @@ import ( "path/filepath" "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" @@ -53,7 +54,7 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G res, err = b.getObjectFromLevel(ctx, bPrm, p, !ok) if err != nil { if !blobovnicza.IsErrNotFound(err) { - b.log.Debug("could not get object from level", + b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel, zap.String("level", p), zap.String("error", err.Error()), ) @@ -88,7 +89,7 @@ func (b *Blobovniczas) getObjectFromLevel(ctx context.Context, prm blobovnicza.G if res, err := b.getObject(ctx, v, prm); err == nil { return res, err } else if !blobovnicza.IsErrNotFound(err) { - b.log.Debug("could not read object from opened blobovnicza", + b.log.Debug(logs.BlobovniczatreeCouldNotReadObjectFromOpenedBlobovnicza, zap.String("path", blzPath), zap.String("error", err.Error()), ) @@ -108,7 +109,7 @@ func (b *Blobovniczas) getObjectFromLevel(ctx context.Context, prm blobovnicza.G if res, err := b.getObject(ctx, active.blz, prm); err == nil { return res, err } else if !blobovnicza.IsErrNotFound(err) { - b.log.Debug("could not get object from active blobovnicza", + b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromActiveBlobovnicza, zap.String("path", blzPath), zap.String("error", err.Error()), ) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go index b12cb32d4..d6dfe51bd 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go @@ -8,6 +8,7 @@ import ( "strconv" "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" @@ -54,7 +55,7 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re if err != nil { outOfBounds := isErrOutOfRange(err) if !outOfBounds && 
!blobovnicza.IsErrNotFound(err) { - b.log.Debug("could not get object from level", + b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel, zap.String("level", p), zap.String("error", err.Error()), ) @@ -98,7 +99,7 @@ func (b *Blobovniczas) getRangeFromLevel(ctx context.Context, prm common.GetRang return res, err default: if !blobovnicza.IsErrNotFound(err) { - b.log.Debug("could not read payload range from opened blobovnicza", + b.log.Debug(logs.BlobovniczatreeCouldNotReadPayloadRangeFromOpenedBlobovnicza, zap.String("path", blzPath), zap.String("error", err.Error()), ) @@ -123,7 +124,7 @@ func (b *Blobovniczas) getRangeFromLevel(ctx context.Context, prm common.GetRang return res, err default: if !blobovnicza.IsErrNotFound(err) { - b.log.Debug("could not read payload range from active blobovnicza", + b.log.Debug(logs.BlobovniczatreeCouldNotReadPayloadRangeFromActiveBlobovnicza, zap.String("path", blzPath), zap.String("error", err.Error()), ) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/put.go b/pkg/local_object_storage/blobstor/blobovniczatree/put.go index db7ca1082..8b29119c6 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/put.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/put.go @@ -4,6 +4,7 @@ import ( "errors" "path/filepath" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "go.etcd.io/bbolt" @@ -56,9 +57,9 @@ func (i *putIterator) iterate(path string) (bool, error) { active, err := i.B.getActivated(path) if err != nil { if !isLogical(err) { - i.B.reportError("could not get active blobovnicza", err) + i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err) } else { - i.B.log.Debug("could not get active blobovnicza", + i.B.log.Debug(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, zap.String("error", err.Error())) } @@ -71,15 +72,15 @@ func (i *putIterator) iterate(path string) (bool, error) { // and `updateActive` takes care of not updating the active blobovnicza twice. 
if isFull := errors.Is(err, blobovnicza.ErrFull); isFull || errors.Is(err, bbolt.ErrDatabaseNotOpen) { if isFull { - i.B.log.Debug("blobovnicza overflowed", + i.B.log.Debug(logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("path", filepath.Join(path, u64ToHexString(active.ind)))) } if err := i.B.updateActive(path, &active.ind); err != nil { if !isLogical(err) { - i.B.reportError("could not update active blobovnicza", err) + i.B.reportError(logs.BlobovniczatreeCouldNotUpdateActiveBlobovnicza, err) } else { - i.B.log.Debug("could not update active blobovnicza", + i.B.log.Debug(logs.BlobovniczatreeCouldNotUpdateActiveBlobovnicza, zap.String("level", path), zap.String("error", err.Error())) } @@ -92,9 +93,9 @@ func (i *putIterator) iterate(path string) (bool, error) { i.AllFull = false if !isLogical(err) { - i.B.reportError("could not put object to active blobovnicza", err) + i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err) } else { - i.B.log.Debug("could not put object to active blobovnicza", + i.B.log.Debug(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", filepath.Join(path, u64ToHexString(active.ind))), zap.String("error", err.Error())) } diff --git a/pkg/local_object_storage/blobstor/control.go b/pkg/local_object_storage/blobstor/control.go index 6ceb9cefa..abe39575b 100644 --- a/pkg/local_object_storage/blobstor/control.go +++ b/pkg/local_object_storage/blobstor/control.go @@ -4,12 +4,13 @@ import ( "errors" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "go.uber.org/zap" ) // Open opens BlobStor. func (b *BlobStor) Open(readOnly bool) error { - b.log.Debug("opening...") + b.log.Debug(logs.BlobstorOpening) for i := range b.storage { err := b.storage[i].Storage.Open(readOnly) @@ -29,7 +30,7 @@ var ErrInitBlobovniczas = errors.New("failure on blobovnicza initialization stag // // Returns wrapped ErrInitBlobovniczas on blobovnicza tree's initializaiton failure. func (b *BlobStor) Init() error { - b.log.Debug("initializing...") + b.log.Debug(logs.BlobstorInitializing) if err := b.compression.Init(); err != nil { return err @@ -46,13 +47,13 @@ func (b *BlobStor) Init() error { // Close releases all internal resources of BlobStor. 
func (b *BlobStor) Close() error { - b.log.Debug("closing...") + b.log.Debug(logs.BlobstorClosing) var firstErr error for i := range b.storage { err := b.storage[i].Storage.Close() if err != nil { - b.log.Info("couldn't close storage", zap.String("error", err.Error())) + b.log.Info(logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error())) if firstErr == nil { firstErr = err } diff --git a/pkg/local_object_storage/blobstor/exists.go b/pkg/local_object_storage/blobstor/exists.go index 5882c33e0..3c76764a9 100644 --- a/pkg/local_object_storage/blobstor/exists.go +++ b/pkg/local_object_storage/blobstor/exists.go @@ -5,6 +5,7 @@ import ( "encoding/hex" "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -57,7 +58,7 @@ func (b *BlobStor) Exists(ctx context.Context, prm common.ExistsPrm) (common.Exi } for _, err := range errors[:len(errors)-1] { - b.log.Warn("error occurred during object existence checking", + b.log.Warn(logs.BlobstorErrorOccurredDuringObjectExistenceChecking, zap.Stringer("address", prm.Address), zap.String("error", err.Error())) } diff --git a/pkg/local_object_storage/blobstor/iterate.go b/pkg/local_object_storage/blobstor/iterate.go index 0461dd803..2c37ee776 100644 --- a/pkg/local_object_storage/blobstor/iterate.go +++ b/pkg/local_object_storage/blobstor/iterate.go @@ -3,6 +3,7 @@ package blobstor import ( "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.uber.org/zap" @@ -38,7 +39,7 @@ func IterateBinaryObjects(blz *BlobStor, f func(addr oid.Address, data []byte, d } prm.IgnoreErrors = true prm.ErrorHandler = func(addr oid.Address, err error) error { - blz.log.Warn("error occurred during the iteration", + blz.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration, zap.Stringer("address", addr), zap.String("err", err.Error())) return nil diff --git a/pkg/local_object_storage/engine/control.go b/pkg/local_object_storage/engine/control.go index 3e176dc91..0c422ccc8 100644 --- a/pkg/local_object_storage/engine/control.go +++ b/pkg/local_object_storage/engine/control.go @@ -8,6 +8,7 @@ import ( "strings" "sync" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "go.uber.org/zap" @@ -47,7 +48,7 @@ func (e *StorageEngine) open() error { for res := range errCh { if res.err != nil { - e.log.Error("could not open shard, closing and skipping", + e.log.Error(logs.EngineCouldNotOpenShardClosingAndSkipping, zap.String("id", res.id), zap.Error(res.err)) @@ -56,7 +57,7 @@ func (e *StorageEngine) open() error { err := sh.Close() if err != nil { - e.log.Error("could not close partially initialized shard", + e.log.Error(logs.EngineCouldNotClosePartiallyInitializedShard, zap.String("id", res.id), zap.Error(res.err)) } @@ -94,7 +95,7 @@ func (e *StorageEngine) Init(ctx context.Context) error { for res := range errCh { if res.err != nil { if errors.Is(res.err, blobstor.ErrInitBlobovniczas) { - e.log.Error("could not initialize shard, closing and skipping", + e.log.Error(logs.EngineCouldNotInitializeShardClosingAndSkipping, 
zap.String("id", res.id), zap.Error(res.err)) @@ -103,7 +104,7 @@ func (e *StorageEngine) Init(ctx context.Context) error { err := sh.Close() if err != nil { - e.log.Error("could not close partially initialized shard", + e.log.Error(logs.EngineCouldNotClosePartiallyInitializedShard, zap.String("id", res.id), zap.Error(res.err)) } @@ -149,7 +150,7 @@ func (e *StorageEngine) close(releasePools bool) error { for id, sh := range e.shards { if err := sh.Close(); err != nil { - e.log.Debug("could not close shard", + e.log.Debug(logs.EngineCouldNotCloseShard, zap.String("id", id), zap.String("error", err.Error()), ) @@ -309,7 +310,7 @@ loop: for _, p := range shardsToReload { err := p.sh.Reload(p.opts...) if err != nil { - e.log.Error("could not reload a shard", + e.log.Error(logs.EngineCouldNotReloadAShard, zap.Stringer("shard id", p.sh.ID()), zap.Error(err)) } @@ -338,7 +339,7 @@ loop: return fmt.Errorf("could not add %s shard: %w", idStr, err) } - e.log.Info("added new shard", zap.String("id", idStr)) + e.log.Info(logs.EngineAddedNewShard, zap.String("id", idStr)) } return nil diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go index 2105c452f..1f3c142a5 100644 --- a/pkg/local_object_storage/engine/delete.go +++ b/pkg/local_object_storage/engine/delete.go @@ -4,6 +4,7 @@ import ( "context" "errors" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -136,7 +137,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) { res, err := sh.Select(selectPrm) if err != nil { - e.log.Warn("error during searching for object children", + e.log.Warn(logs.EngineErrorDuringSearchingForObjectChildren, zap.Stringer("addr", addr), zap.String("error", err.Error())) return false @@ -147,7 +148,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo _, err = sh.Inhume(ctx, inhumePrm) if err != nil { - e.log.Debug("could not inhume object in shard", + e.log.Debug(logs.EngineCouldNotInhumeObjectInShard, zap.Stringer("addr", addr), zap.String("err", err.Error())) continue diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go index e0161bfe3..20c8a946b 100644 --- a/pkg/local_object_storage/engine/engine.go +++ b/pkg/local_object_storage/engine/engine.go @@ -4,6 +4,7 @@ import ( "errors" "sync" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" @@ -87,24 +88,24 @@ func (e *StorageEngine) moveToDegraded(sh *shard.Shard, errCount uint32) { sid := sh.ID() err := sh.SetMode(mode.DegradedReadOnly) if err != nil { - e.log.Error("failed to move shard in degraded-read-only mode, moving to read-only", + e.log.Error(logs.EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly, zap.Stringer("shard_id", sid), zap.Uint32("error count", errCount), zap.Error(err)) err = sh.SetMode(mode.ReadOnly) if err != nil { - e.log.Error("failed to move shard in read-only mode", + e.log.Error(logs.EngineFailedToMoveShardInReadonlyMode, 
zap.Stringer("shard_id", sid), zap.Uint32("error count", errCount), zap.Error(err)) } else { - e.log.Info("shard is moved in read-only mode due to error threshold", + e.log.Info(logs.EngineShardIsMovedInReadonlyModeDueToErrorThreshold, zap.Stringer("shard_id", sid), zap.Uint32("error count", errCount)) } } else { - e.log.Info("shard is moved in degraded mode due to error threshold", + e.log.Info(logs.EngineShardIsMovedInDegradedModeDueToErrorThreshold, zap.Stringer("shard_id", sid), zap.Uint32("error count", errCount)) } @@ -182,7 +183,7 @@ func (e *StorageEngine) reportShardErrorWithFlags( default: // For background workers we can have a lot of such errors, // thus logging is done with DEBUG level. - e.log.Debug("mode change is in progress, ignoring set-mode request", + e.log.Debug(logs.EngineModeChangeIsInProgressIgnoringSetmodeRequest, zap.Stringer("shard_id", sid), zap.Uint32("error_count", errCount)) } diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go index f16413ea2..2ec2c2b35 100644 --- a/pkg/local_object_storage/engine/evacuate.go +++ b/pkg/local_object_storage/engine/evacuate.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" @@ -79,7 +80,7 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (Eva } } - e.log.Info("started shards evacuation", zap.Strings("shard_ids", shardIDs)) + e.log.Info(logs.EngineStartedShardsEvacuation, zap.Strings("shard_ids", shardIDs)) var res EvacuateShardRes @@ -89,7 +90,7 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (Eva } } - e.log.Info("finished shards evacuation", zap.Strings("shard_ids", shardIDs)) + e.log.Info(logs.EngineFinishedShardsEvacuation, zap.Strings("shard_ids", shardIDs)) return res, nil } @@ -206,7 +207,7 @@ func (e *StorageEngine) tryEvacuateObject(ctx context.Context, addr oid.Address, putDone, exists := e.putToShard(ctx, shards[j].hashedShard, j, shards[j].pool, addr, object) if putDone || exists { if putDone { - e.log.Debug("object is moved to another shard", + e.log.Debug(logs.EngineObjectIsMovedToAnotherShard, zap.Stringer("from", sh.ID()), zap.Stringer("to", shards[j].ID()), zap.Stringer("addr", addr)) diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go index db9988338..696e78742 100644 --- a/pkg/local_object_storage/engine/inhume.go +++ b/pkg/local_object_storage/engine/inhume.go @@ -4,6 +4,7 @@ import ( "context" "errors" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -83,7 +84,7 @@ func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, e if !prm.forceRemoval { locked, err := e.IsLocked(prm.addrs[i]) if err != nil { - e.log.Warn("removing an object without full locking check", + e.log.Warn(logs.EngineRemovingAnObjectWithoutFullLockingCheck, zap.Error(err), zap.Stringer("addr", prm.addrs[i])) } else if locked { @@ -222,7 +223,7 @@ func (e *StorageEngine) processExpiredLocks(ctx context.Context, epoch uint64, l select { case 
<-ctx.Done(): - e.log.Info("interrupt processing the expired locks", zap.Error(ctx.Err())) + e.log.Info(logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(ctx.Err())) return true default: return false @@ -236,7 +237,7 @@ func (e *StorageEngine) processDeletedLocks(ctx context.Context, lockers []oid.A select { case <-ctx.Done(): - e.log.Info("interrupt processing the deleted locks", zap.Error(ctx.Err())) + e.log.Info(logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(ctx.Err())) return true default: return false diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go index 5f9105efc..aea296cc4 100644 --- a/pkg/local_object_storage/engine/put.go +++ b/pkg/local_object_storage/engine/put.go @@ -4,6 +4,7 @@ import ( "context" "errors" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" @@ -118,7 +119,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, ind int, _, err = sh.ToMoveIt(toMoveItPrm) if err != nil { - e.log.Warn("could not mark object for shard relocation", + e.log.Warn(logs.EngineCouldNotMarkObjectForShardRelocation, zap.Stringer("shard", sh.ID()), zap.String("error", err.Error()), ) @@ -135,7 +136,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, ind int, if err != nil { if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) || errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) { - e.log.Warn("could not put object to shard", + e.log.Warn(logs.EngineCouldNotPutObjectToShard, zap.Stringer("shard_id", sh.ID()), zap.String("error", err.Error())) return diff --git a/pkg/local_object_storage/engine/remove_copies.go b/pkg/local_object_storage/engine/remove_copies.go index c50c0844c..d365fc7b4 100644 --- a/pkg/local_object_storage/engine/remove_copies.go +++ b/pkg/local_object_storage/engine/remove_copies.go @@ -4,6 +4,7 @@ import ( "context" "errors" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -42,7 +43,7 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat prm.Concurrency = defaultRemoveDuplicatesConcurrency } - e.log.Info("starting removal of locally-redundant copies", + e.log.Info(logs.EngineStartingRemovalOfLocallyredundantCopies, zap.Int("concurrency", prm.Concurrency)) // The mutext must be taken for the whole duration to avoid target shard being removed @@ -54,7 +55,7 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat // This is not currently the case, because `FreeSpace` metric used by weight sorting is always 0. // However we could change weights in future and easily forget this function. 
for _, sh := range e.shards { - e.log.Debug("started duplicates removal routine", zap.String("shard_id", sh.ID().String())) + e.log.Debug(logs.EngineStartedDuplicatesRemovalRoutine, zap.String("shard_id", sh.ID().String())) ch := make(chan oid.Address) errG, ctx := errgroup.WithContext(ctx) @@ -92,12 +93,12 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat }) } if err := errG.Wait(); err != nil { - e.log.Error("finished removal of locally-redundant copies", zap.Error(err)) + e.log.Error(logs.EngineFinishedRemovalOfLocallyredundantCopies, zap.Error(err)) return err } } - e.log.Info("finished removal of locally-redundant copies") + e.log.Info(logs.EngineFinishedRemovalOfLocallyredundantCopies) return nil } diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go index 2b1146ff2..64546d9ef 100644 --- a/pkg/local_object_storage/engine/shards.go +++ b/pkg/local_object_storage/engine/shards.go @@ -3,6 +3,7 @@ package engine import ( "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" @@ -168,7 +169,7 @@ func (e *StorageEngine) removeShards(ids ...string) { delete(e.shardPools, id) } - e.log.Info("shard has been removed", + e.log.Info(logs.EngineShardHasBeenRemoved, zap.String("id", id)) } e.mtx.Unlock() @@ -176,7 +177,7 @@ func (e *StorageEngine) removeShards(ids ...string) { for _, sh := range ss { err := sh.Close() if err != nil { - e.log.Error("could not close removed shard", + e.log.Error(logs.EngineCouldNotCloseRemovedShard, zap.Stringer("id", sh.ID()), zap.Error(err), ) diff --git a/pkg/local_object_storage/metabase/control.go b/pkg/local_object_storage/metabase/control.go index 1a19c3e2a..4ae802aaa 100644 --- a/pkg/local_object_storage/metabase/control.go +++ b/pkg/local_object_storage/metabase/control.go @@ -5,6 +5,7 @@ import ( "fmt" "path/filepath" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" @@ -25,7 +26,7 @@ func (db *DB) Open(readOnly bool) error { return fmt.Errorf("can't create dir %s for metabase: %w", db.info.Path, err) } - db.log.Debug("created directory for Metabase", zap.String("path", db.info.Path)) + db.log.Debug(logs.MetabaseCreatedDirectoryForMetabase, zap.String("path", db.info.Path)) if db.boltOptions == nil { opts := *bbolt.DefaultOptions @@ -46,9 +47,9 @@ func (db *DB) openBolt() error { db.boltDB.MaxBatchDelay = db.boltBatchDelay db.boltDB.MaxBatchSize = db.boltBatchSize - db.log.Debug("opened boltDB instance for Metabase") + db.log.Debug(logs.MetabaseOpenedBoltDBInstanceForMetabase) - db.log.Debug("checking metabase version") + db.log.Debug(logs.MetabaseCheckingMetabaseVersion) return db.boltDB.View(func(tx *bbolt.Tx) error { // The safest way to check if the metabase is fresh is to check if it has no buckets. // However, shard info can be present. 
So here we check that the number of buckets is diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go index 20985f47a..74c261d35 100644 --- a/pkg/local_object_storage/metabase/select.go +++ b/pkg/local_object_storage/metabase/select.go @@ -7,6 +7,7 @@ import ( "strings" v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -267,7 +268,7 @@ func (db *DB) selectFromFKBT( ) { // matchFunc, ok := db.matchers[f.Operation()] if !ok { - db.log.Debug("missing matcher", zap.Uint32("operation", uint32(f.Operation()))) + db.log.Debug(logs.MetabaseMissingMatcher, zap.Uint32("operation", uint32(f.Operation()))) return } @@ -290,7 +291,7 @@ func (db *DB) selectFromFKBT( }) }) if err != nil { - db.log.Debug("error in FKBT selection", zap.String("error", err.Error())) + db.log.Debug(logs.MetabaseErrorInFKBTSelection, zap.String("error", err.Error())) } } @@ -360,13 +361,13 @@ func (db *DB) selectFromList( case object.MatchStringEqual: lst, err = decodeList(bkt.Get(bucketKeyHelper(f.Header(), f.Value()))) if err != nil { - db.log.Debug("can't decode list bucket leaf", zap.String("error", err.Error())) + db.log.Debug(logs.MetabaseCantDecodeListBucketLeaf, zap.String("error", err.Error())) return } default: fMatch, ok := db.matchers[op] if !ok { - db.log.Debug("unknown operation", zap.Uint32("operation", uint32(op))) + db.log.Debug(logs.MetabaseUnknownOperation, zap.Uint32("operation", uint32(op))) return } @@ -374,7 +375,7 @@ func (db *DB) selectFromList( if err = fMatch.matchBucket(bkt, f.Header(), f.Value(), func(key, val []byte) error { l, err := decodeList(val) if err != nil { - db.log.Debug("can't decode list bucket leaf", + db.log.Debug(logs.MetabaseCantDecodeListBucketLeaf, zap.String("error", err.Error()), ) @@ -385,7 +386,7 @@ func (db *DB) selectFromList( return nil }); err != nil { - db.log.Debug("can't iterate over the bucket", + db.log.Debug(logs.MetabaseCantIterateOverTheBucket, zap.String("error", err.Error()), ) @@ -429,7 +430,7 @@ func (db *DB) selectObjectID( default: fMatch, ok := db.matchers[op] if !ok { - db.log.Debug("unknown operation", + db.log.Debug(logs.MetabaseUnknownOperation, zap.Uint32("operation", uint32(f.Operation())), ) @@ -451,7 +452,7 @@ func (db *DB) selectObjectID( return nil }) if err != nil { - db.log.Debug("could not iterate over the buckets", + db.log.Debug(logs.MetabaseCouldNotIterateOverTheBuckets, zap.String("error", err.Error()), ) } diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go index d727d27a5..3d0f72922 100644 --- a/pkg/local_object_storage/shard/control.go +++ b/pkg/local_object_storage/shard/control.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" @@ -15,7 +16,7 @@ import ( ) func (s *Shard) handleMetabaseFailure(stage string, err error) error { - s.log.Error("metabase failure, switching mode", + s.log.Error(logs.ShardMetabaseFailureSwitchingMode, zap.String("stage", stage), zap.Stringer("mode", mode.ReadOnly), zap.Error(err)) @@ -25,7 
+26,7 @@ func (s *Shard) handleMetabaseFailure(stage string, err error) error { return nil } - s.log.Error("can't move shard to readonly, switch mode", + s.log.Error(logs.ShardCantMoveShardToReadonlySwitchMode, zap.String("stage", stage), zap.Stringer("mode", mode.DegradedReadOnly), zap.Error(err)) @@ -167,7 +168,7 @@ func (s *Shard) refillMetabase() error { err = blobstor.IterateBinaryObjects(s.blobStor, func(addr oid.Address, data []byte, descriptor []byte) error { if err := obj.Unmarshal(data); err != nil { - s.log.Warn("could not unmarshal object", + s.log.Warn(logs.ShardCouldNotUnmarshalObject, zap.Stringer("address", addr), zap.String("err", err.Error())) return nil @@ -274,7 +275,7 @@ func (s *Shard) Close() error { for _, component := range components { if err := component.Close(); err != nil { lastErr = err - s.log.Error("could not close shard component", zap.Error(err)) + s.log.Error(logs.ShardCouldNotCloseShardComponent, zap.Error(err)) } } @@ -302,7 +303,7 @@ func (s *Shard) Reload(opts ...Option) error { ok, err := s.metaBase.Reload(c.metaOpts...) if err != nil { if errors.Is(err, meta.ErrDegradedMode) { - s.log.Error("can't open metabase, move to a degraded mode", zap.Error(err)) + s.log.Error(logs.ShardCantOpenMetabaseMoveToADegradedMode, zap.Error(err)) _ = s.setMode(mode.DegradedReadOnly) } return err @@ -318,12 +319,12 @@ func (s *Shard) Reload(opts ...Option) error { err = s.metaBase.Init() } if err != nil { - s.log.Error("can't initialize metabase, move to a degraded-read-only mode", zap.Error(err)) + s.log.Error(logs.ShardCantInitializeMetabaseMoveToADegradedreadonlyMode, zap.Error(err)) _ = s.setMode(mode.DegradedReadOnly) return err } } - s.log.Info("trying to restore read-write mode") + s.log.Info(logs.ShardTryingToRestoreReadwriteMode) return s.setMode(mode.ReadWrite) } diff --git a/pkg/local_object_storage/shard/delete.go b/pkg/local_object_storage/shard/delete.go index 6ae3bf7dd..ed05f9982 100644 --- a/pkg/local_object_storage/shard/delete.go +++ b/pkg/local_object_storage/shard/delete.go @@ -3,6 +3,7 @@ package shard import ( "errors" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" @@ -49,7 +50,7 @@ func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) { if s.hasWriteCache() { err := s.writeCache.Delete(prm.addr[i]) if err != nil && !IsErrNotFound(err) && !errors.Is(err, writecache.ErrReadOnly) { - s.log.Warn("can't delete object from write cache", zap.String("error", err.Error())) + s.log.Warn(logs.ShardCantDeleteObjectFromWriteCache, zap.String("error", err.Error())) } } @@ -58,7 +59,7 @@ func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) { res, err := s.metaBase.StorageID(sPrm) if err != nil { - s.log.Debug("can't get storage ID from metabase", + s.log.Debug(logs.ShardCantGetStorageIDFromMetabase, zap.Stringer("object", prm.addr[i]), zap.String("error", err.Error())) @@ -100,7 +101,7 @@ func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) { _, err = s.blobStor.Delete(delPrm) if err != nil { - s.log.Debug("can't remove object from blobStor", + s.log.Debug(logs.ShardCantRemoveObjectFromBlobStor, zap.Stringer("object_address", prm.addr[i]), zap.String("error", err.Error())) } diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go index 6f18e6c3a..5ea9ecedf 
100644 --- a/pkg/local_object_storage/shard/gc.go +++ b/pkg/local_object_storage/shard/gc.go @@ -5,6 +5,7 @@ import ( "sync" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" @@ -124,7 +125,7 @@ func (gc *gc) listenEvents(ctx context.Context) { for { event, ok := <-gc.eventChan if !ok { - gc.log.Warn("stop event listener by closed channel") + gc.log.Warn(logs.ShardStopEventListenerByClosedChannel) return } @@ -149,7 +150,7 @@ func (gc *gc) listenEvents(ctx context.Context) { v.prevGroup.Done() }) if err != nil { - gc.log.Warn("could not submit GC job to worker pool", + gc.log.Warn(logs.ShardCouldNotSubmitGCJobToWorkerPool, zap.String("error", err.Error()), ) @@ -174,7 +175,7 @@ func (gc *gc) tickRemover() { close(gc.eventChan) - gc.log.Debug("GC is stopped") + gc.log.Debug(logs.ShardGCIsStopped) return case <-timer.C: gc.remover() @@ -188,7 +189,7 @@ func (gc *gc) stop() { gc.stopChannel <- struct{}{} }) - gc.log.Info("waiting for GC workers to stop...") + gc.log.Info(logs.ShardWaitingForGCWorkersToStop) gc.wg.Wait() } @@ -220,7 +221,7 @@ func (s *Shard) removeGarbage() { // (no more than s.rmBatchSize objects) err := s.metaBase.IterateOverGarbage(iterPrm) if err != nil { - s.log.Warn("iterator over metabase graveyard failed", + s.log.Warn(logs.ShardIteratorOverMetabaseGraveyardFailed, zap.String("error", err.Error()), ) @@ -235,7 +236,7 @@ func (s *Shard) removeGarbage() { // delete accumulated objects _, err = s.delete(deletePrm) if err != nil { - s.log.Warn("could not delete the objects", + s.log.Warn(logs.ShardCouldNotDeleteTheObjects, zap.String("error", err.Error()), ) @@ -295,7 +296,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) { }) if err := errGroup.Wait(); err != nil { - s.log.Warn("iterator over expired objects failed", zap.String("error", err.Error())) + s.log.Warn(logs.ShardIteratorOverExpiredObjectsFailed, zap.String("error", err.Error())) } } @@ -321,7 +322,7 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address) // inhume the collected objects res, err := s.metaBase.Inhume(inhumePrm) if err != nil { - s.log.Warn("could not inhume the objects", + s.log.Warn(logs.ShardCouldNotInhumeTheObjects, zap.String("error", err.Error()), ) @@ -342,7 +343,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { epoch := e.(newEpoch).epoch log := s.log.With(zap.Uint64("epoch", epoch)) - log.Debug("started expired tombstones handling") + log.Debug(logs.ShardStartedExpiredTombstonesHandling) const tssDeleteBatch = 50 tss := make([]meta.TombstonedObject, 0, tssDeleteBatch) @@ -360,12 +361,12 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { }) for { - log.Debug("iterating tombstones") + log.Debug(logs.ShardIteratingTombstones) s.m.RLock() if s.info.Mode.NoMetabase() { - s.log.Debug("shard is in a degraded mode, skip collecting expired tombstones") + s.log.Debug(logs.ShardShardIsInADegradedModeSkipCollectingExpiredTombstones) s.m.RUnlock() return @@ -373,7 +374,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { err := s.metaBase.IterateOverGraveyard(iterPrm) if err != nil { - log.Error("iterator over graveyard failed", zap.Error(err)) + log.Error(logs.ShardIteratorOverGraveyardFailed, zap.Error(err)) s.m.RUnlock() return @@ -392,7 
+393,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { } } - log.Debug("handling expired tombstones batch", zap.Int("number", len(tssExp))) + log.Debug(logs.ShardHandlingExpiredTombstonesBatch, zap.Int("number", len(tssExp))) s.expiredTombstonesCallback(ctx, tssExp) iterPrm.SetOffset(tss[tssLen-1].Address()) @@ -400,7 +401,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { tssExp = tssExp[:0] } - log.Debug("finished expired tombstones handling") + log.Debug(logs.ShardFinishedExpiredTombstonesHandling) } func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) { @@ -442,7 +443,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) { }) if err := errGroup.Wait(); err != nil { - s.log.Warn("iterator over expired locks failed", zap.String("error", err.Error())) + s.log.Warn(logs.ShardIteratorOverExpiredLocksFailed, zap.String("error", err.Error())) } } @@ -503,7 +504,7 @@ func (s *Shard) HandleExpiredTombstones(tss []meta.TombstonedObject) { // inhume tombstones res, err := s.metaBase.Inhume(pInhume) if err != nil { - s.log.Warn("could not mark tombstones as garbage", + s.log.Warn(logs.ShardCouldNotMarkTombstonesAsGarbage, zap.String("error", err.Error()), ) @@ -523,7 +524,7 @@ func (s *Shard) HandleExpiredTombstones(tss []meta.TombstonedObject) { // from graveyard err = s.metaBase.DropGraves(tss) if err != nil { - s.log.Warn("could not drop expired grave records", zap.Error(err)) + s.log.Warn(logs.ShardCouldNotDropExpiredGraveRecords, zap.Error(err)) } } @@ -535,7 +536,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers [] } unlocked, err := s.metaBase.FreeLockedBy(lockers) if err != nil { - s.log.Warn("failure to unlock objects", + s.log.Warn(logs.ShardFailureToUnlockObjects, zap.String("error", err.Error()), ) @@ -548,7 +549,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers [] res, err := s.metaBase.Inhume(pInhume) if err != nil { - s.log.Warn("failure to mark lockers as garbage", + s.log.Warn(logs.ShardFailureToMarkLockersAsGarbage, zap.String("error", err.Error()), ) @@ -570,7 +571,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers [] func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unlocked []oid.Address) { expiredUnlocked, err := s.selectExpired(ctx, epoch, unlocked) if err != nil { - s.log.Warn("failure to get expired unlocked objects", zap.Error(err)) + s.log.Warn(logs.ShardFailureToGetExpiredUnlockedObjects, zap.Error(err)) return } @@ -589,7 +590,7 @@ func (s *Shard) HandleDeletedLocks(lockers []oid.Address) { _, err := s.metaBase.FreeLockedBy(lockers) if err != nil { - s.log.Warn("failure to unlock objects", + s.log.Warn(logs.ShardFailureToUnlockObjects, zap.String("error", err.Error()), ) diff --git a/pkg/local_object_storage/shard/get.go b/pkg/local_object_storage/shard/get.go index 3406b9338..8a0296ac6 100644 --- a/pkg/local_object_storage/shard/get.go +++ b/pkg/local_object_storage/shard/get.go @@ -5,6 +5,7 @@ import ( "fmt" "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" @@ -126,7 +127,7 @@ func (s *Shard) fetchObjectData(addr oid.Address, skipMeta bool, cb 
storFetcher, return nil, false, logicerr.Wrap(apistatus.ObjectNotFound{}) } } else { - s.log.Warn("fetching object without meta", zap.Stringer("addr", addr)) + s.log.Warn(logs.ShardFetchingObjectWithoutMeta, zap.Stringer("addr", addr)) } if s.hasWriteCache() { @@ -135,11 +136,11 @@ func (s *Shard) fetchObjectData(addr oid.Address, skipMeta bool, cb storFetcher, return res, false, err } if IsErrNotFound(err) { - s.log.Debug("object is missing in write-cache", + s.log.Debug(logs.ShardObjectIsMissingInWritecache, zap.Stringer("addr", addr), zap.Bool("skip_meta", skipMeta)) } else { - s.log.Error("failed to fetch object from write-cache", + s.log.Error(logs.ShardFailedToFetchObjectFromWritecache, zap.Error(err), zap.Stringer("addr", addr), zap.Bool("skip_meta", skipMeta)) diff --git a/pkg/local_object_storage/shard/inhume.go b/pkg/local_object_storage/shard/inhume.go index 40a5bf22e..3457188be 100644 --- a/pkg/local_object_storage/shard/inhume.go +++ b/pkg/local_object_storage/shard/inhume.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.uber.org/zap" @@ -98,7 +99,7 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) { return InhumeRes{}, ErrLockObjectRemoval } - s.log.Debug("could not mark object to delete in metabase", + s.log.Debug(logs.ShardCouldNotMarkObjectToDeleteInMetabase, zap.String("error", err.Error()), ) diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go index 9efca8983..bab1090eb 100644 --- a/pkg/local_object_storage/shard/list.go +++ b/pkg/local_object_storage/shard/list.go @@ -3,6 +3,7 @@ package shard import ( "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -86,7 +87,7 @@ func (s *Shard) List() (res SelectRes, err error) { sRes, err := s.metaBase.Select(sPrm) // consider making List in metabase if err != nil { - s.log.Debug("can't select all objects", + s.log.Debug(logs.ShardCantSelectAllObjects, zap.Stringer("cid", lst[i]), zap.String("error", err.Error())) diff --git a/pkg/local_object_storage/shard/mode.go b/pkg/local_object_storage/shard/mode.go index 17ed3f3c8..50c52accc 100644 --- a/pkg/local_object_storage/shard/mode.go +++ b/pkg/local_object_storage/shard/mode.go @@ -1,6 +1,7 @@ package shard import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" "go.uber.org/zap" @@ -25,7 +26,7 @@ func (s *Shard) SetMode(m mode.Mode) error { } func (s *Shard) setMode(m mode.Mode) error { - s.log.Info("setting shard mode", + s.log.Info(logs.ShardSettingShardMode, zap.Stringer("old_mode", s.info.Mode), zap.Stringer("new_mode", m)) @@ -66,7 +67,7 @@ func (s *Shard) setMode(m mode.Mode) error { s.metricsWriter.SetReadonly(s.info.Mode != mode.ReadWrite) } - s.log.Info("shard mode set successfully", + s.log.Info(logs.ShardShardModeSetSuccessfully, zap.Stringer("mode", s.info.Mode)) return nil } diff --git a/pkg/local_object_storage/shard/move.go b/pkg/local_object_storage/shard/move.go index 
c6bf8409e..f3199ac07 100644 --- a/pkg/local_object_storage/shard/move.go +++ b/pkg/local_object_storage/shard/move.go @@ -1,6 +1,7 @@ package shard import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.uber.org/zap" @@ -38,7 +39,7 @@ func (s *Shard) ToMoveIt(prm ToMoveItPrm) (ToMoveItRes, error) { _, err := s.metaBase.ToMoveIt(toMovePrm) if err != nil { - s.log.Debug("could not mark object for shard relocation in metabase", + s.log.Debug(logs.ShardCouldNotMarkObjectForShardRelocationInMetabase, zap.String("error", err.Error()), ) } diff --git a/pkg/local_object_storage/shard/put.go b/pkg/local_object_storage/shard/put.go index 48dbe1be2..a4cb2cb1f 100644 --- a/pkg/local_object_storage/shard/put.go +++ b/pkg/local_object_storage/shard/put.go @@ -3,6 +3,7 @@ package shard import ( "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" @@ -58,7 +59,7 @@ func (s *Shard) Put(prm PutPrm) (PutRes, error) { } if err != nil || !tryCache { if err != nil { - s.log.Debug("can't put object to the write-cache, trying blobstor", + s.log.Debug(logs.ShardCantPutObjectToTheWritecacheTryingBlobstor, zap.String("err", err.Error())) } diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go index 6d1fba141..44ec54645 100644 --- a/pkg/local_object_storage/shard/shard.go +++ b/pkg/local_object_storage/shard/shard.go @@ -5,6 +5,7 @@ import ( "sync" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" @@ -349,7 +350,7 @@ func (s *Shard) updateMetrics() { if s.cfg.metricsWriter != nil && !s.GetMode().NoMetabase() { cc, err := s.metaBase.ObjectCounters() if err != nil { - s.log.Warn("meta: object counter read", + s.log.Warn(logs.ShardMetaObjectCounterRead, zap.Error(err), ) @@ -361,7 +362,7 @@ func (s *Shard) updateMetrics() { cnrList, err := s.metaBase.Containers() if err != nil { - s.log.Warn("meta: can't read container list", zap.Error(err)) + s.log.Warn(logs.ShardMetaCantReadContainerList, zap.Error(err)) return } @@ -370,7 +371,7 @@ func (s *Shard) updateMetrics() { for i := range cnrList { size, err := s.metaBase.ContainerSize(cnrList[i]) if err != nil { - s.log.Warn("meta: can't read container size", + s.log.Warn(logs.ShardMetaCantReadContainerSize, zap.String("cid", cnrList[i].EncodeToString()), zap.Error(err)) continue diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go index 0437367e7..3ca3aa905 100644 --- a/pkg/local_object_storage/writecache/flush.go +++ b/pkg/local_object_storage/writecache/flush.go @@ -5,6 +5,7 @@ import ( "errors" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" @@ -134,7 +135,7 @@ 
func (c *cache) flushDB() { c.modeMtx.RUnlock() - c.log.Debug("tried to flush items from write-cache", + c.log.Debug(logs.WritecacheTriedToFlushItemsFromWritecache, zap.Int("count", count), zap.String("start", base58.Encode(lastKey))) } diff --git a/pkg/local_object_storage/writecache/init.go b/pkg/local_object_storage/writecache/init.go index ffe7a0129..0ac8cea99 100644 --- a/pkg/local_object_storage/writecache/init.go +++ b/pkg/local_object_storage/writecache/init.go @@ -5,6 +5,7 @@ import ( "errors" "sync" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" @@ -54,7 +55,7 @@ func (c *cache) initFlushMarks() { var errStopIter = errors.New("stop iteration") func (c *cache) fsTreeFlushMarkUpdate() { - c.log.Info("filling flush marks for objects in FSTree") + c.log.Info(logs.WritecacheFillingFlushMarksForObjectsInFSTree) var prm common.IteratePrm prm.LazyHandler = func(addr oid.Address, _ func() ([]byte, error)) error { @@ -86,11 +87,11 @@ func (c *cache) fsTreeFlushMarkUpdate() { return nil } _, _ = c.fsTree.Iterate(prm) - c.log.Info("finished updating FSTree flush marks") + c.log.Info(logs.WritecacheFinishedUpdatingFSTreeFlushMarks) } func (c *cache) dbFlushMarkUpdate() { - c.log.Info("filling flush marks for objects in database") + c.log.Info(logs.WritecacheFillingFlushMarksForObjectsInDatabase) var m []string var indices []int @@ -158,7 +159,7 @@ func (c *cache) dbFlushMarkUpdate() { lastKey = append([]byte(m[len(m)-1]), 0) } - c.log.Info("finished updating flush marks") + c.log.Info(logs.WritecacheFinishedUpdatingFlushMarks) } // flushStatus returns info about the object state in the main storage. diff --git a/pkg/local_object_storage/writecache/mode.go b/pkg/local_object_storage/writecache/mode.go index 997310d9e..939dc5b06 100644 --- a/pkg/local_object_storage/writecache/mode.go +++ b/pkg/local_object_storage/writecache/mode.go @@ -4,6 +4,7 @@ import ( "fmt" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" ) @@ -59,7 +60,7 @@ func (c *cache) setMode(m mode.Mode) error { // flushCh is populated by `flush` with `modeMtx` taken, thus waiting until it is empty // guarantees that there are no in-fly operations. 
for len(c.flushCh) != 0 { - c.log.Info("waiting for channels to flush") + c.log.Info(logs.WritecacheWaitingForChannelsToFlush) time.Sleep(time.Second) } diff --git a/pkg/local_object_storage/writecache/storage.go b/pkg/local_object_storage/writecache/storage.go index 667d34cb9..ff7eb1d6a 100644 --- a/pkg/local_object_storage/writecache/storage.go +++ b/pkg/local_object_storage/writecache/storage.go @@ -5,6 +5,7 @@ import ( "fmt" "os" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log" @@ -124,7 +125,7 @@ func (c *cache) deleteFromDB(keys []string) []string { ) } if err != nil { - c.log.Error("can't remove objects from the database", zap.Error(err)) + c.log.Error(logs.WritecacheCantRemoveObjectsFromTheDatabase, zap.Error(err)) } copy(keys, keys[errorIndex:]) @@ -141,13 +142,13 @@ func (c *cache) deleteFromDisk(keys []string) []string { for i := range keys { if err := addr.DecodeString(keys[i]); err != nil { - c.log.Error("can't parse address", zap.String("address", keys[i])) + c.log.Error(logs.WritecacheCantParseAddress, zap.String("address", keys[i])) continue } _, err := c.fsTree.Delete(common.DeletePrm{Address: addr}) if err != nil && !errors.As(err, new(apistatus.ObjectNotFound)) { - c.log.Error("can't remove object from write-cache", zap.Error(err)) + c.log.Error(logs.WritecacheCantRemoveObjectFromWritecache, zap.Error(err)) // Save the key for the next iteration. keys[copyIndex] = keys[i] diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go index 5e98211c4..1c33fa5e0 100644 --- a/pkg/morph/client/client.go +++ b/pkg/morph/client/client.go @@ -8,6 +8,7 @@ import ( "sync" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" lru "github.com/hashicorp/golang-lru/v2" "github.com/nspcc-dev/neo-go/pkg/core/native/noderoles" @@ -186,7 +187,7 @@ func (c *Client) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, return fmt.Errorf("could not invoke %s: %w", method, err) } - c.logger.Debug("neo client invoke", + c.logger.Debug(logs.ClientNeoClientInvoke, zap.String("method", method), zap.Uint32("vub", vub), zap.Stringer("tx_hash", txHash.Reverse())) @@ -269,7 +270,7 @@ func (c *Client) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error return err } - c.logger.Debug("native gas transfer invoke", + c.logger.Debug(logs.ClientNativeGasTransferInvoke, zap.String("to", receiver.StringLE()), zap.Stringer("tx_hash", txHash.Reverse()), zap.Uint32("vub", vub)) @@ -303,7 +304,7 @@ func (c *Client) BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8 return err } - c.logger.Debug("batch gas transfer invoke", + c.logger.Debug(logs.ClientBatchGasTransferInvoke, zap.Strings("to", receiversLog), zap.Stringer("tx_hash", txHash.Reverse()), zap.Uint32("vub", vub)) @@ -330,7 +331,7 @@ func (c *Client) Wait(ctx context.Context, n uint32) error { height, err = c.rpcActor.GetBlockCount() if err != nil { - c.logger.Error("can't get blockchain height", + c.logger.Error(logs.ClientCantGetBlockchainHeight, zap.String("error", err.Error())) return nil } @@ -344,7 +345,7 @@ func (c *Client) Wait(ctx context.Context, n uint32) error { newHeight, err = c.rpcActor.GetBlockCount() if err != nil { - c.logger.Error("can't get 
blockchain height", + c.logger.Error(logs.ClientCantGetBlockchainHeight243, zap.String("error", err.Error())) return nil } diff --git a/pkg/morph/client/multi.go b/pkg/morph/client/multi.go index 5d736839a..fab90b446 100644 --- a/pkg/morph/client/multi.go +++ b/pkg/morph/client/multi.go @@ -5,6 +5,7 @@ import ( "sort" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "github.com/nspcc-dev/neo-go/pkg/core/block" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/neorpc" @@ -44,7 +45,7 @@ func (c *Client) switchRPC(ctx context.Context) bool { newEndpoint := c.endpoints.list[c.endpoints.curr].Address cli, act, err := c.newCli(ctx, newEndpoint) if err != nil { - c.logger.Warn("could not establish connection to the switched RPC node", + c.logger.Warn(logs.ClientCouldNotEstablishConnectionToTheSwitchedRPCNode, zap.String("endpoint", newEndpoint), zap.Error(err), ) @@ -54,7 +55,7 @@ func (c *Client) switchRPC(ctx context.Context) bool { c.cache.invalidate() - c.logger.Info("connection to the new RPC node has been established", + c.logger.Info(logs.ClientConnectionToTheNewRPCNodeHasBeenEstablished, zap.String("endpoint", newEndpoint)) subs, ok := c.restoreSubscriptions(ctx, cli, newEndpoint, false) @@ -147,7 +148,7 @@ func (c *Client) routeEvent(ctx context.Context, e any) { func (c *Client) reconnect(ctx context.Context) bool { if closeErr := c.client.GetError(); closeErr != nil { - c.logger.Warn("switching to the next RPC node", + c.logger.Warn(logs.ClientSwitchingToTheNextRPCNode, zap.String("reason", closeErr.Error()), ) } else { @@ -158,7 +159,7 @@ func (c *Client) reconnect(ctx context.Context) bool { } if !c.switchRPC(ctx) { - c.logger.Error("could not establish connection to any RPC node") + c.logger.Error(logs.ClientCouldNotEstablishConnectionToAnyRPCNode) // could not connect to all endpoints => // switch client to inactive mode @@ -210,7 +211,7 @@ mainLoop: cli, act, err := c.newCli(ctx, tryE) if err != nil { - c.logger.Warn("could not create client to the higher priority node", + c.logger.Warn(logs.ClientCouldNotCreateClientToTheHigherPriorityNode, zap.String("endpoint", tryE), zap.Error(err), ) @@ -237,13 +238,13 @@ mainLoop: c.switchLock.Unlock() - c.logger.Info("switched to the higher priority RPC", + c.logger.Info(logs.ClientSwitchedToTheHigherPriorityRPC, zap.String("endpoint", tryE)) return } - c.logger.Warn("could not restore side chain subscriptions using node", + c.logger.Warn(logs.ClientCouldNotRestoreSideChainSubscriptionsUsingNode, zap.String("endpoint", tryE), zap.Error(err), ) diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go index 7399c19cd..427554372 100644 --- a/pkg/morph/client/notary.go +++ b/pkg/morph/client/notary.go @@ -8,6 +8,7 @@ import ( "math/big" "strings" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand" "github.com/nspcc-dev/neo-go/pkg/core/native/nativenames" "github.com/nspcc-dev/neo-go/pkg/core/native/noderoles" @@ -203,7 +204,7 @@ func (c *Client) depositNotary(amount fixedn.Fixed8, till int64) (res util.Uint2 // Transaction is already in mempool waiting to be processed. // This is an expected situation if we restart the service. 
- c.logger.Info("notary deposit has already been made", + c.logger.Info(logs.ClientNotaryDepositHasAlreadyBeenMade, zap.Int64("amount", int64(amount)), zap.Int64("expire_at", till), zap.Uint32("vub", vub), @@ -211,7 +212,7 @@ func (c *Client) depositNotary(amount fixedn.Fixed8, till int64) (res util.Uint2 return util.Uint256{}, nil } - c.logger.Info("notary deposit invoke", + c.logger.Info(logs.ClientNotaryDepositInvoke, zap.Int64("amount", int64(amount)), zap.Int64("expire_at", till), zap.Uint32("vub", vub), @@ -430,7 +431,7 @@ func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error { return err } - c.logger.Debug("notary request with prepared main TX invoked", + c.logger.Debug(logs.ClientNotaryRequestWithPreparedMainTXInvoked, zap.Uint32("fallback_valid_for", c.notary.fallbackTime), zap.Stringer("tx_hash", resp.Hash().Reverse())) @@ -489,7 +490,7 @@ func (c *Client) notaryInvoke(committee, invokedByAlpha bool, contract util.Uint return err } - c.logger.Debug("notary request invoked", + c.logger.Debug(logs.ClientNotaryRequestInvoked, zap.String("method", method), zap.Uint32("valid_until_block", until), zap.Uint32("fallback_valid_for", c.notary.fallbackTime), diff --git a/pkg/morph/client/notifications.go b/pkg/morph/client/notifications.go index 300bab825..69eafc659 100644 --- a/pkg/morph/client/notifications.go +++ b/pkg/morph/client/notifications.go @@ -3,6 +3,7 @@ package client import ( "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "github.com/nspcc-dev/neo-go/pkg/core/block" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/neorpc" @@ -260,7 +261,7 @@ func (c *Client) restoreSubscriptions(ctx context.Context, cli *rpcclient.WSClie if si.subscribedToBlocks { _, err = cli.ReceiveBlocks(nil, blockRcv) if err != nil { - c.logger.Error("could not restore block subscription after RPC switch", + c.logger.Error(logs.ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch, zap.String("endpoint", endpoint), zap.Error(err), ) @@ -274,7 +275,7 @@ func (c *Client) restoreSubscriptions(ctx context.Context, cli *rpcclient.WSClie contract := contract // See https://github.com/nspcc-dev/neo-go/issues/2890 id, err = cli.ReceiveExecutionNotifications(&neorpc.NotificationFilter{Contract: &contract}, notificationRcv) if err != nil { - c.logger.Error("could not restore notification subscription after RPC switch", + c.logger.Error(logs.ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch, zap.String("endpoint", endpoint), zap.Error(err), ) @@ -291,7 +292,7 @@ func (c *Client) restoreSubscriptions(ctx context.Context, cli *rpcclient.WSClie signer := signer // See https://github.com/nspcc-dev/neo-go/issues/2890 id, err = cli.ReceiveNotaryRequests(&neorpc.TxFilter{Signer: &signer}, notaryReqRcv) if err != nil { - c.logger.Error("could not restore notary notification subscription after RPC switch", + c.logger.Error(logs.ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch, zap.String("endpoint", endpoint), zap.Error(err), ) diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go index 3de199328..405165702 100644 --- a/pkg/morph/event/listener.go +++ b/pkg/morph/event/listener.go @@ -6,6 +6,7 @@ import ( "fmt" "sync" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/subscriber" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" @@ -133,7 +134,7 @@ var ( func (l *listener) 
Listen(ctx context.Context) { l.startOnce.Do(func() { if err := l.listen(ctx, nil); err != nil { - l.log.Error("could not start listen to events", + l.log.Error(logs.EventCouldNotStartListenToEvents, zap.String("error", err.Error()), ) } @@ -149,7 +150,7 @@ func (l *listener) Listen(ctx context.Context) { func (l *listener) ListenWithError(ctx context.Context, intError chan<- error) { l.startOnce.Do(func() { if err := l.listen(ctx, intError); err != nil { - l.log.Error("could not start listen to events", + l.log.Error(logs.EventCouldNotStartListenToEvents, zap.String("error", err.Error()), ) intError <- err @@ -221,53 +222,53 @@ loop: if intErr != nil { intErr <- err } else { - l.log.Error("stop event listener by error", zap.Error(err)) + l.log.Error(logs.EventStopEventListenerByError, zap.Error(err)) } break loop case <-ctx.Done(): - l.log.Info("stop event listener by context", + l.log.Info(logs.EventStopEventListenerByContext, zap.String("reason", ctx.Err().Error()), ) break loop case notifyEvent, ok := <-chs.NotificationsCh: if !ok { - l.log.Warn("stop event listener by notification channel") + l.log.Warn(logs.EventStopEventListenerByNotificationChannel) if intErr != nil { intErr <- errors.New("event subscriber connection has been terminated") } break loop } else if notifyEvent == nil { - l.log.Warn("nil notification event was caught") + l.log.Warn(logs.EventNilNotificationEventWasCaught) continue loop } l.handleNotifyEvent(notifyEvent) case notaryEvent, ok := <-chs.NotaryRequestsCh: if !ok { - l.log.Warn("stop event listener by notary channel") + l.log.Warn(logs.EventStopEventListenerByNotaryChannel) if intErr != nil { intErr <- errors.New("notary event subscriber connection has been terminated") } break loop } else if notaryEvent == nil { - l.log.Warn("nil notary event was caught") + l.log.Warn(logs.EventNilNotaryEventWasCaught) continue loop } l.handleNotaryEvent(notaryEvent) case b, ok := <-chs.BlockCh: if !ok { - l.log.Warn("stop event listener by block channel") + l.log.Warn(logs.EventStopEventListenerByBlockChannel) if intErr != nil { intErr <- errors.New("new block notification channel is closed") } break loop } else if b == nil { - l.log.Warn("nil block was caught") + l.log.Warn(logs.EventNilBlockWasCaught) continue loop } @@ -282,7 +283,7 @@ func (l *listener) handleBlockEvent(b *block.Block) { l.blockHandlers[i](b) } }); err != nil { - l.log.Warn("listener worker pool drained", + l.log.Warn(logs.EventListenerWorkerPoolDrained, zap.Int("capacity", l.pool.Cap())) } } @@ -291,7 +292,7 @@ func (l *listener) handleNotaryEvent(notaryEvent *result.NotaryRequestEvent) { if err := l.pool.Submit(func() { l.parseAndHandleNotary(notaryEvent) }); err != nil { - l.log.Warn("listener worker pool drained", + l.log.Warn(logs.EventListenerWorkerPoolDrained, zap.Int("capacity", l.pool.Cap())) } } @@ -300,7 +301,7 @@ func (l *listener) handleNotifyEvent(notifyEvent *state.ContainedNotificationEve if err := l.pool.Submit(func() { l.parseAndHandleNotification(notifyEvent) }); err != nil { - l.log.Warn("listener worker pool drained", + l.log.Warn(logs.EventListenerWorkerPoolDrained, zap.Int("capacity", l.pool.Cap())) } } @@ -327,7 +328,7 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi l.mtx.RUnlock() if !ok { - log.Debug("event parser not set") + log.Debug(logs.EventEventParserNotSet) return } @@ -335,7 +336,7 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi // parse the notification event event, err := parser(notifyEvent) if 
err != nil { - log.Warn("could not parse notification event", + log.Warn(logs.EventCouldNotParseNotificationEvent, zap.String("error", err.Error()), ) @@ -348,7 +349,7 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi l.mtx.RUnlock() if len(handlers) == 0 { - log.Info("notification handlers for parsed notification event were not registered", + log.Info(logs.EventNotificationHandlersForParsedNotificationEventWereNotRegistered, zap.Any("event", event), ) @@ -367,11 +368,11 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) { switch { case errors.Is(err, ErrTXAlreadyHandled): case errors.Is(err, ErrMainTXExpired): - l.log.Warn("skip expired main TX notary event", + l.log.Warn(logs.EventSkipExpiredMainTXNotaryEvent, zap.String("error", err.Error()), ) default: - l.log.Warn("could not prepare and validate notary event", + l.log.Warn(logs.EventCouldNotPrepareAndValidateNotaryEvent, zap.String("error", err.Error()), ) } @@ -395,7 +396,7 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) { l.mtx.RUnlock() if !ok { - log.Debug("notary parser not set") + log.Debug(logs.EventNotaryParserNotSet) return } @@ -403,7 +404,7 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) { // parse the notary event event, err := parser(notaryEvent) if err != nil { - log.Warn("could not parse notary event", + log.Warn(logs.EventCouldNotParseNotaryEvent, zap.String("error", err.Error()), ) @@ -416,7 +417,7 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) { l.mtx.RUnlock() if !ok { - log.Info("notary handlers for parsed notification event were not registered", + log.Info(logs.EventNotaryHandlersForParsedNotificationEventWereNotRegistered, zap.Any("event", event), ) @@ -438,7 +439,7 @@ func (l *listener) SetNotificationParser(pi NotificationParserInfo) { parser := pi.parser() if parser == nil { - log.Info("ignore nil event parser") + log.Info(logs.EventIgnoreNilEventParser) return } @@ -447,7 +448,7 @@ func (l *listener) SetNotificationParser(pi NotificationParserInfo) { // check if the listener was started if l.started { - log.Warn("listener has been already started, ignore parser") + log.Warn(logs.EventListenerHasBeenAlreadyStartedIgnoreParser) return } @@ -456,7 +457,7 @@ func (l *listener) SetNotificationParser(pi NotificationParserInfo) { l.notificationParsers[pi.scriptHashWithType] = pi.parser() } - log.Debug("registered new event parser") + log.Debug(logs.EventRegisteredNewEventParser) } // RegisterNotificationHandler registers the handler for particular notification event of contract. @@ -471,7 +472,7 @@ func (l *listener) RegisterNotificationHandler(hi NotificationHandlerInfo) { handler := hi.Handler() if handler == nil { - log.Warn("ignore nil event handler") + log.Warn(logs.EventIgnoreNilEventHandler) return } @@ -481,7 +482,7 @@ func (l *listener) RegisterNotificationHandler(hi NotificationHandlerInfo) { l.mtx.RUnlock() if !ok { - log.Warn("ignore handler of event w/o parser") + log.Warn(logs.EventIgnoreHandlerOfEventWoParser) return } @@ -493,7 +494,7 @@ func (l *listener) RegisterNotificationHandler(hi NotificationHandlerInfo) { ) l.mtx.Unlock() - log.Debug("registered new event handler") + log.Debug(logs.EventRegisteredNewEventHandler) } // EnableNotarySupport enables notary request listening. 
Passed hash is @@ -534,7 +535,7 @@ func (l *listener) SetNotaryParser(pi NotaryParserInfo) { parser := pi.parser() if parser == nil { - log.Info("ignore nil notary event parser") + log.Info(logs.EventIgnoreNilNotaryEventParser) return } @@ -543,7 +544,7 @@ func (l *listener) SetNotaryParser(pi NotaryParserInfo) { // check if the listener was started if l.started { - log.Warn("listener has been already started, ignore notary parser") + log.Warn(logs.EventListenerHasBeenAlreadyStartedIgnoreNotaryParser) return } @@ -552,7 +553,7 @@ func (l *listener) SetNotaryParser(pi NotaryParserInfo) { l.notaryParsers[pi.notaryRequestTypes] = pi.parser() } - log.Info("registered new event parser") + log.Info(logs.EventRegisteredNewEventParser) } // RegisterNotaryHandler registers the handler for particular notification notary request event. @@ -572,7 +573,7 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) { handler := hi.Handler() if handler == nil { - log.Warn("ignore nil notary event handler") + log.Warn(logs.EventIgnoreNilNotaryEventHandler) return } @@ -582,7 +583,7 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) { l.mtx.RUnlock() if !ok { - log.Warn("ignore handler of notary event w/o parser") + log.Warn(logs.EventIgnoreHandlerOfNotaryEventWoParser) return } @@ -591,7 +592,7 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) { l.notaryHandlers[hi.notaryRequestTypes] = hi.Handler() l.mtx.Unlock() - log.Info("registered new event handler") + log.Info(logs.EventRegisteredNewEventHandler) } // Stop closes subscription channel with remote neo node. @@ -603,7 +604,7 @@ func (l *listener) Stop() { func (l *listener) RegisterBlockHandler(handler BlockHandler) { if handler == nil { - l.log.Warn("ignore nil block handler") + l.log.Warn(logs.EventIgnoreNilBlockHandler) return } diff --git a/pkg/morph/event/utils.go b/pkg/morph/event/utils.go index 355fd5b4d..2a7c6250d 100644 --- a/pkg/morph/event/utils.go +++ b/pkg/morph/event/utils.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" util2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" @@ -89,7 +90,7 @@ func WorkerPoolHandler(w util2.WorkerPool, h Handler, log *logger.Logger) Handle }) if err != nil { - log.Warn("could not Submit handler to worker pool", + log.Warn(logs.EventCouldNotSubmitHandlerToWorkerPool, zap.String("error", err.Error()), ) } diff --git a/pkg/morph/subscriber/subscriber.go b/pkg/morph/subscriber/subscriber.go index 17bed5b2d..a2e1c32eb 100644 --- a/pkg/morph/subscriber/subscriber.go +++ b/pkg/morph/subscriber/subscriber.go @@ -6,6 +6,7 @@ import ( "fmt" "sync" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "github.com/nspcc-dev/neo-go/pkg/core/block" @@ -99,7 +100,7 @@ func (s *subscriber) SubscribeForNotification(contracts ...util.Uint160) error { func (s *subscriber) UnsubscribeForNotification() { err := s.client.UnsubscribeAll() if err != nil { - s.log.Error("unsubscribe for notification", + s.log.Error(logs.SubscriberUnsubscribeForNotification, zap.Error(err)) } } @@ -133,7 +134,7 @@ func (s *subscriber) routeNotifications(ctx context.Context) { return case notification, ok := <-notificationChan: if !ok { - s.log.Warn("remote notification channel has been closed") + 
s.log.Warn(logs.SubscriberRemoteNotificationChannelHasBeenClosed) close(s.notifyChan) close(s.blockChan) close(s.notaryChan) @@ -145,13 +146,13 @@ func (s *subscriber) routeNotifications(ctx context.Context) { case neorpc.NotificationEventID: notifyEvent, ok := notification.Value.(*state.ContainedNotificationEvent) if !ok { - s.log.Error("can't cast notify event value to the notify struct", + s.log.Error(logs.SubscriberCantCastNotifyEventValueToTheNotifyStruct, zap.String("received type", fmt.Sprintf("%T", notification.Value)), ) continue } - s.log.Debug("new notification event from sidechain", + s.log.Debug(logs.SubscriberNewNotificationEventFromSidechain, zap.String("name", notifyEvent.Name), ) @@ -159,7 +160,7 @@ func (s *subscriber) routeNotifications(ctx context.Context) { case neorpc.BlockEventID: b, ok := notification.Value.(*block.Block) if !ok { - s.log.Error("can't cast block event value to block", + s.log.Error(logs.SubscriberCantCastBlockEventValueToBlock, zap.String("received type", fmt.Sprintf("%T", notification.Value)), ) continue @@ -169,7 +170,7 @@ func (s *subscriber) routeNotifications(ctx context.Context) { case neorpc.NotaryRequestEventID: notaryRequest, ok := notification.Value.(*result.NotaryRequestEvent) if !ok { - s.log.Error("can't cast notify event value to the notary request struct", + s.log.Error(logs.SubscriberCantCastNotifyEventValueToTheNotaryRequestStruct, zap.String("received type", fmt.Sprintf("%T", notification.Value)), ) continue @@ -177,7 +178,7 @@ func (s *subscriber) routeNotifications(ctx context.Context) { s.notaryChan <- notaryRequest default: - s.log.Debug("unsupported notification from the chain", + s.log.Debug(logs.SubscriberUnsupportedNotificationFromTheChain, zap.Uint8("type", uint8(notification.Type)), ) } diff --git a/pkg/services/audit/auditor/context.go b/pkg/services/audit/auditor/context.go index bf720c330..194c5188a 100644 --- a/pkg/services/audit/auditor/context.go +++ b/pkg/services/audit/auditor/context.go @@ -5,6 +5,7 @@ import ( "sync" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" @@ -197,7 +198,7 @@ func (c *Context) init() { func (c *Context) expired(ctx context.Context) bool { select { case <-ctx.Done(): - c.log.Debug("audit context is done", + c.log.Debug(logs.AuditorAuditContextIsDone, zap.String("error", ctx.Err().Error()), ) @@ -212,10 +213,10 @@ func (c *Context) complete() { } func (c *Context) writeReport() { - c.log.Debug("writing audit report...") + c.log.Debug(logs.AuditorWritingAuditReport) if err := c.task.Reporter().WriteReport(c.report); err != nil { - c.log.Error("could not write audit report") + c.log.Error(logs.AuditorCouldNotWriteAuditReport) } } diff --git a/pkg/services/audit/auditor/pdp.go b/pkg/services/audit/auditor/pdp.go index 8a184eb7e..d5ad0fea4 100644 --- a/pkg/services/audit/auditor/pdp.go +++ b/pkg/services/audit/auditor/pdp.go @@ -6,6 +6,7 @@ import ( "sync" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -130,7 +131,7 @@ func (c *Context) collectHashes(ctx context.Context, p *gamePair) { sleepDur = time.Duration(rand.Uint64() % c.maxPDPSleep) } - c.log.Debug("sleep before get range hash", + 
c.log.Debug(logs.AuditorSleepBeforeGetRangeHash, zap.Stringer("interval", sleepDur), ) @@ -140,7 +141,7 @@ func (c *Context) collectHashes(ctx context.Context, p *gamePair) { h, err := c.cnrCom.GetRangeHash(ctx, getRangeHashPrm) if err != nil { - c.log.Debug("could not get payload range hash", + c.log.Debug(logs.AuditorCouldNotGetPayloadRangeHash, zap.Stringer("id", p.id), zap.String("node", netmap.StringifyPublicKey(n)), zap.String("error", err.Error()), diff --git a/pkg/services/audit/auditor/pop.go b/pkg/services/audit/auditor/pop.go index 32b837794..b64004bbc 100644 --- a/pkg/services/audit/auditor/pop.go +++ b/pkg/services/audit/auditor/pop.go @@ -3,6 +3,7 @@ package auditor import ( "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "git.frostfs.info/TrueCloudLab/tzhash/tz" @@ -61,7 +62,7 @@ func (c *Context) processObjectPlacement(ctx context.Context, id oid.ID, nodes [ // try to get object header from node hdr, err := c.cnrCom.GetHeader(ctx, getHeaderPrm) if err != nil { - c.log.Debug("could not get object header from candidate", + c.log.Debug(logs.AuditorCouldNotGetObjectHeaderFromCandidate, zap.Stringer("id", id), zap.String("error", err.Error()), ) @@ -133,7 +134,7 @@ func (c *Context) iterateSGMembersPlacementRand(f func(oid.ID, int, []netmap.Nod // build placement vector for the current object nn, err := c.buildPlacement(id) if err != nil { - c.log.Debug("could not build placement for object", + c.log.Debug(logs.AuditorCouldNotBuildPlacementForObject, zap.Stringer("id", id), zap.String("error", err.Error()), ) diff --git a/pkg/services/audit/auditor/por.go b/pkg/services/audit/auditor/por.go index aebc25c68..d579b3a7c 100644 --- a/pkg/services/audit/auditor/por.go +++ b/pkg/services/audit/auditor/por.go @@ -5,6 +5,7 @@ import ( "context" "sync" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand" containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" @@ -71,7 +72,7 @@ func (c *Context) checkStorageGroupPoR(ctx context.Context, sgID oid.ID, sg stor hdr, err := c.cnrCom.GetHeader(ctx, getHeaderPrm) if err != nil { - c.log.Debug("can't head object", + c.log.Debug(logs.AuditorCantHeadObject, zap.String("remote_node", netmap.StringifyPublicKey(flat[j])), zap.Stringer("oid", members[i]), ) @@ -92,7 +93,7 @@ func (c *Context) checkStorageGroupPoR(ctx context.Context, sgID oid.ID, sg stor cs.Value(), }) if err != nil { - c.log.Debug("can't concatenate tz hash", + c.log.Debug(logs.AuditorCantConcatenateTzHash, zap.String("oid", members[i].String()), zap.String("error", err.Error())) @@ -122,13 +123,13 @@ func (c *Context) writeCheckReport(sizeCheck, tzCheck bool, sgID oid.ID, sg stor c.report.PassedPoR(sgID) } else { if !sizeCheck { - c.log.Debug("storage group size check failed", + c.log.Debug(logs.AuditorStorageGroupSizeCheckFailed, zap.Uint64("expected", sg.ValidationDataSize()), zap.Uint64("got", totalSize)) } if !tzCheck { - c.log.Debug("storage group tz hash check failed") + c.log.Debug(logs.AuditorStorageGroupTzHashCheckFailed) } c.report.FailedPoR(sgID) @@ -138,7 +139,7 @@ func (c *Context) writeCheckReport(sizeCheck, tzCheck bool, sgID oid.ID, sg stor func (c *Context) getShuffledNodes(member oid.ID, sgID oid.ID) ([]netmap.NodeInfo, bool) { objectPlacement, err := 
c.buildPlacement(member) if err != nil { - c.log.Info("can't build placement for storage group member", + c.log.Info(logs.AuditorCantBuildPlacementForStorageGroupMember, zap.Stringer("sg", sgID), zap.String("member_id", member.String()), ) diff --git a/pkg/services/audit/taskmanager/listen.go b/pkg/services/audit/taskmanager/listen.go index a16052e13..bfc37c2a1 100644 --- a/pkg/services/audit/taskmanager/listen.go +++ b/pkg/services/audit/taskmanager/listen.go @@ -3,6 +3,7 @@ package audittask import ( "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit/auditor" "go.uber.org/zap" @@ -12,7 +13,7 @@ import ( // // The listener is terminated by context. func (m *Manager) Listen(ctx context.Context) { - m.log.Info("process routine", + m.log.Info(logs.TaskmanagerProcessRoutine, zap.Uint32("queue_capacity", m.queueCap), ) @@ -21,7 +22,7 @@ func (m *Manager) Listen(ctx context.Context) { for { select { case <-ctx.Done(): - m.log.Warn("stop listener by context", + m.log.Warn(logs.TaskmanagerStopListenerByContext, zap.String("error", ctx.Err().Error()), ) m.workerPool.Release() @@ -29,7 +30,7 @@ func (m *Manager) Listen(ctx context.Context) { return case task, ok := <-m.ch: if !ok { - m.log.Warn("queue channel is closed") + m.log.Warn(logs.TaskmanagerQueueChannelIsClosed) return } @@ -51,7 +52,7 @@ func (m *Manager) Listen(ctx context.Context) { func (m *Manager) handleTask(ctx context.Context, task *audit.Task, onCompleted func()) { pdpPool, err := m.pdpPoolGenerator() if err != nil { - m.log.Error("could not generate PDP worker pool", + m.log.Error(logs.TaskmanagerCouldNotGeneratePDPWorkerPool, zap.String("error", err.Error()), ) onCompleted() @@ -60,7 +61,7 @@ func (m *Manager) handleTask(ctx context.Context, task *audit.Task, onCompleted porPool, err := m.pdpPoolGenerator() if err != nil { - m.log.Error("could not generate PoR worker pool", + m.log.Error(logs.TaskmanagerCouldNotGeneratePoRWorkerPool, zap.String("error", err.Error()), ) onCompleted() @@ -73,7 +74,7 @@ func (m *Manager) handleTask(ctx context.Context, task *audit.Task, onCompleted if err := m.workerPool.Submit(func() { auditContext.Execute(ctx, onCompleted) }); err != nil { // may be we should report it - m.log.Warn("could not submit audit task") + m.log.Warn(logs.TaskmanagerCouldNotSubmitAuditTask) onCompleted() } } diff --git a/pkg/services/container/announcement/load/controller/calls.go b/pkg/services/container/announcement/load/controller/calls.go index f5d5d1a3d..e1ed6e496 100644 --- a/pkg/services/container/announcement/load/controller/calls.go +++ b/pkg/services/container/announcement/load/controller/calls.go @@ -3,6 +3,7 @@ package loadcontroller import ( "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" "go.uber.org/zap" @@ -52,7 +53,7 @@ func (c *Controller) Start(ctx context.Context, prm StartPrm) { } func (c *announcer) announce(ctx context.Context) { - c.log.Debug("starting to announce the values of the metrics") + c.log.Debug(logs.ControllerStartingToAnnounceTheValuesOfTheMetrics) var ( metricsIterator Iterator @@ -62,7 +63,7 @@ func (c *announcer) announce(ctx context.Context) { // initialize iterator over locally collected metrics metricsIterator, err = c.ctrl.prm.LocalMetrics.InitIterator() if err != nil { - c.log.Debug("could not 
initialize iterator over locally collected metrics", + c.log.Debug(logs.ControllerCouldNotInitializeIteratorOverLocallyCollectedMetrics, zap.String("error", err.Error()), ) @@ -72,7 +73,7 @@ func (c *announcer) announce(ctx context.Context) { // initialize target of local announcements targetWriter, err := c.ctrl.prm.LocalAnnouncementTarget.InitWriter(nil) if err != nil { - c.log.Debug("could not initialize announcement accumulator", + c.log.Debug(logs.ControllerCouldNotInitializeAnnouncementAccumulator, zap.String("error", err.Error()), ) @@ -90,7 +91,7 @@ func (c *announcer) announce(ctx context.Context) { }, ) if err != nil { - c.log.Debug("iterator over locally collected metrics aborted", + c.log.Debug(logs.ControllerIteratorOverLocallyCollectedMetricsAborted, zap.String("error", err.Error()), ) @@ -100,14 +101,14 @@ func (c *announcer) announce(ctx context.Context) { // finish writing err = targetWriter.Close(ctx) if err != nil { - c.log.Debug("could not finish writing local announcements", + c.log.Debug(logs.ControllerCouldNotFinishWritingLocalAnnouncements, zap.String("error", err.Error()), ) return } - c.log.Debug("trust announcement successfully finished") + c.log.Debug(logs.ControllerTrustAnnouncementSuccessfullyFinished) } func (c *Controller) acquireAnnouncement(ctx context.Context, prm StartPrm) (context.Context, *announcer) { @@ -127,7 +128,7 @@ func (c *Controller) acquireAnnouncement(ctx context.Context, prm StartPrm) (con )} if started { - log.Debug("announcement is already started") + log.Debug(logs.ControllerAnnouncementIsAlreadyStarted) return ctx, nil } @@ -159,9 +160,9 @@ func (c *commonContext) freeAnnouncement() { c.ctrl.announceMtx.Unlock() if stopped { - c.log.Debug("announcement successfully interrupted") + c.log.Debug(logs.ControllerAnnouncementSuccessfullyInterrupted) } else { - c.log.Debug("announcement is not started or already interrupted") + c.log.Debug(logs.ControllerAnnouncementIsNotStartedOrAlreadyInterrupted) } } @@ -219,7 +220,7 @@ func (c *Controller) acquireReport(ctx context.Context, prm StopPrm) (context.Co )} if started { - log.Debug("report is already started") + log.Debug(logs.ControllerReportIsAlreadyStarted) return ctx, nil } @@ -251,9 +252,9 @@ func (c *commonContext) freeReport() { c.ctrl.reportMtx.Unlock() if stopped { - c.log.Debug("announcement successfully interrupted") + c.log.Debug(logs.ControllerAnnouncementSuccessfullyInterrupted) } else { - c.log.Debug("announcement is not started or already interrupted") + c.log.Debug(logs.ControllerAnnouncementIsNotStartedOrAlreadyInterrupted) } } @@ -266,7 +267,7 @@ func (c *reporter) report(ctx context.Context) { // initialize iterator over locally accumulated announcements localIterator, err = c.ctrl.prm.AnnouncementAccumulator.InitIterator() if err != nil { - c.log.Debug("could not initialize iterator over locally accumulated announcements", + c.log.Debug(logs.ControllerCouldNotInitializeIteratorOverLocallyAccumulatedAnnouncements, zap.String("error", err.Error()), ) @@ -276,7 +277,7 @@ func (c *reporter) report(ctx context.Context) { // initialize final destination of load estimations resultWriter, err := c.ctrl.prm.ResultReceiver.InitWriter(nil) if err != nil { - c.log.Debug("could not initialize result target", + c.log.Debug(logs.ControllerCouldNotInitializeResultTarget, zap.String("error", err.Error()), ) @@ -289,7 +290,7 @@ func (c *reporter) report(ctx context.Context) { resultWriter.Put, ) if err != nil { - c.log.Debug("iterator over local announcements aborted", + 
c.log.Debug(logs.ControllerIteratorOverLocalAnnouncementsAborted, zap.String("error", err.Error()), ) @@ -299,7 +300,7 @@ func (c *reporter) report(ctx context.Context) { // finish writing err = resultWriter.Close(ctx) if err != nil { - c.log.Debug("could not finish writing load estimations", + c.log.Debug(logs.ControllerCouldNotFinishWritingLoadEstimations, zap.String("error", err.Error()), ) } diff --git a/pkg/services/container/announcement/load/route/calls.go b/pkg/services/container/announcement/load/route/calls.go index 83c368f57..9a483aed0 100644 --- a/pkg/services/container/announcement/load/route/calls.go +++ b/pkg/services/container/announcement/load/route/calls.go @@ -5,6 +5,7 @@ import ( "encoding/hex" "sync" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" loadcontroller "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/announcement/load/controller" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" "go.uber.org/zap" @@ -97,7 +98,7 @@ func (w *loadWriter) Put(a container.SizeEstimation) error { if !ok { provider, err := w.router.remoteProvider.InitRemote(remoteInfo) if err != nil { - w.router.log.Debug("could not initialize writer provider", + w.router.log.Debug(logs.RouteCouldNotInitializeWriterProvider, zap.String("error", err.Error()), ) @@ -106,7 +107,7 @@ func (w *loadWriter) Put(a container.SizeEstimation) error { remoteWriter, err = provider.InitWriter(w.route) if err != nil { - w.router.log.Debug("could not initialize writer", + w.router.log.Debug(logs.RouteCouldNotInitializeWriter, zap.String("error", err.Error()), ) @@ -118,7 +119,7 @@ func (w *loadWriter) Put(a container.SizeEstimation) error { err := remoteWriter.Put(a) if err != nil { - w.router.log.Debug("could not put the value", + w.router.log.Debug(logs.RouteCouldNotPutTheValue, zap.String("error", err.Error()), ) } @@ -133,7 +134,7 @@ func (w *loadWriter) Close(ctx context.Context) error { for key, wRemote := range w.mServers { err := wRemote.Close(ctx) if err != nil { - w.router.log.Debug("could not close remote server writer", + w.router.log.Debug(logs.RouteCouldNotCloseRemoteServerWriter, zap.String("key", key), zap.String("error", err.Error()), ) diff --git a/pkg/services/notificator/nats/service.go b/pkg/services/notificator/nats/service.go index 54eb373ec..6a7e80a53 100644 --- a/pkg/services/notificator/nats/service.go +++ b/pkg/services/notificator/nats/service.go @@ -6,6 +6,7 @@ import ( "fmt" "sync" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "github.com/nats-io/nats.go" @@ -98,10 +99,10 @@ func New(oo ...Option) *Writer { w.opts.nOpts = append(w.opts.nOpts, nats.NoCallbacksAfterClientClose(), // do not call callbacks when it was planned writer stop nats.DisconnectErrHandler(func(conn *nats.Conn, err error) { - w.log.Error("nats: connection was lost", zap.Error(err)) + w.log.Error(logs.NatsNatsConnectionWasLost, zap.Error(err)) }), nats.ReconnectHandler(func(conn *nats.Conn) { - w.log.Warn("nats: reconnected to the server") + w.log.Warn(logs.NatsNatsReconnectedToTheServer) }), ) @@ -124,7 +125,7 @@ func (n *Writer) Connect(ctx context.Context, endpoint string) error { go func() { <-ctx.Done() - n.opts.log.Info("nats: closing connection as the context is done") + n.opts.log.Info(logs.NatsNatsClosingConnectionAsTheContextIsDone) nc.Close() }() diff --git a/pkg/services/notificator/service.go b/pkg/services/notificator/service.go 
index 0a8a5d96d..bbf4e4823 100644 --- a/pkg/services/notificator/service.go +++ b/pkg/services/notificator/service.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.uber.org/zap" @@ -74,10 +75,10 @@ func New(prm *Prm) *Notificator { // and passes their addresses to the NotificationWriter. func (n *Notificator) ProcessEpoch(ctx context.Context, epoch uint64) { logger := n.l.With(zap.Uint64("epoch", epoch)) - logger.Debug("notificator: start processing object notifications") + logger.Debug(logs.NotificatorNotificatorStartProcessingObjectNotifications) n.ns.Iterate(ctx, epoch, func(topic string, addr oid.Address) { - n.l.Debug("notificator: processing object notification", + n.l.Debug(logs.NotificatorNotificatorProcessingObjectNotification, zap.String("topic", topic), zap.Stringer("address", addr), ) diff --git a/pkg/services/object/acl/v2/classifier.go b/pkg/services/object/acl/v2/classifier.go index 2bf5a3958..cdc5fb623 100644 --- a/pkg/services/object/acl/v2/classifier.go +++ b/pkg/services/object/acl/v2/classifier.go @@ -4,6 +4,7 @@ import ( "bytes" "crypto/sha256" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" @@ -48,7 +49,7 @@ func (c senderClassifier) classify( isInnerRingNode, err := c.isInnerRingKey(ownerKeyInBytes) if err != nil { // do not throw error, try best case matching - c.log.Debug("can't check if request from inner ring", + c.log.Debug(logs.V2CantCheckIfRequestFromInnerRing, zap.String("error", err.Error())) } else if isInnerRingNode { return &classifyResult{ @@ -65,7 +66,7 @@ func (c senderClassifier) classify( // error might happen if request has `RoleOther` key and placement // is not possible for previous epoch, so // do not throw error, try best case matching - c.log.Debug("can't check if request from container node", + c.log.Debug(logs.V2CantCheckIfRequestFromContainerNode, zap.String("error", err.Error())) } else if isContainerNode { return &classifyResult{ diff --git a/pkg/services/object/delete/container.go b/pkg/services/object/delete/container.go index a2f099d5b..3106d8efd 100644 --- a/pkg/services/object/delete/container.go +++ b/pkg/services/object/delete/container.go @@ -1,5 +1,7 @@ package deletesvc +import "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + func (exec *execCtx) executeOnContainer() { - exec.log.Debug("request is not rolled over to the container") + exec.log.Debug(logs.DeleteRequestIsNotRolledOverToTheContainer) } diff --git a/pkg/services/object/delete/delete.go b/pkg/services/object/delete/delete.go index a959b53cb..971f0a6f5 100644 --- a/pkg/services/object/delete/delete.go +++ b/pkg/services/object/delete/delete.go @@ -3,6 +3,7 @@ package deletesvc import ( "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "go.uber.org/zap" ) @@ -34,7 +35,7 @@ func (s *Service) Delete(ctx context.Context, prm Prm) error { } func (exec *execCtx) execute(ctx context.Context) { - exec.log.Debug("serving request...") + exec.log.Debug(logs.DeleteServingRequest) // perform local operation exec.executeLocal(ctx) @@ -46,9 +47,9 @@ func (exec *execCtx) analyzeStatus(execCnr bool) { // analyze 
local result switch exec.status { case statusOK: - exec.log.Debug("operation finished successfully") + exec.log.Debug(logs.DeleteOperationFinishedSuccessfully) default: - exec.log.Debug("operation finished with error", + exec.log.Debug(logs.DeleteOperationFinishedWithError, zap.String("error", exec.err.Error()), ) diff --git a/pkg/services/object/delete/exec.go b/pkg/services/object/delete/exec.go index 782cad71b..91bc6b3d7 100644 --- a/pkg/services/object/delete/exec.go +++ b/pkg/services/object/delete/exec.go @@ -5,6 +5,7 @@ import ( "strconv" objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -83,7 +84,7 @@ func (exec *execCtx) formSplitInfo(ctx context.Context) bool { exec.status = statusUndefined exec.err = err - exec.log.Debug("could not compose split info", + exec.log.Debug(logs.DeleteCouldNotComposeSplitInfo, zap.String("error", err.Error()), ) case err == nil: @@ -96,7 +97,7 @@ func (exec *execCtx) formSplitInfo(ctx context.Context) bool { func (exec *execCtx) collectMembers(ctx context.Context) (ok bool) { if exec.splitInfo == nil { - exec.log.Debug("no split info, object is PHY") + exec.log.Debug(logs.DeleteNoSplitInfoObjectIsPHY) return true } @@ -119,7 +120,7 @@ func (exec *execCtx) collectMembers(ctx context.Context) (ok bool) { func (exec *execCtx) collectChain(ctx context.Context) bool { var chain []oid.ID - exec.log.Debug("assembling chain...") + exec.log.Debug(logs.DeleteAssemblingChain) for prev, withPrev := exec.splitInfo.LastPart(); withPrev; { chain = append(chain, prev) @@ -131,7 +132,7 @@ func (exec *execCtx) collectChain(ctx context.Context) bool { exec.status = statusUndefined exec.err = err - exec.log.Debug("could not get previous split element", + exec.log.Debug(logs.DeleteCouldNotGetPreviousSplitElement, zap.Stringer("id", prev), zap.String("error", err.Error()), ) @@ -154,7 +155,7 @@ func (exec *execCtx) collectChain(ctx context.Context) bool { } func (exec *execCtx) collectChildren(ctx context.Context) bool { - exec.log.Debug("collecting children...") + exec.log.Debug(logs.DeleteCollectingChildren) children, err := exec.svc.header.children(ctx, exec) @@ -163,7 +164,7 @@ func (exec *execCtx) collectChildren(ctx context.Context) bool { exec.status = statusUndefined exec.err = err - exec.log.Debug("could not collect object children", + exec.log.Debug(logs.DeleteCouldNotCollectObjectChildren, zap.String("error", err.Error()), ) @@ -181,7 +182,7 @@ func (exec *execCtx) collectChildren(ctx context.Context) bool { } func (exec *execCtx) supplementBySplitID(ctx context.Context) bool { - exec.log.Debug("supplement by split ID") + exec.log.Debug(logs.DeleteSupplementBySplitID) chain, err := exec.svc.searcher.splitMembers(ctx, exec) @@ -190,7 +191,7 @@ func (exec *execCtx) supplementBySplitID(ctx context.Context) bool { exec.status = statusUndefined exec.err = err - exec.log.Debug("could not search for split chain members", + exec.log.Debug(logs.DeleteCouldNotSearchForSplitChainMembers, zap.String("error", err.Error()), ) @@ -226,7 +227,7 @@ func (exec *execCtx) initTombstoneObject() bool { exec.status = statusUndefined exec.err = err - exec.log.Debug("could not marshal tombstone structure", + exec.log.Debug(logs.DeleteCouldNotMarshalTombstoneStructure, zap.String("error", err.Error()), ) @@ -265,7 
+266,7 @@ func (exec *execCtx) saveTombstone(ctx context.Context) bool { exec.status = statusUndefined exec.err = err - exec.log.Debug("could not save the tombstone", + exec.log.Debug(logs.DeleteCouldNotSaveTheTombstone, zap.String("error", err.Error()), ) diff --git a/pkg/services/object/delete/local.go b/pkg/services/object/delete/local.go index 17eb0e4e1..34839b194 100644 --- a/pkg/services/object/delete/local.go +++ b/pkg/services/object/delete/local.go @@ -3,20 +3,21 @@ package deletesvc import ( "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.uber.org/zap" ) func (exec *execCtx) executeLocal(ctx context.Context) { - exec.log.Debug("forming tombstone structure...") + exec.log.Debug(logs.DeleteFormingTombstoneStructure) ok := exec.formTombstone(ctx) if !ok { return } - exec.log.Debug("tombstone structure successfully formed, saving...") + exec.log.Debug(logs.DeleteTombstoneStructureSuccessfullyFormedSaving) exec.saveTombstone(ctx) } @@ -27,7 +28,7 @@ func (exec *execCtx) formTombstone(ctx context.Context) (ok bool) { exec.status = statusUndefined exec.err = err - exec.log.Debug("could not read tombstone lifetime config", + exec.log.Debug(logs.DeleteCouldNotReadTombstoneLifetimeConfig, zap.String("error", err.Error()), ) @@ -40,14 +41,14 @@ func (exec *execCtx) formTombstone(ctx context.Context) (ok bool) { ) exec.addMembers([]oid.ID{exec.address().Object()}) - exec.log.Debug("forming split info...") + exec.log.Debug(logs.DeleteFormingSplitInfo) ok = exec.formSplitInfo(ctx) if !ok { return } - exec.log.Debug("split info successfully formed, collecting members...") + exec.log.Debug(logs.DeleteSplitInfoSuccessfullyFormedCollectingMembers) exec.tombstone.SetSplitID(exec.splitInfo.SplitID()) @@ -56,7 +57,7 @@ func (exec *execCtx) formTombstone(ctx context.Context) (ok bool) { return } - exec.log.Debug("members successfully collected") + exec.log.Debug(logs.DeleteMembersSuccessfullyCollected) ok = exec.initTombstoneObject() if !ok { diff --git a/pkg/services/object/get/assemble.go b/pkg/services/object/get/assemble.go index db71df6a4..d2108b003 100644 --- a/pkg/services/object/get/assemble.go +++ b/pkg/services/object/get/assemble.go @@ -4,6 +4,7 @@ import ( "context" "errors" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -12,7 +13,7 @@ import ( func (exec *execCtx) assemble(ctx context.Context) { if !exec.canAssemble() { - exec.log.Debug("can not assemble the object") + exec.log.Debug(logs.GetCanNotAssembleTheObject) return } @@ -34,16 +35,16 @@ func (exec *execCtx) assemble(ctx context.Context) { // `execCtx` so it should be disabled there. 
exec.disableForwarding() - exec.log.Debug("trying to assemble the object...") + exec.log.Debug(logs.GetTryingToAssembleTheObject) assembler := newAssembler(exec.address(), exec.splitInfo(), exec.ctxRange(), exec) - exec.log.Debug("assembling splitted object...", + exec.log.Debug(logs.GetAssemblingSplittedObject, zap.Stringer("address", exec.address()), zap.Uint64("range_offset", exec.ctxRange().GetOffset()), zap.Uint64("range_length", exec.ctxRange().GetLength()), ) - defer exec.log.Debug("assembling splitted object completed", + defer exec.log.Debug(logs.GetAssemblingSplittedObjectCompleted, zap.Stringer("address", exec.address()), zap.Uint64("range_offset", exec.ctxRange().GetOffset()), zap.Uint64("range_length", exec.ctxRange().GetLength()), @@ -51,7 +52,7 @@ func (exec *execCtx) assemble(ctx context.Context) { obj, err := assembler.Assemble(ctx, exec.prm.objWriter) if err != nil { - exec.log.Warn("failed to assemble splitted object", + exec.log.Warn(logs.GetFailedToAssembleSplittedObject, zap.Error(err), zap.Stringer("address", exec.address()), zap.Uint64("range_offset", exec.ctxRange().GetOffset()), diff --git a/pkg/services/object/get/container.go b/pkg/services/object/get/container.go index cfb538d38..74d63966e 100644 --- a/pkg/services/object/get/container.go +++ b/pkg/services/object/get/container.go @@ -3,19 +3,20 @@ package getsvc import ( "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "go.uber.org/zap" ) func (exec *execCtx) executeOnContainer(ctx context.Context) { if exec.isLocal() { - exec.log.Debug("return result directly") + exec.log.Debug(logs.GetReturnResultDirectly) return } lookupDepth := exec.netmapLookupDepth() - exec.log.Debug("trying to execute in container...", + exec.log.Debug(logs.GetTryingToExecuteInContainer, zap.Uint64("netmap lookup depth", lookupDepth), ) @@ -43,7 +44,7 @@ func (exec *execCtx) executeOnContainer(ctx context.Context) { } func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool { - exec.log.Debug("process epoch", + exec.log.Debug(logs.GetProcessEpoch, zap.Uint64("number", exec.curProcEpoch), ) @@ -60,7 +61,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool { for { addrs := traverser.Next() if len(addrs) == 0 { - exec.log.Debug("no more nodes, abort placement iteration") + exec.log.Debug(logs.GetNoMoreNodesAbortPlacementIteration) return false } @@ -68,7 +69,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool { for i := range addrs { select { case <-ctx.Done(): - exec.log.Debug("interrupt placement iteration by context", + exec.log.Debug(logs.GetInterruptPlacementIterationByContext, zap.String("error", ctx.Err().Error()), ) @@ -84,7 +85,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool { client.NodeInfoFromNetmapElement(&info, addrs[i]) if exec.processNode(ctx, info) { - exec.log.Debug("completing the operation") + exec.log.Debug(logs.GetCompletingTheOperation) return true } } diff --git a/pkg/services/object/get/exec.go b/pkg/services/object/get/exec.go index 2ba014574..7f090dd50 100644 --- a/pkg/services/object/get/exec.go +++ b/pkg/services/object/get/exec.go @@ -4,6 +4,7 @@ import ( "context" "crypto/ecdsa" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" @@ -149,7 +150,7 @@ func (exec *execCtx) initEpoch() bool { exec.status = statusUndefined exec.err = err - exec.log.Debug("could not get current epoch number", + exec.log.Debug(logs.GetCouldNotGetCurrentEpochNumber, zap.String("error", err.Error()), ) @@ -170,7 +171,7 @@ func (exec *execCtx) generateTraverser(addr oid.Address) (*placement.Traverser, exec.status = statusUndefined exec.err = err - exec.log.Debug("could not generate container traverser", + exec.log.Debug(logs.GetCouldNotGenerateContainerTraverser, zap.String("error", err.Error()), ) @@ -188,7 +189,7 @@ func (exec execCtx) remoteClient(info clientcore.NodeInfo) (getClient, bool) { exec.status = statusUndefined exec.err = err - exec.log.Debug("could not construct remote node client") + exec.log.Debug(logs.GetCouldNotConstructRemoteNodeClient) case err == nil: return c, true } @@ -225,7 +226,7 @@ func (exec *execCtx) writeCollectedHeader(ctx context.Context) bool { exec.status = statusUndefined exec.err = err - exec.log.Debug("could not write header", + exec.log.Debug(logs.GetCouldNotWriteHeader, zap.String("error", err.Error()), ) case err == nil: @@ -248,7 +249,7 @@ func (exec *execCtx) writeObjectPayload(ctx context.Context, obj *objectSDK.Obje exec.status = statusUndefined exec.err = err - exec.log.Debug("could not write payload chunk", + exec.log.Debug(logs.GetCouldNotWritePayloadChunk, zap.String("error", err.Error()), ) case err == nil: diff --git a/pkg/services/object/get/get.go b/pkg/services/object/get/get.go index 0f5983e99..bb0d669da 100644 --- a/pkg/services/object/get/get.go +++ b/pkg/services/object/get/get.go @@ -3,6 +3,7 @@ package getsvc import ( "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "go.uber.org/zap" @@ -83,7 +84,7 @@ func (s *Service) get(ctx context.Context, prm commonPrm, opts ...execOption) st } func (exec *execCtx) execute(ctx context.Context) { - exec.log.Debug("serving request...") + exec.log.Debug(logs.GetServingRequest) // perform local operation exec.executeLocal(ctx) @@ -95,16 +96,16 @@ func (exec *execCtx) analyzeStatus(ctx context.Context, execCnr bool) { // analyze local result switch exec.status { case statusOK: - exec.log.Debug("operation finished successfully") + exec.log.Debug(logs.GetOperationFinishedSuccessfully) case statusINHUMED: - exec.log.Debug("requested object was marked as removed") + exec.log.Debug(logs.GetRequestedObjectWasMarkedAsRemoved) case statusVIRTUAL: - exec.log.Debug("requested object is virtual") + exec.log.Debug(logs.GetRequestedObjectIsVirtual) exec.assemble(ctx) case statusOutOfRange: - exec.log.Debug("requested range is out of object bounds") + exec.log.Debug(logs.GetRequestedRangeIsOutOfObjectBounds) default: - exec.log.Debug("operation finished with error", + exec.log.Debug(logs.GetOperationFinishedWithError, zap.String("error", exec.err.Error()), ) diff --git a/pkg/services/object/get/local.go b/pkg/services/object/get/local.go index 82ed911e4..8ac83d97a 100644 --- a/pkg/services/object/get/local.go +++ b/pkg/services/object/get/local.go @@ -5,6 +5,7 @@ import ( "errors" "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "go.uber.org/zap" @@ 
-29,7 +30,7 @@ func (exec *execCtx) executeLocal(ctx context.Context) { exec.status = statusUndefined exec.err = err - exec.log.Debug("local get failed", + exec.log.Debug(logs.GetLocalGetFailed, zap.String("error", err.Error()), ) case err == nil: diff --git a/pkg/services/object/get/remote.go b/pkg/services/object/get/remote.go index 697e48ee2..f4f74083b 100644 --- a/pkg/services/object/get/remote.go +++ b/pkg/services/object/get/remote.go @@ -5,6 +5,7 @@ import ( "errors" "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -15,7 +16,7 @@ func (exec *execCtx) processNode(ctx context.Context, info client.NodeInfo) bool ctx, span := tracing.StartSpanFromContext(ctx, "getService.processNode") defer span.End() - exec.log.Debug("processing node...") + exec.log.Debug(logs.GetProcessingNode) client, ok := exec.remoteClient(info) if !ok { @@ -35,7 +36,7 @@ func (exec *execCtx) processNode(ctx context.Context, info client.NodeInfo) bool exec.status = statusUndefined exec.err = errNotFound - exec.log.Debug("remote call failed", + exec.log.Debug(logs.GetRemoteCallFailed, zap.String("error", err.Error()), ) case err == nil: diff --git a/pkg/services/object/put/distributed.go b/pkg/services/object/put/distributed.go index d8b59487e..86dc3c2ca 100644 --- a/pkg/services/object/put/distributed.go +++ b/pkg/services/object/put/distributed.go @@ -6,6 +6,7 @@ import ( "sync" "sync/atomic" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" @@ -198,7 +199,7 @@ func (t *distributedTarget) iteratePlacement(ctx context.Context) (*transformer. 
if t.traversal.submitPrimaryPlacementFinish() { _, err = t.iteratePlacement(ctx) if err != nil { - t.log.Error("additional container broadcast failure", zap.Error(err)) + t.log.Error(logs.PutAdditionalContainerBroadcastFailure, zap.Error(err)) // we don't fail primary operation because of broadcast failure } } diff --git a/pkg/services/object/search/container.go b/pkg/services/object/search/container.go index b158bc23e..9df438e00 100644 --- a/pkg/services/object/search/container.go +++ b/pkg/services/object/search/container.go @@ -5,19 +5,20 @@ import ( "encoding/hex" "sync" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "go.uber.org/zap" ) func (exec *execCtx) executeOnContainer(ctx context.Context) { if exec.isLocal() { - exec.log.Debug("return result directly") + exec.log.Debug(logs.SearchReturnResultDirectly) return } lookupDepth := exec.netmapLookupDepth() - exec.log.Debug("trying to execute in container...", + exec.log.Debug(logs.SearchTryingToExecuteInContainer, zap.Uint64("netmap lookup depth", lookupDepth), ) @@ -48,7 +49,7 @@ func (exec *execCtx) executeOnContainer(ctx context.Context) { } func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool { - exec.log.Debug("process epoch", + exec.log.Debug(logs.SearchProcessEpoch, zap.Uint64("number", exec.curProcEpoch), ) @@ -63,7 +64,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool { for { addrs := traverser.Next() if len(addrs) == 0 { - exec.log.Debug("no more nodes, abort placement iteration") + exec.log.Debug(logs.SearchNoMoreNodesAbortPlacementIteration) break } @@ -76,7 +77,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool { defer wg.Done() select { case <-ctx.Done(): - exec.log.Debug("interrupt placement iteration by context", + exec.log.Debug(logs.SearchInterruptPlacementIterationByContext, zap.String("error", ctx.Err().Error())) return default: @@ -86,7 +87,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool { client.NodeInfoFromNetmapElement(&info, addrs[i]) - exec.log.Debug("processing node...", zap.String("key", hex.EncodeToString(addrs[i].PublicKey()))) + exec.log.Debug(logs.SearchProcessingNode, zap.String("key", hex.EncodeToString(addrs[i].PublicKey()))) c, err := exec.svc.clientConstructor.get(info) if err != nil { @@ -95,13 +96,13 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool { exec.err = err mtx.Unlock() - exec.log.Debug("could not construct remote node client") + exec.log.Debug(logs.SearchCouldNotConstructRemoteNodeClient) return } ids, err := c.searchObjects(ctx, exec, info) if err != nil { - exec.log.Debug("remote operation failed", + exec.log.Debug(logs.SearchRemoteOperationFailed, zap.String("error", err.Error())) return diff --git a/pkg/services/object/search/exec.go b/pkg/services/object/search/exec.go index f815270d9..1733d7840 100644 --- a/pkg/services/object/search/exec.go +++ b/pkg/services/object/search/exec.go @@ -1,6 +1,7 @@ package searchsvc import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -80,7 +81,7 @@ func (exec *execCtx) initEpoch() bool { exec.status = statusUndefined exec.err = err - exec.log.Debug("could not get current epoch number", + exec.log.Debug(logs.SearchCouldNotGetCurrentEpochNumber, 
zap.String("error", err.Error()), ) @@ -99,7 +100,7 @@ func (exec *execCtx) generateTraverser(cnr cid.ID) (*placement.Traverser, bool) exec.status = statusUndefined exec.err = err - exec.log.Debug("could not generate container traverser", + exec.log.Debug(logs.SearchCouldNotGenerateContainerTraverser, zap.String("error", err.Error()), ) @@ -118,7 +119,7 @@ func (exec *execCtx) writeIDList(ids []oid.ID) { exec.status = statusUndefined exec.err = err - exec.log.Debug("could not write object identifiers", + exec.log.Debug(logs.SearchCouldNotWriteObjectIdentifiers, zap.String("error", err.Error()), ) case err == nil: diff --git a/pkg/services/object/search/local.go b/pkg/services/object/search/local.go index 1e4776921..f768c8861 100644 --- a/pkg/services/object/search/local.go +++ b/pkg/services/object/search/local.go @@ -1,6 +1,7 @@ package searchsvc import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "go.uber.org/zap" ) @@ -11,7 +12,7 @@ func (exec *execCtx) executeLocal() { exec.status = statusUndefined exec.err = err - exec.log.Debug("local operation failed", + exec.log.Debug(logs.SearchLocalOperationFailed, zap.String("error", err.Error()), ) diff --git a/pkg/services/object/search/search.go b/pkg/services/object/search/search.go index 325b42a54..5bf0710ad 100644 --- a/pkg/services/object/search/search.go +++ b/pkg/services/object/search/search.go @@ -3,6 +3,7 @@ package searchsvc import ( "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "go.uber.org/zap" ) @@ -23,7 +24,7 @@ func (s *Service) Search(ctx context.Context, prm Prm) error { } func (exec *execCtx) execute(ctx context.Context) { - exec.log.Debug("serving request...") + exec.log.Debug(logs.SearchServingRequest) // perform local operation exec.executeLocal() @@ -35,11 +36,11 @@ func (exec *execCtx) analyzeStatus(ctx context.Context, execCnr bool) { // analyze local result switch exec.status { default: - exec.log.Debug("operation finished with error", + exec.log.Debug(logs.SearchOperationFinishedWithError, zap.String("error", exec.err.Error()), ) case statusOK: - exec.log.Debug("operation finished successfully") + exec.log.Debug(logs.SearchOperationFinishedSuccessfully) } if execCnr { diff --git a/pkg/services/object/util/log.go b/pkg/services/object/util/log.go index beda45c0c..92beedaa7 100644 --- a/pkg/services/object/util/log.go +++ b/pkg/services/object/util/log.go @@ -1,6 +1,7 @@ package util import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "go.uber.org/zap" @@ -8,7 +9,7 @@ import ( // LogServiceError writes error message of object service to provided logger. func LogServiceError(l *logger.Logger, req string, node network.AddressGroup, err error) { - l.Error("object service error", + l.Error(logs.UtilObjectServiceError, zap.String("node", network.StringifyGroup(node)), zap.String("request", req), zap.String("error", err.Error()), @@ -17,7 +18,7 @@ func LogServiceError(l *logger.Logger, req string, node network.AddressGroup, er // LogWorkerPoolError writes debug error message of object worker pool to provided logger. 
 func LogWorkerPoolError(l *logger.Logger, req string, err error) {
-	l.Error("could not push task to worker pool",
+	l.Error(logs.UtilCouldNotPushTaskToWorkerPool,
 		zap.String("request", req),
 		zap.String("error", err.Error()),
 	)
diff --git a/pkg/services/object_manager/tombstone/checker.go b/pkg/services/object_manager/tombstone/checker.go
index 4097f22bf..46fcc9840 100644
--- a/pkg/services/object_manager/tombstone/checker.go
+++ b/pkg/services/object_manager/tombstone/checker.go
@@ -5,6 +5,7 @@ import (
 	"strconv"
 
 	objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -57,7 +58,7 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr
 	ts, err := g.tsSource.Tombstone(ctx, a, epoch)
 	if err != nil {
 		log.Warn(
-			"tombstone getter: could not get the tombstone the source",
+			logs.TombstoneCouldNotGetTheTombstoneTheSource,
 			zap.Error(err),
 		)
 	} else {
diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go
index 9cdc4d813..e91b8871b 100644
--- a/pkg/services/policer/check.go
+++ b/pkg/services/policer/check.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
 	objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
@@ -73,7 +74,7 @@ func (p *Policer) processObject(ctx context.Context, addrWithType objectcore.Add
 
 	cnr, err := p.cnrSrc.Get(idCnr)
 	if err != nil {
-		p.log.Error("could not get container",
+		p.log.Error(logs.PolicerCouldNotGetContainer,
 			zap.Stringer("cid", idCnr),
 			zap.String("error", err.Error()),
 		)
@@ -84,7 +85,7 @@ func (p *Policer) processObject(ctx context.Context, addrWithType objectcore.Add
 
 			_, err := p.jobQueue.localStorage.Inhume(ctx, prm)
 			if err != nil {
-				p.log.Error("could not inhume object with missing container",
+				p.log.Error(logs.PolicerCouldNotInhumeObjectWithMissingContainer,
 					zap.Stringer("cid", idCnr),
 					zap.Stringer("oid", idObj),
 					zap.String("error", err.Error()))
@@ -98,7 +99,7 @@ func (p *Policer) processObject(ctx context.Context, addrWithType objectcore.Add
 
 	nn, err := p.placementBuilder.BuildPlacement(idCnr, &idObj, policy)
 	if err != nil {
-		p.log.Error("could not build placement vector for object",
+		p.log.Error(logs.PolicerCouldNotBuildPlacementVectorForObject,
 			zap.Stringer("cid", idCnr),
 			zap.String("error", err.Error()),
 		)
@@ -127,7 +128,7 @@ func (p *Policer) processObject(ctx context.Context, addrWithType objectcore.Add
 	}
 
 	if !c.needLocalCopy && c.removeLocalCopy {
-		p.log.Info("redundant local object copy detected",
+		p.log.Info(logs.PolicerRedundantLocalObjectCopyDetected,
 			zap.Stringer("object", addr),
 		)
 
@@ -199,7 +200,7 @@ func (p *Policer) processNodes(ctx context.Context, requirements *placementRequi
 		if isClientErrMaintenance(err) {
 			shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies)
 		} else if err != nil {
-			p.log.Error("receive object header to check policy compliance",
+			p.log.Error(logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance,
 				zap.Stringer("object", addr),
 				zap.String("error", err.Error()),
 			)
@@ -228,7 +229,7 @@ func (p *Policer) handleMaintenance(node netmap.NodeInfo, checkedNodes *nodeCach
 	shortage--
 	uncheckedCopies++
 
-	p.log.Debug("consider node under maintenance as OK",
+	p.log.Debug(logs.PolicerConsiderNodeUnderMaintenanceAsOK,
 		zap.String("node", netmap.StringifyPublicKey(node)),
 	)
 	return shortage, uncheckedCopies
@@ -237,7 +238,7 @@ func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address,
 	requirements *placementRequirements, nodes []netmap.NodeInfo, checkedNodes *nodeCache, shortage uint32, uncheckedCopies int) {
 	if shortage > 0 {
-		p.log.Debug("shortage of object copies detected",
+		p.log.Debug(logs.PolicerShortageOfObjectCopiesDetected,
 			zap.Stringer("object", addr),
 			zap.Uint32("shortage", shortage),
 		)
@@ -251,7 +252,7 @@ func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address
 	} else if uncheckedCopies > 0 {
 		// If we have more copies than needed, but some of them are from the maintenance nodes,
 		// save the local copy.
-		p.log.Debug("some of the copies are stored on nodes under maintenance, save local copy",
+		p.log.Debug(logs.PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance,
 			zap.Int("count", uncheckedCopies))
 	} else if uncheckedCopies == 0 {
 		// Safe to remove: checked all copies, shortage == 0.
diff --git a/pkg/services/policer/process.go b/pkg/services/policer/process.go
index 687216407..4a40f00ba 100644
--- a/pkg/services/policer/process.go
+++ b/pkg/services/policer/process.go
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"time"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
 	"go.uber.org/zap"
@@ -12,7 +13,7 @@ import (
 
 func (p *Policer) Run(ctx context.Context) {
 	defer func() {
-		p.log.Info("routine stopped")
+		p.log.Info(logs.PolicerRoutineStopped)
 	}()
 
 	go p.poolCapacityWorker(ctx)
@@ -39,7 +40,7 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) {
 				time.Sleep(time.Second) // finished whole cycle, sleep a bit
 				continue
 			}
-			p.log.Warn("failure at object select for replication", zap.Error(err))
+			p.log.Warn(logs.PolicerFailureAtObjectSelectForReplication, zap.Error(err))
 		}
 
 		for i := range addrs {
@@ -68,7 +69,7 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) {
 				p.objsInWork.remove(addr.Address)
 			})
 			if err != nil {
-				p.log.Warn("pool submission", zap.Error(err))
+				p.log.Warn(logs.PolicerPoolSubmission, zap.Error(err))
 			}
 		}
 	}
@@ -91,7 +92,7 @@ func (p *Policer) poolCapacityWorker(ctx context.Context) {
 
 			if p.taskPool.Cap() != newCapacity {
 				p.taskPool.Tune(newCapacity)
-				p.log.Debug("tune replication capacity",
+				p.log.Debug(logs.PolicerTuneReplicationCapacity,
 					zap.Float64("system_load", frostfsSysLoad),
 					zap.Int("new_capacity", newCapacity))
 			}
diff --git a/pkg/services/replicator/process.go b/pkg/services/replicator/process.go
index 53df81b77..46e0c9468 100644
--- a/pkg/services/replicator/process.go
+++ b/pkg/services/replicator/process.go
@@ -3,6 +3,7 @@ package replicator
 import (
 	"context"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
 	putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@@ -20,7 +21,7 @@ type TaskResult interface {
 // Passes all the nodes that accepted the replication to the TaskResult.
 func (p *Replicator) HandleTask(ctx context.Context, task Task, res TaskResult) {
 	defer func() {
-		p.log.Debug("finish work",
+		p.log.Debug(logs.ReplicatorFinishWork,
 			zap.Uint32("amount of unfinished replicas", task.quantity),
 		)
 	}()
@@ -29,7 +30,7 @@ func (p *Replicator) HandleTask(ctx context.Context, task Task, res TaskResult)
 		var err error
 
 		task.obj, err = engine.Get(ctx, p.localStorage, task.addr)
 		if err != nil {
-			p.log.Error("could not get object from local storage",
+			p.log.Error(logs.ReplicatorCouldNotGetObjectFromLocalStorage,
 				zap.Stringer("object", task.addr),
 				zap.Error(err))
@@ -59,11 +60,11 @@ func (p *Replicator) HandleTask(ctx context.Context, task Task, res TaskResult)
 
 		cancel()
 		if err != nil {
-			log.Error("could not replicate object",
+			log.Error(logs.ReplicatorCouldNotReplicateObject,
 				zap.String("error", err.Error()),
 			)
 		} else {
-			log.Debug("object successfully replicated")
+			log.Debug(logs.ReplicatorObjectSuccessfullyReplicated)
 
 			task.quantity--
diff --git a/pkg/services/reputation/common/managers.go b/pkg/services/reputation/common/managers.go
index ef11b8122..84201809f 100644
--- a/pkg/services/reputation/common/managers.go
+++ b/pkg/services/reputation/common/managers.go
@@ -3,6 +3,7 @@ package common
 import (
 	"fmt"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
 	apiNetmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@@ -76,7 +77,7 @@ func (x nodeServer) ExternalAddresses() []string {
 // BuildManagers sorts nodes in NetMap with HRW algorithms and
 // takes the next node after the current one as the only manager.
 func (mb *managerBuilder) BuildManagers(epoch uint64, p apireputation.PeerID) ([]ServerInfo, error) {
-	mb.log.Debug("start building managers",
+	mb.log.Debug(logs.CommonStartBuildingManagers,
 		zap.Uint64("epoch", epoch),
 		zap.Stringer("peer", p),
 	)
diff --git a/pkg/services/reputation/common/router/calls.go b/pkg/services/reputation/common/router/calls.go
index a177f6a2b..4ed293beb 100644
--- a/pkg/services/reputation/common/router/calls.go
+++ b/pkg/services/reputation/common/router/calls.go
@@ -5,6 +5,7 @@ import (
 	"encoding/hex"
 	"sync"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
 	"go.uber.org/zap"
@@ -92,7 +93,7 @@ func (w *trustWriter) Write(ctx context.Context, t reputation.Trust) error {
 	if !ok {
 		provider, err := w.router.remoteProvider.InitRemote(remoteInfo)
 		if err != nil {
-			w.router.log.Debug("could not initialize writer provider",
+			w.router.log.Debug(logs.RouterCouldNotInitializeWriterProvider,
 				zap.String("error", err.Error()),
 			)
 
@@ -102,7 +103,7 @@ func (w *trustWriter) Write(ctx context.Context, t reputation.Trust) error {
 		// init writer with original context wrapped in routeContext
 		remoteWriter, err = provider.InitWriter(w.routeInfo.EpochProvider)
 		if err != nil {
-			w.router.log.Debug("could not initialize writer",
+			w.router.log.Debug(logs.RouterCouldNotInitializeWriter,
 				zap.String("error", err.Error()),
 			)
 
@@ -114,7 +115,7 @@ func (w *trustWriter) Write(ctx context.Context, t reputation.Trust) error {
 
 	err := remoteWriter.Write(ctx, t)
 	if err != nil {
-		w.router.log.Debug("could not write the value",
+		w.router.log.Debug(logs.RouterCouldNotWriteTheValue,
 			zap.String("error", err.Error()),
 		)
 	}
@@ -127,7 +128,7 @@ func (w *trustWriter) Close(ctx context.Context) error {
 	for key, wRemote := range w.mServers {
 		err := wRemote.Close(ctx)
 		if err != nil {
-			w.router.log.Debug("could not close remote server writer",
+			w.router.log.Debug(logs.RouterCouldNotCloseRemoteServerWriter,
 				zap.String("key", key),
 				zap.String("error", err.Error()),
 			)
diff --git a/pkg/services/reputation/eigentrust/calculator/calls.go b/pkg/services/reputation/eigentrust/calculator/calls.go
index a8e5cf1da..5e2e900ae 100644
--- a/pkg/services/reputation/eigentrust/calculator/calls.go
+++ b/pkg/services/reputation/eigentrust/calculator/calls.go
@@ -3,6 +3,7 @@ package eigentrustcalc
 import (
 	"context"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust"
 	apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
@@ -27,7 +28,7 @@ func (c *Calculator) Calculate(ctx context.Context, prm CalculatePrm) {
 	alpha, err := c.prm.AlphaProvider.EigenTrustAlpha()
 	if err != nil {
 		c.opts.log.Debug(
-			"failed to get alpha param",
+			logs.CalculatorFailedToGetAlphaParam,
 			zap.Error(err),
 		)
 		return
@@ -56,7 +57,7 @@ func (c *Calculator) Calculate(ctx context.Context, prm CalculatePrm) {
 
 	consumersIter, err := c.prm.DaughterTrustSource.InitConsumersIterator(epochIteration)
 	if err != nil {
-		log.Debug("consumers trust iterator's init failure",
+		log.Debug(logs.CalculatorConsumersTrustIteratorsInitFailure,
 			zap.String("error", err.Error()),
 		)
 
@@ -76,7 +77,7 @@ func (c *Calculator) Calculate(ctx context.Context, prm CalculatePrm) {
 			})
 		})
 		if err != nil {
-			log.Debug("worker pool submit failure",
+			log.Debug(logs.CalculatorWorkerPoolSubmitFailure,
 				zap.String("error", err.Error()),
 			)
 		}
@@ -85,7 +86,7 @@ func (c *Calculator) Calculate(ctx context.Context, prm CalculatePrm) {
 		return nil
 	})
 	if err != nil {
-		log.Debug("iterate daughter's consumers failed",
+		log.Debug(logs.CalculatorIterateDaughtersConsumersFailed,
 			zap.String("error", err.Error()),
 		)
 	}
@@ -104,7 +105,7 @@ type iterDaughterPrm struct {
 func (c *Calculator) iterateDaughter(ctx context.Context, p iterDaughterPrm) {
 	initTrust, err := c.prm.InitialTrustSource.InitialTrust(p.id)
 	if err != nil {
-		c.opts.log.Debug("get initial trust failure",
+		c.opts.log.Debug(logs.CalculatorGetInitialTrustFailure,
 			zap.Stringer("daughter", p.id),
 			zap.String("error", err.Error()),
 		)
@@ -114,7 +115,7 @@ func (c *Calculator) iterateDaughter(ctx context.Context, p iterDaughterPrm) {
 
 	daughterIter, err := c.prm.DaughterTrustSource.InitDaughterIterator(p.ei, p.id)
 	if err != nil {
-		c.opts.log.Debug("daughter trust iterator's init failure",
+		c.opts.log.Debug(logs.CalculatorDaughterTrustIteratorsInitFailure,
 			zap.String("error", err.Error()),
 		)
 
@@ -136,7 +137,7 @@ func (c *Calculator) iterateDaughter(ctx context.Context, p iterDaughterPrm) {
 		return nil
 	})
 	if err != nil {
-		c.opts.log.Debug("iterate over daughter's trusts failure",
+		c.opts.log.Debug(logs.CalculatorIterateOverDaughtersTrustsFailure,
 			zap.String("error", err.Error()),
 		)
 
@@ -165,7 +166,7 @@ func (c *Calculator) iterateDaughter(ctx context.Context, p iterDaughterPrm) {
 func (c *Calculator) processLastIteration(p iterDaughterPrm, intermediateTrust eigentrust.IterationTrust, sum reputation.TrustValue) {
 	finalWriter, err := c.prm.FinalResultTarget.InitIntermediateWriter(p.ei)
 	if err != nil {
-		c.opts.log.Debug("init writer failure",
+		c.opts.log.Debug(logs.CalculatorInitWriterFailure,
 			zap.String("error", err.Error()),
 		)
 
@@ -176,7 +177,7 @@ func (c *Calculator) processLastIteration(p iterDaughterPrm, intermediateTrust e
 
 	err = finalWriter.WriteIntermediateTrust(intermediateTrust)
 	if err != nil {
-		c.opts.log.Debug("write final result failure",
+		c.opts.log.Debug(logs.CalculatorWriteFinalResultFailure,
 			zap.String("error", err.Error()),
 		)
 
@@ -187,7 +188,7 @@ func (c *Calculator) processLastIteration(p iterDaughterPrm, intermediateTrust e
 func (c *Calculator) processIntermediateIteration(ctx context.Context, p iterDaughterPrm, daughterIter TrustIterator, sum reputation.TrustValue) {
 	intermediateWriter, err := c.prm.IntermediateValueTarget.InitWriter(p.ei)
 	if err != nil {
-		c.opts.log.Debug("init writer failure",
+		c.opts.log.Debug(logs.CalculatorInitWriterFailure,
 			zap.String("error", err.Error()),
 		)
 
@@ -208,7 +209,7 @@ func (c *Calculator) processIntermediateIteration(ctx context.Context, p iterDau
 
 		err := intermediateWriter.Write(ctx, trust)
 		if err != nil {
-			c.opts.log.Debug("write value failure",
+			c.opts.log.Debug(logs.CalculatorWriteValueFailure,
 				zap.String("error", err.Error()),
 			)
 		}
@@ -216,7 +217,7 @@ func (c *Calculator) processIntermediateIteration(ctx context.Context, p iterDau
 		return nil
 	})
 	if err != nil {
-		c.opts.log.Debug("iterate daughter trusts failure",
+		c.opts.log.Debug(logs.CalculatorIterateDaughterTrustsFailure,
 			zap.String("error", err.Error()),
 		)
 	}
@@ -233,7 +234,7 @@ func (c *Calculator) processIntermediateIteration(ctx context.Context, p iterDau
 func (c *Calculator) sendInitialValues(ctx context.Context, epochInfo EpochIterationInfo) {
 	daughterIter, err := c.prm.DaughterTrustSource.InitAllDaughtersIterator(epochInfo)
 	if err != nil {
-		c.opts.log.Debug("all daughters trust iterator's init failure",
+		c.opts.log.Debug(logs.CalculatorAllDaughtersTrustIteratorsInitFailure,
 			zap.String("error", err.Error()),
 		)
 
@@ -242,7 +243,7 @@ func (c *Calculator) sendInitialValues(ctx context.Context, epochInfo EpochItera
 
 	intermediateWriter, err := c.prm.IntermediateValueTarget.InitWriter(epochInfo)
 	if err != nil {
-		c.opts.log.Debug("init writer failure",
+		c.opts.log.Debug(logs.CalculatorInitWriterFailure,
 			zap.String("error", err.Error()),
 		)
 
@@ -255,7 +256,7 @@ func (c *Calculator) sendInitialValues(ctx context.Context, epochInfo EpochItera
 
 		initTrust, err := c.prm.InitialTrustSource.InitialTrust(trusted)
 		if err != nil {
-			c.opts.log.Debug("get initial trust failure",
+			c.opts.log.Debug(logs.CalculatorGetInitialTrustFailure,
 				zap.Stringer("peer", trusted),
 				zap.String("error", err.Error()),
 			)
@@ -269,7 +270,7 @@ func (c *Calculator) sendInitialValues(ctx context.Context, epochInfo EpochItera
 
 		err = intermediateWriter.Write(ctx, trust)
 		if err != nil {
-			c.opts.log.Debug("write value failure",
+			c.opts.log.Debug(logs.CalculatorWriteValueFailure,
 				zap.String("error", err.Error()),
 			)
 
@@ -280,14 +281,14 @@ func (c *Calculator) sendInitialValues(ctx context.Context, epochInfo EpochItera
 		})
 	})
 	if err != nil {
-		c.opts.log.Debug("iterate over all daughters failure",
+		c.opts.log.Debug(logs.CalculatorIterateOverAllDaughtersFailure,
 			zap.String("error", err.Error()),
 		)
 	}
 
 	err = intermediateWriter.Close(ctx)
 	if err != nil {
-		c.opts.log.Debug("could not close writer",
+		c.opts.log.Debug(logs.CalculatorCouldNotCloseWriter,
 			zap.String("error", err.Error()),
 		)
 	}
diff --git a/pkg/services/reputation/eigentrust/controller/calls.go b/pkg/services/reputation/eigentrust/controller/calls.go
index 1753a430b..886daf9be 100644
--- a/pkg/services/reputation/eigentrust/controller/calls.go
+++ b/pkg/services/reputation/eigentrust/controller/calls.go
@@ -3,6 +3,7 @@ package eigentrustctrl
 import (
 	"context"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust"
 	"go.uber.org/zap"
 )
@@ -37,7 +38,7 @@ func (c *Controller) Continue(ctx context.Context, prm ContinuePrm) {
 
 		iterations, err := c.prm.IterationsProvider.EigenTrustIterations()
 		if err != nil {
-			c.opts.log.Error("could not get EigenTrust iteration number",
+			c.opts.log.Error(logs.ControllerCouldNotGetEigenTrustIterationNumber,
 				zap.Error(err),
 			)
 		} else {
@@ -54,7 +55,7 @@ func (c *Controller) Continue(ctx context.Context, prm ContinuePrm) {
 			iterCtx.Increment()
 		})
 		if err != nil {
-			c.opts.log.Debug("iteration submit failure",
+			c.opts.log.Debug(logs.ControllerIterationSubmitFailure,
 				zap.String("error", err.Error()),
 			)
 		}
diff --git a/pkg/services/reputation/eigentrust/routes/calls.go b/pkg/services/reputation/eigentrust/routes/calls.go
index c4d9688a9..ccb2fe8ea 100644
--- a/pkg/services/reputation/eigentrust/routes/calls.go
+++ b/pkg/services/reputation/eigentrust/routes/calls.go
@@ -3,6 +3,7 @@ package routes
 import (
 	"fmt"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
 	"go.uber.org/zap"
@@ -14,7 +15,7 @@ import (
 func (b *Builder) NextStage(epoch uint64, t reputation.Trust, passed []common.ServerInfo) ([]common.ServerInfo, error) {
 	passedLen := len(passed)
 
-	b.log.Debug("building next stage for trust route",
+	b.log.Debug(logs.RoutesBuildingNextStageForTrustRoute,
 		zap.Uint64("epoch", epoch),
 		zap.Int("passed_length", passedLen),
 	)
diff --git a/pkg/services/reputation/local/controller/calls.go b/pkg/services/reputation/local/controller/calls.go
index 80fa772d6..1cad09313 100644
--- a/pkg/services/reputation/local/controller/calls.go
+++ b/pkg/services/reputation/local/controller/calls.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
@@ -77,7 +78,7 @@ func (c *Controller) acquireReporter(ctx context.Context, epoch uint64) (context
 	)}
 
 	if started {
-		log.Debug("report is already started")
+		log.Debug(logs.ControllerReportIsAlreadyStarted)
 		return ctx, nil
 	}
 
@@ -92,12 +93,12 @@ func (c *Controller) acquireReporter(ctx context.Context, epoch uint64) (context
 }
 
 func (c *reporter) report(ctx context.Context) {
-	c.log.Debug("starting to report local trust values")
+	c.log.Debug(logs.ControllerStartingToReportLocalTrustValues)
 
 	// initialize iterator over locally collected values
 	iterator, err := c.ctrl.prm.LocalTrustSource.InitIterator(c.ep)
 	if err != nil {
-		c.log.Debug("could not initialize iterator over local trust values",
+		c.log.Debug(logs.ControllerCouldNotInitializeIteratorOverLocalTrustValues,
 			zap.String("error", err.Error()),
 		)
 
@@ -107,7 +108,7 @@ func (c *reporter) report(ctx context.Context) {
 	// initialize target of local trust values
 	targetWriter, err := c.ctrl.prm.LocalTrustTarget.InitWriter(c.ep)
 	if err != nil {
-		c.log.Debug("could not initialize local trust target",
+		c.log.Debug(logs.ControllerCouldNotInitializeLocalTrustTarget,
 			zap.String("error", err.Error()),
 		)
 
@@ -126,7 +127,7 @@ func (c *reporter) report(ctx context.Context) {
 		},
 	)
 	if err != nil && !errors.Is(err, context.Canceled) {
-		c.log.Debug("iterator over local trust failed",
+		c.log.Debug(logs.ControllerIteratorOverLocalTrustFailed,
 			zap.String("error", err.Error()),
 		)
 
@@ -136,14 +137,14 @@ func (c *reporter) report(ctx context.Context) {
 	// finish writing
 	err = targetWriter.Close(ctx)
 	if err != nil {
-		c.log.Debug("could not finish writing local trust values",
+		c.log.Debug(logs.ControllerCouldNotFinishWritingLocalTrustValues,
 			zap.String("error", err.Error()),
 		)
 
 		return
 	}
 
-	c.log.Debug("reporting successfully finished")
+	c.log.Debug(logs.ControllerReportingSuccessfullyFinished)
 }
 
 func (c *Controller) freeReport(epoch uint64, log *logger.Logger) {
@@ -165,9 +166,9 @@ func (c *Controller) freeReport(epoch uint64, log *logger.Logger) {
 	c.mtx.Unlock()
 
 	if stopped {
-		log.Debug("reporting successfully interrupted")
+		log.Debug(logs.ControllerReportingSuccessfullyInterrupted)
 	} else {
-		log.Debug("reporting is not started or already interrupted")
+		log.Debug(logs.ControllerReportingIsNotStartedOrAlreadyInterrupted)
 	}
 }
diff --git a/pkg/services/reputation/local/routes/calls.go b/pkg/services/reputation/local/routes/calls.go
index f0eae16fe..2f99f0e10 100644
--- a/pkg/services/reputation/local/routes/calls.go
+++ b/pkg/services/reputation/local/routes/calls.go
@@ -3,6 +3,7 @@ package routes
 import (
 	"fmt"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
 	"go.uber.org/zap"
@@ -14,7 +15,7 @@ import (
 func (b *Builder) NextStage(epoch uint64, t reputation.Trust, passed []common.ServerInfo) ([]common.ServerInfo, error) {
 	passedLen := len(passed)
 
-	b.log.Debug("building next stage for local trust route",
+	b.log.Debug(logs.RoutesBuildingNextStageForLocalTrustRoute,
 		zap.Uint64("epoch", epoch),
 		zap.Int("passed_length", passedLen),
 	)
diff --git a/pkg/services/session/executor.go b/pkg/services/session/executor.go
index 237a13962..0be6497be 100644
--- a/pkg/services/session/executor.go
+++ b/pkg/services/session/executor.go
@@ -5,6 +5,7 @@ import (
 	"fmt"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
 	"go.uber.org/zap"
 )
@@ -28,7 +29,7 @@ func NewExecutionService(exec ServiceExecutor, l *logger.Logger) Server {
 }
 
 func (s *executorSvc) Create(ctx context.Context, req *session.CreateRequest) (*session.CreateResponse, error) {
-	s.log.Debug("serving request...",
+	s.log.Debug(logs.SessionServingRequest,
 		zap.String("component", "SessionService"),
 		zap.String("request", "Create"),
 	)
diff --git a/pkg/services/session/storage/persistent/storage.go b/pkg/services/session/storage/persistent/storage.go
index ded33d1ec..25f067d62 100644
--- a/pkg/services/session/storage/persistent/storage.go
+++ b/pkg/services/session/storage/persistent/storage.go
@@ -6,6 +6,7 @@ import (
 	"encoding/hex"
 	"fmt"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
@@ -108,7 +109,7 @@ func (s *TokenStore) Get(ownerID user.ID, tokenID []byte) (t *storage.PrivateTok
 		return nil
 	})
 	if err != nil {
-		s.l.Error("could not get session from persistent storage",
+		s.l.Error(logs.PersistentCouldNotGetSessionFromPersistentStorage,
 			zap.Error(err),
 			zap.Stringer("ownerID", ownerID),
 			zap.String("tokenID", hex.EncodeToString(tokenID)),
@@ -133,7 +134,7 @@ func (s *TokenStore) RemoveOld(epoch uint64) {
 			if epochFromToken(v) <= epoch {
 				err = c.Delete()
 				if err != nil {
-					s.l.Error("could not delete %s token",
+					s.l.Error(logs.PersistentCouldNotDeleteSToken,
 						zap.String("token_id", hex.EncodeToString(k)),
 					)
 				}
@@ -144,7 +145,7 @@ func (s *TokenStore) RemoveOld(epoch uint64) {
 		})
 	})
 	if err != nil {
-		s.l.Error("could not clean up expired tokens",
+		s.l.Error(logs.PersistentCouldNotCleanUpExpiredTokens,
 			zap.Uint64("epoch", epoch),
 		)
 	}
diff --git a/pkg/services/tree/redirect.go b/pkg/services/tree/redirect.go
index 1671d2511..9594514f1 100644
--- a/pkg/services/tree/redirect.go
+++ b/pkg/services/tree/redirect.go
@@ -5,6 +5,7 @@ import (
 	"context"
 	"errors"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 	"go.uber.org/zap"
 )
@@ -29,7 +30,7 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo
 			return false
 		}
 
-		s.log.Debug("redirecting tree service query", zap.String("endpoint", endpoint))
+		s.log.Debug(logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint))
 		called = true
 		stop = f(c)
 		return true
diff --git a/pkg/services/tree/replicator.go b/pkg/services/tree/replicator.go
index bb20310b2..98ed3df39 100644
--- a/pkg/services/tree/replicator.go
+++ b/pkg/services/tree/replicator.go
@@ -8,6 +8,7 @@ import (
 	"fmt"
 	"time"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
 	cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@@ -45,7 +46,7 @@ func (s *Service) localReplicationWorker() {
 		case op := <-s.replicateLocalCh:
 			err := s.forest.TreeApply(op.cid, op.treeID, &op.Move, false)
 			if err != nil {
-				s.log.Error("failed to apply replicated operation",
+				s.log.Error(logs.TreeFailedToApplyReplicatedOperation,
 					zap.String("err", err.Error()))
 			}
 		}
@@ -79,10 +80,10 @@ func (s *Service) replicationWorker(ctx context.Context) {
 
 			if lastErr != nil {
 				if errors.Is(lastErr, errRecentlyFailed) {
-					s.log.Debug("do not send update to the node",
+					s.log.Debug(logs.TreeDoNotSendUpdateToTheNode,
 						zap.String("last_error", lastErr.Error()))
 				} else {
-					s.log.Warn("failed to sent update to the node",
+					s.log.Warn(logs.TreeFailedToSentUpdateToTheNode,
 						zap.String("last_error", lastErr.Error()),
 						zap.String("address", lastAddr),
 						zap.String("key", hex.EncodeToString(task.n.PublicKey())))
@@ -112,7 +113,7 @@ func (s *Service) replicateLoop(ctx context.Context) {
 		case op := <-s.replicateCh:
 			err := s.replicate(op)
 			if err != nil {
-				s.log.Error("error during replication",
+				s.log.Error(logs.TreeErrorDuringReplication,
 					zap.String("err", err.Error()),
 					zap.Stringer("cid", op.cid),
 					zap.String("treeID", op.treeID))
diff --git a/pkg/services/tree/signature.go b/pkg/services/tree/signature.go
index 7a5a95c4c..e861541f4 100644
--- a/pkg/services/tree/signature.go
+++ b/pkg/services/tree/signature.go
@@ -8,6 +8,7 @@ import (
 	"fmt"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
@@ -76,7 +77,7 @@ func (s *Service) verifyClient(req message, cid cidSDK.ID, rawBearer []byte, op
 	var tableFromBearer bool
 	if len(rawBearer) != 0 {
 		if !basicACL.AllowedBearerRules(op) {
-			s.log.Debug("bearer presented but not allowed by ACL",
+			s.log.Debug(logs.TreeBearerPresentedButNotAllowedByACL,
 				zap.String("cid", cid.EncodeToString()),
 				zap.String("op", op.String()),
 			)
diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go
index 47299d1c9..91f43900f 100644
--- a/pkg/services/tree/sync.go
+++ b/pkg/services/tree/sync.go
@@ -11,6 +11,7 @@ import (
 	"sync"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
@@ -86,7 +87,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
 	for _, tid := range treesToSync {
 		h, err := s.forest.TreeLastSyncHeight(cid, tid)
 		if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
-			s.log.Warn("could not get last synchronized height for a tree",
+			s.log.Warn(logs.TreeCouldNotGetLastSynchronizedHeightForATree,
 				zap.Stringer("cid", cid),
 				zap.String("tree", tid))
 			continue
@@ -94,7 +95,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
 		newHeight := s.synchronizeTree(ctx, cid, h, tid, nodes)
 		if h < newHeight {
 			if err := s.forest.TreeUpdateLastSyncHeight(cid, tid, newHeight); err != nil {
-				s.log.Warn("could not update last synchronized height for a tree",
+				s.log.Warn(logs.TreeCouldNotUpdateLastSynchronizedHeightForATree,
 					zap.Stringer("cid", cid),
 					zap.String("tree", tid))
 			}
@@ -126,7 +127,7 @@ func (s *Service) SynchronizeTree(ctx context.Context, cid cid.ID, treeID string
 
 func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
 	treeID string, nodes []netmapSDK.NodeInfo) uint64 {
-	s.log.Debug("synchronize tree",
+	s.log.Debug(logs.TreeSynchronizeTree,
 		zap.Stringer("cid", cid),
 		zap.String("tree", treeID),
 		zap.Uint64("from", from))
@@ -184,7 +185,7 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
 	}
 
 	if err := errGroup.Wait(); err != nil {
-		s.log.Warn("failed to run tree synchronization over all nodes", zap.Error(err))
+		s.log.Warn(logs.TreeFailedToRunTreeSynchronizationOverAllNodes, zap.Error(err))
 	}
 
 	newHeight := uint64(math.MaxUint64)
@@ -283,11 +284,11 @@ func (s *Service) syncLoop(ctx context.Context) {
 		case <-ctx.Done():
 			return
 		case <-s.syncChan:
-			s.log.Debug("syncing trees...")
+			s.log.Debug(logs.TreeSyncingTrees)
 
 			cnrs, err := s.cfg.cnrSource.List()
 			if err != nil {
-				s.log.Error("could not fetch containers", zap.Error(err))
+				s.log.Error(logs.TreeCouldNotFetchContainers, zap.Error(err))
 				continue
 			}
 
@@ -297,7 +298,7 @@ func (s *Service) syncLoop(ctx context.Context) {
 
 			s.removeContainers(ctx, newMap)
 
-			s.log.Debug("trees have been synchronized")
+			s.log.Debug(logs.TreeTreesHaveBeenSynchronized)
 		}
 	}
 }
@@ -310,19 +311,19 @@ func (s *Service) syncContainers(ctx context.Context, cnrs []cid.ID) {
 		cnr := cnr
 		err := s.syncPool.Submit(func() {
 			defer wg.Done()
-			s.log.Debug("syncing container trees...", zap.Stringer("cid", cnr))
+			s.log.Debug(logs.TreeSyncingContainerTrees, zap.Stringer("cid", cnr))
 
 			err := s.synchronizeAllTrees(ctx, cnr)
 			if err != nil {
-				s.log.Error("could not sync trees", zap.Stringer("cid", cnr), zap.Error(err))
+				s.log.Error(logs.TreeCouldNotSyncTrees, zap.Stringer("cid", cnr), zap.Error(err))
 				return
 			}
 
-			s.log.Debug("container trees have been synced", zap.Stringer("cid", cnr))
+			s.log.Debug(logs.TreeContainerTreesHaveBeenSynced, zap.Stringer("cid", cnr))
 		})
 		if err != nil {
 			wg.Done()
-			s.log.Error("could not query trees for synchronization",
+			s.log.Error(logs.TreeCouldNotQueryTreesForSynchronization,
 				zap.Stringer("cid", cnr),
 				zap.Error(err))
 			if errors.Is(err, ants.ErrPoolClosed) {
@@ -349,11 +350,11 @@ func (s *Service) removeContainers(ctx context.Context, newContainers map[cid.ID
 	}
 
 	for _, cnr := range removed {
-		s.log.Debug("removing redundant trees...", zap.Stringer("cid", cnr))
+		s.log.Debug(logs.TreeRemovingRedundantTrees, zap.Stringer("cid", cnr))
 
 		err := s.DropTree(ctx, cnr, "")
 		if err != nil {
-			s.log.Error("could not remove redundant tree",
+			s.log.Error(logs.TreeCouldNotRemoveRedundantTree,
 				zap.Stringer("cid", cnr),
 				zap.Error(err))
 		}
@@ -367,7 +368,7 @@ func (s *Service) containersToSync(cnrs []cid.ID) (map[cid.ID]struct{}, []cid.ID
 	for _, cnr := range cnrs {
 		_, pos, err := s.getContainerNodes(cnr)
 		if err != nil {
-			s.log.Error("could not calculate container nodes",
+			s.log.Error(logs.TreeCouldNotCalculateContainerNodes,
 				zap.Stringer("cid", cnr),
 				zap.Error(err))
 			continue