Compare commits

...

2 commits

bc8ecb89ff  [#240] logs: Fix log consts  (2023-04-13 14:50:40 +03:00)
  Drop duplicate entities.
  Format entities.
  Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>

4b790be5f2  [#240] logs: Move log messages to constants  (2023-04-12 17:58:42 +03:00)
  Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
149 changed files with 1532 additions and 687 deletions
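
Every file diff below applies the same mechanical change: a log message that used to be an inline string literal at the call site becomes a named constant in the new internal/logs package. A minimal self-contained sketch of the pattern, with an illustrative constant and a plain zap logger standing in for the node's logger.Logger wrapper:

package main

import "go.uber.org/zap"

// Stand-in for the repository's internal/logs package: one exported
// string constant per distinct log message, named <Component><Message>.
const commonApplicationStarted = "application started"

func main() {
	log, _ := zap.NewDevelopment()
	defer log.Sync()

	// Before: log.Info("application started", zap.String("version", v))
	// After: the literal is replaced by the named constant; the
	// structured fields are untouched.
	log.Info(commonApplicationStarted, zap.String("version", "dev"))
}

Because zap's Debug/Info/Warn/Error methods all take the message as a plain string, the constants drop in without any signature changes, which is why the hunks below touch only the first argument of each call.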

View file

@@ -9,6 +9,7 @@ import (
"os/signal"
"syscall"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http"
@@ -80,13 +81,13 @@ func main() {
err = innerRing.Start(ctx, intErr)
exitErr(err)
log.Info("application started",
log.Info(logs.CommonApplicationStarted,
zap.String("version", misc.Version))
select {
case <-ctx.Done():
case err := <-intErr:
log.Info("internal error", zap.String("msg", err.Error()))
log.Info(logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
}
innerRing.Stop()
@@ -98,14 +99,14 @@ func main() {
go func() {
err := srv.Shutdown()
if err != nil {
log.Debug("could not shutdown HTTP server",
log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
zap.String("error", err.Error()),
)
}
}()
}
log.Info("application stopped")
log.Info(logs.FrostFSIRApplicationStopped)
}
func initHTTPServers(cfg *viper.Viper, log *logger.Logger) []*httputil.Server {

View file

@@ -29,6 +29,7 @@ import (
objectconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/object"
replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator"
tracingconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
@@ -342,13 +343,13 @@ type internals struct {
func (c *cfg) startMaintenance() {
c.isMaintenance.Store(true)
c.cfgNetmap.state.setControlNetmapStatus(control.NetmapStatus_MAINTENANCE)
c.log.Info("started local node's maintenance")
c.log.Info(logs.FrostFSNodeStartedLocalNodesMaintenance)
}
// stops node's maintenance.
func (c *internals) stopMaintenance() {
c.isMaintenance.Store(false)
c.log.Info("stopped local node's maintenance")
c.log.Info(logs.FrostFSNodeStoppedLocalNodesMaintenance)
}
// IsMaintenance checks if storage node is under maintenance.
@@ -881,10 +882,10 @@ func initLocalStorage(c *cfg) {
for _, optsWithMeta := range c.shardOpts() {
id, err := ls.AddShard(append(optsWithMeta.shOpts, shard.WithTombstoneSource(tombstoneSource))...)
if err != nil {
c.log.Error("failed to attach shard to engine", zap.Error(err))
c.log.Error(logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err))
} else {
shardsAttached++
c.log.Info("shard attached to engine", zap.Stringer("id", id))
c.log.Info(logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id))
}
}
if shardsAttached == 0 {
@@ -894,15 +895,15 @@ func initLocalStorage(c *cfg) {
c.cfgObject.cfgLocalStorage.localStorage = ls
c.onShutdown(func() {
c.log.Info("closing components of the storage engine...")
c.log.Info(logs.FrostFSNodeClosingComponentsOfTheStorageEngine)
err := ls.Close()
if err != nil {
c.log.Info("storage engine closing failure",
c.log.Info(logs.FrostFSNodeStorageEngineClosingFailure,
zap.String("error", err.Error()),
)
} else {
c.log.Info("all components of the storage engine closed successfully")
c.log.Info(logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
}
})
}
@@ -976,11 +977,11 @@ func (c *cfg) bootstrap() error {
// switch to online except when under maintenance
st := c.cfgNetmap.state.controlNetmapStatus()
if st == control.NetmapStatus_MAINTENANCE {
c.log.Info("bootstrapping with the maintenance state")
c.log.Info(logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
return c.bootstrapWithState((*netmap.NodeInfo).SetMaintenance)
}
c.log.Info("bootstrapping with online state",
c.log.Info(logs.FrostFSNodeBootstrappingWithOnlineState,
zap.Stringer("previous", st),
)
@@ -1015,32 +1016,32 @@ func (c *cfg) signalWatcher(ctx context.Context) {
case syscall.SIGHUP:
c.reloadConfig(ctx)
case syscall.SIGTERM, syscall.SIGINT:
c.log.Info("termination signal has been received, stopping...")
c.log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
// TODO (@acid-ant): #49 need to cover case when stuck at the middle(node health UNDEFINED or STARTING)
c.shutdown()
c.log.Info("termination signal processing is complete")
c.log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
return
}
case err := <-c.internalErr: // internal application error
c.log.Warn("internal application error",
c.log.Warn(logs.FrostFSNodeInternalApplicationError,
zap.String("message", err.Error()))
c.shutdown()
c.log.Info("internal error processing is complete")
c.log.Info(logs.FrostFSNodeInternalErrorProcessingIsComplete)
return
}
}
}
func (c *cfg) reloadConfig(ctx context.Context) {
c.log.Info("SIGHUP has been received, rereading configuration...")
c.log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
err := c.readConfig(c.appCfg)
if err != nil {
c.log.Error("configuration reading", zap.Error(err))
c.log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
return
}
@@ -1052,7 +1053,7 @@ func (c *cfg) reloadConfig(ctx context.Context) {
logPrm, err := c.loggerPrm()
if err != nil {
c.log.Error("logger configuration preparation", zap.Error(err))
c.log.Error(logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err))
return
}
@@ -1060,7 +1061,7 @@ func (c *cfg) reloadConfig(ctx context.Context) {
components = append(components, dCmp{"tracing", func() error {
updated, err := tracing.Setup(ctx, *tracingconfig.ToTracingConfig(c.appCfg))
if updated {
c.log.Info("tracing configation updated")
c.log.Info(logs.FrostFSNodeTracingConfigationUpdated)
}
return err
}})
@@ -1085,20 +1086,20 @@ func (c *cfg) reloadConfig(ctx context.Context) {
err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg)
if err != nil {
c.log.Error("storage engine configuration update", zap.Error(err))
c.log.Error(logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err))
return
}
for _, component := range components {
err = component.reloadFunc()
if err != nil {
c.log.Error("updated configuration applying",
c.log.Error(logs.FrostFSNodeUpdatedConfigurationApplying,
zap.String("component", component.name),
zap.Error(err))
}
}
c.log.Info("configuration has been reloaded successfully")
c.log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
}
func (c *cfg) shutdown() {

View file

@@ -11,6 +11,7 @@ import (
containerV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
@@ -136,13 +137,13 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
} else {
// unlike removal, we expect successful receive of the container
// after successful creation, so logging can be useful
c.log.Error("read newly created container after the notification",
c.log.Error(logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification,
zap.Stringer("id", ev.ID),
zap.Error(err),
)
}
c.log.Debug("container creation event's receipt",
c.log.Debug(logs.FrostFSNodeContainerCreationEventsReceipt,
zap.Stringer("id", ev.ID),
)
})
@@ -161,7 +162,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
cachedContainerStorage.handleRemoval(ev.ID)
c.log.Debug("container removal event's receipt",
c.log.Debug(logs.FrostFSNodeContainerRemovalEventsReceipt,
zap.Stringer("id", ev.ID),
)
})
@@ -295,7 +296,7 @@ type morphLoadWriter struct {
}
func (w *morphLoadWriter) Put(a containerSDK.SizeEstimation) error {
w.log.Debug("save used space announcement in contract",
w.log.Debug(logs.FrostFSNodeSaveUsedSpaceAnnouncementInContract,
zap.Uint64("epoch", a.Epoch()),
zap.Stringer("cid", a.Container()),
zap.Uint64("size", a.Value()),
@@ -458,7 +459,7 @@ func (d *localStorageLoad) Iterate(f loadcontroller.UsedSpaceFilter, h loadcontr
for i := range idList {
sz, err := engine.ContainerSize(d.engine, idList[i])
if err != nil {
d.log.Debug("failed to calculate container size in storage engine",
d.log.Debug(logs.FrostFSNodeFailedToCalculateContainerSizeInStorageEngine,
zap.Stringer("cid", idList[i]),
zap.String("error", err.Error()),
)
@@ -466,7 +467,7 @@ func (d *localStorageLoad) Iterate(f loadcontroller.UsedSpaceFilter, h loadcontr
continue
}
d.log.Debug("container size in storage engine calculated successfully",
d.log.Debug(logs.FrostFSNodeContainerSizeInStorageEngineCalculatedSuccessfully,
zap.Uint64("size", sz),
zap.Stringer("cid", idList[i]),
)

View file

@@ -5,6 +5,7 @@ import (
"net"
controlconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/control"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
controlSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
@@ -52,7 +53,7 @@ func initControlService(c *cfg) {
lis, err := net.Listen("tcp", endpoint)
if err != nil {
c.log.Error("can't listen gRPC endpoint (control)", zap.Error(err))
c.log.Error(logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err))
return
}

View file

@@ -9,6 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
grpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
"google.golang.org/grpc"
@@ -33,7 +34,7 @@ func initGRPC(c *cfg) {
if tlsCfg != nil {
cert, err := tls.LoadX509KeyPair(tlsCfg.CertificateFile(), tlsCfg.KeyFile())
if err != nil {
c.log.Error("could not read certificate from file", zap.Error(err))
c.log.Error(logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err))
return
}
@@ -63,7 +64,7 @@ func initGRPC(c *cfg) {
lis, err := net.Listen("tcp", sc.Endpoint())
if err != nil {
c.log.Error("can't listen gRPC endpoint", zap.Error(err))
c.log.Error(logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
return
}
@@ -93,14 +94,14 @@ func serveGRPC(c *cfg) {
go func() {
defer func() {
c.log.Info("stop listening gRPC endpoint",
c.log.Info(logs.FrostFSNodeStopListeningGRPCEndpoint,
zap.String("endpoint", lis.Addr().String()),
)
c.wg.Done()
}()
c.log.Info("start listening gRPC endpoint",
c.log.Info(logs.FrostFSNodeStartListeningGRPCEndpoint,
zap.String("endpoint", lis.Addr().String()),
)
@@ -114,7 +115,7 @@ func serveGRPC(c *cfg) {
func stopGRPC(name string, s *grpc.Server, l *logger.Logger) {
l = &logger.Logger{Logger: l.With(zap.String("name", name))}
l.Info("stopping gRPC server...")
l.Info(logs.FrostFSNodeStoppingGRPCServer)
// GracefulStop() may freeze forever, see #1270
done := make(chan struct{})
@@ -126,9 +127,9 @@ func stopGRPC(name string, s *grpc.Server, l *logger.Logger) {
select {
case <-done:
case <-time.After(1 * time.Minute):
l.Info("gRPC cannot shutdown gracefully, forcing stop")
l.Info(logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop)
s.Stop()
}
l.Info("gRPC server stopped successfully")
l.Info(logs.FrostFSNodeGRPCServerStoppedSuccessfully)
}

View file

@@ -8,6 +8,7 @@ import (
"os"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
"go.uber.org/zap"
@@ -142,14 +143,14 @@ func bootUp(ctx context.Context, c *cfg) {
}
func wait(c *cfg, cancel func()) {
c.log.Info("application started",
c.log.Info(logs.CommonApplicationStarted,
zap.String("version", misc.Version))
<-c.done // graceful shutdown
cancel()
c.log.Debug("waiting for all processes to stop")
c.log.Debug(logs.FrostFSNodeWaitingForAllProcessesToStop)
c.wg.Wait()
}

View file

@@ -7,6 +7,7 @@ import (
"time"
morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
@@ -49,7 +50,7 @@ func initMorphComponents(ctx context.Context, c *cfg) {
client.WithSwitchInterval(morphconfig.SwitchInterval(c.appCfg)),
)
if err != nil {
c.log.Info("failed to create neo RPC client",
c.log.Info(logs.FrostFSNodeFailedToCreateNeoRPCClient,
zap.Any("endpoints", addresses),
zap.String("error", err.Error()),
)
@@ -58,12 +59,12 @@ func initMorphComponents(ctx context.Context, c *cfg) {
}
c.onShutdown(func() {
c.log.Info("closing morph components...")
c.log.Info(logs.FrostFSNodeClosingMorphComponents)
cli.Close()
})
if err := cli.SetGroupSignerScope(); err != nil {
c.log.Info("failed to set group signer scope, continue with Global", zap.Error(err))
c.log.Info(logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err))
}
c.cfgMorph.client = cli
@@ -80,7 +81,7 @@ func initMorphComponents(ctx context.Context, c *cfg) {
fatalOnErr(err)
}
c.log.Info("notary support",
c.log.Info(logs.FrostFSNodeNotarySupport,
zap.Bool("sidechain_enabled", c.cfgMorph.notaryEnabled),
)
@@ -95,7 +96,7 @@ func initMorphComponents(ctx context.Context, c *cfg) {
msPerBlock, err := c.cfgMorph.client.MsPerBlock()
fatalOnErr(err)
c.cfgMorph.cacheTTL = time.Duration(msPerBlock) * time.Millisecond
c.log.Debug("morph.cache_ttl fetched from network", zap.Duration("value", c.cfgMorph.cacheTTL))
c.log.Debug(logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL))
}
if c.cfgMorph.cacheTTL < 0 {
@@ -122,7 +123,7 @@ func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) {
// non-error deposit with an empty TX hash means
// that the deposit has already been made; no
// need to wait it.
c.log.Info("notary deposit has already been made")
c.log.Info(logs.FrostFSNodeNotaryDepositHasAlreadyBeenMade)
return
}
@@ -190,7 +191,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey)
if err != nil {
fromSideChainBlock = 0
c.log.Warn("can't get last processed side chain block number", zap.String("error", err.Error()))
c.log.Warn(logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
}
subs, err = subscriber.New(ctx, &subscriber.Params{
@@ -215,7 +216,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
setNetmapNotificationParser(c, newEpochNotification, func(src *state.ContainedNotificationEvent) (event.Event, error) {
res, err := netmapEvent.ParseNewEpoch(src)
if err == nil {
c.log.Info("new epoch event from sidechain",
c.log.Info(logs.FrostFSNodeNewEpochEventFromSidechain,
zap.Uint64("number", res.(netmapEvent.NewEpoch).EpochNumber()),
)
}
@@ -226,11 +227,11 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
registerNotificationHandlers(c.cfgContainer.scriptHash, lis, c.cfgContainer.parsers, c.cfgContainer.subscribers)
registerBlockHandler(lis, func(block *block.Block) {
c.log.Debug("new block", zap.Uint32("index", block.Index))
c.log.Debug(logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index))
err = c.persistate.SetUInt32(persistateSideChainLastBlockKey, block.Index)
if err != nil {
c.log.Warn("can't update persistent state",
c.log.Warn(logs.FrostFSNodeCantUpdatePersistentState,
zap.String("chain", "side"),
zap.Uint32("block_index", block.Index))
}

View file

@@ -8,6 +8,7 @@ import (
netmapGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap/grpc"
nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
@@ -193,7 +194,7 @@ func addNewEpochNotificationHandlers(c *cfg) {
if (n-c.cfgNetmap.startEpoch)%reBootstrapInterval == 0 {
err := c.bootstrap()
if err != nil {
c.log.Warn("can't send re-bootstrap tx", zap.Error(err))
c.log.Warn(logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err))
}
}
})
@@ -203,7 +204,7 @@ func addNewEpochNotificationHandlers(c *cfg) {
ni, err := c.netmapLocalNodeState(e)
if err != nil {
c.log.Error("could not update node state on new epoch",
c.log.Error(logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
zap.Uint64("epoch", e),
zap.String("error", err.Error()),
)
@@ -218,7 +219,7 @@ func addNewEpochNotificationHandlers(c *cfg) {
addNewEpochAsyncNotificationHandler(c, func(ev event.Event) {
_, err := makeNotaryDeposit(c)
if err != nil {
c.log.Error("could not make notary deposit",
c.log.Error(logs.FrostFSNodeCouldNotMakeNotaryDeposit,
zap.String("error", err.Error()),
)
}
@@ -298,7 +299,7 @@ func initNetmapState(c *cfg) {
}
}
c.log.Info("initial network state",
c.log.Info(logs.FrostFSNodeInitialNetworkState,
zap.Uint64("epoch", epoch),
zap.String("state", stateWord),
)

View file

@@ -6,6 +6,7 @@ import (
"fmt"
nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
@@ -28,7 +29,7 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler
listRes, err := n.e.ListContainers(engine.ListContainersPrm{})
if err != nil {
log.Error("notificator: could not list containers", zap.Error(err))
log.Error(logs.FrostFSNodeNotificatorCouldNotListContainers, zap.Error(err))
return
}
@@ -43,7 +44,7 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler
selectRes, err := n.e.Select(selectPrm)
if err != nil {
log.Error("notificator: could not select objects from container",
log.Error(logs.FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer,
zap.Stringer("cid", c),
zap.Error(err),
)
@@ -53,7 +54,7 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler
for _, a := range selectRes.AddressList() {
err = n.processAddress(ctx, a, handler)
if err != nil {
log.Error("notificator: could not process object",
log.Error(logs.FrostFSNodeNotificatorCouldNotProcessObject,
zap.Stringer("address", a),
zap.Error(err),
)
@@ -62,7 +63,7 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler
}
}
log.Debug("notificator: finished processing object notifications")
log.Debug(logs.FrostFSNodeNotificatorFinishedProcessingObjectNotifications)
}
func (n *notificationSource) processAddress(
@@ -101,7 +102,7 @@ type notificationWriter struct {
func (n notificationWriter) Notify(topic string, address oid.Address) {
if err := n.w.Notify(topic, address); err != nil {
n.l.Warn("could not write object notification",
n.l.Warn(logs.FrostFSNodeCouldNotWriteObjectNotification,
zap.Stringer("address", address),
zap.String("topic", topic),
zap.Error(err),

View file

@@ -11,6 +11,7 @@ import (
metricsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/metrics"
policerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/policer"
replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
@@ -62,7 +63,7 @@ type objectSvc struct {
func (c *cfg) MaxObjectSize() uint64 {
sz, err := c.cfgNetmap.wrapper.MaxObjectSize()
if err != nil {
c.log.Error("could not get max object size value",
c.log.Error(logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
zap.String("error", err.Error()),
)
}
@@ -259,7 +260,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *reputati
_, err := ls.Inhume(ctx, inhumePrm)
if err != nil {
c.log.Warn("could not inhume mark redundant copy as garbage",
c.log.Warn(logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
zap.String("error", err.Error()),
)
}
@@ -600,7 +601,7 @@ func (c *reputationClientConstructor) Get(info coreclient.NodeInfo) (coreclient.
}
}
} else {
c.log.Warn("could not get latest network map to overload the client",
c.log.Warn(logs.FrostFSNodeCouldNotGetLatestNetworkMapToOverloadTheClient,
zap.String("error", err.Error()),
)
}

View file

@@ -11,6 +11,7 @@ import (
intermediatereputation "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/intermediate"
localreputation "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/local"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/ticker"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
@@ -105,7 +106,7 @@ func addReputationReportHandler(ctx context.Context, c *cfg) {
addNewEpochAsyncNotificationHandler(
c,
func(ev event.Event) {
c.log.Debug("start reporting reputation on new epoch event")
c.log.Debug(logs.FrostFSNodeStartReportingReputationOnNewEpochEvent)
var reportPrm localtrustcontroller.ReportPrm
@@ -127,13 +128,13 @@ func addEigenTrustEpochHandler(ctx context.Context, c *cfg, eigenTrustController
duration, err := c.cfgNetmap.wrapper.EpochDuration()
if err != nil {
log.Debug("could not fetch epoch duration", zap.Error(err))
log.Debug(logs.FrostFSNodeCouldNotFetchEpochDuration, zap.Error(err))
return
}
iterations, err := c.cfgNetmap.wrapper.EigenTrustIterations()
if err != nil {
log.Debug("could not fetch iteration number", zap.Error(err))
log.Debug(logs.FrostFSNodeCouldNotFetchIterationNumber, zap.Error(err))
return
}
@@ -145,7 +146,7 @@ func addEigenTrustEpochHandler(ctx context.Context, c *cfg, eigenTrustController
)
})
if err != nil {
log.Debug("could not create fixed epoch timer", zap.Error(err))
log.Debug(logs.FrostFSNodeCouldNotCreateFixedEpochTimer, zap.Error(err))
return
}

View file

@@ -3,6 +3,7 @@ package common
import (
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
@@ -71,16 +72,16 @@ func NewRemoteTrustProvider(prm RemoteProviderPrm) *RemoteTrustProvider {
}
func (rtp *RemoteTrustProvider) InitRemote(srv reputationcommon.ServerInfo) (reputationcommon.WriterProvider, error) {
rtp.log.Debug("initializing remote writer provider")
rtp.log.Debug(logs.CommonInitializingRemoteWriterProvider)
if srv == nil {
rtp.log.Debug("route has reached dead-end provider")
rtp.log.Debug(logs.CommonRouteHasReachedDeadendProvider)
return rtp.deadEndProvider, nil
}
if rtp.netmapKeys.IsLocalKey(srv.PublicKey()) {
// if local => return no-op writer
rtp.log.Debug("initializing no-op writer provider")
rtp.log.Debug(logs.CommonInitializingNoopWriterProvider)
return trustcontroller.SimpleWriterProvider(new(NopReputationWriter)), nil
}

View file

@@ -3,6 +3,7 @@ package intermediate
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust"
@@ -31,7 +32,7 @@ type ConsumerTrustWriter struct {
}
func (w *ConsumerTrustWriter) Write(_ context.Context, t reputation.Trust) error {
w.log.Debug("writing received consumer's trusts",
w.log.Debug(logs.IntermediateWritingReceivedConsumersTrusts,
zap.Uint64("epoch", w.iterInfo.Epoch()),
zap.Uint32("iteration", w.iterInfo.I()),
zap.Stringer("trusting_peer", t.TrustingPeer()),

View file

@@ -4,6 +4,7 @@ import (
"crypto/ecdsa"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust"
eigentrustcalc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/calculator"
@@ -71,7 +72,7 @@ type FinalWriter struct {
}
func (fw FinalWriter) WriteIntermediateTrust(t eigentrust.IterationTrust) error {
fw.l.Debug("start writing global trusts to contract")
fw.l.Debug(logs.IntermediateStartWritingGlobalTrustsToContract)
args := repClient.PutPrm{}

View file

@@ -3,6 +3,7 @@ package intermediate
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/storage/daughters"
@@ -27,7 +28,7 @@ type DaughterTrustWriter struct {
}
func (w *DaughterTrustWriter) Write(_ context.Context, t reputation.Trust) error {
w.log.Debug("writing received daughter's trusts",
w.log.Debug(logs.IntermediateWritingReceivedDaughtersTrusts,
zap.Uint64("epoch", w.ep.Epoch()),
zap.Stringer("trusting_peer", t.TrustingPeer()),
zap.Stringer("trusted_peer", t.Peer()),

View file

@@ -6,6 +6,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/common"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
@@ -92,7 +93,7 @@ func (rtp *RemoteTrustWriter) Write(ctx context.Context, t reputation.Trust) err
epoch := rtp.iterInfo.Epoch()
i := rtp.iterInfo.I()
rtp.log.Debug("announcing trust",
rtp.log.Debug(logs.IntermediateAnnouncingTrust,
zap.Uint64("epoch", epoch),
zap.Uint32("iteration", i),
zap.Stringer("trusting_peer", t.TrustingPeer()),

View file

@@ -6,6 +6,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/common"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
@@ -96,7 +97,7 @@ func (rtp *RemoteTrustWriter) Write(_ context.Context, t reputation.Trust) error
func (rtp *RemoteTrustWriter) Close(ctx context.Context) error {
epoch := rtp.ep.Epoch()
rtp.log.Debug("announcing trusts",
rtp.log.Debug(logs.LocalAnnouncingTrusts,
zap.Uint64("epoch", epoch),
)

View file

@@ -4,6 +4,7 @@ import (
"bytes"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
@@ -27,7 +28,7 @@ type TrustStorage struct {
func (s *TrustStorage) InitIterator(ep reputationcommon.EpochProvider) (trustcontroller.Iterator, error) {
epoch := ep.Epoch()
s.Log.Debug("initializing iterator over trusts",
s.Log.Debug(logs.LocalInitializingIteratorOverTrusts,
zap.Uint64("epoch", epoch),
)

View file

@@ -6,6 +6,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
tracingconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"go.uber.org/zap"
)
@@ -14,7 +15,7 @@ func initTracing(ctx context.Context, c *cfg) {
_, err := tracing.Setup(ctx, *conf)
if err != nil {
c.log.Error("failed init tracing", zap.Error(err))
c.log.Error(logs.FrostFSNodeFailedInitTracing, zap.Error(err))
}
c.closers = append(c.closers, closer{
@@ -24,7 +25,7 @@ func initTracing(ctx context.Context, c *cfg) {
defer cancel()
err := tracing.Shutdown(ctx) //cfg context cancels before close
if err != nil {
c.log.Error("failed shutdown tracing", zap.Error(err))
c.log.Error(logs.FrostFSNodeFailedShutdownTracing, zap.Error(err))
}
},
})

View file

@@ -6,6 +6,7 @@ import (
"time"
treeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tree"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
containerClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
@@ -37,7 +38,7 @@ func (c cnrSource) List() ([]cid.ID, error) {
func initTreeService(c *cfg) {
treeConfig := treeconfig.Tree(c.appCfg)
if !treeConfig.Enabled() {
c.log.Info("tree service is not enabled, skip initialization")
c.log.Info(logs.FrostFSNodeTreeServiceIsNotEnabledSkipInitialization)
return
}
@@ -68,7 +69,7 @@ func initTreeService(c *cfg) {
addNewEpochNotificationHandler(c, func(_ event.Event) {
err := c.treeService.SynchronizeAll()
if err != nil {
c.log.Error("could not synchronize Tree Service", zap.Error(err))
c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
}
})
} else {
@@ -79,7 +80,7 @@ func initTreeService(c *cfg) {
for range tick.C {
err := c.treeService.SynchronizeAll()
if err != nil {
c.log.Error("could not synchronize Tree Service", zap.Error(err))
c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
if errors.Is(err, tree.ErrShuttingDown) {
return
}
@@ -92,11 +93,11 @@ func initTreeService(c *cfg) {
ev := e.(containerEvent.DeleteSuccess)
// This is executed asynchronously, so we don't care about the operation taking some time.
c.log.Debug("removing all trees for container", zap.Stringer("cid", ev.ID))
c.log.Debug(logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID))
err := c.treeService.DropTree(context.Background(), ev.ID, "")
if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
// Ignore pilorama.ErrTreeNotFound but other errors, including shard.ErrReadOnly, should be logged.
c.log.Error("container removal event received, but trees weren't removed",
c.log.Error(logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved,
zap.Stringer("cid", ev.ID),
zap.String("error", err.Error()))
}

internal/logs/logs.go (new file, 694 lines)

View file

@@ -0,0 +1,694 @@
package logs
const (
InnerringAmountCanNotBeRepresentedAsAnInt640 = "amount can not be represented as an int64" // Error in ../node/pkg/innerring/settlement.go
InnerringCantGetUsedSpaceEstimation1 = "can't get used space estimation" // Warn in ../node/pkg/innerring/settlement.go
InnerringSubnetCreationQueueFailure2 = "subnet creation queue failure" // Error in ../node/pkg/innerring/subnet.go
InnerringDiscardSubnetCreation3 = "discard subnet creation" // Info in ../node/pkg/innerring/subnet.go
InnerringApproveSubnetCreation4 = "approve subnet creation" // Error in ../node/pkg/innerring/subnet.go
InnerringSubnetRemovalHandlingFailure5 = "subnet removal handling failure" // Error in ../node/pkg/innerring/subnet.go
InnerringGettingNetmapCandidates6 = "getting netmap candidates" // Error in ../node/pkg/innerring/subnet.go
InnerringUnmarshallingRemovedSubnetID7 = "unmarshalling removed subnet ID" // Error in ../node/pkg/innerring/subnet.go
InnerringIteratingNodesSubnets8 = "iterating node's subnets" // Error in ../node/pkg/innerring/subnet.go
InnerringRemovingNodeFromNetmapCandidates9 = "removing node from netmap candidates" // Debug in ../node/pkg/innerring/subnet.go
InnerringRemovingNodeFromCandidates10 = "removing node from candidates" // Error in ../node/pkg/innerring/subnet.go
InnerringRemovingSubnetFromTheNode11 = "removing subnet from the node" // Debug in ../node/pkg/innerring/subnet.go
InnerringUpdatingSubnetInfo12 = "updating subnet info" // Error in ../node/pkg/innerring/subnet.go
InnerringNonalphabetModeDoNotStopContainerEstimations13 = "non-alphabet mode, do not stop container estimations" // Debug in ../node/pkg/innerring/blocktimer.go
InnerringCantStopEpochEstimation14 = "can't stop epoch estimation" // Warn in ../node/pkg/innerring/blocktimer.go
InnerringCantMakeNotaryDepositInMainChain15 = "can't make notary deposit in main chain" // Error in ../node/pkg/innerring/notary.go
InnerringCantMakeNotaryDepositInSideChain16 = "can't make notary deposit in side chain" // Error in ../node/pkg/innerring/notary.go
InnerringNotaryDepositHasAlreadyBeenMade17 = "notary deposit has already been made" // Info in ../node/pkg/innerring/notary.go
InnerringCantGetInnerRingIndex18 = "can't get inner ring index" // Error in ../node/pkg/innerring/state.go
InnerringCantGetInnerRingSize19 = "can't get inner ring size" // Error in ../node/pkg/innerring/state.go
InnerringCantGetAlphabetIndex20 = "can't get alphabet index" // Error in ../node/pkg/innerring/state.go
InnerringIgnoreValidatorVoteNodeNotInAlphabetRange21 = "ignore validator vote: node not in alphabet range" // Info in ../node/pkg/innerring/state.go
InnerringIgnoreValidatorVoteEmptyValidatorsList22 = "ignore validator vote: empty validators list" // Info in ../node/pkg/innerring/state.go
InnerringCantInvokeVoteMethodInAlphabetContract23 = "can't invoke vote method in alphabet contract" // Warn in ../node/pkg/innerring/state.go
InnerringCantGetLastProcessedMainChainBlockNumber24 = "can't get last processed main chain block number" // Warn in ../node/pkg/innerring/initialization.go
InnerringNotarySupport25 = "notary support" // Info in ../node/pkg/innerring/initialization.go
InnerringAlphabetKeysSyncIsDisabled26 = "alphabet keys sync is disabled" // Debug in ../node/pkg/innerring/initialization.go
InnerringNoControlServerEndpointSpecifiedServiceIsDisabled27 = "no Control server endpoint specified, service is disabled" // Info in ../node/pkg/innerring/initialization.go
InnerringCantGetLastProcessedSideChainBlockNumber28 = "can't get last processed side chain block number" // Warn in ../node/pkg/innerring/initialization.go
InnerringFailedToSetGroupSignerScopeContinueWithGlobal29 = "failed to set group signer scope, continue with Global" // Info in ../node/pkg/innerring/initialization.go
InnerringCantVoteForPreparedValidators30 = "can't vote for prepared validators" // Warn in ../node/pkg/innerring/innerring.go
InnerringNewBlock31 = "new block" // Debug in ../node/pkg/innerring/innerring.go
InnerringCantUpdatePersistentState32 = "can't update persistent state" // Warn in ../node/pkg/innerring/innerring.go
InnerringCantUpdatePersistentState33 = "can't update persistent state" // Warn in ../node/pkg/innerring/innerring.go
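// Note: identical message and source file as InnerringCantUpdatePersistentState32 directly above.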
InnerringCloserError34 = "closer error" // Warn in ../node/pkg/innerring/innerring.go
InnerringReadConfigFromBlockchain35 = "read config from blockchain" // Debug in ../node/pkg/innerring/innerring.go
InnerringCantSetupRemoteConnection36 = "can't setup remote connection" // Warn in ../node/pkg/innerring/rpc.go
InnerringCantGetStorageGroupObject37 = "can't get storage group object" // Warn in ../node/pkg/innerring/rpc.go
NotificatorNotificatorStartProcessingObjectNotifications38 = "notificator: start processing object notifications" // Debug in ../node/pkg/services/notificator/service.go
NotificatorNotificatorProcessingObjectNotification39 = "notificator: processing object notification" // Debug in ../node/pkg/services/notificator/service.go
PolicerCouldNotGetContainer40 = "could not get container" // Error in ../node/pkg/services/policer/check.go
PolicerCouldNotInhumeObjectWithMissingContainer41 = "could not inhume object with missing container" // Error in ../node/pkg/services/policer/check.go
PolicerCouldNotBuildPlacementVectorForObject42 = "could not build placement vector for object" // Error in ../node/pkg/services/policer/check.go
PolicerRedundantLocalObjectCopyDetected43 = "redundant local object copy detected" // Info in ../node/pkg/services/policer/check.go
PolicerReceiveObjectHeaderToCheckPolicyCompliance44 = "receive object header to check policy compliance" // Error in ../node/pkg/services/policer/check.go
PolicerConsiderNodeUnderMaintenanceAsOK45 = "consider node under maintenance as OK" // Debug in ../node/pkg/services/policer/check.go
PolicerShortageOfObjectCopiesDetected46 = "shortage of object copies detected" // Debug in ../node/pkg/services/policer/check.go
PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenanceSaveLocalCopy47 = "some of the copies are stored on nodes under maintenance, save local copy" // Debug in ../node/pkg/services/policer/check.go
PolicerRoutineStopped48 = "routine stopped" // Info in ../node/pkg/services/policer/process.go
PolicerFailureAtObjectSelectForReplication49 = "failure at object select for replication" // Warn in ../node/pkg/services/policer/process.go
PolicerPoolSubmission50 = "pool submission" // Warn in ../node/pkg/services/policer/process.go
PolicerTuneReplicationCapacity51 = "tune replication capacity" // Debug in ../node/pkg/services/policer/process.go
ReplicatorFinishWork52 = "finish work" // Debug in ../node/pkg/services/replicator/process.go
ReplicatorCouldNotGetObjectFromLocalStorage53 = "could not get object from local storage" // Error in ../node/pkg/services/replicator/process.go
ReplicatorCouldNotReplicateObject54 = "could not replicate object" // Error in ../node/pkg/services/replicator/process.go
ReplicatorObjectSuccessfullyReplicated55 = "object successfully replicated" // Debug in ../node/pkg/services/replicator/process.go
SessionServingRequest56 = "serving request..." // Debug in ../node/pkg/services/session/executor.go
TreeRedirectingTreeServiceQuery57 = "redirecting tree service query" // Debug in ../node/pkg/services/tree/redirect.go
TreeBearerPresentedButNotAllowedByACL58 = "bearer presented but not allowed by ACL" // Debug in ../node/pkg/services/tree/signature.go
TreeCouldNotGetLastSynchronizedHeightForATree59 = "could not get last synchronized height for a tree" // Warn in ../node/pkg/services/tree/sync.go
TreeCouldNotUpdateLastSynchronizedHeightForATree60 = "could not update last synchronized height for a tree" // Warn in ../node/pkg/services/tree/sync.go
TreeSynchronizeTree61 = "synchronize tree" // Debug in ../node/pkg/services/tree/sync.go
TreeFailedToRunTreeSynchronizationOverAllNodes62 = "failed to run tree synchronization over all nodes" // Warn in ../node/pkg/services/tree/sync.go
TreeSyncingTrees63 = "syncing trees..." // Debug in ../node/pkg/services/tree/sync.go
TreeCouldNotFetchContainers64 = "could not fetch containers" // Error in ../node/pkg/services/tree/sync.go
TreeTreesHaveBeenSynchronized65 = "trees have been synchronized" // Debug in ../node/pkg/services/tree/sync.go
TreeSyncingContainerTrees66 = "syncing container trees..." // Debug in ../node/pkg/services/tree/sync.go
TreeCouldNotSyncTrees67 = "could not sync trees" // Error in ../node/pkg/services/tree/sync.go
TreeContainerTreesHaveBeenSynced68 = "container trees have been synced" // Debug in ../node/pkg/services/tree/sync.go
TreeCouldNotQueryTreesForSynchronization69 = "could not query trees for synchronization" // Error in ../node/pkg/services/tree/sync.go
TreeRemovingRedundantTrees70 = "removing redundant trees..." // Debug in ../node/pkg/services/tree/sync.go
TreeCouldNotRemoveRedundantTree71 = "could not remove redundant tree" // Error in ../node/pkg/services/tree/sync.go
TreeCouldNotCalculateContainerNodes72 = "could not calculate container nodes" // Error in ../node/pkg/services/tree/sync.go
TreeFailedToApplyReplicatedOperation73 = "failed to apply replicated operation" // Error in ../node/pkg/services/tree/replicator.go
TreeDoNotSendUpdateToTheNode74 = "do not send update to the node" // Debug in ../node/pkg/services/tree/replicator.go
TreeFailedToSentUpdateToTheNode75 = "failed to sent update to the node" // Warn in ../node/pkg/services/tree/replicator.go
TreeErrorDuringReplication76 = "error during replication" // Error in ../node/pkg/services/tree/replicator.go
PersistentCouldNotGetSessionFromPersistentStorage77 = "could not get session from persistent storage" // Error in ../node/pkg/services/session/storage/persistent/storage.go
PersistentCouldNotDeleteSToken78 = "could not delete %s token" // Error in ../node/pkg/services/session/storage/persistent/storage.go
PersistentCouldNotCleanUpExpiredTokens79 = "could not clean up expired tokens" // Error in ../node/pkg/services/session/storage/persistent/storage.go
CommonStartBuildingManagers80 = "start building managers" // Debug in ../node/pkg/services/reputation/common/managers.go
ControllerReportIsAlreadyStarted81 = "report is already started" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
ControllerStartingToReportLocalTrustValues82 = "starting to report local trust values" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
ControllerCouldNotInitializeIteratorOverLocalTrustValues83 = "could not initialize iterator over local trust values" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
ControllerCouldNotInitializeLocalTrustTarget84 = "could not initialize local trust target" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
ControllerIteratorOverLocalTrustFailed85 = "iterator over local trust failed" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
ControllerCouldNotFinishWritingLocalTrustValues86 = "could not finish writing local trust values" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
ControllerReportingSuccessfullyFinished87 = "reporting successfully finished" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
ControllerReportingSuccessfullyInterrupted88 = "reporting successfully interrupted" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
ControllerReportingIsNotStartedOrAlreadyInterrupted89 = "reporting is not started or already interrupted" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
RoutesBuildingNextStageForLocalTrustRoute90 = "building next stage for local trust route" // Debug in ../node/pkg/services/reputation/local/routes/calls.go
CalculatorFailedToGetAlphaParam91 = "failed to get alpha param" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
CalculatorConsumersTrustIteratorsInitFailure92 = "consumers trust iterator's init failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
CalculatorWorkerPoolSubmitFailure93 = "worker pool submit failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
CalculatorIterateDaughtersConsumersFailed94 = "iterate daughter's consumers failed" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
CalculatorGetInitialTrustFailure95 = "get initial trust failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
CalculatorDaughterTrustIteratorsInitFailure96 = "daughter trust iterator's init failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
CalculatorIterateOverDaughtersTrustsFailure97 = "iterate over daughter's trusts failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
CalculatorInitWriterFailure98 = "init writer failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
CalculatorWriteFinalResultFailure99 = "write final result failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
CalculatorInitWriterFailure100 = "init writer failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
CalculatorWriteValueFailure101 = "write value failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
CalculatorIterateDaughterTrustsFailure102 = "iterate daughter trusts failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
CalculatorCouldNotCloseWriter103 = "could not close writer" // Error in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
CalculatorAllDaughtersTrustIteratorsInitFailure104 = "all daughters trust iterator's init failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
CalculatorInitWriterFailure105 = "init writer failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
CalculatorGetInitialTrustFailure106 = "get initial trust failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
CalculatorWriteValueFailure107 = "write value failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
CalculatorIterateOverAllDaughtersFailure108 = "iterate over all daughters failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
CalculatorCouldNotCloseWriter109 = "could not close writer" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
ControllerCouldNotGetEigenTrustIterationNumber110 = "could not get EigenTrust iteration number" // Error in ../node/pkg/services/reputation/eigentrust/controller/calls.go
ControllerIterationSubmitFailure111 = "iteration submit failure" // Debug in ../node/pkg/services/reputation/eigentrust/controller/calls.go
RoutesBuildingNextStageForTrustRoute112 = "building next stage for trust route" // Debug in ../node/pkg/services/reputation/eigentrust/routes/calls.go
RouterCouldNotInitializeWriterProvider113 = "could not initialize writer provider" // Debug in ../node/pkg/services/reputation/common/router/calls.go
RouterCouldNotInitializeWriter114 = "could not initialize writer" // Debug in ../node/pkg/services/reputation/common/router/calls.go
RouterCouldNotWriteTheValue115 = "could not write the value" // Debug in ../node/pkg/services/reputation/common/router/calls.go
RouterCouldNotCloseRemoteServerWriter116 = "could not close remote server writer" // Debug in ../node/pkg/services/reputation/common/router/calls.go
TombstoneTombstoneGetterCouldNotGetTheTombstoneTheSource117 = "tombstone getter: could not get the tombstone the source" // Warn in ../node/pkg/services/object_manager/tombstone/checker.go
TombstoneTombstoneGetterCouldNotParseTombstoneExpirationEpoch118 = "tombstone getter: could not parse tombstone expiration epoch" // Warn in ../node/pkg/services/object_manager/tombstone/checker.go
DeleteRequestIsNotRolledOverToTheContainer119 = "request is not rolled over to the container" // Debug in ../node/pkg/services/object/delete/container.go
DeleteServingRequest120 = "serving request..." // Debug in ../node/pkg/services/object/delete/delete.go
DeleteOperationFinishedSuccessfully121 = "operation finished successfully" // Debug in ../node/pkg/services/object/delete/delete.go
DeleteOperationFinishedWithError122 = "operation finished with error" // Debug in ../node/pkg/services/object/delete/delete.go
DeleteCouldNotComposeSplitInfo123 = "could not compose split info" // Debug in ../node/pkg/services/object/delete/exec.go
DeleteNoSplitInfoObjectIsPHY124 = "no split info, object is PHY" // Debug in ../node/pkg/services/object/delete/exec.go
DeleteAssemblingChain125 = "assembling chain..." // Debug in ../node/pkg/services/object/delete/exec.go
DeleteCouldNotGetPreviousSplitElement126 = "could not get previous split element" // Debug in ../node/pkg/services/object/delete/exec.go
DeleteCollectingChildren127 = "collecting children..." // Debug in ../node/pkg/services/object/delete/exec.go
DeleteCouldNotCollectObjectChildren128 = "could not collect object children" // Debug in ../node/pkg/services/object/delete/exec.go
DeleteSupplementBySplitID129 = "supplement by split ID" // Debug in ../node/pkg/services/object/delete/exec.go
DeleteCouldNotSearchForSplitChainMembers130 = "could not search for split chain members" // Debug in ../node/pkg/services/object/delete/exec.go
DeleteCouldNotMarshalTombstoneStructure131 = "could not marshal tombstone structure" // Debug in ../node/pkg/services/object/delete/exec.go
DeleteCouldNotSaveTheTombstone132 = "could not save the tombstone" // Debug in ../node/pkg/services/object/delete/exec.go
DeleteFormingTombstoneStructure133 = "forming tombstone structure..." // Debug in ../node/pkg/services/object/delete/local.go
DeleteTombstoneStructureSuccessfullyFormedSaving134 = "tombstone structure successfully formed, saving..." // Debug in ../node/pkg/services/object/delete/local.go
DeleteCouldNotReadTombstoneLifetimeConfig135 = "could not read tombstone lifetime config" // Debug in ../node/pkg/services/object/delete/local.go
DeleteFormingSplitInfo136 = "forming split info..." // Debug in ../node/pkg/services/object/delete/local.go
DeleteSplitInfoSuccessfullyFormedCollectingMembers137 = "split info successfully formed, collecting members..." // Debug in ../node/pkg/services/object/delete/local.go
DeleteMembersSuccessfullyCollected138 = "members successfully collected" // Debug in ../node/pkg/services/object/delete/local.go
GetProcessingNode139 = "processing node..." // Debug in ../node/pkg/services/object/get/remote.go
GetRemoteCallFailed140 = "remote call failed" // Debug in ../node/pkg/services/object/get/remote.go
GetCanNotAssembleTheObject141 = "can not assemble the object" // Debug in ../node/pkg/services/object/get/assemble.go
GetTryingToAssembleTheObject142 = "trying to assemble the object..." // Debug in ../node/pkg/services/object/get/assemble.go
GetAssemblingSplittedObject143 = "assembling splitted object..." // Debug in ../node/pkg/services/object/get/assemble.go
GetAssemblingSplittedObjectCompleted144 = "assembling splitted object completed" // Debug in ../node/pkg/services/object/get/assemble.go
GetFailedToAssembleSplittedObject145 = "failed to assemble splitted object" // Warn in ../node/pkg/services/object/get/assemble.go
GetCouldNotGetCurrentEpochNumber146 = "could not get current epoch number" // Debug in ../node/pkg/services/object/get/exec.go
GetCouldNotGenerateContainerTraverser147 = "could not generate container traverser" // Debug in ../node/pkg/services/object/get/exec.go
GetCouldNotConstructRemoteNodeClient148 = "could not construct remote node client" // Debug in ../node/pkg/services/object/get/exec.go
GetCouldNotWriteHeader149 = "could not write header" // Debug in ../node/pkg/services/object/get/exec.go
GetCouldNotWritePayloadChunk150 = "could not write payload chunk" // Debug in ../node/pkg/services/object/get/exec.go
GetLocalGetFailed151 = "local get failed" // Debug in ../node/pkg/services/object/get/local.go
GetReturnResultDirectly152 = "return result directly" // Debug in ../node/pkg/services/object/get/container.go
GetTryingToExecuteInContainer153 = "trying to execute in container..." // Debug in ../node/pkg/services/object/get/container.go
GetProcessEpoch154 = "process epoch" // Debug in ../node/pkg/services/object/get/container.go
GetNoMoreNodesAbortPlacementIteration155 = "no more nodes, abort placement iteration" // Debug in ../node/pkg/services/object/get/container.go
GetInterruptPlacementIterationByContext156 = "interrupt placement iteration by context" // Debug in ../node/pkg/services/object/get/container.go
GetCompletingTheOperation157 = "completing the operation" // Debug in ../node/pkg/services/object/get/container.go
GetServingRequest158 = "serving request..." // Debug in ../node/pkg/services/object/get/get.go
GetOperationFinishedSuccessfully159 = "operation finished successfully" // Debug in ../node/pkg/services/object/get/get.go
GetRequestedObjectWasMarkedAsRemoved160 = "requested object was marked as removed" // Debug in ../node/pkg/services/object/get/get.go
GetRequestedObjectIsVirtual161 = "requested object is virtual" // Debug in ../node/pkg/services/object/get/get.go
GetRequestedRangeIsOutOfObjectBounds162 = "requested range is out of object bounds" // Debug in ../node/pkg/services/object/get/get.go
GetOperationFinishedWithError163 = "operation finished with error" // Debug in ../node/pkg/services/object/get/get.go
PutAdditionalContainerBroadcastFailure164 = "additional container broadcast failure" // Error in ../node/pkg/services/object/put/distributed.go
SearchReturnResultDirectly165 = "return result directly" // Debug in ../node/pkg/services/object/search/container.go
SearchTryingToExecuteInContainer166 = "trying to execute in container..." // Debug in ../node/pkg/services/object/search/container.go
SearchProcessEpoch167 = "process epoch" // Debug in ../node/pkg/services/object/search/container.go
SearchNoMoreNodesAbortPlacementIteration168 = "no more nodes, abort placement iteration" // Debug in ../node/pkg/services/object/search/container.go
SearchInterruptPlacementIterationByContext169 = "interrupt placement iteration by context" // Debug in ../node/pkg/services/object/search/container.go
SearchProcessingNode170 = "processing node..." // Debug in ../node/pkg/services/object/search/container.go
SearchCouldNotConstructRemoteNodeClient171 = "could not construct remote node client" // Debug in ../node/pkg/services/object/search/container.go
SearchRemoteOperationFailed172 = "remote operation failed" // Debug in ../node/pkg/services/object/search/container.go
SearchCouldNotGetCurrentEpochNumber173 = "could not get current epoch number" // Debug in ../node/pkg/services/object/search/exec.go
SearchCouldNotGenerateContainerTraverser174 = "could not generate container traverser" // Debug in ../node/pkg/services/object/search/exec.go
SearchCouldNotWriteObjectIdentifiers175 = "could not write object identifiers" // Debug in ../node/pkg/services/object/search/exec.go
SearchLocalOperationFailed176 = "local operation failed" // Debug in ../node/pkg/services/object/search/local.go
SearchServingRequest177 = "serving request..." // Debug in ../node/pkg/services/object/search/search.go
SearchOperationFinishedWithError178 = "operation finished with error" // Debug in ../node/pkg/services/object/search/search.go
SearchOperationFinishedSuccessfully179 = "operation finished successfully" // Debug in ../node/pkg/services/object/search/search.go
UtilObjectServiceError180 = "object service error" // Error in ../node/pkg/services/object/util/log.go
UtilCouldNotPushTaskToWorkerPool181 = "could not push task to worker pool" // Error in ../node/pkg/services/object/util/log.go
V2CantCheckIfRequestFromInnerRing182 = "can't check if request from inner ring" // Debug in ../node/pkg/services/object/acl/v2/classifier.go
V2CantCheckIfRequestFromContainerNode183 = "can't check if request from container node" // Debug in ../node/pkg/services/object/acl/v2/classifier.go
NatsNatsConnectionWasLost184 = "nats: connection was lost" // Error in ../node/pkg/services/notificator/nats/service.go
NatsNatsReconnectedToTheServer185 = "nats: reconnected to the server" // Warn in ../node/pkg/services/notificator/nats/service.go
NatsNatsClosingConnectionAsTheContextIsDone186 = "nats: closing connection as the context is done" // Info in ../node/pkg/services/notificator/nats/service.go
ControllerStartingToAnnounceTheValuesOfTheMetrics187 = "starting to announce the values of the metrics" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerCouldNotInitializeIteratorOverLocallyCollectedMetrics188 = "could not initialize iterator over locally collected metrics" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerCouldNotInitializeAnnouncementAccumulator189 = "could not initialize announcement accumulator" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerIteratorOverLocallyCollectedMetricsAborted190 = "iterator over locally collected metrics aborted" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerCouldNotFinishWritingLocalAnnouncements191 = "could not finish writing local announcements" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerTrustAnnouncementSuccessfullyFinished192 = "trust announcement successfully finished" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerAnnouncementIsAlreadyStarted193 = "announcement is already started" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerAnnouncementSuccessfullyInterrupted194 = "announcement successfully interrupted" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerAnnouncementIsNotStartedOrAlreadyInterrupted195 = "announcement is not started or already interrupted" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerReportIsAlreadyStarted196 = "report is already started" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerAnnouncementSuccessfullyInterrupted197 = "announcement successfully interrupted" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerAnnouncementIsNotStartedOrAlreadyInterrupted198 = "announcement is not started or already interrupted" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerCouldNotInitializeIteratorOverLocallyAccumulatedAnnouncements199 = "could not initialize iterator over locally accumulated announcements" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerCouldNotInitializeResultTarget200 = "could not initialize result target" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerIteratorOverLocalAnnouncementsAborted201 = "iterator over local announcements aborted" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerCouldNotFinishWritingLoadEstimations202 = "could not finish writing load estimations" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
RouteCouldNotInitializeWriterProvider203 = "could not initialize writer provider" // Debug in ../node/pkg/services/container/announcement/load/route/calls.go
RouteCouldNotInitializeWriter204 = "could not initialize writer" // Debug in ../node/pkg/services/container/announcement/load/route/calls.go
RouteCouldNotPutTheValue205 = "could not put the value" // Debug in ../node/pkg/services/container/announcement/load/route/calls.go
RouteCouldNotCloseRemoteServerWriter206 = "could not close remote server writer" // Debug in ../node/pkg/services/container/announcement/load/route/calls.go
AuditorCouldNotGetObjectHeaderFromCandidate207 = "could not get object header from candidate" // Debug in ../node/pkg/services/audit/auditor/pop.go
AuditorCouldNotBuildPlacementForObject208 = "could not build placement for object" // Debug in ../node/pkg/services/audit/auditor/pop.go
AuditorCantHeadObject209 = "can't head object" // Debug in ../node/pkg/services/audit/auditor/por.go
AuditorCantConcatenateTzHash210 = "can't concatenate tz hash" // Debug in ../node/pkg/services/audit/auditor/por.go
AuditorStorageGroupSizeCheckFailed211 = "storage group size check failed" // Debug in ../node/pkg/services/audit/auditor/por.go
AuditorStorageGroupTzHashCheckFailed212 = "storage group tz hash check failed" // Debug in ../node/pkg/services/audit/auditor/por.go
AuditorCantBuildPlacementForStorageGroupMember213 = "can't build placement for storage group member" // Info in ../node/pkg/services/audit/auditor/por.go
AuditorAuditContextIsDone214 = "audit context is done" // Debug in ../node/pkg/services/audit/auditor/context.go
AuditorWritingAuditReport215 = "writing audit report..." // Debug in ../node/pkg/services/audit/auditor/context.go
AuditorCouldNotWriteAuditReport216 = "could not write audit report" // Error in ../node/pkg/services/audit/auditor/context.go
AuditorSleepBeforeGetRangeHash217 = "sleep before get range hash" // Debug in ../node/pkg/services/audit/auditor/pdp.go
AuditorCouldNotGetPayloadRangeHash218 = "could not get payload range hash" // Debug in ../node/pkg/services/audit/auditor/pdp.go
TaskmanagerProcessRoutine219 = "process routine" // Info in ../node/pkg/services/audit/taskmanager/listen.go
TaskmanagerStopListenerByContext220 = "stop listener by context" // Warn in ../node/pkg/services/audit/taskmanager/listen.go
TaskmanagerQueueChannelIsClosed221 = "queue channel is closed" // Warn in ../node/pkg/services/audit/taskmanager/listen.go
TaskmanagerCouldNotGeneratePDPWorkerPool222 = "could not generate PDP worker pool" // Error in ../node/pkg/services/audit/taskmanager/listen.go
TaskmanagerCouldNotGeneratePoRWorkerPool223 = "could not generate PoR worker pool" // Error in ../node/pkg/services/audit/taskmanager/listen.go
TaskmanagerCouldNotSubmitAuditTask224 = "could not submit audit task" // Warn in ../node/pkg/services/audit/taskmanager/listen.go
ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch225 = "could not restore block subscription after RPC switch" // Error in ../node/pkg/morph/client/notifications.go
ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch226 = "could not restore notification subscription after RPC switch" // Error in ../node/pkg/morph/client/notifications.go
ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch227 = "could not restore notary notification subscription after RPC switch" // Error in ../node/pkg/morph/client/notifications.go
ClientCouldNotEstablishConnectionToTheSwitchedRPCNode228 = "could not establish connection to the switched RPC node" // Warn in ../node/pkg/morph/client/multi.go
ClientConnectionToTheNewRPCNodeHasBeenEstablished229 = "connection to the new RPC node has been established" // Info in ../node/pkg/morph/client/multi.go
ClientSwitchingToTheNextRPCNode230 = "switching to the next RPC node" // Warn in ../node/pkg/morph/client/multi.go
ClientCouldNotEstablishConnectionToAnyRPCNode231 = "could not establish connection to any RPC node" // Error in ../node/pkg/morph/client/multi.go
ClientCouldNotCreateClientToTheHigherPriorityNode232 = "could not create client to the higher priority node" // Warn in ../node/pkg/morph/client/multi.go
ClientSwitchedToTheHigherPriorityRPC233 = "switched to the higher priority RPC" // Info in ../node/pkg/morph/client/multi.go
ClientCouldNotRestoreSideChainSubscriptionsUsingNode234 = "could not restore side chain subscriptions using node" // Warn in ../node/pkg/morph/client/multi.go
ClientNotaryDepositHasAlreadyBeenMade235 = "notary deposit has already been made" // Info in ../node/pkg/morph/client/notary.go
ClientNotaryDepositInvoke236 = "notary deposit invoke" // Info in ../node/pkg/morph/client/notary.go
ClientNotaryRequestWithPreparedMainTXInvoked237 = "notary request with prepared main TX invoked" // Debug in ../node/pkg/morph/client/notary.go
ClientNotaryRequestInvoked238 = "notary request invoked" // Debug in ../node/pkg/morph/client/notary.go
ClientNeoClientInvoke239 = "neo client invoke" // Debug in ../node/pkg/morph/client/client.go
ClientNativeGasTransferInvoke240 = "native gas transfer invoke" // Debug in ../node/pkg/morph/client/client.go
ClientBatchGasTransferInvoke241 = "batch gas transfer invoke" // Debug in ../node/pkg/morph/client/client.go
ClientCantGetBlockchainHeight242 = "can't get blockchain height" // Error in ../node/pkg/morph/client/client.go
ClientCantGetBlockchainHeight243 = "can't get blockchain height" // Error in ../node/pkg/morph/client/client.go
EventCouldNotSubmitHandlerToWorkerPool244 = "could not Submit handler to worker pool" // Warn in ../node/pkg/morph/event/utils.go
EventCouldNotStartListenToEvents245 = "could not start listen to events" // Error in ../node/pkg/morph/event/listener.go
EventCouldNotStartListenToEvents246 = "could not start listen to events" // Error in ../node/pkg/morph/event/listener.go
EventStopEventListenerByError247 = "stop event listener by error" // Error in ../node/pkg/morph/event/listener.go
EventStopEventListenerByContext248 = "stop event listener by context" // Info in ../node/pkg/morph/event/listener.go
EventStopEventListenerByNotificationChannel249 = "stop event listener by notification channel" // Warn in ../node/pkg/morph/event/listener.go
EventNilNotificationEventWasCaught250 = "nil notification event was caught" // Warn in ../node/pkg/morph/event/listener.go
EventStopEventListenerByNotaryChannel251 = "stop event listener by notary channel" // Warn in ../node/pkg/morph/event/listener.go
EventNilNotaryEventWasCaught252 = "nil notary event was caught" // Warn in ../node/pkg/morph/event/listener.go
EventStopEventListenerByBlockChannel253 = "stop event listener by block channel" // Warn in ../node/pkg/morph/event/listener.go
EventNilBlockWasCaught254 = "nil block was caught" // Warn in ../node/pkg/morph/event/listener.go
EventListenerWorkerPoolDrained255 = "listener worker pool drained" // Warn in ../node/pkg/morph/event/listener.go
EventListenerWorkerPoolDrained256 = "listener worker pool drained" // Warn in ../node/pkg/morph/event/listener.go
EventListenerWorkerPoolDrained257 = "listener worker pool drained" // Warn in ../node/pkg/morph/event/listener.go
EventEventParserNotSet258 = "event parser not set" // Debug in ../node/pkg/morph/event/listener.go
EventCouldNotParseNotificationEvent259 = "could not parse notification event" // Warn in ../node/pkg/morph/event/listener.go
EventNotificationHandlersForParsedNotificationEventWereNotRegistered260 = "notification handlers for parsed notification event were not registered" // Info in ../node/pkg/morph/event/listener.go
EventSkipExpiredMainTXNotaryEvent261 = "skip expired main TX notary event" // Warn in ../node/pkg/morph/event/listener.go
EventCouldNotPrepareAndValidateNotaryEvent262 = "could not prepare and validate notary event" // Warn in ../node/pkg/morph/event/listener.go
EventNotaryParserNotSet263 = "notary parser not set" // Debug in ../node/pkg/morph/event/listener.go
EventCouldNotParseNotaryEvent264 = "could not parse notary event" // Warn in ../node/pkg/morph/event/listener.go
EventNotaryHandlersForParsedNotificationEventWereNotRegistered265 = "notary handlers for parsed notification event were not registered" // Info in ../node/pkg/morph/event/listener.go
EventIgnoreNilEventParser266 = "ignore nil event parser" // Info in ../node/pkg/morph/event/listener.go
EventListenerHasBeenAlreadyStartedIgnoreParser267 = "listener has already been started, ignore parser" // Warn in ../node/pkg/morph/event/listener.go
EventRegisteredNewEventParser268 = "registered new event parser" // Debug in ../node/pkg/morph/event/listener.go
EventIgnoreNilEventHandler269 = "ignore nil event handler" // Warn in ../node/pkg/morph/event/listener.go
EventIgnoreHandlerOfEventWoParser270 = "ignore handler of event w/o parser" // Warn in ../node/pkg/morph/event/listener.go
EventRegisteredNewEventHandler271 = "registered new event handler" // Debug in ../node/pkg/morph/event/listener.go
EventIgnoreNilNotaryEventParser272 = "ignore nil notary event parser" // Info in ../node/pkg/morph/event/listener.go
EventListenerHasBeenAlreadyStartedIgnoreNotaryParser273 = "listener has already been started, ignore notary parser" // Warn in ../node/pkg/morph/event/listener.go
EventRegisteredNewEventParser274 = "registered new event parser" // Info in ../node/pkg/morph/event/listener.go
EventIgnoreNilNotaryEventHandler275 = "ignore nil notary event handler" // Warn in ../node/pkg/morph/event/listener.go
EventIgnoreHandlerOfNotaryEventWoParser276 = "ignore handler of notary event w/o parser" // Warn in ../node/pkg/morph/event/listener.go
EventRegisteredNewEventHandler277 = "registered new event handler" // Info in ../node/pkg/morph/event/listener.go
EventIgnoreNilBlockHandler278 = "ignore nil block handler" // Warn in ../node/pkg/morph/event/listener.go
SubscriberUnsubscribeForNotification279 = "unsubscribe for notification" // Error in ../node/pkg/morph/subscriber/subscriber.go
SubscriberRemoteNotificationChannelHasBeenClosed280 = "remote notification channel has been closed" // Warn in ../node/pkg/morph/subscriber/subscriber.go
SubscriberCantCastNotifyEventValueToTheNotifyStruct281 = "can't cast notify event value to the notify struct" // Error in ../node/pkg/morph/subscriber/subscriber.go
SubscriberNewNotificationEventFromSidechain282 = "new notification event from sidechain" // Debug in ../node/pkg/morph/subscriber/subscriber.go
SubscriberCantCastBlockEventValueToBlock283 = "can't cast block event value to block" // Error in ../node/pkg/morph/subscriber/subscriber.go
SubscriberCantCastNotifyEventValueToTheNotaryRequestStruct284 = "can't cast notify event value to the notary request struct" // Error in ../node/pkg/morph/subscriber/subscriber.go
SubscriberUnsupportedNotificationFromTheChain285 = "unsupported notification from the chain" // Debug in ../node/pkg/morph/subscriber/subscriber.go
BlobovniczaCreatingDirectoryForBoltDB286 = "creating directory for BoltDB" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
BlobovniczaOpeningBoltDB287 = "opening BoltDB" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
BlobovniczaInitializing288 = "initializing..." // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
BlobovniczaAlreadyInitialized289 = "already initialized" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
BlobovniczaCreatingBucketForSizeRange290 = "creating bucket for size range" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
BlobovniczaClosingBoltDB291 = "closing BoltDB" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
BlobovniczaObjectWasRemovedFromBucket292 = "object was removed from bucket" // Debug in ../node/pkg/local_object_storage/blobovnicza/delete.go
BlobstorOpening293 = "opening..." // Debug in ../node/pkg/local_object_storage/blobstor/control.go
BlobstorInitializing294 = "initializing..." // Debug in ../node/pkg/local_object_storage/blobstor/control.go
BlobstorClosing295 = "closing..." // Debug in ../node/pkg/local_object_storage/blobstor/control.go
BlobstorCouldntCloseStorage296 = "couldn't close storage" // Info in ../node/pkg/local_object_storage/blobstor/control.go
BlobstorErrorOccurredDuringObjectExistenceChecking297 = "error occurred during object existence checking" // Warn in ../node/pkg/local_object_storage/blobstor/exists.go
BlobstorErrorOccurredDuringTheIteration298 = "error occurred during the iteration" // Warn in ../node/pkg/local_object_storage/blobstor/iterate.go
EngineShardHasBeenRemoved299 = "shard has been removed" // Info in ../node/pkg/local_object_storage/engine/shards.go
EngineCouldNotCloseRemovedShard300 = "could not close removed shard" // Error in ../node/pkg/local_object_storage/engine/shards.go
EngineCouldNotOpenShardClosingAndSkipping301 = "could not open shard, closing and skipping" // Error in ../node/pkg/local_object_storage/engine/control.go
EngineCouldNotClosePartiallyInitializedShard302 = "could not close partially initialized shard" // Error in ../node/pkg/local_object_storage/engine/control.go
EngineCouldNotInitializeShardClosingAndSkipping303 = "could not initialize shard, closing and skipping" // Error in ../node/pkg/local_object_storage/engine/control.go
EngineCouldNotClosePartiallyInitializedShard304 = "could not close partially initialized shard" // Error in ../node/pkg/local_object_storage/engine/control.go
EngineCouldNotCloseShard305 = "could not close shard" // Debug in ../node/pkg/local_object_storage/engine/control.go
EngineCouldNotReloadAShard306 = "could not reload a shard" // Error in ../node/pkg/local_object_storage/engine/control.go
EngineAddedNewShard307 = "added new shard" // Info in ../node/pkg/local_object_storage/engine/control.go
EngineCouldNotMarkObjectForShardRelocation308 = "could not mark object for shard relocation" // Warn in ../node/pkg/local_object_storage/engine/put.go
EngineCouldNotPutObjectToShard309 = "could not put object to shard" // Warn in ../node/pkg/local_object_storage/engine/put.go
EngineErrorDuringSearchingForObjectChildren310 = "error during searching for object children" // Warn in ../node/pkg/local_object_storage/engine/delete.go
EngineCouldNotInhumeObjectInShard311 = "could not inhume object in shard" // Debug in ../node/pkg/local_object_storage/engine/delete.go
EngineStartingRemovalOfLocallyredundantCopies312 = "starting removal of locally-redundant copies" // Info in ../node/pkg/local_object_storage/engine/remove_copies.go
EngineStartedDuplicatesRemovalRoutine313 = "started duplicates removal routine" // Debug in ../node/pkg/local_object_storage/engine/remove_copies.go
EngineFinishedRemovalOfLocallyredundantCopies314 = "finished removal of locally-redundant copies" // Error in ../node/pkg/local_object_storage/engine/remove_copies.go
EngineFinishedRemovalOfLocallyredundantCopies315 = "finished removal of locally-redundant copies" // Info in ../node/pkg/local_object_storage/engine/remove_copies.go
EngineRemovingAnObjectWithoutFullLockingCheck316 = "removing an object without full locking check" // Warn in ../node/pkg/local_object_storage/engine/inhume.go
EngineInterruptProcessingTheExpiredLocks317 = "interrupt processing the expired locks" // Info in ../node/pkg/local_object_storage/engine/inhume.go
EngineInterruptProcessingTheDeletedLocks318 = "interrupt processing the deleted locks" // Info in ../node/pkg/local_object_storage/engine/inhume.go
EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly319 = "failed to move shard in degraded-read-only mode, moving to read-only" // Error in ../node/pkg/local_object_storage/engine/engine.go
EngineFailedToMoveShardInReadonlyMode320 = "failed to move shard in read-only mode" // Error in ../node/pkg/local_object_storage/engine/engine.go
EngineShardIsMovedInReadonlyModeDueToErrorThreshold321 = "shard is moved in read-only mode due to error threshold" // Info in ../node/pkg/local_object_storage/engine/engine.go
EngineShardIsMovedInDegradedModeDueToErrorThreshold322 = "shard is moved in degraded mode due to error threshold" // Info in ../node/pkg/local_object_storage/engine/engine.go
EngineModeChangeIsInProgressIgnoringSetmodeRequest323 = "mode change is in progress, ignoring set-mode request" // Debug in ../node/pkg/local_object_storage/engine/engine.go
EngineStartedShardsEvacuation324 = "started shards evacuation" // Info in ../node/pkg/local_object_storage/engine/evacuate.go
EngineFinishedShardsEvacuation325 = "finished shards evacuation" // Info in ../node/pkg/local_object_storage/engine/evacuate.go
EngineObjectIsMovedToAnotherShard326 = "object is moved to another shard" // Debug in ../node/pkg/local_object_storage/engine/evacuate.go
MetabaseMissingMatcher327 = "missing matcher" // Debug in ../node/pkg/local_object_storage/metabase/select.go
MetabaseErrorInFKBTSelection328 = "error in FKBT selection" // Debug in ../node/pkg/local_object_storage/metabase/select.go
MetabaseCantDecodeListBucketLeaf329 = "can't decode list bucket leaf" // Debug in ../node/pkg/local_object_storage/metabase/select.go
MetabaseUnknownOperation330 = "unknown operation" // Debug in ../node/pkg/local_object_storage/metabase/select.go
MetabaseCantDecodeListBucketLeaf331 = "can't decode list bucket leaf" // Debug in ../node/pkg/local_object_storage/metabase/select.go
MetabaseCantIterateOverTheBucket332 = "can't iterate over the bucket" // Debug in ../node/pkg/local_object_storage/metabase/select.go
MetabaseUnknownOperation333 = "unknown operation" // Debug in ../node/pkg/local_object_storage/metabase/select.go
MetabaseCouldNotIterateOverTheBuckets334 = "could not iterate over the buckets" // Debug in ../node/pkg/local_object_storage/metabase/select.go
MetabaseCreatedDirectoryForMetabase335 = "created directory for Metabase" // Debug in ../node/pkg/local_object_storage/metabase/control.go
MetabaseOpenedBoltDBInstanceForMetabase336 = "opened boltDB instance for Metabase" // Debug in ../node/pkg/local_object_storage/metabase/control.go
MetabaseCheckingMetabaseVersion337 = "checking metabase version" // Debug in ../node/pkg/local_object_storage/metabase/control.go
ShardCantSelectAllObjects338 = "can't select all objects" // Debug in ../node/pkg/local_object_storage/shard/list.go
ShardSettingShardMode339 = "setting shard mode" // Info in ../node/pkg/local_object_storage/shard/mode.go
ShardShardModeSetSuccessfully340 = "shard mode set successfully" // Info in ../node/pkg/local_object_storage/shard/mode.go
ShardCouldNotMarkObjectForShardRelocationInMetabase341 = "could not mark object for shard relocation in metabase" // Debug in ../node/pkg/local_object_storage/shard/move.go
ShardCantDeleteObjectFromWriteCache342 = "can't delete object from write cache" // Warn in ../node/pkg/local_object_storage/shard/delete.go
ShardCantGetStorageIDFromMetabase343 = "can't get storage ID from metabase" // Debug in ../node/pkg/local_object_storage/shard/delete.go
ShardCantRemoveObjectFromBlobStor344 = "can't remove object from blobStor" // Debug in ../node/pkg/local_object_storage/shard/delete.go
ShardFetchingObjectWithoutMeta345 = "fetching object without meta" // Warn in ../node/pkg/local_object_storage/shard/get.go
ShardObjectIsMissingInWritecache346 = "object is missing in write-cache" // Debug in ../node/pkg/local_object_storage/shard/get.go
ShardFailedToFetchObjectFromWritecache347 = "failed to fetch object from write-cache" // Error in ../node/pkg/local_object_storage/shard/get.go
ShardCantPutObjectToTheWritecacheTryingBlobstor348 = "can't put object to the write-cache, trying blobstor" // Debug in ../node/pkg/local_object_storage/shard/put.go
ShardMetaObjectCounterRead349 = "meta: object counter read" // Warn in ../node/pkg/local_object_storage/shard/shard.go
ShardMetaCantReadContainerList350 = "meta: can't read container list" // Warn in ../node/pkg/local_object_storage/shard/shard.go
ShardMetaCantReadContainerSize351 = "meta: can't read container size" // Warn in ../node/pkg/local_object_storage/shard/shard.go
ShardMetabaseFailureSwitchingMode352 = "metabase failure, switching mode" // Error in ../node/pkg/local_object_storage/shard/control.go
ShardCantMoveShardToReadonlySwitchMode353 = "can't move shard to readonly, switch mode" // Error in ../node/pkg/local_object_storage/shard/control.go
ShardCouldNotUnmarshalObject354 = "could not unmarshal object" // Warn in ../node/pkg/local_object_storage/shard/control.go
ShardCouldNotCloseShardComponent355 = "could not close shard component" // Error in ../node/pkg/local_object_storage/shard/control.go
ShardCantOpenMetabaseMoveToADegradedMode356 = "can't open metabase, move to a degraded mode" // Error in ../node/pkg/local_object_storage/shard/control.go
ShardCantInitializeMetabaseMoveToADegradedreadonlyMode357 = "can't initialize metabase, move to a degraded-read-only mode" // Error in ../node/pkg/local_object_storage/shard/control.go
ShardTryingToRestoreReadwriteMode358 = "trying to restore read-write mode" // Info in ../node/pkg/local_object_storage/shard/control.go
ShardStopEventListenerByClosedChannel359 = "stop event listener by closed channel" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardCouldNotSubmitGCJobToWorkerPool360 = "could not submit GC job to worker pool" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardGCIsStopped361 = "GC is stopped" // Debug in ../node/pkg/local_object_storage/shard/gc.go
ShardWaitingForGCWorkersToStop362 = "waiting for GC workers to stop..." // Info in ../node/pkg/local_object_storage/shard/gc.go
ShardIteratorOverMetabaseGraveyardFailed363 = "iterator over metabase graveyard failed" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardCouldNotDeleteTheObjects364 = "could not delete the objects" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardIteratorOverExpiredObjectsFailed365 = "iterator over expired objects failed" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardCouldNotInhumeTheObjects366 = "could not inhume the objects" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardStartedExpiredTombstonesHandling367 = "started expired tombstones handling" // Debug in ../node/pkg/local_object_storage/shard/gc.go
ShardIteratingTombstones368 = "iterating tombstones" // Debug in ../node/pkg/local_object_storage/shard/gc.go
ShardShardIsInADegradedModeSkipCollectingExpiredTombstones369 = "shard is in a degraded mode, skip collecting expired tombstones" // Debug in ../node/pkg/local_object_storage/shard/gc.go
ShardIteratorOverGraveyardFailed370 = "iterator over graveyard failed" // Error in ../node/pkg/local_object_storage/shard/gc.go
ShardHandlingExpiredTombstonesBatch371 = "handling expired tombstones batch" // Debug in ../node/pkg/local_object_storage/shard/gc.go
ShardFinishedExpiredTombstonesHandling372 = "finished expired tombstones handling" // Debug in ../node/pkg/local_object_storage/shard/gc.go
ShardIteratorOverExpiredLocksFailed373 = "iterator over expired locks failed" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardCouldNotMarkTombstonesAsGarbage374 = "could not mark tombstones as garbage" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardCouldNotDropExpiredGraveRecords375 = "could not drop expired grave records" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardFailureToUnlockObjects376 = "failure to unlock objects" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardFailureToMarkLockersAsGarbage377 = "failure to mark lockers as garbage" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardFailureToGetExpiredUnlockedObjects378 = "failure to get expired unlocked objects" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardFailureToUnlockObjects379 = "failure to unlock objects" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardCouldNotMarkObjectToDeleteInMetabase380 = "could not mark object to delete in metabase" // Debug in ../node/pkg/local_object_storage/shard/inhume.go
WritecacheTriedToFlushItemsFromWritecache381 = "tried to flush items from write-cache" // Debug in ../node/pkg/local_object_storage/writecache/flush.go
WritecacheWaitingForChannelsToFlush382 = "waiting for channels to flush" // Info in ../node/pkg/local_object_storage/writecache/mode.go
WritecacheFillingFlushMarksForObjectsInFSTree383 = "filling flush marks for objects in FSTree" // Info in ../node/pkg/local_object_storage/writecache/init.go
WritecacheFinishedUpdatingFSTreeFlushMarks384 = "finished updating FSTree flush marks" // Info in ../node/pkg/local_object_storage/writecache/init.go
WritecacheFillingFlushMarksForObjectsInDatabase385 = "filling flush marks for objects in database" // Info in ../node/pkg/local_object_storage/writecache/init.go
WritecacheFinishedUpdatingFlushMarks386 = "finished updating flush marks" // Info in ../node/pkg/local_object_storage/writecache/init.go
WritecacheCantRemoveObjectsFromTheDatabase387 = "can't remove objects from the database" // Error in ../node/pkg/local_object_storage/writecache/storage.go
WritecacheCantParseAddress388 = "can't parse address" // Error in ../node/pkg/local_object_storage/writecache/storage.go
WritecacheCantRemoveObjectFromWritecache389 = "can't remove object from write-cache" // Error in ../node/pkg/local_object_storage/writecache/storage.go
BlobovniczatreeCouldNotGetObjectFromLevel390 = "could not get object from level" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
BlobovniczatreeCouldNotReadPayloadRangeFromOpenedBlobovnicza391 = "could not read payload range from opened blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
BlobovniczatreeCouldNotReadPayloadRangeFromActiveBlobovnicza392 = "could not read payload range from active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
BlobovniczatreeCouldNotCloseBlobovnicza393 = "could not close Blobovnicza" // Error in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
BlobovniczatreeBlobovniczaSuccessfullyClosedOnEvict394 = "blobovnicza successfully closed on evict" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
BlobovniczatreeUpdatingActiveBlobovnicza395 = "updating active blobovnicza..." // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
BlobovniczatreeActiveBlobovniczaSuccessfullyUpdated396 = "active blobovnicza successfully updated" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
BlobovniczatreeBlobovniczaSuccessfullyActivated397 = "blobovnicza successfully activated" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
BlobovniczatreeCouldNotRemoveObjectFromLevel398 = "could not remove object from level" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
BlobovniczatreeCouldNotRemoveObjectFromOpenedBlobovnicza399 = "could not remove object from opened blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
BlobovniczatreeCouldNotRemoveObjectFromActiveBlobovnicza400 = "could not remove object from active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
BlobovniczatreeCouldNotGetActiveBlobovnicza401 = "could not get active blobovnicza" // reportError in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
BlobovniczatreeCouldNotGetActiveBlobovnicza402 = "could not get active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
BlobovniczatreeBlobovniczaOverflowed403 = "blobovnicza overflowed" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
BlobovniczatreeCouldNotUpdateActiveBlobovnicza404 = "could not update active blobovnicza" // reportError in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
BlobovniczatreeCouldNotUpdateActiveBlobovnicza405 = "could not update active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza406 = "could not put object to active blobovnicza" // reportError in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza407 = "could not put object to active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
BlobovniczatreeCouldNotGetObjectFromLevel408 = "could not get object from level" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get.go
BlobovniczatreeCouldNotReadObjectFromOpenedBlobovnicza409 = "could not read object from opened blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get.go
BlobovniczatreeCouldNotGetObjectFromActiveBlobovnicza410 = "could not get object from active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get.go
BlobovniczatreeInitializingBlobovniczas411 = "initializing Blobovniczas" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization412 = "read-only mode, skip blobovniczas initialization..." // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing413 = "blobovnicza successfully initialized, closing..." // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
BlobovniczatreeCouldNotCloseActiveBlobovnicza414 = "could not close active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
BlobovniczatreeCouldNotCloseActiveBlobovnicza415 = "could not close active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
BlobovniczatreeCouldNotGetObjectFromLevel416 = "could not get object from level" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/exists.go
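// NB: "reportError" in the trailing comments of the put.go entries above
// presumably refers to the blobovnicza tree's internal error-reporting helper
// rather than a zap log level (an assumption based on the source path); the
// adjacent Debug entries carry the same messages at their call sites.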
AlphabetTick417 = "tick" // Info in ../node/pkg/innerring/processors/alphabet/handlers.go
AlphabetAlphabetProcessorWorkerPoolDrained418 = "alphabet processor worker pool drained" // Warn in ../node/pkg/innerring/processors/alphabet/handlers.go
AlphabetNonAlphabetModeIgnoreGasEmissionEvent419 = "non alphabet mode, ignore gas emission event" // Info in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent420 = "node is out of alphabet range, ignore gas emission event" // Debug in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetCantInvokeAlphabetEmitMethod421 = "can't invoke alphabet emit method" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetStorageNodeEmissionIsOff422 = "storage node emission is off" // Info in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes423 = "can't get netmap snapshot to emit gas to storage nodes" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetGasEmission424 = "gas emission" // Debug in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetCantParseNodePublicKey425 = "can't parse node public key" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetCantTransferGas426 = "can't transfer gas" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetCantTransferGasToWallet427 = "can't transfer gas to wallet" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetAlphabetWorkerPool428 = "alphabet worker pool" // Debug in ../node/pkg/innerring/processors/alphabet/processor.go
AuditContainerListingFinished429 = "container listing finished" // Debug in ../node/pkg/innerring/processors/audit/scheduler.go
AuditNewRoundOfAudit430 = "new round of audit" // Info in ../node/pkg/innerring/processors/audit/handlers.go
AuditPreviousRoundOfAuditPrepareHasntFinishedYet431 = "previous round of audit prepare hasn't finished yet" // Warn in ../node/pkg/innerring/processors/audit/handlers.go
AuditSomeTasksFromPreviousEpochAreSkipped432 = "some tasks from previous epoch are skipped" // Info in ../node/pkg/innerring/processors/audit/process.go
AuditContainerSelectionFailure433 = "container selection failure" // Error in ../node/pkg/innerring/processors/audit/process.go
AuditSelectContainersForAudit434 = "select containers for audit" // Info in ../node/pkg/innerring/processors/audit/process.go
AuditCantFetchNetworkMap435 = "can't fetch network map" // Error in ../node/pkg/innerring/processors/audit/process.go
AuditCantGetContainerInfoIgnore436 = "can't get container info, ignore" // Error in ../node/pkg/innerring/processors/audit/process.go
AuditCantBuildPlacementForContainerIgnore437 = "can't build placement for container, ignore" // Info in ../node/pkg/innerring/processors/audit/process.go
AuditSelectStorageGroupsForAudit438 = "select storage groups for audit" // Info in ../node/pkg/innerring/processors/audit/process.go
AuditFilterExpiredStorageGroupsForAudit439 = "filter expired storage groups for audit" // Info in ../node/pkg/innerring/processors/audit/process.go
AuditParseClientNodeInfo440 = "parse client node info" // Warn in ../node/pkg/innerring/processors/audit/process.go
AuditErrorInStorageGroupSearch441 = "error in storage group search" // Warn in ../node/pkg/innerring/processors/audit/process.go
AuditCouldNotGetStorageGroupObjectForAuditSkipping442 = "could not get storage group object for audit, skipping" // Error in ../node/pkg/innerring/processors/audit/process.go
BalanceNotification443 = "notification" // Info in ../node/pkg/innerring/processors/balance/handlers.go
BalanceBalanceWorkerPoolDrained444 = "balance worker pool drained" // Warn in ../node/pkg/innerring/processors/balance/handlers.go
BalanceNonAlphabetModeIgnoreBalanceLock445 = "non alphabet mode, ignore balance lock" // Info in ../node/pkg/innerring/processors/balance/process_assets.go
BalanceCantSendLockAssetTx446 = "can't send lock asset tx" // Error in ../node/pkg/innerring/processors/balance/process_assets.go
BalanceBalanceWorkerPool447 = "balance worker pool" // Debug in ../node/pkg/innerring/processors/balance/processor.go
ContainerContainerWorkerPool448 = "container worker pool" // Debug in ../node/pkg/innerring/processors/container/processor.go
ContainerNotification449 = "notification" // Info in ../node/pkg/innerring/processors/container/handlers.go
ContainerContainerProcessorWorkerPoolDrained450 = "container processor worker pool drained" // Warn in ../node/pkg/innerring/processors/container/handlers.go
ContainerNotification451 = "notification" // Info in ../node/pkg/innerring/processors/container/handlers.go
ContainerContainerProcessorWorkerPoolDrained452 = "container processor worker pool drained" // Warn in ../node/pkg/innerring/processors/container/handlers.go
ContainerNotification453 = "notification" // Info in ../node/pkg/innerring/processors/container/handlers.go
ContainerContainerProcessorWorkerPoolDrained454 = "container processor worker pool drained" // Warn in ../node/pkg/innerring/processors/container/handlers.go
ContainerNonAlphabetModeIgnoreContainerPut455 = "non alphabet mode, ignore container put" // Info in ../node/pkg/innerring/processors/container/process_container.go
ContainerPutContainerCheckFailed456 = "put container check failed" // Error in ../node/pkg/innerring/processors/container/process_container.go
ContainerCouldNotApprovePutContainer457 = "could not approve put container" // Error in ../node/pkg/innerring/processors/container/process_container.go
ContainerNonAlphabetModeIgnoreContainerDelete458 = "non alphabet mode, ignore container delete" // Info in ../node/pkg/innerring/processors/container/process_container.go
ContainerDeleteContainerCheckFailed459 = "delete container check failed" // Error in ../node/pkg/innerring/processors/container/process_container.go
ContainerCouldNotApproveDeleteContainer460 = "could not approve delete container" // Error in ../node/pkg/innerring/processors/container/process_container.go
ContainerNonAlphabetModeIgnoreSetEACL461 = "non alphabet mode, ignore set EACL" // Info in ../node/pkg/innerring/processors/container/process_eacl.go
ContainerSetEACLCheckFailed462 = "set EACL check failed" // Error in ../node/pkg/innerring/processors/container/process_eacl.go
ContainerCouldNotApproveSetEACL463 = "could not approve set EACL" // Error in ../node/pkg/innerring/processors/container/process_eacl.go
FrostfsNonAlphabetModeIgnoreBind464 = "non alphabet mode, ignore bind" // Info in ../node/pkg/innerring/processors/frostfs/process_bind.go
FrostfsInvalidManageKeyEvent465 = "invalid manage key event" // Error in ../node/pkg/innerring/processors/frostfs/process_bind.go
FrostfsCouldNotDecodeScriptHashFromBytes466 = "could not decode script hash from bytes" // Error in ../node/pkg/innerring/processors/frostfs/process_bind.go
FrostfsNonAlphabetModeIgnoreConfig467 = "non alphabet mode, ignore config" // Info in ../node/pkg/innerring/processors/frostfs/process_config.go
FrostfsCantRelaySetConfigEvent468 = "can't relay set config event" // Error in ../node/pkg/innerring/processors/frostfs/process_config.go
FrostfsFrostfsWorkerPool469 = "frostfs worker pool" // Debug in ../node/pkg/innerring/processors/frostfs/processor.go
FrostfsNotification470 = "notification" // Info in ../node/pkg/innerring/processors/frostfs/handlers.go
FrostfsFrostfsProcessorWorkerPoolDrained471 = "frostfs processor worker pool drained" // Warn in ../node/pkg/innerring/processors/frostfs/handlers.go
FrostfsNotification472 = "notification" // Info in ../node/pkg/innerring/processors/frostfs/handlers.go
FrostfsFrostfsProcessorWorkerPoolDrained473 = "frostfs processor worker pool drained" // Warn in ../node/pkg/innerring/processors/frostfs/handlers.go
FrostfsNotification474 = "notification" // Info in ../node/pkg/innerring/processors/frostfs/handlers.go
FrostfsFrostfsProcessorWorkerPoolDrained475 = "frostfs processor worker pool drained" // Warn in ../node/pkg/innerring/processors/frostfs/handlers.go
FrostfsNotification476 = "notification" // Info in ../node/pkg/innerring/processors/frostfs/handlers.go
FrostfsFrostfsProcessorWorkerPoolDrained477 = "frostfs processor worker pool drained" // Warn in ../node/pkg/innerring/processors/frostfs/handlers.go
FrostfsNotification478 = "notification" // Info in ../node/pkg/innerring/processors/frostfs/handlers.go
FrostfsFrostfsProcessorWorkerPoolDrained479 = "frostfs processor worker pool drained" // Warn in ../node/pkg/innerring/processors/frostfs/handlers.go
FrostfsNotification480 = "notification" // Info in ../node/pkg/innerring/processors/frostfs/handlers.go
FrostfsFrostfsProcessorWorkerPoolDrained481 = "frostfs processor worker pool drained" // Warn in ../node/pkg/innerring/processors/frostfs/handlers.go
FrostfsNonAlphabetModeIgnoreDeposit482 = "non alphabet mode, ignore deposit" // Info in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostfsCantTransferAssetsToBalanceContract483 = "can't transfer assets to balance contract" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostfsDoubleMintEmissionDeclined484 = "double mint emission declined" // Warn in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostfsCantGetGasBalanceOfTheNode485 = "can't get gas balance of the node" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostfsGasBalanceThresholdHasBeenReached486 = "gas balance threshold has been reached" // Warn in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostfsCantTransferNativeGasToReceiver487 = "can't transfer native gas to receiver" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostfsNonAlphabetModeIgnoreWithdraw488 = "non alphabet mode, ignore withdraw" // Info in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostfsCantCreateLockAccount489 = "can't create lock account" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostfsCantLockAssetsForWithdraw490 = "can't lock assets for withdraw" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostfsNonAlphabetModeIgnoreCheque491 = "non alphabet mode, ignore cheque" // Info in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostfsCantTransferAssetsToFedContract492 = "can't transfer assets to fed contract" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
GovernanceNewEvent493 = "new event" // Info in ../node/pkg/innerring/processors/governance/handlers.go
GovernanceGovernanceWorkerPoolDrained494 = "governance worker pool drained" // Warn in ../node/pkg/innerring/processors/governance/handlers.go
GovernanceNonAlphabetModeIgnoreAlphabetSync495 = "non alphabet mode, ignore alphabet sync" // Info in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantFetchAlphabetListFromMainNet496 = "can't fetch alphabet list from main net" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantFetchAlphabetListFromSideChain497 = "can't fetch alphabet list from side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantMergeAlphabetListsFromMainNetAndSideChain498 = "can't merge alphabet lists from main net and side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged499 = "no governance update, alphabet list has not been changed" // Info in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceAlphabetListHasBeenChangedStartingUpdate500 = "alphabet list has been changed, starting update" // Info in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantVoteForSideChainCommittee501 = "can't vote for side chain committee" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceFinishedAlphabetListUpdate502 = "finished alphabet list update" // Info in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantFetchInnerRingListFromSideChain503 = "can't fetch inner ring list from side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys504 = "can't create new inner ring list with new alphabet keys" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceUpdateOfTheInnerRingList505 = "update of the inner ring list" // Info in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantUpdateInnerRingListWithNewAlphabetKeys506 = "can't update inner ring list with new alphabet keys" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantUpdateListOfNotaryNodesInSideChain507 = "can't update list of notary nodes in side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract508 = "can't update list of alphabet nodes in frostfs contract" // Error in ../node/pkg/innerring/processors/governance/process_update.go
NetmapNetmapWorkerPool509 = "netmap worker pool" // Debug in ../node/pkg/innerring/processors/netmap/processor.go
NetmapTick510 = "tick" // Info in ../node/pkg/innerring/processors/netmap/handlers.go
NetmapNetmapWorkerPoolDrained511 = "netmap worker pool drained" // Warn in ../node/pkg/innerring/processors/netmap/handlers.go
NetmapNotification512 = "notification" // Info in ../node/pkg/innerring/processors/netmap/handlers.go
NetmapNetmapWorkerPoolDrained513 = "netmap worker pool drained" // Warn in ../node/pkg/innerring/processors/netmap/handlers.go
NetmapNotification514 = "notification" // Info in ../node/pkg/innerring/processors/netmap/handlers.go
NetmapNetmapWorkerPoolDrained515 = "netmap worker pool drained" // Warn in ../node/pkg/innerring/processors/netmap/handlers.go
NetmapNotification516 = "notification" // Info in ../node/pkg/innerring/processors/netmap/handlers.go
NetmapNetmapWorkerPoolDrained517 = "netmap worker pool drained" // Warn in ../node/pkg/innerring/processors/netmap/handlers.go
NetmapNetmapCleanUpRoutineIsDisabled518 = "netmap clean up routine is disabled" // Debug in ../node/pkg/innerring/processors/netmap/handlers.go
NetmapTick519 = "tick" // Info in ../node/pkg/innerring/processors/netmap/handlers.go
NetmapNetmapWorkerPoolDrained520 = "netmap worker pool drained" // Warn in ../node/pkg/innerring/processors/netmap/handlers.go
NetmapNotification521 = "notification" // Info in ../node/pkg/innerring/processors/netmap/handlers.go
NetmapNetmapWorkerPoolDrained522 = "netmap worker pool drained" // Warn in ../node/pkg/innerring/processors/netmap/handlers.go
NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick523 = "non alphabet mode, ignore new netmap cleanup tick" // Info in ../node/pkg/innerring/processors/netmap/process_cleanup.go
NetmapCantDecodePublicKeyOfNetmapNode524 = "can't decode public key of netmap node" // Warn in ../node/pkg/innerring/processors/netmap/process_cleanup.go
NetmapVoteToRemoveNodeFromNetmap525 = "vote to remove node from netmap" // Info in ../node/pkg/innerring/processors/netmap/process_cleanup.go
NetmapCantInvokeNetmapUpdateState526 = "can't invoke netmap.UpdateState" // Error in ../node/pkg/innerring/processors/netmap/process_cleanup.go
NetmapCantIterateOnNetmapCleanerCache527 = "can't iterate on netmap cleaner cache" // Warn in ../node/pkg/innerring/processors/netmap/process_cleanup.go
NetmapCantGetEpochDuration528 = "can't get epoch duration" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapCantGetTransactionHeight529 = "can't get transaction height" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapCantResetEpochTimer530 = "can't reset epoch timer" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapCantGetNetmapSnapshotToPerformCleanup531 = "can't get netmap snapshot to perform cleanup" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapCantStartContainerSizeEstimation532 = "can't start container size estimation" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapNonAlphabetModeIgnoreNewEpochTick533 = "non alphabet mode, ignore new epoch tick" // Info in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapNextEpoch534 = "next epoch" // Debug in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapCantInvokeNetmapNewEpoch535 = "can't invoke netmap.NewEpoch" // Error in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapNonAlphabetModeIgnoreNewPeerNotification536 = "non alphabet mode, ignore new peer notification" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapNonhaltNotaryTransaction537 = "non-halt notary transaction" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCantParseNetworkMapCandidate538 = "can't parse network map candidate" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate539 = "could not verify and update information about network map candidate" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapApprovingNetworkMapCandidate540 = "approving network map candidate" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCantInvokeNetmapAddPeer541 = "can't invoke netmap.AddPeer" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapNonAlphabetModeIgnoreUpdatePeerNotification542 = "non alphabet mode, ignore update peer notification" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapPreventSwitchingNodeToMaintenanceState543 = "prevent switching node to maintenance state" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCantInvokeNetmapUpdatePeer544 = "can't invoke netmap.UpdatePeer" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapNonAlphabetModeIgnoreRemoveNodeFromSubnetNotification545 = "non alphabet mode, ignore remove node from subnet notification" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCouldNotGetNetworkMapCandidates546 = "could not get network map candidates" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCouldNotUnmarshalSubnetId547 = "could not unmarshal subnet id" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapGotZeroSubnetInRemoveNodeNotification548 = "got zero subnet in remove node notification" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCouldNotIterateOverSubnetworksOfTheNode549 = "could not iterate over subnetworks of the node" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapVoteToRemoveNodeFromNetmap550 = "vote to remove node from netmap" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCouldNotInvokeNetmapUpdateState551 = "could not invoke netmap.UpdateState" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCouldNotInvokeNetmapAddPeer552 = "could not invoke netmap.AddPeer" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
ReputationNotification553 = "notification" // Info in ../node/pkg/innerring/processors/reputation/handlers.go
ReputationReputationWorkerPoolDrained554 = "reputation worker pool drained" // Warn in ../node/pkg/innerring/processors/reputation/handlers.go
ReputationNonAlphabetModeIgnoreReputationPutNotification555 = "non alphabet mode, ignore reputation put notification" // Info in ../node/pkg/innerring/processors/reputation/process_put.go
ReputationIgnoreReputationValue556 = "ignore reputation value" // Info in ../node/pkg/innerring/processors/reputation/process_put.go
ReputationIgnoreReputationValue557 = "ignore reputation value" // Info in ../node/pkg/innerring/processors/reputation/process_put.go
ReputationIgnoreReputationValue558 = "ignore reputation value" // Info in ../node/pkg/innerring/processors/reputation/process_put.go
ReputationCantSendApprovalTxForReputationValue559 = "can't send approval tx for reputation value" // Warn in ../node/pkg/innerring/processors/reputation/process_put.go
ReputationReputationWorkerPool560 = "reputation worker pool" // Debug in ../node/pkg/innerring/processors/reputation/processor.go
SettlementNonAlphabetModeIgnoreAuditPayments561 = "non alphabet mode, ignore audit payments" // Info in ../node/pkg/innerring/processors/settlement/calls.go
SettlementNewAuditSettlementEvent562 = "new audit settlement event" // Info in ../node/pkg/innerring/processors/settlement/calls.go
SettlementIgnoreGenesisEpoch563 = "ignore genesis epoch" // Debug in ../node/pkg/innerring/processors/settlement/calls.go
SettlementCouldNotAddHandlerOfAuditEventToQueue564 = "could not add handler of AuditEvent to queue" // Warn in ../node/pkg/innerring/processors/settlement/calls.go
SettlementAuditEventHandlingSuccessfullyScheduled565 = "AuditEvent handling successfully scheduled" // Debug in ../node/pkg/innerring/processors/settlement/calls.go
SettlementNonAlphabetModeIgnoreIncomeCollectionEvent566 = "non alphabet mode, ignore income collection event" // Info in ../node/pkg/innerring/processors/settlement/calls.go
SettlementStartBasicIncomeCollection567 = "start basic income collection" // Info in ../node/pkg/innerring/processors/settlement/calls.go
SettlementIncomeContextAlreadyExists568 = "income context already exists" // Error in ../node/pkg/innerring/processors/settlement/calls.go
SettlementCantCreateIncomeContext569 = "can't create income context" // Error in ../node/pkg/innerring/processors/settlement/calls.go
SettlementCouldNotAddHandlerOfBasicIncomeCollectionToQueue570 = "could not add handler of basic income collection to queue" // Warn in ../node/pkg/innerring/processors/settlement/calls.go
SettlementNonAlphabetModeIgnoreIncomeDistributionEvent571 = "non alphabet mode, ignore income distribution event" // Info in ../node/pkg/innerring/processors/settlement/calls.go
SettlementStartBasicIncomeDistribution572 = "start basic income distribution" // Info in ../node/pkg/innerring/processors/settlement/calls.go
SettlementIncomeContextDistributionDoesNotExists573 = "income context distribution does not exist" // Warn in ../node/pkg/innerring/processors/settlement/calls.go
SettlementCouldNotAddHandlerOfBasicIncomeDistributionToQueue574 = "could not add handler of basic income distribution to queue" // Warn in ../node/pkg/innerring/processors/settlement/calls.go
SettlementProcessAuditSettlements575 = "process audit settlements" // Info in ../node/pkg/innerring/processors/settlement/handlers.go
SettlementAuditProcessingFinished576 = "audit processing finished" // Info in ../node/pkg/innerring/processors/settlement/handlers.go
SettlementWorkerPoolForSettlementProcessorSuccessfullyInitialized577 = "worker pool for settlement processor successfully initialized" // Debug in ../node/pkg/innerring/processors/settlement/processor.go
AuditSettlementsAreIgnoredForZeroEpoch578 = "settlements are ignored for zero epoch" // Info in ../node/pkg/innerring/processors/settlement/audit/calculate.go
AuditCalculateAuditSettlements579 = "calculate audit settlements" // Info in ../node/pkg/innerring/processors/settlement/audit/calculate.go
AuditGettingResultsForThePreviousEpoch580 = "getting results for the previous epoch" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
AuditCouldNotCollectAuditResults581 = "could not collect audit results" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go
AuditNoAuditResultsInPreviousEpoch582 = "no audit results in previous epoch" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
AuditCantFetchAuditFeeFromNetworkConfig583 = "can't fetch audit fee from network config" // Warn in ../node/pkg/innerring/processors/settlement/audit/calculate.go
AuditProcessingAuditResults584 = "processing audit results" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
AuditProcessingTransfers585 = "processing transfers" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
AuditReadingInformationAboutTheContainer586 = "reading information about the container" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
AuditBuildingPlacement587 = "building placement" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
AuditCollectingPassedNodes588 = "collecting passed nodes" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
AuditCalculatingSumOfTheSizesOfAllStorageGroups589 = "calculating sum of the sizes of all storage groups" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
AuditFillingTransferTable590 = "filling transfer table" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
AuditMissingContainerInAuditResult591 = "missing container in audit result" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go
AuditCouldNotGetContainerInfo592 = "could not get container info" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go
AuditCouldNotGetContainerNodes593 = "could not get container nodes" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go
AuditEmptyListOfContainerNodes594 = "empty list of container nodes" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
AuditNoneOfTheContainerNodesPassedTheAudit595 = "none of the container nodes passed the audit" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
AuditCouldNotGetSGInfo = "could not get storage group info" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go
AuditZeroSumSGSize = "zero sum storage group size" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
AuditCouldNotResolvePublicKeyOfTheStorageNode = "could not resolve public key of the storage node" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go
AuditCalculatingStorageNodeSalaryForAudit = "calculating storage node salary for audit (GASe-12)" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
AuditCouldNotParsePublicKeyOfTheInnerRingNode = "could not parse public key of the inner ring node" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go
BasicCantGetBasicIncomeRate = "can't get basic income rate" // Error in ../node/pkg/innerring/processors/settlement/basic/collect.go
BasicCantFetchContainerSizeEstimations = "can't fetch container size estimations" // Error in ../node/pkg/innerring/processors/settlement/basic/collect.go
BasicCantFetchContainerInfo = "can't fetch container info" // Warn in ../node/pkg/innerring/processors/settlement/basic/collect.go
BasicCantFetchBalanceOfBankingAccount = "can't fetch balance of banking account" // Error in ../node/pkg/innerring/processors/settlement/basic/distribute.go
BasicCantTransformPublicKeyToOwnerID = "can't transform public key to owner id" // Warn in ../node/pkg/innerring/processors/settlement/basic/distribute.go
FrostFSIRInternalError = "internal error" // Info in ../node/cmd/frostfs-ir/main.go
FrostFSIRCouldNotShutdownHTTPServer = "could not shutdown HTTP server" // Debug in ../node/cmd/frostfs-ir/main.go
FrostFSIRApplicationStopped = "application stopped" // Info in ../node/cmd/frostfs-ir/main.go
FrostFSNodeCouldNotReadCertificateFromFile = "could not read certificate from file" // Error in ../node/cmd/frostfs-node/grpc.go
FrostFSNodeCantListenGRPCEndpoint = "can't listen gRPC endpoint" // Error in ../node/cmd/frostfs-node/grpc.go
FrostFSNodeStopListeningGRPCEndpoint = "stop listening gRPC endpoint" // Info in ../node/cmd/frostfs-node/grpc.go
FrostFSNodeStartListeningGRPCEndpoint = "start listening gRPC endpoint" // Info in ../node/cmd/frostfs-node/grpc.go
FrostFSNodeStoppingGRPCServer = "stopping gRPC server..." // Info in ../node/cmd/frostfs-node/grpc.go
FrostFSNodeGRPCCannotShutdownGracefullyForcingStop = "gRPC cannot shutdown gracefully, forcing stop" // Info in ../node/cmd/frostfs-node/grpc.go
FrostFSNodeGRPCServerStoppedSuccessfully = "gRPC server stopped successfully" // Info in ../node/cmd/frostfs-node/grpc.go
FrostFSNodeWaitingForAllProcessesToStop = "waiting for all processes to stop" // Debug in ../node/cmd/frostfs-node/main.go
FrostFSNodeStartReportingReputationOnNewEpochEvent = "start reporting reputation on new epoch event" // Debug in ../node/cmd/frostfs-node/reputation.go
FrostFSNodeCouldNotFetchEpochDuration = "could not fetch epoch duration" // Debug in ../node/cmd/frostfs-node/reputation.go
FrostFSNodeCouldNotFetchIterationNumber = "could not fetch iteration number" // Debug in ../node/cmd/frostfs-node/reputation.go
FrostFSNodeCouldNotCreateFixedEpochTimer = "could not create fixed epoch timer" // Debug in ../node/cmd/frostfs-node/reputation.go
FrostFSNodeStartedLocalNodesMaintenance = "started local node's maintenance" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeStoppedLocalNodesMaintenance = "stopped local node's maintenance" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeFailedToAttachShardToEngine = "failed to attach shard to engine" // Error in ../node/cmd/frostfs-node/config.go
FrostFSNodeShardAttachedToEngine = "shard attached to engine" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeClosingComponentsOfTheStorageEngine = "closing components of the storage engine..." // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeStorageEngineClosingFailure = "storage engine closing failure" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully = "all components of the storage engine closed successfully" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeBootstrappingWithTheMaintenanceState = "bootstrapping with the maintenance state" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeBootstrappingWithOnlineState = "bootstrapping with online state" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeTerminationSignalHasBeenReceivedStopping = "termination signal has been received, stopping..." // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeTerminationSignalProcessingIsComplete = "termination signal processing is complete" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeInternalApplicationError = "internal application error" // Warn in ../node/cmd/frostfs-node/config.go
FrostFSNodeInternalErrorProcessingIsComplete = "internal error processing is complete" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration = "SIGHUP has been received, rereading configuration..." // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeConfigurationReading = "configuration reading" // Error in ../node/cmd/frostfs-node/config.go
FrostFSNodeLoggerConfigurationPreparation = "logger configuration preparation" // Error in ../node/cmd/frostfs-node/config.go
FrostFSNodeTracingConfigationUpdated = "tracing configuration updated" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeStorageEngineConfigurationUpdate = "storage engine configuration update" // Error in ../node/cmd/frostfs-node/config.go
FrostFSNodeUpdatedConfigurationApplying = "updated configuration applying" // Error in ../node/cmd/frostfs-node/config.go
FrostFSNodeConfigurationHasBeenReloadedSuccessfully = "configuration has been reloaded successfully" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeReadNewlyCreatedContainerAfterTheNotification = "read newly created container after the notification" // Error in ../node/cmd/frostfs-node/container.go
FrostFSNodeContainerCreationEventsReceipt = "container creation event's receipt" // Debug in ../node/cmd/frostfs-node/container.go
FrostFSNodeContainerRemovalEventsReceipt = "container removal event's receipt" // Debug in ../node/cmd/frostfs-node/container.go
FrostFSNodeSaveUsedSpaceAnnouncementInContract = "save used space announcement in contract" // Debug in ../node/cmd/frostfs-node/container.go
FrostFSNodeFailedToCalculateContainerSizeInStorageEngine = "failed to calculate container size in storage engine" // Debug in ../node/cmd/frostfs-node/container.go
FrostFSNodeContainerSizeInStorageEngineCalculatedSuccessfully = "container size in storage engine calculated successfully" // Debug in ../node/cmd/frostfs-node/container.go
FrostFSNodeNotificatorCouldNotListContainers = "notificator: could not list containers" // Error in ../node/cmd/frostfs-node/notificator.go
FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer = "notificator: could not select objects from container" // Error in ../node/cmd/frostfs-node/notificator.go
FrostFSNodeNotificatorCouldNotProcessObject = "notificator: could not process object" // Error in ../node/cmd/frostfs-node/notificator.go
FrostFSNodeNotificatorFinishedProcessingObjectNotifications = "notificator: finished processing object notifications" // Debug in ../node/cmd/frostfs-node/notificator.go
FrostFSNodeCouldNotWriteObjectNotification = "could not write object notification" // Warn in ../node/cmd/frostfs-node/notificator.go
FrostFSNodeCouldNotGetMaxObjectSizeValue = "could not get max object size value" // Error in ../node/cmd/frostfs-node/object.go
FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage = "could not inhume mark redundant copy as garbage" // Warn in ../node/cmd/frostfs-node/object.go
FrostFSNodeWritingLocalReputationValues = "writing local reputation values" // Debug in ../node/cmd/frostfs-node/object.go
FrostFSNodeCouldNotGetLatestNetworkMapToOverloadTheClient = "could not get latest network map to overload the client" // Warn in ../node/cmd/frostfs-node/object.go
FrostFSNodeFailedInitTracing = "failed init tracing" // Error in ../node/cmd/frostfs-node/tracing.go
FrostFSNodeFailedShutdownTracing = "failed shutdown tracing" // Error in ../node/cmd/frostfs-node/tracing.go
FrostFSNodeFailedToCreateNeoRPCClient = "failed to create neo RPC client" // Info in ../node/cmd/frostfs-node/morph.go
FrostFSNodeClosingMorphComponents = "closing morph components..." // Info in ../node/cmd/frostfs-node/morph.go
FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal = "failed to set group signer scope, continue with Global" // Info in ../node/cmd/frostfs-node/morph.go
FrostFSNodeNotarySupport = "notary support" // Info in ../node/cmd/frostfs-node/morph.go
FrostFSNodeMorphcacheTTLFetchedFromNetwork = "morph.cache_ttl fetched from network" // Debug in ../node/cmd/frostfs-node/morph.go
FrostFSNodeNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made" // Info in ../node/cmd/frostfs-node/morph.go
FrostFSNodeCantGetLastProcessedSideChainBlockNumber = "can't get last processed side chain block number" // Warn in ../node/cmd/frostfs-node/morph.go
FrostFSNodeNewEpochEventFromSidechain = "new epoch event from sidechain" // Info in ../node/cmd/frostfs-node/morph.go
FrostFSNodeNewBlock = "new block" // Debug in ../node/cmd/frostfs-node/morph.go
FrostFSNodeCantUpdatePersistentState = "can't update persistent state" // Warn in ../node/cmd/frostfs-node/morph.go
FrostFSNodeCantSendRebootstrapTx = "can't send re-bootstrap tx" // Warn in ../node/cmd/frostfs-node/netmap.go
FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch = "could not update node state on new epoch" // Error in ../node/cmd/frostfs-node/netmap.go
FrostFSNodeCouldNotMakeNotaryDeposit = "could not make notary deposit" // Error in ../node/cmd/frostfs-node/netmap.go
FrostFSNodeInitialNetworkState = "initial network state" // Info in ../node/cmd/frostfs-node/netmap.go
FrostFSNodeTreeServiceIsNotEnabledSkipInitialization = "tree service is not enabled, skip initialization" // Info in ../node/cmd/frostfs-node/tree.go
FrostFSNodeCouldNotSynchronizeTreeService = "could not synchronize Tree Service" // Error in ../node/cmd/frostfs-node/tree.go
FrostFSNodeRemovingAllTreesForContainer = "removing all trees for container" // Debug in ../node/cmd/frostfs-node/tree.go
FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved = "container removal event received, but trees weren't removed" // Error in ../node/cmd/frostfs-node/tree.go
FrostFSNodeCantListenGRPCEndpointControl = "can't listen gRPC endpoint (control)" // Error in ../node/cmd/frostfs-node/control.go
CommonApplicationStarted = "application started" // Info in ../node/cmd/frostfs-ir/main.go
CommonInitializingRemoteWriterProvider = "initializing remote writer provider" // Debug in ../node/cmd/frostfs-node/reputation/common/remote.go
CommonRouteHasReachedDeadendProvider = "route has reached dead-end provider" // Debug in ../node/cmd/frostfs-node/reputation/common/remote.go
CommonInitializingNoopWriterProvider = "initializing no-op writer provider" // Debug in ../node/cmd/frostfs-node/reputation/common/remote.go
IntermediateWritingReceivedConsumersTrusts = "writing received consumer's trusts" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/consumers.go
IntermediateStartWritingGlobalTrustsToContract = "start writing global trusts to contract" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/contract.go
IntermediateFailedToSignGlobalTrust = "failed to sign global trust" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/contract.go
IntermediateFailedToWriteGlobalTrustToContract = "failed to write global trust to contract" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/contract.go
IntermediateSentGlobalTrustToContract = "sent global trust to contract" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/contract.go
IntermediateWritingReceivedDaughtersTrusts = "writing received daughter's trusts" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/daughters.go
IntermediateAnnouncingTrust = "announcing trust" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/remote.go
LocalAnnouncingTrusts = "announcing trusts" // Debug in ../node/cmd/frostfs-node/reputation/local/remote.go
LocalInitializingIteratorOverTrusts = "initializing iterator over trusts" // Debug in ../node/cmd/frostfs-node/reputation/local/storage.go
)
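
Aside: every hunk in this change follows the same mechanical pattern: a literal log message becomes a constant in internal/logs, and the call site passes that constant. A minimal standalone sketch of the pattern using plain zap (the real code goes through the node's logger wrapper; the constant names below are invented for illustration):

package main

import "go.uber.org/zap"

// Stand-ins for entries in internal/logs; names are illustrative only.
const (
	exampleApplicationStarted = "application started"
	exampleInternalError      = "internal error"
)

func main() {
	log, err := zap.NewProduction()
	if err != nil {
		panic(err)
	}
	defer func() { _ = log.Sync() }()

	// Before: log.Info("application started", ...)
	// After:  the literal is replaced by a shared constant.
	log.Info(exampleApplicationStarted, zap.String("version", "dev"))
	log.Info(exampleInternalError, zap.String("msg", "example"))
}

Centralizing the strings makes duplicate messages visible and keeps log output greppable from a single source of truth.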

View file

@ -3,6 +3,7 @@ package innerring
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement"
timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
@ -98,7 +99,7 @@ func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer {
args.stopEstimationDDiv,
func() {
if !args.alphabetState.IsAlphabet() {
args.l.Debug("non-alphabet mode, do not stop container estimations")
args.l.Debug(logs.InnerringNonalphabetModeDoNotStopContainerEstimations13)
return
}
@ -112,7 +113,7 @@ func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer {
err := args.cnrWrapper.StopEstimation(prm)
if err != nil {
args.l.Warn("can't stop epoch estimation",
args.l.Warn(logs.InnerringCantStopEpochEstimation14,
zap.Uint64("epoch", epochN),
zap.String("error", err.Error()))
}

View file

@ -6,6 +6,7 @@ import (
"fmt"
"net"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/audit"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/balance"
@ -129,7 +130,7 @@ func (s *Server) initMainnet(ctx context.Context, cfg *viper.Viper, morphChain *
fromMainChainBlock, err := s.persistate.UInt32(persistateMainChainLastBlockKey)
if err != nil {
fromMainChainBlock = 0
s.log.Warn("can't get last processed main chain block number", zap.String("error", err.Error()))
s.log.Warn(logs.InnerringCantGetLastProcessedMainChainBlockNumber24, zap.String("error", err.Error()))
}
mainnetChain.from = fromMainChainBlock
@ -177,7 +178,7 @@ func (s *Server) initNotaryConfig(cfg *viper.Viper) {
!s.withoutMainNet && s.mainnetClient.ProbeNotary(), // if mainnet is disabled, the notary flag must be disabled too
)
s.log.Info("notary support",
s.log.Info(logs.InnerringNotarySupport25,
zap.Bool("sidechain_enabled", !s.sideNotaryConfig.disabled),
zap.Bool("mainchain_enabled", !s.mainNotaryConfig.disabled),
)
@ -275,7 +276,7 @@ func (s *Server) createAlphaSync(cfg *viper.Viper, frostfsCli *frostfsClient.Cli
if s.withoutMainNet || cfg.GetBool("governance.disable") {
alphaSync = func(event.Event) {
s.log.Debug("alphabet keys sync is disabled")
s.log.Debug(logs.InnerringAlphabetKeysSyncIsDisabled26)
}
} else {
// create governance processor
@ -496,7 +497,7 @@ func (s *Server) initReputationProcessor(cfg *viper.Viper, sidechainFee fixedn.F
func (s *Server) initGRPCServer(cfg *viper.Viper) error {
controlSvcEndpoint := cfg.GetString("control.grpc.endpoint")
if controlSvcEndpoint == "" {
s.log.Info("no Control server endpoint specified, service is disabled")
s.log.Info(logs.InnerringNoControlServerEndpointSpecifiedServiceIsDisabled27)
return nil
}
@ -692,7 +693,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
fromSideChainBlock, err := s.persistate.UInt32(persistateSideChainLastBlockKey)
if err != nil {
fromSideChainBlock = 0
s.log.Warn("can't get last processed side chain block number", zap.String("error", err.Error()))
s.log.Warn(logs.InnerringCantGetLastProcessedSideChainBlockNumber28, zap.String("error", err.Error()))
}
morphChain := &chainParams{
@ -715,7 +716,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
return nil, err
}
if err := s.morphClient.SetGroupSignerScope(); err != nil {
morphChain.log.Info("failed to set group signer scope, continue with Global", zap.Error(err))
morphChain.log.Info(logs.InnerringFailedToSetGroupSignerScopeContinueWithGlobal29, zap.Error(err))
}
return morphChain, nil

View file

@ -6,6 +6,7 @@ import (
"fmt"
"io"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap"
@ -168,7 +169,7 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
err = s.voteForSidechainValidator(prm)
if err != nil {
// we don't stop inner ring execution on this error
s.log.Warn("can't vote for prepared validators",
s.log.Warn(logs.InnerringCantVoteForPreparedValidators30,
zap.String("error", err.Error()))
}
@ -210,13 +211,13 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
func (s *Server) registerMorphNewBlockEventHandler() {
s.morphListener.RegisterBlockHandler(func(b *block.Block) {
s.log.Debug("new block",
s.log.Debug(logs.InnerringNewBlock31,
zap.Uint32("index", b.Index),
)
err := s.persistate.SetUInt32(persistateSideChainLastBlockKey, b.Index)
if err != nil {
s.log.Warn("can't update persistent state",
s.log.Warn(logs.InnerringCantUpdatePersistentState32,
zap.String("chain", "side"),
zap.Uint32("block_index", b.Index))
}
@ -230,7 +231,7 @@ func (s *Server) registerMainnetNewBlockEventHandler() {
s.mainnetListener.RegisterBlockHandler(func(b *block.Block) {
err := s.persistate.SetUInt32(persistateMainChainLastBlockKey, b.Index)
if err != nil {
s.log.Warn("can't update persistent state",
s.log.Warn(logs.InnerringCantUpdatePersistentState33,
zap.String("chain", "main"),
zap.Uint32("block_index", b.Index))
}
@ -302,7 +303,7 @@ func (s *Server) Stop() {
for _, c := range s.closers {
if err := c(); err != nil {
s.log.Warn("closer error",
s.log.Warn(logs.InnerringCloserError34,
zap.String("error", err.Error()),
)
}
@ -547,7 +548,7 @@ func (s *Server) initConfigFromBlockchain() error {
return err
}
s.log.Debug("read config from blockchain",
s.log.Debug(logs.InnerringReadConfigFromBlockchain35,
zap.Bool("active", s.IsActive()),
zap.Bool("alphabet", s.IsAlphabet()),
zap.Uint64("epoch", epoch),

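Aside: the block handlers above share one idea, persisting the index of every processed block so a restart can resume from it, and falling back to zero with a warning when the stored value cannot be read. A self-contained sketch with a plain map standing in for the persistent state store (key name invented):

package main

import (
	"errors"
	"fmt"
)

type state struct{ m map[string]uint32 }

func (s *state) SetUInt32(key string, v uint32) error { s.m[key] = v; return nil }

func (s *state) UInt32(key string) (uint32, error) {
	v, ok := s.m[key]
	if !ok {
		return 0, errors.New("not found")
	}
	return v, nil
}

func main() {
	st := &state{m: map[string]uint32{}}
	const key = "side_chain_last_block" // illustrative key, not the real one

	from, err := st.UInt32(key)
	if err != nil {
		from = 0 // same fallback as above, reported as a warning, not an error
		fmt.Println("warn: can't get last processed block number:", err)
	}

	// The registered block handler persists every index it sees,
	// so the next start resumes close to where this one stopped.
	for b := from; b < from+3; b++ {
		if err := st.SetUInt32(key, b); err != nil {
			fmt.Println("warn: can't update persistent state:", err)
		}
	}
	fmt.Println("processed up to block", from+2)
}
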
View file

@ -4,6 +4,7 @@ import (
"context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/util"
@ -52,14 +53,14 @@ func (s *Server) notaryHandler(_ event.Event) {
if !s.mainNotaryConfig.disabled {
_, err := s.depositMainNotary()
if err != nil {
s.log.Error("can't make notary deposit in main chain", zap.Error(err))
s.log.Error(logs.InnerringCantMakeNotaryDepositInMainChain15, zap.Error(err))
}
}
if !s.sideNotaryConfig.disabled {
_, err := s.depositSideNotary()
if err != nil {
s.log.Error("can't make notary deposit in side chain", zap.Error(err))
s.log.Error(logs.InnerringCantMakeNotaryDepositInSideChain16, zap.Error(err))
}
}
}
@ -82,7 +83,7 @@ func (s *Server) initNotary(ctx context.Context, deposit depositor, await awaite
// non-error deposit with an empty TX hash means
// that the deposit has already been made; no
// need to wait for it.
s.log.Info("notary deposit has already been made")
s.log.Info(logs.InnerringNotaryDepositHasAlreadyBeenMade17)
return nil
}

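Aside: the comment above encodes a small convention: a deposit call returning a nil error together with an empty transaction hash means the deposit already exists, so there is nothing to await. The shape of that convention, with util.Uint256 replaced by a fixed-size array:

package main

import "fmt"

type txHash [32]byte

// depositNotary imitates the deposit helper: an empty hash with a nil
// error signals that the deposit was already made earlier.
func depositNotary() (txHash, error) {
	return txHash{}, nil
}

func main() {
	h, err := depositNotary()
	if err != nil {
		fmt.Println("error: can't make notary deposit:", err)
		return
	}
	if h == (txHash{}) {
		fmt.Println("notary deposit has already been made")
		return
	}
	fmt.Println("awaiting tx", h)
}
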
View file

@ -1,6 +1,7 @@
package alphabet
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"go.uber.org/zap"
@ -8,14 +9,14 @@ import (
func (ap *Processor) HandleGasEmission(ev event.Event) {
_ = ev.(timers.NewAlphabetEmitTick)
ap.log.Info("tick", zap.String("type", "alphabet gas emit"))
ap.log.Info(logs.AlphabetTick417, zap.String("type", "alphabet gas emit"))
// send event to the worker pool
err := ap.pool.Submit(func() { ap.processEmit() })
if err != nil {
// here the system can be moved into a controlled degradation stage
ap.log.Warn("alphabet processor worker pool drained",
ap.log.Warn(logs.AlphabetAlphabetProcessorWorkerPoolDrained418,
zap.Int("capacity", ap.pool.Cap()))
}
}

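Aside: every processor in this change hands events to a non-blocking ants pool; when the pool is saturated, Submit fails immediately and the event is dropped with a "worker pool drained" warning instead of blocking the notification loop. A runnable miniature of that pattern:

package main

import (
	"fmt"
	"time"

	"github.com/panjf2000/ants/v2"
)

func main() {
	// Capacity 1 and non-blocking mode: a second Submit while the first
	// task still runs fails immediately instead of queueing.
	pool, err := ants.NewPool(1, ants.WithNonblocking(true))
	if err != nil {
		panic(err)
	}
	defer pool.Release()

	for i := 0; i < 3; i++ {
		if err := pool.Submit(func() { time.Sleep(time.Second) }); err != nil {
			// The processors log this as "<name> worker pool drained":
			// controlled degradation by dropping work, never blocking.
			fmt.Println("warn: worker pool drained, capacity:", pool.Cap())
		}
	}
}
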
View file

@ -3,6 +3,7 @@ package alphabet
import (
"crypto/elliptic"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
@ -14,14 +15,14 @@ const emitMethod = "emit"
func (ap *Processor) processEmit() {
index := ap.irList.AlphabetIndex()
if index < 0 {
ap.log.Info("non alphabet mode, ignore gas emission event")
ap.log.Info(logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent419)
return
}
contract, ok := ap.alphabetContracts.GetByIndex(index)
if !ok {
ap.log.Debug("node is out of alphabet range, ignore gas emission event",
ap.log.Debug(logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent420,
zap.Int("index", index))
return
@ -30,20 +31,20 @@ func (ap *Processor) processEmit() {
// there is no signature collection, so we don't need an extra fee
err := ap.morphClient.Invoke(contract, 0, emitMethod)
if err != nil {
ap.log.Warn("can't invoke alphabet emit method", zap.String("error", err.Error()))
ap.log.Warn(logs.AlphabetCantInvokeAlphabetEmitMethod421, zap.String("error", err.Error()))
return
}
if ap.storageEmission == 0 {
ap.log.Info("storage node emission is off")
ap.log.Info(logs.AlphabetStorageNodeEmissionIsOff422)
return
}
networkMap, err := ap.netmapClient.NetMap()
if err != nil {
ap.log.Warn("can't get netmap snapshot to emit gas to storage nodes",
ap.log.Warn(logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes423,
zap.String("error", err.Error()))
return
@ -53,7 +54,7 @@ func (ap *Processor) processEmit() {
nmLen := len(nmNodes)
extraLen := len(ap.parsedWallets)
ap.log.Debug("gas emission",
ap.log.Debug(logs.AlphabetGasEmission424,
zap.Int("network_map", nmLen),
zap.Int("extra_wallets", extraLen))
@ -74,7 +75,7 @@ func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerN
key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256())
if err != nil {
ap.log.Warn("can't parse node public key",
ap.log.Warn(logs.AlphabetCantParseNodePublicKey425,
zap.String("error", err.Error()))
continue
@ -82,7 +83,7 @@ func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerN
err = ap.morphClient.TransferGas(key.GetScriptHash(), gasPerNode)
if err != nil {
ap.log.Warn("can't transfer gas",
ap.log.Warn(logs.AlphabetCantTransferGas426,
zap.String("receiver", key.Address()),
zap.Int64("amount", int64(gasPerNode)),
zap.String("error", err.Error()),
@ -99,7 +100,7 @@ func (ap *Processor) transferGasToExtraNodes(extraLen int, gasPerNode fixedn.Fix
for i, addr := range ap.parsedWallets {
receiversLog[i] = addr.StringLE()
}
ap.log.Warn("can't transfer gas to wallet",
ap.log.Warn(logs.AlphabetCantTransferGasToWallet427,
zap.Strings("receivers", receiversLog),
zap.Int64("amount", int64(gasPerNode)),
zap.String("error", err.Error()),

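Aside: processEmit divides a fixed storage emission across netmap nodes and keeps going when an individual transfer fails, which is why the failures above are warnings rather than aborts. A simplified sketch of that loop with key parsing and the transfer stubbed out (the real code divides a fixedn.Fixed8 amount and also pays configured extra wallets):

package main

import (
	"errors"
	"fmt"
)

// transferGas stands in for morphClient.TransferGas; an empty receiver
// imitates a node key that failed to parse.
func transferGas(receiver string, amount int64) error {
	if receiver == "" {
		return errors.New("empty receiver")
	}
	fmt.Printf("transferred %d to %s\n", amount, receiver)
	return nil
}

func main() {
	const storageEmission = int64(9000)
	nodes := []string{"node-a", "node-b", ""}

	gasPerNode := storageEmission / int64(len(nodes))
	for _, n := range nodes {
		if err := transferGas(n, gasPerNode); err != nil {
			// mirror of "can't transfer gas": warn and move on
			fmt.Println("warn: can't transfer gas:", err)
		}
	}
}
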
View file

@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
@ -67,7 +68,7 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/alphabet: global state is not set")
}
p.Log.Debug("alphabet worker pool", zap.Int("size", p.PoolSize))
p.Log.Debug(logs.AlphabetAlphabetWorkerPool428, zap.Int("size", p.PoolSize))
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {

View file

@ -1,6 +1,7 @@
package audit
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"go.uber.org/zap"
)
@ -10,12 +11,12 @@ func (ap *Processor) handleNewAuditRound(ev event.Event) {
epoch := auditEvent.Epoch()
ap.log.Info("new round of audit", zap.Uint64("epoch", epoch))
ap.log.Info(logs.AuditNewRoundOfAudit430, zap.Uint64("epoch", epoch))
// send an event to the worker pool
err := ap.pool.Submit(func() { ap.processStartAudit(epoch) })
if err != nil {
ap.log.Warn("previous round of audit prepare hasn't finished yet")
ap.log.Warn(logs.AuditPreviousRoundOfAuditPrepareHasntFinishedYet431)
}
}

View file

@ -4,6 +4,7 @@ import (
"context"
"crypto/sha256"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/storagegroup"
@ -24,23 +25,23 @@ func (ap *Processor) processStartAudit(epoch uint64) {
skipped := ap.taskManager.Reset()
if skipped > 0 {
ap.log.Info("some tasks from previous epoch are skipped",
ap.log.Info(logs.AuditSomeTasksFromPreviousEpochAreSkipped432,
zap.Int("amount", skipped),
)
}
containers, err := ap.selectContainersToAudit(epoch)
if err != nil {
log.Error("container selection failure", zap.String("error", err.Error()))
log.Error(logs.AuditContainerSelectionFailure433, zap.String("error", err.Error()))
return
}
log.Info("select containers for audit", zap.Int("amount", len(containers)))
log.Info(logs.AuditSelectContainersForAudit434, zap.Int("amount", len(containers)))
nm, err := ap.netmapClient.GetNetMap(0)
if err != nil {
ap.log.Error("can't fetch network map",
ap.log.Error(logs.AuditCantFetchNetworkMap435,
zap.String("error", err.Error()))
return
@ -64,7 +65,7 @@ func (ap *Processor) startAuditTasksOnContainers(cancelChannel <-chan struct{},
for i := range containers {
cnr, err := cntClient.Get(ap.containerClient, containers[i]) // get container structure
if err != nil {
log.Error("can't get container info, ignore",
log.Error(logs.AuditCantGetContainerInfoIgnore436,
zap.Stringer("cid", containers[i]),
zap.String("error", err.Error()))
@ -76,7 +77,7 @@ func (ap *Processor) startAuditTasksOnContainers(cancelChannel <-chan struct{},
// find all container nodes for current epoch
nodes, err := nm.ContainerNodes(cnr.Value.PlacementPolicy(), pivot)
if err != nil {
log.Info("can't build placement for container, ignore",
log.Info(logs.AuditCantBuildPlacementForContainerIgnore437,
zap.Stringer("cid", containers[i]),
zap.String("error", err.Error()))
@ -92,13 +93,13 @@ func (ap *Processor) startAuditTasksOnContainers(cancelChannel <-chan struct{},
// search storage groups
storageGroupsIDs := ap.findStorageGroups(containers[i], n)
log.Info("select storage groups for audit",
log.Info(logs.AuditSelectStorageGroupsForAudit438,
zap.Stringer("cid", containers[i]),
zap.Int("amount", len(storageGroupsIDs)))
// filter expired storage groups
storageGroups := ap.filterExpiredSG(containers[i], storageGroupsIDs, nodes, *nm)
log.Info("filter expired storage groups for audit",
log.Info(logs.AuditFilterExpiredStorageGroupsForAudit439,
zap.Stringer("cid", containers[i]),
zap.Int("amount", len(storageGroups)))
@ -146,7 +147,7 @@ func (ap *Processor) findStorageGroups(cnr cid.ID, shuffled netmapcore.Nodes) []
err := clientcore.NodeInfoFromRawNetmapElement(&info, netmapcore.Node(shuffled[i]))
if err != nil {
log.Warn("parse client node info", zap.String("error", err.Error()))
log.Warn(logs.AuditParseClientNodeInfo440, zap.String("error", err.Error()))
continue
}
@ -162,7 +163,7 @@ func (ap *Processor) findStorageGroups(cnr cid.ID, shuffled netmapcore.Nodes) []
cancel()
if err != nil {
log.Warn("error in storage group search", zap.String("error", err.Error()))
log.Warn(logs.AuditErrorInStorageGroupSearch441, zap.String("error", err.Error()))
continue
}

View file

@ -6,6 +6,7 @@ import (
"sort"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"go.uber.org/zap"
)
@ -20,7 +21,7 @@ func (ap *Processor) selectContainersToAudit(epoch uint64) ([]cid.ID, error) {
// consider getting extra information about container complexity from
// the audit contract here
ap.log.Debug("container listing finished",
ap.log.Debug(logs.AuditContainerListingFinished429,
zap.Int("total amount", len(containers)),
)

View file

@ -3,6 +3,7 @@ package balance
import (
"encoding/hex"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance"
"go.uber.org/zap"
@ -10,7 +11,7 @@ import (
func (bp *Processor) handleLock(ev event.Event) {
lock := ev.(balanceEvent.Lock)
bp.log.Info("notification",
bp.log.Info(logs.BalanceNotification443,
zap.String("type", "lock"),
zap.String("value", hex.EncodeToString(lock.ID())))
@ -19,7 +20,7 @@ func (bp *Processor) handleLock(ev event.Event) {
err := bp.pool.Submit(func() { bp.processLock(&lock) })
if err != nil {
// here the system can be moved into a controlled degradation stage
bp.log.Warn("balance worker pool drained",
bp.log.Warn(logs.BalanceBalanceWorkerPoolDrained444,
zap.Int("capacity", bp.pool.Cap()))
}
}

View file

@ -1,6 +1,7 @@
package balance
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
frostfsContract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance"
"go.uber.org/zap"
@ -10,7 +11,7 @@ import (
// back to the withdraw issuer.
func (bp *Processor) processLock(lock *balanceEvent.Lock) {
if !bp.alphabetState.IsAlphabet() {
bp.log.Info("non alphabet mode, ignore balance lock")
bp.log.Info(logs.BalanceNonAlphabetModeIgnoreBalanceLock445)
return
}
@ -24,6 +25,6 @@ func (bp *Processor) processLock(lock *balanceEvent.Lock) {
err := bp.frostfsClient.Cheque(prm)
if err != nil {
bp.log.Error("can't send lock asset tx", zap.Error(err))
bp.log.Error(logs.BalanceCantSendLockAssetTx446, zap.Error(err))
}
}

View file

@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance"
@ -60,7 +61,7 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/balance: balance precision converter is not set")
}
p.Log.Debug("balance worker pool", zap.Int("size", p.PoolSize))
p.Log.Debug(logs.BalanceBalanceWorkerPool447, zap.Int("size", p.PoolSize))
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {

View file

@ -3,6 +3,7 @@ package container
import (
"crypto/sha256"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
containerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container"
"github.com/mr-tron/base58"
@ -13,7 +14,7 @@ func (cp *Processor) handlePut(ev event.Event) {
put := ev.(putEvent)
id := sha256.Sum256(put.Container())
cp.log.Info("notification",
cp.log.Info(logs.ContainerNotification449,
zap.String("type", "container put"),
zap.String("id", base58.Encode(id[:])))
@ -22,14 +23,14 @@ func (cp *Processor) handlePut(ev event.Event) {
err := cp.pool.Submit(func() { cp.processContainerPut(put) })
if err != nil {
// here the system can be moved into a controlled degradation stage
cp.log.Warn("container processor worker pool drained",
cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained450,
zap.Int("capacity", cp.pool.Cap()))
}
}
func (cp *Processor) handleDelete(ev event.Event) {
del := ev.(containerEvent.Delete)
cp.log.Info("notification",
cp.log.Info(logs.ContainerNotification451,
zap.String("type", "container delete"),
zap.String("id", base58.Encode(del.ContainerID())))
@ -38,7 +39,7 @@ func (cp *Processor) handleDelete(ev event.Event) {
err := cp.pool.Submit(func() { cp.processContainerDelete(&del) })
if err != nil {
// here the system can be moved into a controlled degradation stage
cp.log.Warn("container processor worker pool drained",
cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained452,
zap.Int("capacity", cp.pool.Cap()))
}
}
@ -46,7 +47,7 @@ func (cp *Processor) handleDelete(ev event.Event) {
func (cp *Processor) handleSetEACL(ev event.Event) {
e := ev.(containerEvent.SetEACL)
cp.log.Info("notification",
cp.log.Info(logs.ContainerNotification453,
zap.String("type", "set EACL"),
)
@ -57,7 +58,7 @@ func (cp *Processor) handleSetEACL(ev event.Event) {
})
if err != nil {
// here the system can be moved into a controlled degradation stage
cp.log.Warn("container processor worker pool drained",
cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained454,
zap.Int("capacity", cp.pool.Cap()))
}
}

View file

@ -3,6 +3,7 @@ package container
import (
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
morphsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/subnet"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
@ -35,7 +36,7 @@ type putContainerContext struct {
// and sending approve tx back to the morph.
func (cp *Processor) processContainerPut(put putEvent) {
if !cp.alphabetState.IsAlphabet() {
cp.log.Info("non alphabet mode, ignore container put")
cp.log.Info(logs.ContainerNonAlphabetModeIgnoreContainerPut455)
return
}
@ -45,7 +46,7 @@ func (cp *Processor) processContainerPut(put putEvent) {
err := cp.checkPutContainer(ctx)
if err != nil {
cp.log.Error("put container check failed",
cp.log.Error(logs.ContainerPutContainerCheckFailed456,
zap.String("error", err.Error()),
)
@ -119,7 +120,7 @@ func (cp *Processor) approvePutContainer(ctx *putContainerContext) {
err = cp.cnrClient.Put(prm)
}
if err != nil {
cp.log.Error("could not approve put container",
cp.log.Error(logs.ContainerCouldNotApprovePutContainer457,
zap.String("error", err.Error()),
)
}
@ -129,13 +130,13 @@ func (cp *Processor) approvePutContainer(ctx *putContainerContext) {
// and sending approve tx back to morph.
func (cp *Processor) processContainerDelete(e *containerEvent.Delete) {
if !cp.alphabetState.IsAlphabet() {
cp.log.Info("non alphabet mode, ignore container delete")
cp.log.Info(logs.ContainerNonAlphabetModeIgnoreContainerDelete458)
return
}
err := cp.checkDeleteContainer(e)
if err != nil {
cp.log.Error("delete container check failed",
cp.log.Error(logs.ContainerDeleteContainerCheckFailed459,
zap.String("error", err.Error()),
)
@ -194,7 +195,7 @@ func (cp *Processor) approveDeleteContainer(e *containerEvent.Delete) {
err = cp.cnrClient.Delete(prm)
}
if err != nil {
cp.log.Error("could not approve delete container",
cp.log.Error(logs.ContainerCouldNotApproveDeleteContainer460,
zap.String("error", err.Error()),
)
}

View file

@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
@ -13,13 +14,13 @@ import (
func (cp *Processor) processSetEACL(e container.SetEACL) {
if !cp.alphabetState.IsAlphabet() {
cp.log.Info("non alphabet mode, ignore set EACL")
cp.log.Info(logs.ContainerNonAlphabetModeIgnoreSetEACL461)
return
}
err := cp.checkSetEACL(e)
if err != nil {
cp.log.Error("set EACL check failed",
cp.log.Error(logs.ContainerSetEACLCheckFailed462,
zap.String("error", err.Error()),
)
@ -91,7 +92,7 @@ func (cp *Processor) approveSetEACL(e container.SetEACL) {
err = cp.cnrClient.PutEACL(prm)
}
if err != nil {
cp.log.Error("could not approve set EACL",
cp.log.Error(logs.ContainerCouldNotApproveSetEACL463,
zap.String("error", err.Error()),
)
}

View file

@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid"
morphsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/subnet"
@ -88,7 +89,7 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/container: subnet client is not set")
}
p.Log.Debug("container worker pool", zap.Int("size", p.PoolSize))
p.Log.Debug(logs.ContainerContainerWorkerPool448, zap.Int("size", p.PoolSize))
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {

View file

@ -3,6 +3,7 @@ package frostfs
import (
"encoding/hex"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
"github.com/nspcc-dev/neo-go/pkg/util/slice"
@ -11,7 +12,7 @@ import (
func (np *Processor) handleDeposit(ev event.Event) {
deposit := ev.(frostfsEvent.Deposit)
np.log.Info("notification",
np.log.Info(logs.FrostfsNotification470,
zap.String("type", "deposit"),
zap.String("id", hex.EncodeToString(slice.CopyReverse(deposit.ID()))))
@ -20,14 +21,14 @@ func (np *Processor) handleDeposit(ev event.Event) {
err := np.pool.Submit(func() { np.processDeposit(&deposit) })
if err != nil {
// here the system can be moved into a controlled degradation stage
np.log.Warn("frostfs processor worker pool drained",
np.log.Warn(logs.FrostfsFrostfsProcessorWorkerPoolDrained471,
zap.Int("capacity", np.pool.Cap()))
}
}
func (np *Processor) handleWithdraw(ev event.Event) {
withdraw := ev.(frostfsEvent.Withdraw)
np.log.Info("notification",
np.log.Info(logs.FrostfsNotification472,
zap.String("type", "withdraw"),
zap.String("id", hex.EncodeToString(slice.CopyReverse(withdraw.ID()))))
@ -36,14 +37,14 @@ func (np *Processor) handleWithdraw(ev event.Event) {
err := np.pool.Submit(func() { np.processWithdraw(&withdraw) })
if err != nil {
// here the system can be moved into a controlled degradation stage
np.log.Warn("frostfs processor worker pool drained",
np.log.Warn(logs.FrostfsFrostfsProcessorWorkerPoolDrained473,
zap.Int("capacity", np.pool.Cap()))
}
}
func (np *Processor) handleCheque(ev event.Event) {
cheque := ev.(frostfsEvent.Cheque)
np.log.Info("notification",
np.log.Info(logs.FrostfsNotification474,
zap.String("type", "cheque"),
zap.String("id", hex.EncodeToString(cheque.ID())))
@ -52,14 +53,14 @@ func (np *Processor) handleCheque(ev event.Event) {
err := np.pool.Submit(func() { np.processCheque(&cheque) })
if err != nil {
// here the system can be moved into a controlled degradation stage
np.log.Warn("frostfs processor worker pool drained",
np.log.Warn(logs.FrostfsFrostfsProcessorWorkerPoolDrained475,
zap.Int("capacity", np.pool.Cap()))
}
}
func (np *Processor) handleConfig(ev event.Event) {
cfg := ev.(frostfsEvent.Config)
np.log.Info("notification",
np.log.Info(logs.FrostfsNotification476,
zap.String("type", "set config"),
zap.String("key", hex.EncodeToString(cfg.Key())),
zap.String("value", hex.EncodeToString(cfg.Value())))
@ -69,14 +70,14 @@ func (np *Processor) handleConfig(ev event.Event) {
err := np.pool.Submit(func() { np.processConfig(&cfg) })
if err != nil {
// here the system can be moved into a controlled degradation stage
np.log.Warn("frostfs processor worker pool drained",
np.log.Warn(logs.FrostfsFrostfsProcessorWorkerPoolDrained477,
zap.Int("capacity", np.pool.Cap()))
}
}
func (np *Processor) handleBind(ev event.Event) {
e := ev.(frostfsEvent.Bind)
np.log.Info("notification",
np.log.Info(logs.FrostfsNotification478,
zap.String("type", "bind"),
)
@ -85,14 +86,14 @@ func (np *Processor) handleBind(ev event.Event) {
err := np.pool.Submit(func() { np.processBind(e) })
if err != nil {
// here the system can be moved into a controlled degradation stage
np.log.Warn("frostfs processor worker pool drained",
np.log.Warn(logs.FrostfsFrostfsProcessorWorkerPoolDrained479,
zap.Int("capacity", np.pool.Cap()))
}
}
func (np *Processor) handleUnbind(ev event.Event) {
e := ev.(frostfsEvent.Unbind)
np.log.Info("notification",
np.log.Info(logs.FrostfsNotification480,
zap.String("type", "unbind"),
)
@ -101,7 +102,7 @@ func (np *Processor) handleUnbind(ev event.Event) {
err := np.pool.Submit(func() { np.processBind(e) })
if err != nil {
// here the system can be moved into a controlled degradation stage
np.log.Warn("frostfs processor worker pool drained",
np.log.Warn(logs.FrostfsFrostfsProcessorWorkerPoolDrained481,
zap.Int("capacity", np.pool.Cap()))
}
}

View file

@ -1,6 +1,7 @@
package frostfs
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
"github.com/nspcc-dev/neo-go/pkg/util"
@ -16,7 +17,7 @@ const (
// gas in the sidechain.
func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) {
if !np.alphabetState.IsAlphabet() {
np.log.Info("non alphabet mode, ignore deposit")
np.log.Info(logs.FrostfsNonAlphabetModeIgnoreDeposit482)
return
}
@ -29,7 +30,7 @@ func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) {
// send transferX to a balance contract
err := np.balanceClient.Mint(prm)
if err != nil {
np.log.Error("can't transfer assets to balance contract", zap.Error(err))
np.log.Error(logs.FrostfsCantTransferAssetsToBalanceContract483, zap.Error(err))
}
curEpoch := np.epochState.EpochCounter()
@ -43,7 +44,7 @@ func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) {
val, ok := np.mintEmitCache.Get(receiver.String())
if ok && val+np.mintEmitThreshold >= curEpoch {
np.log.Warn("double mint emission declined",
np.log.Warn(logs.FrostfsDoubleMintEmissionDeclined484,
zap.String("receiver", receiver.String()),
zap.Uint64("last_emission", val),
zap.Uint64("current_epoch", curEpoch))
@ -55,12 +56,12 @@ func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) {
// before gas transfer check if the balance is greater than the threshold
balance, err := np.morphClient.GasBalance()
if err != nil {
np.log.Error("can't get gas balance of the node", zap.Error(err))
np.log.Error(logs.FrostfsCantGetGasBalanceOfTheNode485, zap.Error(err))
return
}
if balance < np.gasBalanceThreshold {
np.log.Warn("gas balance threshold has been reached",
np.log.Warn(logs.FrostfsGasBalanceThresholdHasBeenReached486,
zap.Int64("balance", balance),
zap.Int64("threshold", np.gasBalanceThreshold))
@ -69,7 +70,7 @@ func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) {
err = np.morphClient.TransferGas(receiver, np.mintEmitValue)
if err != nil {
np.log.Error("can't transfer native gas to receiver",
np.log.Error(logs.FrostfsCantTransferNativeGasToReceiver487,
zap.String("error", err.Error()))
return
@ -81,14 +82,14 @@ func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) {
// Process withdraw event by locking assets in the balance account.
func (np *Processor) processWithdraw(withdraw *frostfsEvent.Withdraw) {
if !np.alphabetState.IsAlphabet() {
np.log.Info("non alphabet mode, ignore withdraw")
np.log.Info(logs.FrostfsNonAlphabetModeIgnoreWithdraw488)
return
}
// create lock account
lock, err := util.Uint160DecodeBytesBE(withdraw.ID()[:util.Uint160Size])
if err != nil {
np.log.Error("can't create lock account", zap.Error(err))
np.log.Error(logs.FrostfsCantCreateLockAccount489, zap.Error(err))
return
}
@ -104,7 +105,7 @@ func (np *Processor) processWithdraw(withdraw *frostfsEvent.Withdraw) {
err = np.balanceClient.Lock(prm)
if err != nil {
np.log.Error("can't lock assets for withdraw", zap.Error(err))
np.log.Error(logs.FrostfsCantLockAssetsForWithdraw490, zap.Error(err))
}
}
@ -112,7 +113,7 @@ func (np *Processor) processWithdraw(withdraw *frostfsEvent.Withdraw) {
// the reserve account.
func (np *Processor) processCheque(cheque *frostfsEvent.Cheque) {
if !np.alphabetState.IsAlphabet() {
np.log.Info("non alphabet mode, ignore cheque")
np.log.Info(logs.FrostfsNonAlphabetModeIgnoreCheque491)
return
}
@ -124,6 +125,6 @@ func (np *Processor) processCheque(cheque *frostfsEvent.Cheque) {
err := np.balanceClient.Burn(prm)
if err != nil {
np.log.Error("can't transfer assets to fed contract", zap.Error(err))
np.log.Error(logs.FrostfsCantTransferAssetsToFedContract492, zap.Error(err))
}
}

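Aside: the deposit path above rate-limits GAS emission per receiver: the epoch of the last emission is cached, and a repeat within mintEmitThreshold epochs is declined. The guard in isolation, with a plain map as the cache:

package main

import "fmt"

func main() {
	const mintEmitThreshold = uint64(1)
	cache := map[string]uint64{} // receiver -> epoch of last emission

	tryEmit := func(receiver string, curEpoch uint64) {
		if last, ok := cache[receiver]; ok && last+mintEmitThreshold >= curEpoch {
			fmt.Println("warn: double mint emission declined for", receiver)
			return
		}
		fmt.Println("emit GAS to", receiver, "at epoch", curEpoch)
		cache[receiver] = curEpoch
	}

	tryEmit("addr1", 10) // emits
	tryEmit("addr1", 10) // declined: within the threshold window
	tryEmit("addr1", 12) // emits: window has passed
}
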
View file

@ -4,6 +4,7 @@ import (
"crypto/elliptic"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
@ -20,7 +21,7 @@ type bindCommon interface {
func (np *Processor) processBind(e bindCommon) {
if !np.alphabetState.IsAlphabet() {
np.log.Info("non alphabet mode, ignore bind")
np.log.Info(logs.FrostfsNonAlphabetModeIgnoreBind464)
return
}
@ -32,7 +33,7 @@ func (np *Processor) processBind(e bindCommon) {
err := np.checkBindCommon(c)
if err != nil {
np.log.Error("invalid manage key event",
np.log.Error(logs.FrostfsInvalidManageKeyEvent465,
zap.Bool("bind", c.bind),
zap.String("error", err.Error()),
)
@ -77,7 +78,7 @@ func (np *Processor) approveBindCommon(e *bindCommonContext) {
u160, err := util.Uint160DecodeBytesBE(scriptHash)
if err != nil {
np.log.Error("could not decode script hash from bytes",
np.log.Error(logs.FrostfsCouldNotDecodeScriptHashFromBytes466,
zap.String("error", err.Error()),
)

View file

@ -1,6 +1,7 @@
package frostfs
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
"go.uber.org/zap"
@ -10,7 +11,7 @@ import (
// the sidechain.
func (np *Processor) processConfig(config *frostfsEvent.Config) {
if !np.alphabetState.IsAlphabet() {
np.log.Info("non alphabet mode, ignore config")
np.log.Info(logs.FrostfsNonAlphabetModeIgnoreConfig467)
return
}
@ -23,6 +24,6 @@ func (np *Processor) processConfig(config *frostfsEvent.Config) {
err := np.netmapClient.SetConfig(prm)
if err != nil {
np.log.Error("can't relay set config event", zap.Error(err))
np.log.Error(logs.FrostfsCantRelaySetConfigEvent468, zap.Error(err))
}
}

View file

@ -5,6 +5,7 @@ import (
"fmt"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid"
@ -98,7 +99,7 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/frostfs: balance precision converter is not set")
}
p.Log.Debug("frostfs worker pool", zap.Int("size", p.PoolSize))
p.Log.Debug(logs.FrostfsFrostfsWorkerPool469, zap.Int("size", p.PoolSize))
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {

View file

@ -1,6 +1,7 @@
package governance
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/rolemanagement"
"github.com/nspcc-dev/neo-go/pkg/core/native"
@ -30,14 +31,14 @@ func (gp *Processor) HandleAlphabetSync(e event.Event) {
return
}
gp.log.Info("new event", zap.String("type", typ))
gp.log.Info(logs.GovernanceNewEvent493, zap.String("type", typ))
// send event to the worker pool
err := gp.pool.Submit(func() { gp.processAlphabetSync(hash) })
if err != nil {
// here the system can be moved into a controlled degradation stage
gp.log.Warn("governance worker pool drained",
gp.log.Warn(logs.GovernanceGovernanceWorkerPoolDrained494,
zap.Int("capacity", gp.pool.Cap()))
}
}

View file

@ -6,6 +6,7 @@ import (
"sort"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
@ -20,37 +21,37 @@ const (
func (gp *Processor) processAlphabetSync(txHash util.Uint256) {
if !gp.alphabetState.IsAlphabet() {
gp.log.Info("non alphabet mode, ignore alphabet sync")
gp.log.Info(logs.GovernanceNonAlphabetModeIgnoreAlphabetSync495)
return
}
mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList()
if err != nil {
gp.log.Error("can't fetch alphabet list from main net",
gp.log.Error(logs.GovernanceCantFetchAlphabetListFromMainNet496,
zap.String("error", err.Error()))
return
}
sidechainAlphabet, err := gp.morphClient.Committee()
if err != nil {
gp.log.Error("can't fetch alphabet list from side chain",
gp.log.Error(logs.GovernanceCantFetchAlphabetListFromSideChain497,
zap.String("error", err.Error()))
return
}
newAlphabet, err := newAlphabetList(sidechainAlphabet, mainnetAlphabet)
if err != nil {
gp.log.Error("can't merge alphabet lists from main net and side chain",
gp.log.Error(logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain498,
zap.String("error", err.Error()))
return
}
if newAlphabet == nil {
gp.log.Info("no governance update, alphabet list has not been changed")
gp.log.Info(logs.GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged499)
return
}
gp.log.Info("alphabet list has been changed, starting update",
gp.log.Info(logs.GovernanceAlphabetListHasBeenChangedStartingUpdate500,
zap.String("side_chain_alphabet", prettyKeys(sidechainAlphabet)),
zap.String("new_alphabet", prettyKeys(newAlphabet)),
)
@ -63,7 +64,7 @@ func (gp *Processor) processAlphabetSync(txHash util.Uint256) {
// 1. Vote to sidechain committee via alphabet contracts.
err = gp.voter.VoteForSidechainValidator(votePrm)
if err != nil {
gp.log.Error("can't vote for side chain committee",
gp.log.Error(logs.GovernanceCantVoteForSideChainCommittee501,
zap.String("error", err.Error()))
}
@ -76,7 +77,7 @@ func (gp *Processor) processAlphabetSync(txHash util.Uint256) {
// 4. Update FrostFS contract in the mainnet.
gp.updateFrostFSContractInMainnet(newAlphabet)
gp.log.Info("finished alphabet list update")
gp.log.Info(logs.GovernanceFinishedAlphabetListUpdate502)
}
func prettyKeys(keys keys.PublicKeys) string {
@ -94,21 +95,21 @@ func prettyKeys(keys keys.PublicKeys) string {
func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) {
innerRing, err := gp.irFetcher.InnerRingKeys()
if err != nil {
gp.log.Error("can't fetch inner ring list from side chain",
gp.log.Error(logs.GovernanceCantFetchInnerRingListFromSideChain503,
zap.String("error", err.Error()))
return
}
newInnerRing, err := updateInnerRing(innerRing, sidechainAlphabet, newAlphabet)
if err != nil {
gp.log.Error("can't create new inner ring list with new alphabet keys",
gp.log.Error(logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys504,
zap.String("error", err.Error()))
return
}
sort.Sort(newInnerRing)
gp.log.Info("update of the inner ring list",
gp.log.Info(logs.GovernanceUpdateOfTheInnerRingList505,
zap.String("before", prettyKeys(innerRing)),
zap.String("after", prettyKeys(newInnerRing)),
)
@ -130,7 +131,7 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAl
}
if err != nil {
gp.log.Error("can't update inner ring list with new alphabet keys",
gp.log.Error(logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys506,
zap.String("error", err.Error()))
}
}
@ -147,7 +148,7 @@ func (gp *Processor) updateNotaryRoleInSidechain(newAlphabet keys.PublicKeys, tx
err := gp.morphClient.UpdateNotaryList(updPrm)
if err != nil {
gp.log.Error("can't update list of notary nodes in side chain",
gp.log.Error(logs.GovernanceCantUpdateListOfNotaryNodesInSideChain507,
zap.String("error", err.Error()))
}
}
@ -167,7 +168,7 @@ func (gp *Processor) updateFrostFSContractInMainnet(newAlphabet keys.PublicKeys)
err := gp.frostfsClient.AlphabetUpdate(prm)
if err != nil {
gp.log.Error("can't update list of alphabet nodes in frostfs contract",
gp.log.Error(logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract508,
zap.String("error", err.Error()))
}
}

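Aside: the sync above stops early when the computed alphabet update is nil, which is what the "no governance update" message marks. A toy sketch of that decision using string keys; note the real newAlphabetList merges the lists incrementally rather than adopting the main chain list outright, so only the nil-means-unchanged contract is kept here:

package main

import (
	"fmt"
	"sort"
)

func newAlphabetList(sidechain, mainnet []string) []string {
	a := append([]string(nil), sidechain...)
	b := append([]string(nil), mainnet...)
	sort.Strings(a)
	sort.Strings(b)
	if len(a) == len(b) {
		same := true
		for i := range a {
			if a[i] != b[i] {
				same = false
				break
			}
		}
		if same {
			return nil // no update: the caller logs and returns early
		}
	}
	return b // simplified: adopt the main chain list wholesale
}

func main() {
	if upd := newAlphabetList([]string{"k1", "k2"}, []string{"k2", "k1"}); upd == nil {
		fmt.Println("no governance update, alphabet list has not been changed")
	}
	fmt.Println("new alphabet:", newAlphabetList([]string{"k1"}, []string{"k1", "k3"}))
}
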
View file

@ -3,6 +3,7 @@ package netmap
import (
"encoding/hex"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
@ -12,21 +13,21 @@ import (
func (np *Processor) HandleNewEpochTick(ev event.Event) {
_ = ev.(timerEvent.NewEpochTick)
np.log.Info("tick", zap.String("type", "epoch"))
np.log.Info(logs.NetmapTick510, zap.String("type", "epoch"))
// send an event to the worker pool
err := np.pool.Submit(func() { np.processNewEpochTick() })
if err != nil {
// here the system can be moved into a controlled degradation stage
np.log.Warn("netmap worker pool drained",
np.log.Warn(logs.NetmapNetmapWorkerPoolDrained511,
zap.Int("capacity", np.pool.Cap()))
}
}
func (np *Processor) handleNewEpoch(ev event.Event) {
epochEvent := ev.(netmapEvent.NewEpoch)
np.log.Info("notification",
np.log.Info(logs.NetmapNotification512,
zap.String("type", "new epoch"),
zap.Uint64("value", epochEvent.EpochNumber()))
@ -37,7 +38,7 @@ func (np *Processor) handleNewEpoch(ev event.Event) {
})
if err != nil {
// here the system can be moved into a controlled degradation stage
np.log.Warn("netmap worker pool drained",
np.log.Warn(logs.NetmapNetmapWorkerPoolDrained513,
zap.Int("capacity", np.pool.Cap()))
}
}
@ -45,7 +46,7 @@ func (np *Processor) handleNewEpoch(ev event.Event) {
func (np *Processor) handleAddPeer(ev event.Event) {
newPeer := ev.(netmapEvent.AddPeer)
np.log.Info("notification",
np.log.Info(logs.NetmapNotification514,
zap.String("type", "add peer"),
)
@ -56,14 +57,14 @@ func (np *Processor) handleAddPeer(ev event.Event) {
})
if err != nil {
// here the system can be moved into a controlled degradation stage
np.log.Warn("netmap worker pool drained",
np.log.Warn(logs.NetmapNetmapWorkerPoolDrained515,
zap.Int("capacity", np.pool.Cap()))
}
}
func (np *Processor) handleUpdateState(ev event.Event) {
updPeer := ev.(netmapEvent.UpdatePeer)
np.log.Info("notification",
np.log.Info(logs.NetmapNotification516,
zap.String("type", "update peer state"),
zap.String("key", hex.EncodeToString(updPeer.PublicKey().Bytes())))
@ -74,21 +75,21 @@ func (np *Processor) handleUpdateState(ev event.Event) {
})
if err != nil {
// here the system can be moved into a controlled degradation stage
np.log.Warn("netmap worker pool drained",
np.log.Warn(logs.NetmapNetmapWorkerPoolDrained517,
zap.Int("capacity", np.pool.Cap()))
}
}
func (np *Processor) handleCleanupTick(ev event.Event) {
if !np.netmapSnapshot.enabled {
np.log.Debug("netmap clean up routine is disabled")
np.log.Debug(logs.NetmapNetmapCleanUpRoutineIsDisabled518)
return
}
cleanup := ev.(netmapCleanupTick)
np.log.Info("tick", zap.String("type", "netmap cleaner"))
np.log.Info(logs.NetmapTick519, zap.String("type", "netmap cleaner"))
// send event to the worker pool
err := np.pool.Submit(func() {
@ -96,7 +97,7 @@ func (np *Processor) handleCleanupTick(ev event.Event) {
})
if err != nil {
// here the system can be moved into a controlled degradation stage
np.log.Warn("netmap worker pool drained",
np.log.Warn(logs.NetmapNetmapWorkerPoolDrained520,
zap.Int("capacity", np.pool.Cap()))
}
}
@ -104,7 +105,7 @@ func (np *Processor) handleCleanupTick(ev event.Event) {
func (np *Processor) handleRemoveNode(ev event.Event) {
removeNode := ev.(subnetevents.RemoveNode)
np.log.Info("notification",
np.log.Info(logs.NetmapNotification521,
zap.String("type", "remove node from subnet"),
zap.String("subnetID", hex.EncodeToString(removeNode.SubnetworkID())),
zap.String("key", hex.EncodeToString(removeNode.Node())),
@ -115,7 +116,7 @@ func (np *Processor) handleRemoveNode(ev event.Event) {
})
if err != nil {
// here the system can be moved into a controlled degradation stage
np.log.Warn("netmap worker pool drained",
np.log.Warn(logs.NetmapNetmapWorkerPoolDrained522,
zap.Int("capacity", np.pool.Cap()))
}
}
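
The pattern repeated throughout this diff is mechanical: every inline log message becomes a named constant in the new internal/logs package, while the structured zap fields stay at the call site, so log output is unchanged. A minimal sketch of what that constants file could contain, with names and values inferred from the before/after pairs above (the numeric suffixes apparently come from the generator keeping duplicate messages unique, e.g. Notification512/514/516 all map to "notification"):

// internal/logs/logs.go — a sketch; contents inferred from this diff.
package logs

const (
	NetmapTick510                           = "tick"
	NetmapNetmapWorkerPoolDrained511        = "netmap worker pool drained"
	NetmapNotification512                   = "notification"
	NetmapNetmapCleanUpRoutineIsDisabled518 = "netmap clean up routine is disabled"
)

The message literal moves behind a greppable identifier; callers keep their zap fields exactly as before.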

View file

@ -2,6 +2,7 @@ package netmap
import (
v2netmap "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"go.uber.org/zap"
@ -9,7 +10,7 @@ import (
func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) {
if !np.alphabetState.IsAlphabet() {
np.log.Info("non alphabet mode, ignore new netmap cleanup tick")
np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick523)
return
}
@ -17,13 +18,13 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) {
err := np.netmapSnapshot.forEachRemoveCandidate(ev.epoch, func(s string) error {
key, err := keys.NewPublicKeyFromString(s)
if err != nil {
np.log.Warn("can't decode public key of netmap node",
np.log.Warn(logs.NetmapCantDecodePublicKeyOfNetmapNode524,
zap.String("key", s))
return nil
}
np.log.Info("vote to remove node from netmap", zap.String("key", s))
np.log.Info(logs.NetmapVoteToRemoveNodeFromNetmap525, zap.String("key", s))
// In notary environments we call UpdateStateIR method instead of UpdateState.
// It differs from UpdateState only by name, so we can do this in the same form.
@ -48,13 +49,13 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) {
)
}
if err != nil {
np.log.Error("can't invoke netmap.UpdateState", zap.Error(err))
np.log.Error(logs.NetmapCantInvokeNetmapUpdateState526, zap.Error(err))
}
return nil
})
if err != nil {
np.log.Warn("can't iterate on netmap cleaner cache",
np.log.Warn(logs.NetmapCantIterateOnNetmapCleanerCache527,
zap.String("error", err.Error()))
}
}
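
The snapshot used here exposes two operations that bracket a node's lifetime in the cache: a touch that stamps a node with the epoch it was last seen in (used by the peers processor below), and forEachRemoveCandidate, which visits nodes that have been silent too long. A hypothetical sketch of such a cache; the struct, fields, and threshold rule are assumptions, and the real touch also carries the node's binary info:

package netmap

import "sync"

// cleanupTable is a hypothetical epoch-stamped presence cache.
type cleanupTable struct {
	mu        sync.Mutex
	threshold uint64            // how many epochs a node may stay silent
	lastSeen  map[string]uint64 // hex-encoded public key -> epoch last seen
}

// touch records a sighting and reports whether the entry is new.
func (t *cleanupTable) touch(key string, epoch uint64) bool {
	t.mu.Lock()
	defer t.mu.Unlock()
	_, known := t.lastSeen[key]
	t.lastSeen[key] = epoch
	return !known
}

// forEachRemoveCandidate visits every node unseen for threshold epochs.
func (t *cleanupTable) forEachRemoveCandidate(epoch uint64, f func(string) error) error {
	t.mu.Lock()
	defer t.mu.Unlock()
	for key, seen := range t.lastSeen {
		if epoch > seen && epoch-seen > t.threshold {
			if err := f(key); err != nil {
				return err
			}
		}
	}
	return nil
}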

View file

@ -1,6 +1,7 @@
package netmap
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/audit"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement"
@ -16,7 +17,7 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) {
epochDuration, err := np.netmapClient.EpochDuration()
if err != nil {
np.log.Warn("can't get epoch duration",
np.log.Warn(logs.NetmapCantGetEpochDuration528,
zap.String("error", err.Error()))
} else {
np.epochState.SetEpochDuration(epochDuration)
@ -26,20 +27,20 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) {
h, err := np.netmapClient.Morph().TxHeight(ev.TxHash())
if err != nil {
np.log.Warn("can't get transaction height",
np.log.Warn(logs.NetmapCantGetTransactionHeight529,
zap.String("hash", ev.TxHash().StringLE()),
zap.String("error", err.Error()))
}
if err := np.epochTimer.ResetEpochTimer(h); err != nil {
np.log.Warn("can't reset epoch timer",
np.log.Warn(logs.NetmapCantResetEpochTimer530,
zap.String("error", err.Error()))
}
// get new netmap snapshot
networkMap, err := np.netmapClient.NetMap()
if err != nil {
np.log.Warn("can't get netmap snapshot to perform cleanup",
np.log.Warn(logs.NetmapCantGetNetmapSnapshotToPerformCleanup531,
zap.String("error", err.Error()))
return
@ -54,7 +55,7 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) {
err = np.containerWrp.StartEstimation(prm)
if err != nil {
np.log.Warn("can't start container size estimation",
np.log.Warn(logs.NetmapCantStartContainerSizeEstimation532,
zap.Uint64("epoch", epoch),
zap.String("error", err.Error()))
}
@ -71,15 +72,15 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) {
// Process new epoch tick by invoking new epoch method in network map contract.
func (np *Processor) processNewEpochTick() {
if !np.alphabetState.IsAlphabet() {
np.log.Info("non alphabet mode, ignore new epoch tick")
np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewEpochTick533)
return
}
nextEpoch := np.epochState.EpochCounter() + 1
np.log.Debug("next epoch", zap.Uint64("value", nextEpoch))
np.log.Debug(logs.NetmapNextEpoch534, zap.Uint64("value", nextEpoch))
err := np.netmapClient.NewEpoch(nextEpoch)
if err != nil {
np.log.Error("can't invoke netmap.NewEpoch", zap.Error(err))
np.log.Error(logs.NetmapCantInvokeNetmapNewEpoch535, zap.Error(err))
}
}

View file

@ -4,6 +4,7 @@ import (
"bytes"
"encoding/hex"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
subnetEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/subnet"
@ -16,7 +17,7 @@ import (
// local epoch timer.
func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) {
if !np.alphabetState.IsAlphabet() {
np.log.Info("non alphabet mode, ignore new peer notification")
np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewPeerNotification536)
return
}
@ -25,7 +26,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) {
tx := originalRequest.MainTransaction
ok, err := np.netmapClient.Morph().IsValidScript(tx.Script, tx.Signers)
if err != nil || !ok {
np.log.Warn("non-halt notary transaction",
np.log.Warn(logs.NetmapNonhaltNotaryTransaction537,
zap.String("method", "netmap.AddPeer"),
zap.String("hash", tx.Hash().StringLE()),
zap.Error(err))
@ -37,14 +38,14 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) {
var nodeInfo netmap.NodeInfo
if err := nodeInfo.Unmarshal(ev.Node()); err != nil {
// it would be nice to have the tx id in the event structure to log it
np.log.Warn("can't parse network map candidate")
np.log.Warn(logs.NetmapCantParseNetworkMapCandidate538)
return
}
// validate and update node info
err := np.nodeValidator.VerifyAndUpdate(&nodeInfo)
if err != nil {
np.log.Warn("could not verify and update information about network map candidate",
np.log.Warn(logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate539,
zap.String("error", err.Error()),
)
@ -62,7 +63,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) {
updated := np.netmapSnapshot.touch(keyString, np.epochState.EpochCounter(), nodeInfoBinary)
if updated {
np.log.Info("approving network map candidate",
np.log.Info(logs.NetmapApprovingNetworkMapCandidate540,
zap.String("key", keyString))
prm := netmapclient.AddPeerPrm{}
@ -89,7 +90,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) {
}
if err != nil {
np.log.Error("can't invoke netmap.AddPeer", zap.Error(err))
np.log.Error(logs.NetmapCantInvokeNetmapAddPeer541, zap.Error(err))
}
}
}
@ -97,7 +98,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) {
// Process update peer notification by sending approval tx to the smart contract.
func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) {
if !np.alphabetState.IsAlphabet() {
np.log.Info("non alphabet mode, ignore update peer notification")
np.log.Info(logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification542)
return
}
@ -110,7 +111,7 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) {
if ev.Maintenance() {
err = np.nodeStateSettings.MaintenanceModeAllowed()
if err != nil {
np.log.Info("prevent switching node to maintenance state",
np.log.Info(logs.NetmapPreventSwitchingNodeToMaintenanceState543,
zap.Error(err),
)
@ -135,19 +136,19 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) {
err = np.netmapClient.UpdatePeerState(prm)
}
if err != nil {
np.log.Error("can't invoke netmap.UpdatePeer", zap.Error(err))
np.log.Error(logs.NetmapCantInvokeNetmapUpdatePeer544, zap.Error(err))
}
}
func (np *Processor) processRemoveSubnetNode(ev subnetEvent.RemoveNode) {
if !np.alphabetState.IsAlphabet() {
np.log.Info("non alphabet mode, ignore remove node from subnet notification")
np.log.Info(logs.NetmapNonAlphabetModeIgnoreRemoveNodeFromSubnetNotification545)
return
}
candidates, err := np.netmapClient.GetCandidates()
if err != nil {
np.log.Warn("could not get network map candidates",
np.log.Warn(logs.NetmapCouldNotGetNetworkMapCandidates546,
zap.Error(err),
)
return
@ -158,14 +159,14 @@ func (np *Processor) processRemoveSubnetNode(ev subnetEvent.RemoveNode) {
err = subnetToRemoveFrom.Unmarshal(rawSubnet)
if err != nil {
np.log.Warn("could not unmarshal subnet id",
np.log.Warn(logs.NetmapCouldNotUnmarshalSubnetId547,
zap.Error(err),
)
return
}
if subnetid.IsZero(subnetToRemoveFrom) {
np.log.Warn("got zero subnet in remove node notification")
np.log.Warn(logs.NetmapGotZeroSubnetInRemoveNodeNotification548)
return
}
@ -182,8 +183,8 @@ func (np *Processor) processRemoveSubnetNode(ev subnetEvent.RemoveNode) {
return nil
})
if err != nil {
np.log.Warn("could not iterate over subnetworks of the node", zap.Error(err))
np.log.Info("vote to remove node from netmap", zap.String("key", hex.EncodeToString(ev.Node())))
np.log.Warn(logs.NetmapCouldNotIterateOverSubnetworksOfTheNode549, zap.Error(err))
np.log.Info(logs.NetmapVoteToRemoveNodeFromNetmap550, zap.String("key", hex.EncodeToString(ev.Node())))
prm := netmapclient.UpdatePeerPrm{}
prm.SetKey(ev.Node())
@ -191,7 +192,7 @@ func (np *Processor) processRemoveSubnetNode(ev subnetEvent.RemoveNode) {
err = np.netmapClient.UpdatePeerState(prm)
if err != nil {
np.log.Error("could not invoke netmap.UpdateState", zap.Error(err))
np.log.Error(logs.NetmapCouldNotInvokeNetmapUpdateState551, zap.Error(err))
return
}
} else {
@ -201,7 +202,7 @@ func (np *Processor) processRemoveSubnetNode(ev subnetEvent.RemoveNode) {
err = np.netmapClient.AddPeer(prm)
if err != nil {
np.log.Error("could not invoke netmap.AddPeer", zap.Error(err))
np.log.Error(logs.NetmapCouldNotInvokeNetmapAddPeer552, zap.Error(err))
return
}
}

View file

@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
@ -142,7 +143,7 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/netmap: node state settings is not set")
}
p.Log.Debug("netmap worker pool", zap.Int("size", p.PoolSize))
p.Log.Debug(logs.NetmapNetmapWorkerPool509, zap.Int("size", p.PoolSize))
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {

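The "worker pool drained" branches in the handlers are reachable because this pool is created non-blocking: Submit fails immediately with ants.ErrPoolOverload instead of queueing when every worker is busy. A standalone sketch of that behavior:

package main

import (
	"fmt"

	"github.com/panjf2000/ants/v2"
)

func main() {
	// Non-blocking pool: Submit never waits for a free worker.
	pool, err := ants.NewPool(1, ants.WithNonblocking(true))
	if err != nil {
		panic(err)
	}
	defer pool.Release()

	block := make(chan struct{})
	_ = pool.Submit(func() { <-block }) // occupy the only worker

	if err := pool.Submit(func() {}); err != nil {
		// This is the situation the processors log as "... worker pool drained".
		fmt.Println("submit failed:", err, "capacity:", pool.Cap())
	}
	close(block)
}
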
View file

@ -3,6 +3,7 @@ package reputation
import (
"encoding/hex"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
reputationEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/reputation"
"go.uber.org/zap"
@ -13,7 +14,7 @@ func (rp *Processor) handlePutReputation(ev event.Event) {
peerID := put.PeerID()
// FIXME: #1147 do not use `ToV2` method outside frostfs-api-go library
rp.log.Info("notification",
rp.log.Info(logs.ReputationNotification553,
zap.String("type", "reputation put"),
zap.String("peer_id", hex.EncodeToString(peerID.PublicKey())))
@ -22,7 +23,7 @@ func (rp *Processor) handlePutReputation(ev event.Event) {
err := rp.pool.Submit(func() { rp.processPut(&put) })
if err != nil {
// the system can be moved into a controlled degradation stage
rp.log.Warn("reputation worker pool drained",
rp.log.Warn(logs.ReputationReputationWorkerPoolDrained554,
zap.Int("capacity", rp.pool.Cap()))
}
}

View file

@ -6,6 +6,7 @@ import (
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation"
reputationEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/reputation"
apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
@ -16,7 +17,7 @@ var errWrongManager = errors.New("got manager that is incorrect for peer")
func (rp *Processor) processPut(e *reputationEvent.Put) {
if !rp.alphabetState.IsAlphabet() {
rp.log.Info("non alphabet mode, ignore reputation put notification")
rp.log.Info(logs.ReputationNonAlphabetModeIgnoreReputationPutNotification555)
return
}
@ -27,7 +28,7 @@ func (rp *Processor) processPut(e *reputationEvent.Put) {
// check if epoch is valid
currentEpoch := rp.epochState.EpochCounter()
if epoch >= currentEpoch {
rp.log.Info("ignore reputation value",
rp.log.Info(logs.ReputationIgnoreReputationValue556,
zap.String("reason", "invalid epoch number"),
zap.Uint64("trust_epoch", epoch),
zap.Uint64("local_epoch", currentEpoch))
@ -37,7 +38,7 @@ func (rp *Processor) processPut(e *reputationEvent.Put) {
// check signature
if !value.VerifySignature() {
rp.log.Info("ignore reputation value",
rp.log.Info(logs.ReputationIgnoreReputationValue557,
zap.String("reason", "invalid signature"),
)
@ -46,7 +47,7 @@ func (rp *Processor) processPut(e *reputationEvent.Put) {
// check if manager is correct
if err := rp.checkManagers(epoch, value.Manager(), id); err != nil {
rp.log.Info("ignore reputation value",
rp.log.Info(logs.ReputationIgnoreReputationValue558,
zap.String("reason", "wrong manager"),
zap.String("error", err.Error()))
@ -91,7 +92,7 @@ func (rp *Processor) approvePutReputation(e *reputationEvent.Put) {
}
if err != nil {
// FIXME: #1147 do not use `ToV2` method outside frostfs-api-go library
rp.log.Warn("can't send approval tx for reputation value",
rp.log.Warn(logs.ReputationCantSendApprovalTxForReputationValue559,
zap.String("peer_id", hex.EncodeToString(id.PublicKey())),
zap.String("error", err.Error()))
}
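
Note how the three rejection branches reuse one message constant and differ only in the structured reason field, which keeps the log greppable by message while preserving detail. The same triage restated as a small helper; the function and its signature are assumptions, not code from the repository:

// ignoreReason restates the checks above; names and shape are assumed.
func ignoreReason(trustEpoch, localEpoch uint64, sigOK bool, managerErr error) (string, bool) {
	switch {
	case trustEpoch >= localEpoch:
		return "invalid epoch number", true
	case !sigOK:
		return "invalid signature", true
	case managerErr != nil:
		return "wrong manager", true
	default:
		return "", false
	}
}

A caller would log rp.log.Info(logs.ReputationIgnoreReputationValue..., zap.String("reason", reason)) and return early whenever the second result is true.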

View file

@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
reputationEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/reputation"
@ -71,7 +72,7 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/reputation: manager builder is not set")
}
p.Log.Debug("reputation worker pool", zap.Int("size", p.PoolSize))
p.Log.Debug(logs.ReputationReputationWorkerPool560, zap.Int("size", p.PoolSize))
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {

View file

@ -7,6 +7,7 @@ import (
"encoding/hex"
"math/big"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/audit"
@ -58,32 +59,32 @@ func (c *Calculator) Calculate(p *CalculatePrm) {
)}
if p.Epoch == 0 {
log.Info("settlements are ignored for zero epoch")
log.Info(logs.AuditSettlementsAreIgnoredForZeroEpoch578)
return
}
log.Info("calculate audit settlements")
log.Info(logs.AuditCalculateAuditSettlements579)
log.Debug("getting results for the previous epoch")
log.Debug(logs.AuditGettingResultsForThePreviousEpoch580)
prevEpoch := p.Epoch - 1
auditResults, err := c.prm.ResultStorage.AuditResultsForEpoch(prevEpoch)
if err != nil {
log.Error("could not collect audit results")
log.Error(logs.AuditCouldNotCollectAuditResults581)
return
} else if len(auditResults) == 0 {
log.Debug("no audit results in previous epoch")
log.Debug(logs.AuditNoAuditResultsInPreviousEpoch582)
return
}
auditFee, err := c.prm.AuditFeeFetcher.AuditFee()
if err != nil {
log.Warn("can't fetch audit fee from network config",
log.Warn(logs.AuditCantFetchAuditFeeFromNetworkConfig583,
zap.String("error", err.Error()))
auditFee = 0
}
log.Debug("processing audit results",
log.Debug(logs.AuditProcessingAuditResults584,
zap.Int("number", len(auditResults)),
)
@ -98,7 +99,7 @@ func (c *Calculator) Calculate(p *CalculatePrm) {
})
}
log.Debug("processing transfers")
log.Debug(logs.AuditProcessingTransfers585)
common.TransferAssets(c.prm.Exchanger, table, common.AuditSettlementDetails(prevEpoch))
}
@ -109,35 +110,35 @@ func (c *Calculator) processResult(ctx *singleResultCtx) {
zap.Uint64("audit epoch", ctx.auditResult.Epoch()),
)}
ctx.log.Debug("reading information about the container")
ctx.log.Debug(logs.AuditReadingInformationAboutTheContainer586)
ok := c.readContainerInfo(ctx)
if !ok {
return
}
ctx.log.Debug("building placement")
ctx.log.Debug(logs.AuditBuildingPlacement587)
ok = c.buildPlacement(ctx)
if !ok {
return
}
ctx.log.Debug("collecting passed nodes")
ctx.log.Debug(logs.AuditCollectingPassedNodes588)
ok = c.collectPassNodes(ctx)
if !ok {
return
}
ctx.log.Debug("calculating sum of the sizes of all storage groups")
ctx.log.Debug(logs.AuditCalculatingSumOfTheSizesOfAllStorageGroups589)
ok = c.sumSGSizes(ctx)
if !ok {
return
}
ctx.log.Debug("filling transfer table")
ctx.log.Debug(logs.AuditFillingTransferTable590)
c.fillTransferTable(ctx)
}
@ -145,7 +146,7 @@ func (c *Calculator) processResult(ctx *singleResultCtx) {
func (c *Calculator) readContainerInfo(ctx *singleResultCtx) bool {
cnr, ok := ctx.auditResult.Container()
if !ok {
ctx.log.Error("missing container in audit result")
ctx.log.Error(logs.AuditMissingContainerInAuditResult591)
return false
}
@ -153,7 +154,7 @@ func (c *Calculator) readContainerInfo(ctx *singleResultCtx) bool {
ctx.cnrInfo, err = c.prm.ContainerStorage.ContainerInfo(cnr)
if err != nil {
ctx.log.Error("could not get container info",
ctx.log.Error(logs.AuditCouldNotGetContainerInfo592,
zap.String("error", err.Error()),
)
}
@ -166,14 +167,14 @@ func (c *Calculator) buildPlacement(ctx *singleResultCtx) bool {
ctx.cnrNodes, err = c.prm.PlacementCalculator.ContainerNodes(ctx.auditEpoch(), ctx.containerID())
if err != nil {
ctx.log.Error("could not get container nodes",
ctx.log.Error(logs.AuditCouldNotGetContainerNodes593,
zap.String("error", err.Error()),
)
}
empty := len(ctx.cnrNodes) == 0
if empty {
ctx.log.Debug("empty list of container nodes")
ctx.log.Debug(logs.AuditEmptyListOfContainerNodes594)
}
return err == nil && !empty
@ -206,7 +207,7 @@ func (c *Calculator) collectPassNodes(ctx *singleResultCtx) bool {
empty := len(ctx.passNodes) == 0
if empty {
ctx.log.Debug("none of the container nodes passed the audit")
ctx.log.Debug(logs.AuditNoneOfTheContainerNodesPassedTheAudit595)
}
return !empty
@ -224,7 +225,7 @@ func (c *Calculator) sumSGSizes(ctx *singleResultCtx) bool {
sgInfo, err := c.prm.SGStorage.SGInfo(addr)
if err != nil {
ctx.log.Error("could not get SG info",
ctx.log.Error(logs.AuditCouldNotGetSGInfo,
zap.String("id", id.String()),
zap.String("error", err.Error()),
)
@ -244,7 +245,7 @@ func (c *Calculator) sumSGSizes(ctx *singleResultCtx) bool {
}
if sumPassSGSize == 0 {
ctx.log.Debug("zero sum SG size")
ctx.log.Debug(logs.AuditZeroSumSGSize)
return false
}
@ -260,7 +261,7 @@ func (c *Calculator) fillTransferTable(ctx *singleResultCtx) bool {
for k, info := range ctx.passNodes {
ownerID, err := c.prm.AccountStorage.ResolveKey(info)
if err != nil {
ctx.log.Error("could not resolve public key of the storage node",
ctx.log.Error(logs.AuditCouldNotResolvePublicKeyOfTheStorageNode,
zap.String("error", err.Error()),
zap.String("key", k),
)
@ -270,7 +271,7 @@ func (c *Calculator) fillTransferTable(ctx *singleResultCtx) bool {
price := info.Price()
ctx.log.Debug("calculating storage node salary for audit (GASe-12)",
ctx.log.Debug(logs.AuditCalculatingStorageNodeSalaryForAudit,
zap.Stringer("sum SG size", ctx.sumSGSize),
zap.Stringer("price", price),
)
@ -292,7 +293,7 @@ func (c *Calculator) fillTransferTable(ctx *singleResultCtx) bool {
// add txs to pay inner ring node for audit result
auditIR, err := ownerFromKey(ctx.auditResult.AuditorKey())
if err != nil {
ctx.log.Error("could not parse public key of the inner ring node",
ctx.log.Error(logs.AuditCouldNotParsePublicKeyOfTheInnerRingNode,
zap.String("error", err.Error()),
zap.String("key", hex.EncodeToString(ctx.auditResult.AuditorKey())),
)
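
The transfer-table step computes a per-node salary in GASe-12 from the node's price and the summed storage-group size. The exact formula is outside this hunk; the sketch below assumes salary scales as price × sumSGSize, normalized per GiB, purely for illustration:

package main

import (
	"fmt"
	"math/big"
)

// salary is an assumed formula: price is treated as GAS per GiB stored.
func salary(price, sumSGSize *big.Int) *big.Int {
	s := new(big.Int).Mul(price, sumSGSize)
	return s.Div(s, big.NewInt(1<<30))
}

func main() {
	// A node priced at 100 with 5 GiB of audited SG data earns 500.
	fmt.Println(salary(big.NewInt(100), big.NewInt(5<<30)))
}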

View file

@ -3,6 +3,7 @@ package basic
import (
"math/big"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/common"
cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
"go.uber.org/zap"
@ -20,7 +21,7 @@ func (inc *IncomeSettlementContext) Collect() {
cachedRate, err := inc.rate.BasicRate()
if err != nil {
inc.log.Error("can't get basic income rate",
inc.log.Error(logs.BasicCantGetBasicIncomeRate,
zap.String("error", err.Error()))
return
@ -33,7 +34,7 @@ func (inc *IncomeSettlementContext) Collect() {
cnrEstimations, err := inc.estimations.Estimations(inc.epoch)
if err != nil {
inc.log.Error("can't fetch container size estimations",
inc.log.Error(logs.BasicCantFetchContainerSizeEstimations,
zap.Uint64("epoch", inc.epoch),
zap.String("error", err.Error()))
@ -45,7 +46,7 @@ func (inc *IncomeSettlementContext) Collect() {
for i := range cnrEstimations {
owner, err := inc.container.ContainerInfo(cnrEstimations[i].ContainerID)
if err != nil {
inc.log.Warn("can't fetch container info",
inc.log.Warn(logs.BasicCantFetchContainerInfo,
zap.Uint64("epoch", inc.epoch),
zap.Stringer("container_id", cnrEstimations[i].ContainerID),
zap.String("error", err.Error()))
@ -55,7 +56,7 @@ func (inc *IncomeSettlementContext) Collect() {
cnrNodes, err := inc.placement.ContainerNodes(inc.epoch, cnrEstimations[i].ContainerID)
if err != nil {
inc.log.Debug("can't fetch container info",
inc.log.Debug(logs.BasicCantFetchContainerInfo,
zap.Uint64("epoch", inc.epoch),
zap.Stringer("container_id", cnrEstimations[i].ContainerID),
zap.String("error", err.Error()))

View file

@ -4,6 +4,7 @@ import (
"encoding/hex"
"math/big"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/common"
"go.uber.org/zap"
)
@ -20,7 +21,7 @@ func (inc *IncomeSettlementContext) Distribute() {
bankBalance, err := inc.balances.Balance(inc.bankOwner)
if err != nil {
inc.log.Error("can't fetch balance of banking account",
inc.log.Error(logs.BasicCantFetchBalanceOfBankingAccount,
zap.String("error", err.Error()))
return
@ -31,7 +32,7 @@ func (inc *IncomeSettlementContext) Distribute() {
inc.distributeTable.Iterate(func(key []byte, n *big.Int) {
nodeOwner, err := inc.accounts.ResolveKey(nodeInfoWrapper(key))
if err != nil {
inc.log.Warn("can't transform public key to owner id",
inc.log.Warn(logs.BasicCantTransformPublicKeyToOwnerID,
zap.String("public_key", hex.EncodeToString(key)),
zap.String("error", err.Error()))
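
Distribute drains the banking account across the per-node table built during collection. Whether the stored values are final amounts or relative shares is not visible here; the sketch below assumes shares and pays each node balance × share / total with big.Int arithmetic (all names invented):

package settlement

import "math/big"

// distribute is a sketch under the relative-shares assumption.
func distribute(bank *big.Int, shares map[string]*big.Int, pay func(key string, amount *big.Int)) {
	total := new(big.Int)
	for _, share := range shares {
		total.Add(total, share)
	}
	if total.Sign() == 0 {
		return // nothing was collected this epoch
	}
	for key, share := range shares {
		amount := new(big.Int).Mul(bank, share)
		pay(key, amount.Div(amount, total))
	}
}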

View file

@ -1,6 +1,7 @@
package settlement
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
@ -14,7 +15,7 @@ func (p *Processor) HandleAuditEvent(e event.Event) {
epoch := ev.Epoch()
if !p.state.IsAlphabet() {
p.log.Info("non alphabet mode, ignore audit payments")
p.log.Info(logs.SettlementNonAlphabetModeIgnoreAuditPayments561)
return
}
@ -23,10 +24,10 @@ func (p *Processor) HandleAuditEvent(e event.Event) {
zap.Uint64("epoch", epoch),
)}
log.Info("new audit settlement event")
log.Info(logs.SettlementNewAuditSettlementEvent562)
if epoch == 0 {
log.Debug("ignore genesis epoch")
log.Debug(logs.SettlementIgnoreGenesisEpoch563)
return
}
@ -38,14 +39,14 @@ func (p *Processor) HandleAuditEvent(e event.Event) {
err := p.pool.Submit(handler.handle)
if err != nil {
log.Warn("could not add handler of AuditEvent to queue",
log.Warn(logs.SettlementCouldNotAddHandlerOfAuditEventToQueue564,
zap.String("error", err.Error()),
)
return
}
log.Debug("AuditEvent handling successfully scheduled")
log.Debug(logs.SettlementAuditEventHandlingSuccessfullyScheduled565)
}
func (p *Processor) HandleIncomeCollectionEvent(e event.Event) {
@ -53,19 +54,19 @@ func (p *Processor) HandleIncomeCollectionEvent(e event.Event) {
epoch := ev.Epoch()
if !p.state.IsAlphabet() {
p.log.Info("non alphabet mode, ignore income collection event")
p.log.Info(logs.SettlementNonAlphabetModeIgnoreIncomeCollectionEvent566)
return
}
p.log.Info("start basic income collection",
p.log.Info(logs.SettlementStartBasicIncomeCollection567,
zap.Uint64("epoch", epoch))
p.contextMu.Lock()
defer p.contextMu.Unlock()
if _, ok := p.incomeContexts[epoch]; ok {
p.log.Error("income context already exists",
p.log.Error(logs.SettlementIncomeContextAlreadyExists568,
zap.Uint64("epoch", epoch))
return
@ -73,7 +74,7 @@ func (p *Processor) HandleIncomeCollectionEvent(e event.Event) {
incomeCtx, err := p.basicIncome.CreateContext(epoch)
if err != nil {
p.log.Error("can't create income context",
p.log.Error(logs.SettlementCantCreateIncomeContext569,
zap.String("error", err.Error()))
return
@ -85,7 +86,7 @@ func (p *Processor) HandleIncomeCollectionEvent(e event.Event) {
incomeCtx.Collect()
})
if err != nil {
p.log.Warn("could not add handler of basic income collection to queue",
p.log.Warn(logs.SettlementCouldNotAddHandlerOfBasicIncomeCollectionToQueue570,
zap.String("error", err.Error()),
)
@ -98,12 +99,12 @@ func (p *Processor) HandleIncomeDistributionEvent(e event.Event) {
epoch := ev.Epoch()
if !p.state.IsAlphabet() {
p.log.Info("non alphabet mode, ignore income distribution event")
p.log.Info(logs.SettlementNonAlphabetModeIgnoreIncomeDistributionEvent571)
return
}
p.log.Info("start basic income distribution",
p.log.Info(logs.SettlementStartBasicIncomeDistribution572,
zap.Uint64("epoch", epoch))
p.contextMu.Lock()
@ -113,7 +114,7 @@ func (p *Processor) HandleIncomeDistributionEvent(e event.Event) {
delete(p.incomeContexts, epoch)
if !ok {
p.log.Warn("income context distribution does not exists",
p.log.Warn(logs.SettlementIncomeContextDistributionDoesNotExists573,
zap.Uint64("epoch", epoch))
return
@ -123,7 +124,7 @@ func (p *Processor) HandleIncomeDistributionEvent(e event.Event) {
incomeCtx.Distribute()
})
if err != nil {
p.log.Warn("could not add handler of basic income distribution to queue",
p.log.Warn(logs.SettlementCouldNotAddHandlerOfBasicIncomeDistributionToQueue574,
zap.String("error", err.Error()),
)
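
Collection and distribution are tied together by the per-epoch context map behind contextMu: collection creates and stores a context, distribution pops it, which is why an out-of-order distribution hits the missing-context warning above. A compact sketch of that lifecycle; the registry type and method names are assumptions:

package settlement

import (
	"fmt"
	"sync"
)

type incomeContext struct{ /* collection state for one epoch */ }

// registry sketches the p.contextMu / p.incomeContexts pair above.
type registry struct {
	mu       sync.Mutex
	contexts map[uint64]*incomeContext
}

func (r *registry) startCollection(epoch uint64) (*incomeContext, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if _, ok := r.contexts[epoch]; ok {
		return nil, fmt.Errorf("income context already exists for epoch %d", epoch)
	}
	ctx := &incomeContext{}
	r.contexts[epoch] = ctx
	return ctx, nil
}

func (r *registry) popForDistribution(epoch uint64) (*incomeContext, bool) {
	r.mu.Lock()
	defer r.mu.Unlock()
	ctx, ok := r.contexts[epoch]
	delete(r.contexts, epoch) // distribution consumes the context
	return ctx, ok
}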

View file

@ -1,6 +1,9 @@
package settlement
import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
)
type auditEventHandler struct {
log *logger.Logger
@ -11,9 +14,9 @@ type auditEventHandler struct {
}
func (p *auditEventHandler) handle() {
p.log.Info("process audit settlements")
p.log.Info(logs.SettlementProcessAuditSettlements575)
p.proc.ProcessAuditSettlements(p.epoch)
p.log.Info("audit processing finished")
p.log.Info(logs.SettlementAuditProcessingFinished576)
}

View file

@ -4,6 +4,7 @@ import (
"fmt"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/basic"
nodeutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
@ -63,7 +64,7 @@ func New(prm Prm, opts ...Option) *Processor {
panic(fmt.Errorf("could not create worker pool: %w", err))
}
o.log.Debug("worker pool for settlement processor successfully initialized",
o.log.Debug(logs.SettlementWorkerPoolForSettlementProcessorSuccessfullyInitialized577,
zap.Int("capacity", o.poolSize),
)

View file

@ -6,6 +6,7 @@ import (
"fmt"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
storagegroup2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/storagegroup"
@ -94,7 +95,7 @@ func (c *ClientCache) getSG(ctx context.Context, addr oid.Address, nm *netmap.Ne
cli, err := c.getWrappedClient(info)
if err != nil {
c.log.Warn("can't setup remote connection",
c.log.Warn(logs.InnerringCantSetupRemoteConnection36,
zap.String("error", err.Error()))
continue
@ -109,7 +110,7 @@ func (c *ClientCache) getSG(ctx context.Context, addr oid.Address, nm *netmap.Ne
cancel()
if err != nil {
c.log.Warn("can't get storage group object",
c.log.Warn(logs.InnerringCantGetStorageGroupObject37,
zap.String("error", err.Error()))
continue
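
getSG walks the container nodes in order, giving each attempt its own timeout and falling through to the next node on failure, so a single slow or broken node does not sink the whole lookup. A minimal sketch of that loop; the fetch signature is an assumption:

package innerring

import (
	"context"
	"time"
)

// getFirst tries each node with a bounded attempt; fetch is assumed.
func getFirst(ctx context.Context, nodes []string, timeout time.Duration,
	fetch func(ctx context.Context, node string) ([]byte, error)) ([]byte, error) {
	var lastErr error
	for _, node := range nodes {
		attemptCtx, cancel := context.WithTimeout(ctx, timeout)
		data, err := fetch(attemptCtx, node)
		cancel() // release the attempt's timer before moving on
		if err == nil {
			return data, nil
		}
		lastErr = err // the real code logs and continues
	}
	return nil, lastErr
}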

View file

@ -9,6 +9,7 @@ import (
"fmt"
"math/big"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/audit"
@ -223,7 +224,7 @@ func (s settlementDeps) Transfer(sender, recipient user.ID, amount *big.Int, det
)
if !amount.IsInt64() {
s.log.Error("amount can not be represented as an int64")
s.log.Error(logs.InnerringAmountCanNotBeRepresentedAsAnInt640)
return
}
@ -262,7 +263,7 @@ func (b basicIncomeSettlementDeps) Estimations(epoch uint64) ([]*containerClient
for i := range estimationIDs {
estimation, err := b.cnrClient.GetUsedSpaceEstimations(estimationIDs[i])
if err != nil {
b.log.Warn("can't get used space estimation",
b.log.Warn(logs.InnerringCantGetUsedSpaceEstimation1,
zap.String("estimation_id", hex.EncodeToString(estimationIDs[i])),
zap.String("error", err.Error()))

View file

@ -4,6 +4,7 @@ import (
"fmt"
"sort"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance"
auditClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/audit"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit"
@ -61,7 +62,7 @@ func (s *Server) IsAlphabet() bool {
func (s *Server) InnerRingIndex() int {
index, err := s.statusIndex.InnerRingIndex()
if err != nil {
s.log.Error("can't get inner ring index", zap.String("error", err.Error()))
s.log.Error(logs.InnerringCantGetInnerRingIndex18, zap.String("error", err.Error()))
return -1
}
@ -73,7 +74,7 @@ func (s *Server) InnerRingIndex() int {
func (s *Server) InnerRingSize() int {
size, err := s.statusIndex.InnerRingSize()
if err != nil {
s.log.Error("can't get inner ring size", zap.String("error", err.Error()))
s.log.Error(logs.InnerringCantGetInnerRingSize19, zap.String("error", err.Error()))
return 0
}
@ -85,7 +86,7 @@ func (s *Server) InnerRingSize() int {
func (s *Server) AlphabetIndex() int {
index, err := s.statusIndex.AlphabetIndex()
if err != nil {
s.log.Error("can't get alphabet index", zap.String("error", err.Error()))
s.log.Error(logs.InnerringCantGetAlphabetIndex20, zap.String("error", err.Error()))
return -1
}
@ -97,13 +98,13 @@ func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) erro
index := s.InnerRingIndex()
if s.contracts.alphabet.indexOutOfRange(index) {
s.log.Info("ignore validator vote: node not in alphabet range")
s.log.Info(logs.InnerringIgnoreValidatorVoteNodeNotInAlphabetRange21)
return nil
}
if len(validators) == 0 {
s.log.Info("ignore validator vote: empty validators list")
s.log.Info(logs.InnerringIgnoreValidatorVoteEmptyValidatorsList22)
return nil
}
@ -128,7 +129,7 @@ func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) erro
s.contracts.alphabet.iterate(func(letter GlagoliticLetter, contract util.Uint160) {
err := s.morphClient.NotaryInvoke(contract, s.feeConfig.SideChainFee(), nonce, vubP, voteMethod, epoch, validators)
if err != nil {
s.log.Warn("can't invoke vote method in alphabet contract",
s.log.Warn(logs.InnerringCantInvokeVoteMethodInAlphabetContract23,
zap.Int8("alphabet_index", int8(letter)),
zap.Uint64("epoch", epoch),
zap.String("error", err.Error()))

View file

@ -6,6 +6,7 @@ import (
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
irsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/subnet"
netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
morphsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/subnet"
@ -173,7 +174,7 @@ func (s *Server) catchSubnetCreation(e event.Event) {
s.handleSubnetCreation(e)
})
if err != nil {
s.log.Error("subnet creation queue failure",
s.log.Error(logs.InnerringSubnetCreationQueueFailure2,
zap.String("error", err.Error()),
)
}
@ -225,7 +226,7 @@ func (s *Server) handleSubnetCreation(e event.Event) {
ev: putEv,
})
if err != nil {
s.log.Info("discard subnet creation",
s.log.Info(logs.InnerringDiscardSubnetCreation3,
zap.String("reason", err.Error()),
)
@ -251,7 +252,7 @@ func (s *Server) handleSubnetCreation(e event.Event) {
}
if err != nil {
s.log.Error("approve subnet creation",
s.log.Error(logs.InnerringApproveSubnetCreation4,
zap.Bool("notary", isNotary),
zap.String("error", err.Error()),
)
@ -266,7 +267,7 @@ func (s *Server) catchSubnetRemoval(e event.Event) {
s.handleSubnetRemoval(e)
})
if err != nil {
s.log.Error("subnet removal handling failure",
s.log.Error(logs.InnerringSubnetRemovalHandlingFailure5,
zap.String("error", err.Error()),
)
}
@ -280,7 +281,7 @@ func (s *Server) handleSubnetRemoval(e event.Event) {
candidates, err := s.netmapClient.GetCandidates()
if err != nil {
s.log.Error("getting netmap candidates",
s.log.Error(logs.InnerringGettingNetmapCandidates6,
zap.Error(err),
)
@ -290,7 +291,7 @@ func (s *Server) handleSubnetRemoval(e event.Event) {
var removedID subnetid.ID
err = removedID.Unmarshal(delEv.ID())
if err != nil {
s.log.Error("unmarshalling removed subnet ID",
s.log.Error(logs.InnerringUnmarshallingRemovedSubnetID7,
zap.String("error", err.Error()),
)
@ -318,8 +319,8 @@ func (s *Server) processCandidate(txHash neogoutil.Uint256, removedID subnetid.I
return nil
})
if err != nil {
log.Error("iterating node's subnets", zap.Error(err))
log.Debug("removing node from netmap candidates")
log.Error(logs.InnerringIteratingNodesSubnets8, zap.Error(err))
log.Debug(logs.InnerringRemovingNodeFromNetmapCandidates9)
var updateStatePrm netmapclient.UpdatePeerPrm
updateStatePrm.SetKey(c.PublicKey())
@ -327,7 +328,7 @@ func (s *Server) processCandidate(txHash neogoutil.Uint256, removedID subnetid.I
err = s.netmapClient.UpdatePeerState(updateStatePrm)
if err != nil {
log.Error("removing node from candidates",
log.Error(logs.InnerringRemovingNodeFromCandidates10,
zap.Error(err),
)
}
@ -338,7 +339,7 @@ func (s *Server) processCandidate(txHash neogoutil.Uint256, removedID subnetid.I
// remove subnet from node's information
// if it contains removed subnet
if removeSubnet {
log.Debug("removing subnet from the node")
log.Debug(logs.InnerringRemovingSubnetFromTheNode11)
var addPeerPrm netmapclient.AddPeerPrm
addPeerPrm.SetNodeInfo(c)
@ -346,7 +347,7 @@ func (s *Server) processCandidate(txHash neogoutil.Uint256, removedID subnetid.I
err = s.netmapClient.AddPeer(addPeerPrm)
if err != nil {
log.Error("updating subnet info",
log.Error(logs.InnerringUpdatingSubnetInfo12,
zap.Error(err),
)
}

View file

@ -5,6 +5,7 @@ import (
"os"
"path/filepath"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"go.etcd.io/bbolt"
"go.uber.org/zap"
@ -14,7 +15,7 @@ import (
//
// If the database file does not exist, it will be created automatically.
func (b *Blobovnicza) Open() error {
b.log.Debug("creating directory for BoltDB",
b.log.Debug(logs.BlobovniczaCreatingDirectoryForBoltDB286,
zap.String("path", b.path),
zap.Bool("ro", b.boltOptions.ReadOnly),
)
@ -28,7 +29,7 @@ func (b *Blobovnicza) Open() error {
}
}
b.log.Debug("opening BoltDB",
b.log.Debug(logs.BlobovniczaOpeningBoltDB287,
zap.String("path", b.path),
zap.Stringer("permissions", b.perm),
)
@ -44,13 +45,13 @@ func (b *Blobovnicza) Open() error {
//
// Should not be called in read-only configuration.
func (b *Blobovnicza) Init() error {
b.log.Debug("initializing...",
b.log.Debug(logs.BlobovniczaInitializing288,
zap.Uint64("object size limit", b.objSizeLimit),
zap.Uint64("storage size limit", b.fullSizeLimit),
)
if size := b.filled.Load(); size != 0 {
b.log.Debug("already initialized", zap.Uint64("size", size))
b.log.Debug(logs.BlobovniczaAlreadyInitialized289, zap.Uint64("size", size))
return nil
}
@ -59,7 +60,7 @@ func (b *Blobovnicza) Init() error {
// create size range bucket
rangeStr := stringifyBounds(lower, upper)
b.log.Debug("creating bucket for size range",
b.log.Debug(logs.BlobovniczaCreatingBucketForSizeRange290,
zap.String("range", rangeStr))
_, err := tx.CreateBucketIfNotExists(key)
@ -86,7 +87,7 @@ func (b *Blobovnicza) Init() error {
// Close releases all internal database resources.
func (b *Blobovnicza) Close() error {
b.log.Debug("closing BoltDB",
b.log.Debug(logs.BlobovniczaClosingBoltDB291,
zap.String("path", b.path),
)
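
Init walks a sequence of object-size ranges and creates one BoltDB bucket per range, labeling each with stringifyBounds(lower, upper). The splitting rule is not shown in this diff; the sketch below assumes power-of-two ranges and a simple label format, both hypothetical:

package blobovnicza

import "fmt"

// forEachBucket visits assumed power-of-two size ranges up to limit.
func forEachBucket(firstBucketSize, limit uint64, f func(lower, upper uint64)) {
	for lower := uint64(0); lower < limit; {
		upper := firstBucketSize
		if lower != 0 {
			upper = lower * 2
		}
		f(lower, upper)
		lower = upper
	}
}

// stringifyBounds: label format assumed for illustration.
func stringifyBounds(lower, upper uint64) string {
	return fmt.Sprintf("[%d:%d]", lower, upper)
}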

View file

@ -1,6 +1,7 @@
package blobovnicza
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
@ -51,7 +52,7 @@ func (b *Blobovnicza) Delete(prm DeletePrm) (DeleteRes, error) {
err := buck.Delete(addrKey)
if err == nil {
b.log.Debug("object was removed from bucket",
b.log.Debug(logs.BlobovniczaObjectWasRemovedFromBucket292,
zap.String("binary size", stringifyByteSize(sz)),
zap.String("range", stringifyBounds(lower, upper)),
)

View file

@ -7,6 +7,7 @@ import (
"strconv"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
@ -104,12 +105,12 @@ func NewBlobovniczaTree(opts ...Option) (blz *Blobovniczas) {
// it from opened cache.
return
} else if err := value.Close(); err != nil {
blz.log.Error("could not close Blobovnicza",
blz.log.Error(logs.BlobovniczatreeCouldNotCloseBlobovnicza393,
zap.String("id", p),
zap.String("error", err.Error()),
)
} else {
blz.log.Debug("blobovnicza successfully closed on evict",
blz.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyClosedOnEvict394,
zap.String("id", p),
)
}
@ -141,11 +142,11 @@ func (b *Blobovniczas) getActivated(lvlPath string) (blobovniczaWithIndex, error
//
// if the current active blobovnicza's index is not old, it remains unchanged.
func (b *Blobovniczas) updateActive(lvlPath string, old *uint64) error {
b.log.Debug("updating active blobovnicza...", zap.String("path", lvlPath))
b.log.Debug(logs.BlobovniczatreeUpdatingActiveBlobovnicza395, zap.String("path", lvlPath))
_, err := b.updateAndGet(lvlPath, old)
b.log.Debug("active blobovnicza successfully updated", zap.String("path", lvlPath))
b.log.Debug(logs.BlobovniczatreeActiveBlobovniczaSuccessfullyUpdated396, zap.String("path", lvlPath))
return err
}
@ -201,7 +202,7 @@ func (b *Blobovniczas) updateAndGet(lvlPath string, old *uint64) (blobovniczaWit
}
b.lruMtx.Unlock()
b.log.Debug("blobovnicza successfully activated",
b.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyActivated397,
zap.String("path", activePath))
return active, nil
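
The eviction handler registered in NewBlobovniczaTree is what produces the closed-on-evict messages above: when the opened-blobovnicza cache overflows, the evicted entry is closed rather than leaked. A standalone sketch of the same shape using hashicorp/golang-lru's eviction callback (cache size and value type simplified):

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

type blob struct{ path string }

func (b *blob) Close() error { return nil }

func main() {
	cache, err := lru.NewWithEvict[string, *blob](1, func(path string, b *blob) {
		// Mirrors the "could not close" / "successfully closed on evict" split.
		if err := b.Close(); err != nil {
			fmt.Println("could not close:", path, err)
			return
		}
		fmt.Println("closed on evict:", path)
	})
	if err != nil {
		panic(err)
	}
	cache.Add("a", &blob{path: "a"})
	cache.Add("b", &blob{path: "b"}) // evicts and closes "a"
}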

View file

@ -4,6 +4,7 @@ import (
"fmt"
"path/filepath"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"go.uber.org/zap"
)
@ -18,10 +19,10 @@ func (b *Blobovniczas) Open(readOnly bool) error {
//
// Should be called exactly once.
func (b *Blobovniczas) Init() error {
b.log.Debug("initializing Blobovnicza's")
b.log.Debug(logs.BlobovniczatreeInitializingBlobovniczas411)
if b.readOnly {
b.log.Debug("read-only mode, skip blobovniczas initialization...")
b.log.Debug(logs.BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization412)
return nil
}
@ -36,7 +37,7 @@ func (b *Blobovniczas) Init() error {
return true, fmt.Errorf("could not initialize blobovnicza structure %s: %w", p, err)
}
b.log.Debug("blobovnicza successfully initialized, closing...", zap.String("id", p))
b.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing413, zap.String("id", p))
return false, nil
})
}
@ -49,7 +50,7 @@ func (b *Blobovniczas) Close() error {
for p, v := range b.active {
if err := v.blz.Close(); err != nil {
b.log.Debug("could not close active blobovnicza",
b.log.Debug(logs.BlobovniczatreeCouldNotCloseActiveBlobovnicza414,
zap.String("path", p),
zap.String("error", err.Error()),
)
@ -59,7 +60,7 @@ func (b *Blobovniczas) Close() error {
for _, k := range b.opened.Keys() {
blz, _ := b.opened.Get(k)
if err := blz.Close(); err != nil {
b.log.Debug("could not close active blobovnicza",
b.log.Debug(logs.BlobovniczatreeCouldNotCloseActiveBlobovnicza415,
zap.String("path", k),
zap.String("error", err.Error()),
)

View file

@ -3,6 +3,7 @@ package blobovniczatree
import (
"path/filepath"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
@ -44,7 +45,7 @@ func (b *Blobovniczas) Delete(prm common.DeletePrm) (res common.DeleteRes, err e
res, err = b.deleteObjectFromLevel(bPrm, p, !ok, prm)
if err != nil {
if !blobovnicza.IsErrNotFound(err) {
b.log.Debug("could not remove object from level",
b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromLevel398,
zap.String("level", p),
zap.String("error", err.Error()),
)
@ -83,7 +84,7 @@ func (b *Blobovniczas) deleteObjectFromLevel(prm blobovnicza.DeletePrm, blzPath
if res, err := b.deleteObject(v, prm, dp); err == nil {
return res, err
} else if !blobovnicza.IsErrNotFound(err) {
b.log.Debug("could not remove object from opened blobovnicza",
b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromOpenedBlobovnicza399,
zap.String("path", blzPath),
zap.String("error", err.Error()),
)
@ -102,7 +103,7 @@ func (b *Blobovniczas) deleteObjectFromLevel(prm blobovnicza.DeletePrm, blzPath
if res, err := b.deleteObject(active.blz, prm, dp); err == nil {
return res, err
} else if !blobovnicza.IsErrNotFound(err) {
b.log.Debug("could not remove object from active blobovnicza",
b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromActiveBlobovnicza400,
zap.String("path", blzPath),
zap.String("error", err.Error()),
)

View file

@ -6,6 +6,7 @@ import (
"path/filepath"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"go.opentelemetry.io/otel/attribute"
@ -47,7 +48,7 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common
_, err := b.getObjectFromLevel(ctx, gPrm, p, !ok)
if err != nil {
if !blobovnicza.IsErrNotFound(err) {
b.log.Debug("could not get object from level",
b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel416,
zap.String("level", p),
zap.String("error", err.Error()))
}

View file

@ -7,6 +7,7 @@ import (
"path/filepath"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
@ -53,7 +54,7 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
res, err = b.getObjectFromLevel(ctx, bPrm, p, !ok)
if err != nil {
if !blobovnicza.IsErrNotFound(err) {
b.log.Debug("could not get object from level",
b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel408,
zap.String("level", p),
zap.String("error", err.Error()),
)
@ -88,7 +89,7 @@ func (b *Blobovniczas) getObjectFromLevel(ctx context.Context, prm blobovnicza.G
if res, err := b.getObject(ctx, v, prm); err == nil {
return res, err
} else if !blobovnicza.IsErrNotFound(err) {
b.log.Debug("could not read object from opened blobovnicza",
b.log.Debug(logs.BlobovniczatreeCouldNotReadObjectFromOpenedBlobovnicza409,
zap.String("path", blzPath),
zap.String("error", err.Error()),
)
@ -108,7 +109,7 @@ func (b *Blobovniczas) getObjectFromLevel(ctx context.Context, prm blobovnicza.G
if res, err := b.getObject(ctx, active.blz, prm); err == nil {
return res, err
} else if !blobovnicza.IsErrNotFound(err) {
b.log.Debug("could not get object from active blobovnicza",
b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromActiveBlobovnicza410,
zap.String("path", blzPath),
zap.String("error", err.Error()),
)

View file

@ -8,6 +8,7 @@ import (
"strconv"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
@ -54,7 +55,7 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
if err != nil {
outOfBounds := isErrOutOfRange(err)
if !outOfBounds && !blobovnicza.IsErrNotFound(err) {
b.log.Debug("could not get object from level",
b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel390,
zap.String("level", p),
zap.String("error", err.Error()),
)
@ -98,7 +99,7 @@ func (b *Blobovniczas) getRangeFromLevel(ctx context.Context, prm common.GetRang
return res, err
default:
if !blobovnicza.IsErrNotFound(err) {
b.log.Debug("could not read payload range from opened blobovnicza",
b.log.Debug(logs.BlobovniczatreeCouldNotReadPayloadRangeFromOpenedBlobovnicza391,
zap.String("path", blzPath),
zap.String("error", err.Error()),
)
@ -123,7 +124,7 @@ func (b *Blobovniczas) getRangeFromLevel(ctx context.Context, prm common.GetRang
return res, err
default:
if !blobovnicza.IsErrNotFound(err) {
b.log.Debug("could not read payload range from active blobovnicza",
b.log.Debug(logs.BlobovniczatreeCouldNotReadPayloadRangeFromActiveBlobovnicza392,
zap.String("path", blzPath),
zap.String("error", err.Error()),
)

View file

@ -4,6 +4,7 @@ import (
"errors"
"path/filepath"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"go.etcd.io/bbolt"
@ -56,9 +57,9 @@ func (i *putIterator) iterate(path string) (bool, error) {
active, err := i.B.getActivated(path)
if err != nil {
if !isLogical(err) {
i.B.reportError("could not get active blobovnicza", err)
i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza401, err)
} else {
i.B.log.Debug("could not get active blobovnicza",
i.B.log.Debug(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza402,
zap.String("error", err.Error()))
}
@ -71,15 +72,15 @@ func (i *putIterator) iterate(path string) (bool, error) {
// and `updateActive` takes care of not updating the active blobovnicza twice.
if isFull := errors.Is(err, blobovnicza.ErrFull); isFull || errors.Is(err, bbolt.ErrDatabaseNotOpen) {
if isFull {
i.B.log.Debug("blobovnicza overflowed",
i.B.log.Debug(logs.BlobovniczatreeBlobovniczaOverflowed403,
zap.String("path", filepath.Join(path, u64ToHexString(active.ind))))
}
if err := i.B.updateActive(path, &active.ind); err != nil {
if !isLogical(err) {
i.B.reportError("could not update active blobovnicza", err)
i.B.reportError(logs.BlobovniczatreeCouldNotUpdateActiveBlobovnicza404, err)
} else {
i.B.log.Debug("could not update active blobovnicza",
i.B.log.Debug(logs.BlobovniczatreeCouldNotUpdateActiveBlobovnicza405,
zap.String("level", path),
zap.String("error", err.Error()))
}
@ -92,9 +93,9 @@ func (i *putIterator) iterate(path string) (bool, error) {
i.AllFull = false
if !isLogical(err) {
i.B.reportError("could not put object to active blobovnicza", err)
i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza406, err)
} else {
i.B.log.Debug("could not put object to active blobovnicza",
i.B.log.Debug(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza407,
zap.String("path", filepath.Join(path, u64ToHexString(active.ind))),
zap.String("error", err.Error()))
}
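
Every failure path in the put iterator goes through the same triage: logical errors (expected states such as a full or read-only blobovnicza) are only debug-logged, while anything else is passed to reportError and counts toward the shard's health. A self-contained sketch of that split; the sentinel type stands in for the real logicerr package, whose API is not shown here:

package blobovniczatree

import (
	"errors"

	"go.uber.org/zap"
)

// logicalErr is a hypothetical marker for expected-state errors.
type logicalErr struct{ error }

func isLogical(err error) bool {
	var le logicalErr
	return errors.As(err, &le)
}

// triage mirrors the branches above: report suspicious errors, merely
// debug-log the expected ones.
func triage(log *zap.Logger, report func(string, error), msg string, err error) {
	if !isLogical(err) {
		report(msg, err) // feeds the shard error threshold
		return
	}
	log.Debug(msg, zap.String("error", err.Error()))
}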

View file

@ -4,12 +4,13 @@ import (
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"go.uber.org/zap"
)
// Open opens BlobStor.
func (b *BlobStor) Open(readOnly bool) error {
b.log.Debug("opening...")
b.log.Debug(logs.BlobstorOpening293)
for i := range b.storage {
err := b.storage[i].Storage.Open(readOnly)
@ -29,7 +30,7 @@ var ErrInitBlobovniczas = errors.New("failure on blobovnicza initialization stag
//
// Returns wrapped ErrInitBlobovniczas on blobovnicza tree's initialization failure.
func (b *BlobStor) Init() error {
b.log.Debug("initializing...")
b.log.Debug(logs.BlobstorInitializing294)
if err := b.compression.Init(); err != nil {
return err
@ -46,13 +47,13 @@ func (b *BlobStor) Init() error {
// Close releases all internal resources of BlobStor.
func (b *BlobStor) Close() error {
b.log.Debug("closing...")
b.log.Debug(logs.BlobstorClosing295)
var firstErr error
for i := range b.storage {
err := b.storage[i].Storage.Close()
if err != nil {
b.log.Info("couldn't close storage", zap.String("error", err.Error()))
b.log.Info(logs.BlobstorCouldntCloseStorage296, zap.String("error", err.Error()))
if firstErr == nil {
firstErr = err
}

View file

@ -5,6 +5,7 @@ import (
"encoding/hex"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@ -57,7 +58,7 @@ func (b *BlobStor) Exists(ctx context.Context, prm common.ExistsPrm) (common.Exi
}
for _, err := range errors[:len(errors)-1] {
b.log.Warn("error occurred during object existence checking",
b.log.Warn(logs.BlobstorErrorOccurredDuringObjectExistenceChecking297,
zap.Stringer("address", prm.Address),
zap.String("error", err.Error()))
}

View file

@ -3,6 +3,7 @@ package blobstor
import (
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
@ -38,7 +39,7 @@ func IterateBinaryObjects(blz *BlobStor, f func(addr oid.Address, data []byte, d
}
prm.IgnoreErrors = true
prm.ErrorHandler = func(addr oid.Address, err error) error {
blz.log.Warn("error occurred during the iteration",
blz.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration298,
zap.Stringer("address", addr),
zap.String("err", err.Error()))
return nil

View file

@ -8,6 +8,7 @@ import (
"strings"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"go.uber.org/zap"
@ -47,7 +48,7 @@ func (e *StorageEngine) open() error {
for res := range errCh {
if res.err != nil {
e.log.Error("could not open shard, closing and skipping",
e.log.Error(logs.EngineCouldNotOpenShardClosingAndSkipping301,
zap.String("id", res.id),
zap.Error(res.err))
@ -56,7 +57,7 @@ func (e *StorageEngine) open() error {
err := sh.Close()
if err != nil {
e.log.Error("could not close partially initialized shard",
e.log.Error(logs.EngineCouldNotClosePartiallyInitializedShard302,
zap.String("id", res.id),
zap.Error(res.err))
}
@ -94,7 +95,7 @@ func (e *StorageEngine) Init(ctx context.Context) error {
for res := range errCh {
if res.err != nil {
if errors.Is(res.err, blobstor.ErrInitBlobovniczas) {
e.log.Error("could not initialize shard, closing and skipping",
e.log.Error(logs.EngineCouldNotInitializeShardClosingAndSkipping303,
zap.String("id", res.id),
zap.Error(res.err))
@ -103,7 +104,7 @@ func (e *StorageEngine) Init(ctx context.Context) error {
err := sh.Close()
if err != nil {
e.log.Error("could not close partially initialized shard",
e.log.Error(logs.EngineCouldNotClosePartiallyInitializedShard304,
zap.String("id", res.id),
zap.Error(res.err))
}
@ -149,7 +150,7 @@ func (e *StorageEngine) close(releasePools bool) error {
for id, sh := range e.shards {
if err := sh.Close(); err != nil {
e.log.Debug("could not close shard",
e.log.Debug(logs.EngineCouldNotCloseShard305,
zap.String("id", id),
zap.String("error", err.Error()),
)
@ -309,7 +310,7 @@ loop:
for _, p := range shardsToReload {
err := p.sh.Reload(p.opts...)
if err != nil {
e.log.Error("could not reload a shard",
e.log.Error(logs.EngineCouldNotReloadAShard306,
zap.Stringer("shard id", p.sh.ID()),
zap.Error(err))
}
@ -338,7 +339,7 @@ loop:
return fmt.Errorf("could not add %s shard: %w", idStr, err)
}
e.log.Info("added new shard", zap.String("id", idStr))
e.log.Info(logs.EngineAddedNewShard307, zap.String("id", idStr))
}
return nil
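
open and Init start every shard in its own goroutine and collect per-shard failures over a buffered channel, so one broken shard is logged, closed, and skipped instead of aborting the whole engine. A compact sketch of that fan-out/collect shape with a simplified shard interface:

package engine

import "sync"

type openable interface{ Open() error }

type shardErr struct {
	id  string
	err error
}

// openAll opens shards concurrently and returns the failures; the
// caller closes and skips those shards, as in the code above.
func openAll(shards map[string]openable) []shardErr {
	errCh := make(chan shardErr, len(shards))
	var wg sync.WaitGroup
	for id, sh := range shards {
		wg.Add(1)
		go func(id string, sh openable) {
			defer wg.Done()
			errCh <- shardErr{id: id, err: sh.Open()}
		}(id, sh)
	}
	wg.Wait()
	close(errCh)

	var failed []shardErr
	for res := range errCh {
		if res.err != nil {
			failed = append(failed, res)
		}
	}
	return failed
}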

View file

@ -4,6 +4,7 @@ import (
"context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@ -136,7 +137,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo
e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
res, err := sh.Select(selectPrm)
if err != nil {
e.log.Warn("error during searching for object children",
e.log.Warn(logs.EngineErrorDuringSearchingForObjectChildren310,
zap.Stringer("addr", addr),
zap.String("error", err.Error()))
return false
@ -147,7 +148,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo
_, err = sh.Inhume(ctx, inhumePrm)
if err != nil {
e.log.Debug("could not inhume object in shard",
e.log.Debug(logs.EngineCouldNotInhumeObjectInShard311,
zap.Stringer("addr", addr),
zap.String("err", err.Error()))
continue

View file

@ -4,6 +4,7 @@ import (
"errors"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
@ -87,24 +88,24 @@ func (e *StorageEngine) moveToDegraded(sh *shard.Shard, errCount uint32) {
sid := sh.ID()
err := sh.SetMode(mode.DegradedReadOnly)
if err != nil {
e.log.Error("failed to move shard in degraded-read-only mode, moving to read-only",
e.log.Error(logs.EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly319,
zap.Stringer("shard_id", sid),
zap.Uint32("error count", errCount),
zap.Error(err))
err = sh.SetMode(mode.ReadOnly)
if err != nil {
e.log.Error("failed to move shard in read-only mode",
e.log.Error(logs.EngineFailedToMoveShardInReadonlyMode320,
zap.Stringer("shard_id", sid),
zap.Uint32("error count", errCount),
zap.Error(err))
} else {
e.log.Info("shard is moved in read-only mode due to error threshold",
e.log.Info(logs.EngineShardIsMovedInReadonlyModeDueToErrorThreshold321,
zap.Stringer("shard_id", sid),
zap.Uint32("error count", errCount))
}
} else {
e.log.Info("shard is moved in degraded mode due to error threshold",
e.log.Info(logs.EngineShardIsMovedInDegradedModeDueToErrorThreshold322,
zap.Stringer("shard_id", sid),
zap.Uint32("error count", errCount))
}
@ -182,7 +183,7 @@ func (e *StorageEngine) reportShardErrorWithFlags(
default:
// For background workers we can have a lot of such errors,
// thus logging is done with DEBUG level.
e.log.Debug("mode change is in progress, ignoring set-mode request",
e.log.Debug(logs.EngineModeChangeIsInProgressIgnoringSetmodeRequest323,
zap.Stringer("shard_id", sid),
zap.Uint32("error_count", errCount))
}
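
These transitions are driven by a per-shard error counter: reportShardError bumps it on each failure, and crossing the configured threshold triggers the degraded-read-only switch, with read-only as the fallback when that switch itself fails. A hedged sketch of the counter side; field names and threshold semantics are assumptions:

package engine

import "sync/atomic"

// shardErrors sketches the assumed per-shard accounting.
type shardErrors struct {
	count     atomic.Uint32
	threshold uint32 // 0 would disable automatic mode changes
}

// report registers one error and says whether the shard should be
// moved to degraded-read-only (see moveToDegraded above).
func (s *shardErrors) report() (errCount uint32, degrade bool) {
	errCount = s.count.Add(1)
	degrade = s.threshold > 0 && errCount >= s.threshold
	return errCount, degrade
}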

View file

@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
@ -79,7 +80,7 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (Eva
}
}
e.log.Info("started shards evacuation", zap.Strings("shard_ids", shardIDs))
e.log.Info(logs.EngineStartedShardsEvacuation324, zap.Strings("shard_ids", shardIDs))
var res EvacuateShardRes
@ -89,7 +90,7 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (Eva
}
}
e.log.Info("finished shards evacuation", zap.Strings("shard_ids", shardIDs))
e.log.Info(logs.EngineFinishedShardsEvacuation325, zap.Strings("shard_ids", shardIDs))
return res, nil
}
@ -206,7 +207,7 @@ func (e *StorageEngine) tryEvacuateObject(ctx context.Context, addr oid.Address,
putDone, exists := e.putToShard(ctx, shards[j].hashedShard, j, shards[j].pool, addr, object)
if putDone || exists {
if putDone {
e.log.Debug("object is moved to another shard",
e.log.Debug(logs.EngineObjectIsMovedToAnotherShard326,
zap.Stringer("from", sh.ID()),
zap.Stringer("to", shards[j].ID()),
zap.Stringer("addr", addr))

View file

@ -4,6 +4,7 @@ import (
"context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@ -83,7 +84,7 @@ func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, e
if !prm.forceRemoval {
locked, err := e.IsLocked(prm.addrs[i])
if err != nil {
e.log.Warn("removing an object without full locking check",
e.log.Warn(logs.EngineRemovingAnObjectWithoutFullLockingCheck,
zap.Error(err),
zap.Stringer("addr", prm.addrs[i]))
} else if locked {
@ -222,7 +223,7 @@ func (e *StorageEngine) processExpiredLocks(ctx context.Context, epoch uint64, l
select {
case <-ctx.Done():
e.log.Info("interrupt processing the expired locks", zap.Error(ctx.Err()))
e.log.Info(logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(ctx.Err()))
return true
default:
return false
@ -236,7 +237,7 @@ func (e *StorageEngine) processDeletedLocks(ctx context.Context, lockers []oid.A
select {
case <-ctx.Done():
e.log.Info("interrupt processing the deleted locks", zap.Error(ctx.Err()))
e.log.Info(logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(ctx.Err()))
return true
default:
return false
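
Both hunks above use the same non-blocking cancellation check: a select with a default branch, so lock processing stops promptly once the context is done but never blocks otherwise. A runnable sketch of the idiom:

package main

import (
	"context"
	"fmt"
	"time"
)

// interrupted reports whether processing should stop, without blocking.
func interrupted(ctx context.Context) bool {
	select {
	case <-ctx.Done():
		fmt.Println("interrupt processing the expired locks:", ctx.Err())
		return true
	default:
		return false
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	for !interrupted(ctx) {
		time.Sleep(2 * time.Millisecond) // one unit of work
	}
}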

View file

@ -4,6 +4,7 @@ import (
"context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@ -118,7 +119,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, ind int,
_, err = sh.ToMoveIt(toMoveItPrm)
if err != nil {
e.log.Warn("could not mark object for shard relocation",
e.log.Warn(logs.EngineCouldNotMarkObjectForShardRelocation,
zap.Stringer("shard", sh.ID()),
zap.String("error", err.Error()),
)
@ -135,7 +136,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, ind int,
if err != nil {
if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) ||
errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) {
e.log.Warn("could not put object to shard",
e.log.Warn(logs.EngineCouldNotPutObjectToShard,
zap.Stringer("shard_id", sh.ID()),
zap.String("error", err.Error()))
return

View file

@ -4,6 +4,7 @@ import (
"context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@ -42,7 +43,7 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat
prm.Concurrency = defaultRemoveDuplicatesConcurrency
}
e.log.Info("starting removal of locally-redundant copies",
e.log.Info(logs.EngineStartingRemovalOfLocallyredundantCopies,
zap.Int("concurrency", prm.Concurrency))
// The mutex must be taken for the whole duration to avoid the target shard being removed
@ -54,7 +55,7 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat
// This is not currently the case, because `FreeSpace` metric used by weight sorting is always 0.
// However we could change weights in future and easily forget this function.
for _, sh := range e.shards {
e.log.Debug("started duplicates removal routine", zap.String("shard_id", sh.ID().String()))
e.log.Debug(logs.EngineStartedDuplicatesRemovalRoutine, zap.String("shard_id", sh.ID().String()))
ch := make(chan oid.Address)
errG, ctx := errgroup.WithContext(ctx)
@ -92,12 +93,12 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat
})
}
if err := errG.Wait(); err != nil {
e.log.Error("finished removal of locally-redundant copies", zap.Error(err))
e.log.Error(logs.EngineFinishedRemovalOfLocallyredundantCopies, zap.Error(err))
return err
}
}
e.log.Info("finished removal of locally-redundant copies")
e.log.Info(logs.EngineFinishedRemovalOfLocallyredundantCopies)
return nil
}
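
RemoveDuplicates pairs a single producer goroutine with prm.Concurrency consumers over an unbuffered channel, all inside an errgroup so the first failure cancels the rest. A stripped-down sketch of that shape, with string addresses instead of oid.Address and printing instead of metabase work:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func process(ctx context.Context, addrs []string, concurrency int) error {
	ch := make(chan string)
	errG, ctx := errgroup.WithContext(ctx)

	// Producer: closes the channel when done so consumers terminate.
	errG.Go(func() error {
		defer close(ch)
		for _, a := range addrs {
			select {
			case ch <- a:
			case <-ctx.Done():
				return ctx.Err()
			}
		}
		return nil
	})

	// Consumers: the first error cancels ctx and stops the producer.
	for i := 0; i < concurrency; i++ {
		errG.Go(func() error {
			for a := range ch {
				fmt.Println("deduplicating", a)
			}
			return nil
		})
	}
	return errG.Wait()
}

func main() {
	_ = process(context.Background(), []string{"addr-1", "addr-2", "addr-3"}, 2)
}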

View file

@ -3,6 +3,7 @@ package engine
import (
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
@ -168,7 +169,7 @@ func (e *StorageEngine) removeShards(ids ...string) {
delete(e.shardPools, id)
}
e.log.Info("shard has been removed",
e.log.Info(logs.EngineShardHasBeenRemoved,
zap.String("id", id))
}
e.mtx.Unlock()
@ -176,7 +177,7 @@ func (e *StorageEngine) removeShards(ids ...string) {
for _, sh := range ss {
err := sh.Close()
if err != nil {
e.log.Error("could not close removed shard",
e.log.Error(logs.EngineCouldNotCloseRemovedShard,
zap.Stringer("id", sh.ID()),
zap.Error(err),
)

View file

@ -5,6 +5,7 @@ import (
"fmt"
"path/filepath"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
@ -25,7 +26,7 @@ func (db *DB) Open(readOnly bool) error {
return fmt.Errorf("can't create dir %s for metabase: %w", db.info.Path, err)
}
db.log.Debug("created directory for Metabase", zap.String("path", db.info.Path))
db.log.Debug(logs.MetabaseCreatedDirectoryForMetabase, zap.String("path", db.info.Path))
if db.boltOptions == nil {
opts := *bbolt.DefaultOptions
@ -46,9 +47,9 @@ func (db *DB) openBolt() error {
db.boltDB.MaxBatchDelay = db.boltBatchDelay
db.boltDB.MaxBatchSize = db.boltBatchSize
db.log.Debug("opened boltDB instance for Metabase")
db.log.Debug(logs.MetabaseOpenedBoltDBInstanceForMetabase)
db.log.Debug("checking metabase version")
db.log.Debug(logs.MetabaseCheckingMetabaseVersion)
return db.boltDB.View(func(tx *bbolt.Tx) error {
// The safest way to check if the metabase is fresh is to check if it has no buckets.
// However, shard info can be present. So here we check that the number of buckets is
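
The comment above is cut off by the hunk boundary; the idea is that a fresh metabase is detected by counting top-level buckets inside a read transaction. A sketch of such a check against bbolt (the real code's tolerance for a pre-existing shard-info bucket is omitted here):

package main

import (
	"fmt"
	"log"

	"go.etcd.io/bbolt"
)

// isFresh reports whether the database has no top-level buckets.
func isFresh(db *bbolt.DB) (bool, error) {
	n := 0
	err := db.View(func(tx *bbolt.Tx) error {
		return tx.ForEach(func(name []byte, _ *bbolt.Bucket) error {
			n++
			return nil
		})
	})
	return n == 0, err
}

func main() {
	db, err := bbolt.Open("meta.db", 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	fmt.Println(isFresh(db))
}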

View file

@ -7,6 +7,7 @@ import (
"strings"
v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@ -267,7 +268,7 @@ func (db *DB) selectFromFKBT(
) { //
matchFunc, ok := db.matchers[f.Operation()]
if !ok {
db.log.Debug("missing matcher", zap.Uint32("operation", uint32(f.Operation())))
db.log.Debug(logs.MetabaseMissingMatcher, zap.Uint32("operation", uint32(f.Operation())))
return
}
@ -290,7 +291,7 @@ func (db *DB) selectFromFKBT(
})
})
if err != nil {
db.log.Debug("error in FKBT selection", zap.String("error", err.Error()))
db.log.Debug(logs.MetabaseErrorInFKBTSelection, zap.String("error", err.Error()))
}
}
@ -360,13 +361,13 @@ func (db *DB) selectFromList(
case object.MatchStringEqual:
lst, err = decodeList(bkt.Get(bucketKeyHelper(f.Header(), f.Value())))
if err != nil {
db.log.Debug("can't decode list bucket leaf", zap.String("error", err.Error()))
db.log.Debug(logs.MetabaseCantDecodeListBucketLeaf, zap.String("error", err.Error()))
return
}
default:
fMatch, ok := db.matchers[op]
if !ok {
db.log.Debug("unknown operation", zap.Uint32("operation", uint32(op)))
db.log.Debug(logs.MetabaseUnknownOperation, zap.Uint32("operation", uint32(op)))
return
}
@ -374,7 +375,7 @@ func (db *DB) selectFromList(
if err = fMatch.matchBucket(bkt, f.Header(), f.Value(), func(key, val []byte) error {
l, err := decodeList(val)
if err != nil {
db.log.Debug("can't decode list bucket leaf",
db.log.Debug(logs.MetabaseCantDecodeListBucketLeaf,
zap.String("error", err.Error()),
)
@ -385,7 +386,7 @@ func (db *DB) selectFromList(
return nil
}); err != nil {
db.log.Debug("can't iterate over the bucket",
db.log.Debug(logs.MetabaseCantIterateOverTheBucket,
zap.String("error", err.Error()),
)
@ -429,7 +430,7 @@ func (db *DB) selectObjectID(
default:
fMatch, ok := db.matchers[op]
if !ok {
db.log.Debug("unknown operation",
db.log.Debug(logs.MetabaseUnknownOperation,
zap.Uint32("operation", uint32(f.Operation())),
)
@ -451,7 +452,7 @@ func (db *DB) selectObjectID(
return nil
})
if err != nil {
db.log.Debug("could not iterate over the buckets",
db.log.Debug(logs.MetabaseCouldNotIterateOverTheBuckets,
zap.String("error", err.Error()),
)
}
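
selectFromFKBT, selectFromList, and selectObjectID all dispatch through the same db.matchers map, logging and skipping any operation without a registered matcher instead of failing the whole select. The lookup pattern in isolation; the matcher signature and operation codes below are simplified assumptions:

package main

import "fmt"

type matchFn func(header, value string) bool

var matchers = map[uint32]matchFn{
	0: func(h, v string) bool { return h == v }, // e.g. MatchStringEqual
}

func match(op uint32, h, v string) bool {
	fn, ok := matchers[op]
	if !ok {
		fmt.Println("unknown operation:", op) // skip, don't fail the select
		return false
	}
	return fn(h, v)
}

func main() {
	fmt.Println(match(0, "a", "a"), match(7, "a", "b"))
}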

View file

@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
@ -15,7 +16,7 @@ import (
)
func (s *Shard) handleMetabaseFailure(stage string, err error) error {
s.log.Error("metabase failure, switching mode",
s.log.Error(logs.ShardMetabaseFailureSwitchingMode,
zap.String("stage", stage),
zap.Stringer("mode", mode.ReadOnly),
zap.Error(err))
@ -25,7 +26,7 @@ func (s *Shard) handleMetabaseFailure(stage string, err error) error {
return nil
}
s.log.Error("can't move shard to readonly, switch mode",
s.log.Error(logs.ShardCantMoveShardToReadonlySwitchMode,
zap.String("stage", stage),
zap.Stringer("mode", mode.DegradedReadOnly),
zap.Error(err))
@ -167,7 +168,7 @@ func (s *Shard) refillMetabase() error {
err = blobstor.IterateBinaryObjects(s.blobStor, func(addr oid.Address, data []byte, descriptor []byte) error {
if err := obj.Unmarshal(data); err != nil {
s.log.Warn("could not unmarshal object",
s.log.Warn(logs.ShardCouldNotUnmarshalObject,
zap.Stringer("address", addr),
zap.String("err", err.Error()))
return nil
@ -274,7 +275,7 @@ func (s *Shard) Close() error {
for _, component := range components {
if err := component.Close(); err != nil {
lastErr = err
s.log.Error("could not close shard component", zap.Error(err))
s.log.Error(logs.ShardCouldNotCloseShardComponent, zap.Error(err))
}
}
@ -302,7 +303,7 @@ func (s *Shard) Reload(opts ...Option) error {
ok, err := s.metaBase.Reload(c.metaOpts...)
if err != nil {
if errors.Is(err, meta.ErrDegradedMode) {
s.log.Error("can't open metabase, move to a degraded mode", zap.Error(err))
s.log.Error(logs.ShardCantOpenMetabaseMoveToADegradedMode, zap.Error(err))
_ = s.setMode(mode.DegradedReadOnly)
}
return err
@ -318,12 +319,12 @@ func (s *Shard) Reload(opts ...Option) error {
err = s.metaBase.Init()
}
if err != nil {
s.log.Error("can't initialize metabase, move to a degraded-read-only mode", zap.Error(err))
s.log.Error(logs.ShardCantInitializeMetabaseMoveToADegradedreadonlyMode, zap.Error(err))
_ = s.setMode(mode.DegradedReadOnly)
return err
}
}
s.log.Info("trying to restore read-write mode")
s.log.Info(logs.ShardTryingToRestoreReadwriteMode)
return s.setMode(mode.ReadWrite)
}
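
refillMetabase above relies on the iteration contract of blobstor.IterateBinaryObjects: returning nil from the callback after a failed unmarshal logs the object and keeps the scan going, so one corrupt blob cannot abort the whole refill. A sketch of that contract, with JSON standing in for the real object encoding:

package main

import (
	"encoding/json"
	"fmt"
)

func iterate(blobs [][]byte, fn func([]byte) error) error {
	for _, b := range blobs {
		if err := fn(b); err != nil {
			return err // a non-nil error aborts the iteration
		}
	}
	return nil
}

func main() {
	blobs := [][]byte{[]byte(`{"id":"obj-1"}`), []byte(`garbage`)}
	_ = iterate(blobs, func(data []byte) error {
		var obj struct{ ID string }
		if err := json.Unmarshal(data, &obj); err != nil {
			fmt.Println("could not unmarshal object:", err)
			return nil // skip the bad blob, continue the refill
		}
		fmt.Println("indexed", obj.ID)
		return nil
	})
}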

View file

@ -3,6 +3,7 @@ package shard
import (
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
@ -49,7 +50,7 @@ func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) {
if s.hasWriteCache() {
err := s.writeCache.Delete(prm.addr[i])
if err != nil && !IsErrNotFound(err) && !errors.Is(err, writecache.ErrReadOnly) {
s.log.Warn("can't delete object from write cache", zap.String("error", err.Error()))
s.log.Warn(logs.ShardCantDeleteObjectFromWriteCache, zap.String("error", err.Error()))
}
}
@ -58,7 +59,7 @@ func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) {
res, err := s.metaBase.StorageID(sPrm)
if err != nil {
s.log.Debug("can't get storage ID from metabase",
s.log.Debug(logs.ShardCantGetStorageIDFromMetabase,
zap.Stringer("object", prm.addr[i]),
zap.String("error", err.Error()))
@ -100,7 +101,7 @@ func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) {
_, err = s.blobStor.Delete(delPrm)
if err != nil {
s.log.Debug("can't remove object from blobStor",
s.log.Debug(logs.ShardCantRemoveObjectFromBlobStor,
zap.Stringer("object_address", prm.addr[i]),
zap.String("error", err.Error()))
}

View file

@ -5,6 +5,7 @@ import (
"sync"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
@ -124,7 +125,7 @@ func (gc *gc) listenEvents(ctx context.Context) {
for {
event, ok := <-gc.eventChan
if !ok {
gc.log.Warn("stop event listener by closed channel")
gc.log.Warn(logs.ShardStopEventListenerByClosedChannel)
return
}
@ -149,7 +150,7 @@ func (gc *gc) listenEvents(ctx context.Context) {
v.prevGroup.Done()
})
if err != nil {
gc.log.Warn("could not submit GC job to worker pool",
gc.log.Warn(logs.ShardCouldNotSubmitGCJobToWorkerPool,
zap.String("error", err.Error()),
)
@ -174,7 +175,7 @@ func (gc *gc) tickRemover() {
close(gc.eventChan)
gc.log.Debug("GC is stopped")
gc.log.Debug(logs.ShardGCIsStopped)
return
case <-timer.C:
gc.remover()
@ -188,7 +189,7 @@ func (gc *gc) stop() {
gc.stopChannel <- struct{}{}
})
gc.log.Info("waiting for GC workers to stop...")
gc.log.Info(logs.ShardWaitingForGCWorkersToStop)
gc.wg.Wait()
}
@ -220,7 +221,7 @@ func (s *Shard) removeGarbage() {
// (no more than s.rmBatchSize objects)
err := s.metaBase.IterateOverGarbage(iterPrm)
if err != nil {
s.log.Warn("iterator over metabase graveyard failed",
s.log.Warn(logs.ShardIteratorOverMetabaseGraveyardFailed,
zap.String("error", err.Error()),
)
@ -235,7 +236,7 @@ func (s *Shard) removeGarbage() {
// delete accumulated objects
_, err = s.delete(deletePrm)
if err != nil {
s.log.Warn("could not delete the objects",
s.log.Warn(logs.ShardCouldNotDeleteTheObjects,
zap.String("error", err.Error()),
)
@ -295,7 +296,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) {
})
if err := errGroup.Wait(); err != nil {
s.log.Warn("iterator over expired objects failed", zap.String("error", err.Error()))
s.log.Warn(logs.ShardIteratorOverExpiredObjectsFailed, zap.String("error", err.Error()))
}
}
@ -321,7 +322,7 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address)
// inhume the collected objects
res, err := s.metaBase.Inhume(inhumePrm)
if err != nil {
s.log.Warn("could not inhume the objects",
s.log.Warn(logs.ShardCouldNotInhumeTheObjects,
zap.String("error", err.Error()),
)
@ -342,7 +343,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
epoch := e.(newEpoch).epoch
log := s.log.With(zap.Uint64("epoch", epoch))
log.Debug("started expired tombstones handling")
log.Debug(logs.ShardStartedExpiredTombstonesHandling)
const tssDeleteBatch = 50
tss := make([]meta.TombstonedObject, 0, tssDeleteBatch)
@ -360,12 +361,12 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
})
for {
log.Debug("iterating tombstones")
log.Debug(logs.ShardIteratingTombstones)
s.m.RLock()
if s.info.Mode.NoMetabase() {
s.log.Debug("shard is in a degraded mode, skip collecting expired tombstones")
s.log.Debug(logs.ShardShardIsInADegradedModeSkipCollectingExpiredTombstones)
s.m.RUnlock()
return
@ -373,7 +374,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
err := s.metaBase.IterateOverGraveyard(iterPrm)
if err != nil {
log.Error("iterator over graveyard failed", zap.Error(err))
log.Error(logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
s.m.RUnlock()
return
@ -392,7 +393,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
}
}
log.Debug("handling expired tombstones batch", zap.Int("number", len(tssExp)))
log.Debug(logs.ShardHandlingExpiredTombstonesBatch, zap.Int("number", len(tssExp)))
s.expiredTombstonesCallback(ctx, tssExp)
iterPrm.SetOffset(tss[tssLen-1].Address())
@ -400,7 +401,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
tssExp = tssExp[:0]
}
log.Debug("finished expired tombstones handling")
log.Debug(logs.ShardFinishedExpiredTombstonesHandling)
}
func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) {
@ -442,7 +443,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) {
})
if err := errGroup.Wait(); err != nil {
s.log.Warn("iterator over expired locks failed", zap.String("error", err.Error()))
s.log.Warn(logs.ShardIteratorOverExpiredLocksFailed, zap.String("error", err.Error()))
}
}
@ -503,7 +504,7 @@ func (s *Shard) HandleExpiredTombstones(tss []meta.TombstonedObject) {
// inhume tombstones
res, err := s.metaBase.Inhume(pInhume)
if err != nil {
s.log.Warn("could not mark tombstones as garbage",
s.log.Warn(logs.ShardCouldNotMarkTombstonesAsGarbage,
zap.String("error", err.Error()),
)
@ -523,7 +524,7 @@ func (s *Shard) HandleExpiredTombstones(tss []meta.TombstonedObject) {
// from graveyard
err = s.metaBase.DropGraves(tss)
if err != nil {
s.log.Warn("could not drop expired grave records", zap.Error(err))
s.log.Warn(logs.ShardCouldNotDropExpiredGraveRecords, zap.Error(err))
}
}
@ -535,7 +536,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
}
unlocked, err := s.metaBase.FreeLockedBy(lockers)
if err != nil {
s.log.Warn("failure to unlock objects",
s.log.Warn(logs.ShardFailureToUnlockObjects,
zap.String("error", err.Error()),
)
@ -548,7 +549,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
res, err := s.metaBase.Inhume(pInhume)
if err != nil {
s.log.Warn("failure to mark lockers as garbage",
s.log.Warn(logs.ShardFailureToMarkLockersAsGarbage,
zap.String("error", err.Error()),
)
@ -570,7 +571,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unlocked []oid.Address) {
expiredUnlocked, err := s.selectExpired(ctx, epoch, unlocked)
if err != nil {
s.log.Warn("failure to get expired unlocked objects", zap.Error(err))
s.log.Warn(logs.ShardFailureToGetExpiredUnlockedObjects, zap.Error(err))
return
}
@ -589,7 +590,7 @@ func (s *Shard) HandleDeletedLocks(lockers []oid.Address) {
_, err := s.metaBase.FreeLockedBy(lockers)
if err != nil {
s.log.Warn("failure to unlock objects",
s.log.Warn(logs.ShardFailureToUnlockObjects,
zap.String("error", err.Error()),
)
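
The GC above is driven by a channel of epoch events: the listener exits when the channel is closed, and each event is handed to a bounded worker pool. A compact sketch of that loop, with a semaphore channel standing in for the shard's worker pool:

package main

import (
	"fmt"
	"sync"
)

func listen(events <-chan uint64, workers int) {
	var wg sync.WaitGroup
	sem := make(chan struct{}, workers) // stand-in for the worker pool
	for epoch := range events {
		wg.Add(1)
		sem <- struct{}{}
		go func(epoch uint64) {
			defer wg.Done()
			defer func() { <-sem }()
			fmt.Println("collecting expired objects for epoch", epoch)
		}(epoch)
	}
	wg.Wait()
	fmt.Println("stop event listener by closed channel")
}

func main() {
	events := make(chan uint64)
	go func() {
		for e := uint64(1); e <= 3; e++ {
			events <- e
		}
		close(events)
	}()
	listen(events, 2)
}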

View file

@ -5,6 +5,7 @@ import (
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
@ -126,7 +127,7 @@ func (s *Shard) fetchObjectData(addr oid.Address, skipMeta bool, cb storFetcher,
return nil, false, logicerr.Wrap(apistatus.ObjectNotFound{})
}
} else {
s.log.Warn("fetching object without meta", zap.Stringer("addr", addr))
s.log.Warn(logs.ShardFetchingObjectWithoutMeta, zap.Stringer("addr", addr))
}
if s.hasWriteCache() {
@ -135,11 +136,11 @@ func (s *Shard) fetchObjectData(addr oid.Address, skipMeta bool, cb storFetcher,
return res, false, err
}
if IsErrNotFound(err) {
s.log.Debug("object is missing in write-cache",
s.log.Debug(logs.ShardObjectIsMissingInWritecache,
zap.Stringer("addr", addr),
zap.Bool("skip_meta", skipMeta))
} else {
s.log.Error("failed to fetch object from write-cache",
s.log.Error(logs.ShardFailedToFetchObjectFromWritecache,
zap.Error(err),
zap.Stringer("addr", addr),
zap.Bool("skip_meta", skipMeta))
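
fetchObjectData distinguishes an expected miss (the object simply is not in the write-cache, logged at Debug) from a real cache failure (logged at Error), and falls through to the main storage either way. The read path reduced to its skeleton, with maps standing in for the write-cache and blobstor:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("object not found")

func get(cache, blobstor map[string][]byte, addr string) ([]byte, error) {
	if data, ok := cache[addr]; ok {
		return data, nil
	}
	fmt.Println("object is missing in write-cache:", addr)
	if data, ok := blobstor[addr]; ok {
		return data, nil // served from the main storage
	}
	return nil, errNotFound
}

func main() {
	blob := map[string][]byte{"addr-1": []byte("payload")}
	data, err := get(map[string][]byte{}, blob, "addr-1")
	fmt.Println(string(data), err)
}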

View file

@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
@ -98,7 +99,7 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
return InhumeRes{}, ErrLockObjectRemoval
}
s.log.Debug("could not mark object to delete in metabase",
s.log.Debug(logs.ShardCouldNotMarkObjectToDeleteInMetabase,
zap.String("error", err.Error()),
)

View file

@ -3,6 +3,7 @@ package shard
import (
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@ -86,7 +87,7 @@ func (s *Shard) List() (res SelectRes, err error) {
sRes, err := s.metaBase.Select(sPrm) // consider making List in metabase
if err != nil {
s.log.Debug("can't select all objects",
s.log.Debug(logs.ShardCantSelectAllObjects,
zap.Stringer("cid", lst[i]),
zap.String("error", err.Error()))

View file

@ -1,6 +1,7 @@
package shard
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"go.uber.org/zap"
@ -25,7 +26,7 @@ func (s *Shard) SetMode(m mode.Mode) error {
}
func (s *Shard) setMode(m mode.Mode) error {
s.log.Info("setting shard mode",
s.log.Info(logs.ShardSettingShardMode,
zap.Stringer("old_mode", s.info.Mode),
zap.Stringer("new_mode", m))
@ -66,7 +67,7 @@ func (s *Shard) setMode(m mode.Mode) error {
s.metricsWriter.SetReadonly(s.info.Mode != mode.ReadWrite)
}
s.log.Info("shard mode set successfully",
s.log.Info(logs.ShardShardModeSetSuccessfully,
zap.Stringer("mode", s.info.Mode))
return nil
}

View file

@ -1,6 +1,7 @@
package shard
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
@ -38,7 +39,7 @@ func (s *Shard) ToMoveIt(prm ToMoveItPrm) (ToMoveItRes, error) {
_, err := s.metaBase.ToMoveIt(toMovePrm)
if err != nil {
s.log.Debug("could not mark object for shard relocation in metabase",
s.log.Debug(logs.ShardCouldNotMarkObjectForShardRelocationInMetabase,
zap.String("error", err.Error()),
)
}

View file

@ -3,6 +3,7 @@ package shard
import (
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
@ -58,7 +59,7 @@ func (s *Shard) Put(prm PutPrm) (PutRes, error) {
}
if err != nil || !tryCache {
if err != nil {
s.log.Debug("can't put object to the write-cache, trying blobstor",
s.log.Debug(logs.ShardCantPutObjectToTheWritecacheTryingBlobstor,
zap.String("err", err.Error()))
}

View file

@ -5,6 +5,7 @@ import (
"sync"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
@ -349,7 +350,7 @@ func (s *Shard) updateMetrics() {
if s.cfg.metricsWriter != nil && !s.GetMode().NoMetabase() {
cc, err := s.metaBase.ObjectCounters()
if err != nil {
s.log.Warn("meta: object counter read",
s.log.Warn(logs.ShardMetaObjectCounterRead,
zap.Error(err),
)
@ -361,7 +362,7 @@ func (s *Shard) updateMetrics() {
cnrList, err := s.metaBase.Containers()
if err != nil {
s.log.Warn("meta: can't read container list", zap.Error(err))
s.log.Warn(logs.ShardMetaCantReadContainerList, zap.Error(err))
return
}
@ -370,7 +371,7 @@ func (s *Shard) updateMetrics() {
for i := range cnrList {
size, err := s.metaBase.ContainerSize(cnrList[i])
if err != nil {
s.log.Warn("meta: can't read container size",
s.log.Warn(logs.ShardMetaCantReadContainerSize,
zap.String("cid", cnrList[i].EncodeToString()),
zap.Error(err))
continue

View file

@ -5,6 +5,7 @@ import (
"errors"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@ -134,7 +135,7 @@ func (c *cache) flushDB() {
c.modeMtx.RUnlock()
c.log.Debug("tried to flush items from write-cache",
c.log.Debug(logs.WritecacheTriedToFlushItemsFromWritecache,
zap.Int("count", count),
zap.String("start", base58.Encode(lastKey)))
}

View file

@ -5,6 +5,7 @@ import (
"errors"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
@ -54,7 +55,7 @@ func (c *cache) initFlushMarks() {
var errStopIter = errors.New("stop iteration")
func (c *cache) fsTreeFlushMarkUpdate() {
c.log.Info("filling flush marks for objects in FSTree")
c.log.Info(logs.WritecacheFillingFlushMarksForObjectsInFSTree)
var prm common.IteratePrm
prm.LazyHandler = func(addr oid.Address, _ func() ([]byte, error)) error {
@ -86,11 +87,11 @@ func (c *cache) fsTreeFlushMarkUpdate() {
return nil
}
_, _ = c.fsTree.Iterate(prm)
c.log.Info("finished updating FSTree flush marks")
c.log.Info(logs.WritecacheFinishedUpdatingFSTreeFlushMarks)
}
func (c *cache) dbFlushMarkUpdate() {
c.log.Info("filling flush marks for objects in database")
c.log.Info(logs.WritecacheFillingFlushMarksForObjectsInDatabase)
var m []string
var indices []int
@ -158,7 +159,7 @@ func (c *cache) dbFlushMarkUpdate() {
lastKey = append([]byte(m[len(m)-1]), 0)
}
c.log.Info("finished updating flush marks")
c.log.Info(logs.WritecacheFinishedUpdatingFlushMarks)
}
// flushStatus returns info about the object state in the main storage.

View file

@ -4,6 +4,7 @@ import (
"fmt"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
)
@ -59,7 +60,7 @@ func (c *cache) setMode(m mode.Mode) error {
// flushCh is populated by `flush` with `modeMtx` taken, thus waiting until it is empty
// guarantees that there are no in-fly operations.
for len(c.flushCh) != 0 {
c.log.Info("waiting for channels to flush")
c.log.Info(logs.WritecacheWaitingForChannelsToFlush)
time.Sleep(time.Second)
}
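
Because modeMtx is held here, nothing can add to flushCh, so polling its length until it reaches zero is a safe, if blunt, way to wait out in-flight flushes. The same wait in miniature:

package main

import (
	"fmt"
	"time"
)

func waitFlushed(flushCh chan string) {
	for len(flushCh) != 0 {
		fmt.Println("waiting for channels to flush")
		time.Sleep(10 * time.Millisecond) // the real code sleeps a second
	}
}

func main() {
	flushCh := make(chan string, 1)
	flushCh <- "obj-1"
	go func() {
		time.Sleep(25 * time.Millisecond)
		<-flushCh // a flusher drains the last item
	}()
	waitFlushed(flushCh)
	fmt.Println("flush channel drained")
}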

View file

@ -5,6 +5,7 @@ import (
"fmt"
"os"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
@ -124,7 +125,7 @@ func (c *cache) deleteFromDB(keys []string) []string {
)
}
if err != nil {
c.log.Error("can't remove objects from the database", zap.Error(err))
c.log.Error(logs.WritecacheCantRemoveObjectsFromTheDatabase, zap.Error(err))
}
copy(keys, keys[errorIndex:])
@ -141,13 +142,13 @@ func (c *cache) deleteFromDisk(keys []string) []string {
for i := range keys {
if err := addr.DecodeString(keys[i]); err != nil {
c.log.Error("can't parse address", zap.String("address", keys[i]))
c.log.Error(logs.WritecacheCantParseAddress, zap.String("address", keys[i]))
continue
}
_, err := c.fsTree.Delete(common.DeletePrm{Address: addr})
if err != nil && !errors.As(err, new(apistatus.ObjectNotFound)) {
c.log.Error("can't remove object from write-cache", zap.Error(err))
c.log.Error(logs.WritecacheCantRemoveObjectFromWritecache, zap.Error(err))
// Save the key for the next iteration.
keys[copyIndex] = keys[i]

View file

@ -8,6 +8,7 @@ import (
"sync"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
@ -188,7 +189,7 @@ func (c *Client) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string,
return fmt.Errorf("could not invoke %s: %w", method, err)
}
c.logger.Debug("neo client invoke",
c.logger.Debug(logs.ClientNeoClientInvoke,
zap.String("method", method),
zap.Uint32("vub", vub),
zap.Stringer("tx_hash", txHash.Reverse()))
@ -271,7 +272,7 @@ func (c *Client) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error
return err
}
c.logger.Debug("native gas transfer invoke",
c.logger.Debug(logs.ClientNativeGasTransferInvoke,
zap.String("to", receiver.StringLE()),
zap.Stringer("tx_hash", txHash.Reverse()),
zap.Uint32("vub", vub))
@ -305,7 +306,7 @@ func (c *Client) BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8
return err
}
c.logger.Debug("batch gas transfer invoke",
c.logger.Debug(logs.ClientBatchGasTransferInvoke,
zap.Strings("to", receiversLog),
zap.Stringer("tx_hash", txHash.Reverse()),
zap.Uint32("vub", vub))
@ -332,7 +333,7 @@ func (c *Client) Wait(ctx context.Context, n uint32) error {
height, err = c.rpcActor.GetBlockCount()
if err != nil {
c.logger.Error("can't get blockchain height",
c.logger.Error(logs.ClientCantGetBlockchainHeight,
zap.String("error", err.Error()))
return nil
}
@ -346,7 +347,7 @@ func (c *Client) Wait(ctx context.Context, n uint32) error {
newHeight, err = c.rpcActor.GetBlockCount()
if err != nil {
c.logger.Error("can't get blockchain height",
c.logger.Error(logs.ClientCantGetBlockchainHeight,
zap.String("error", err.Error()))
return nil
}
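
A side note on the zap fields visible throughout this diff: some call sites pass zap.String("error", err.Error()) while others pass zap.Error(err); only the constant extraction of the first argument changes in these commits, the fields are untouched. Both styles side by side:

package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	log, _ := zap.NewDevelopment()
	defer log.Sync()

	err := errors.New("connection refused")

	// Explicit string field, as in the client hunks above...
	log.Error("can't get blockchain height", zap.String("error", err.Error()))
	// ...and the dedicated error field used elsewhere in the diff.
	log.Error("can't get blockchain height", zap.Error(err))
}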

Some files were not shown because too many files have changed in this diff