[#1437] node: Use ctx for logging

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
Dmitrii Stepanov 2024-10-21 10:22:54 +03:00
parent c16dae8b4d
commit 6db46257c0
Signed by: dstepanov-yadro (GPG key ID: 237AF1A763293BC0)
157 changed files with 764 additions and 713 deletions
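The change is mechanical and repeated across all 157 files: every call on the node's logger gains a context.Context as its first argument, presumably so log records can pick up request-scoped data such as trace IDs. A minimal sketch of what the updated wrapper might look like; the Logger type and method signature below are inferred from the call sites in this diff, not copied from the frostfs-node logger package:

package logger

import (
	"context"

	"go.uber.org/zap"
)

// Sketch only: inferred from the call sites in this commit.
type Logger struct {
	z *zap.Logger
}

// Info now takes a ctx so the implementation can attach
// request-scoped fields (e.g. a trace ID) before delegating to zap.
func (l *Logger) Info(ctx context.Context, msg string, fields ...zap.Field) {
	l.z.Info(msg, fields...) // ctx-derived fields would be appended here
}

Warn, Error and Debug change the same way, as the hunks below show.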

@@ -400,13 +400,13 @@ type internals struct {
func (c *cfg) startMaintenance() {
c.isMaintenance.Store(true)
c.cfgNetmap.state.setControlNetmapStatus(control.NetmapStatus_MAINTENANCE)
-c.log.Info(logs.FrostFSNodeStartedLocalNodesMaintenance)
+c.log.Info(context.Background(), logs.FrostFSNodeStartedLocalNodesMaintenance)
}
// stops node's maintenance.
func (c *internals) stopMaintenance() {
if c.isMaintenance.CompareAndSwap(true, false) {
-c.log.Info(logs.FrostFSNodeStoppedLocalNodesMaintenance)
+c.log.Info(context.Background(), logs.FrostFSNodeStoppedLocalNodesMaintenance)
}
}
@@ -705,7 +705,7 @@ func initCfg(appCfg *config.Config) *cfg {
log, err := logger.NewLogger(logPrm)
fatalOnErr(err)
if loggerconfig.ToLokiConfig(appCfg).Enabled {
-log.Logger = log.Logger.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
+log.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(appCfg))
return lokiCore
}))
@@ -1103,10 +1103,10 @@ func initLocalStorage(ctx context.Context, c *cfg) {
shard.WithTombstoneSource(c.createTombstoneSource()),
shard.WithContainerInfoProvider(c.createContainerInfoProvider(ctx)))...)
if err != nil {
-c.log.Error(logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err))
+c.log.Error(ctx, logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err))
} else {
shardsAttached++
-c.log.Info(logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id))
+c.log.Info(ctx, logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id))
}
}
if shardsAttached == 0 {
@@ -1116,15 +1116,15 @@ func initLocalStorage(ctx context.Context, c *cfg) {
c.cfgObject.cfgLocalStorage.localStorage = ls
c.onShutdown(func() {
-c.log.Info(logs.FrostFSNodeClosingComponentsOfTheStorageEngine)
+c.log.Info(ctx, logs.FrostFSNodeClosingComponentsOfTheStorageEngine)
err := ls.Close(context.WithoutCancel(ctx))
if err != nil {
-c.log.Info(logs.FrostFSNodeStorageEngineClosingFailure,
+c.log.Info(ctx, logs.FrostFSNodeStorageEngineClosingFailure,
zap.String("error", err.Error()),
)
} else {
-c.log.Info(logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
+c.log.Info(ctx, logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
}
})
}
@@ -1132,7 +1132,7 @@ func initLocalStorage(ctx context.Context, c *cfg) {
func initAccessPolicyEngine(_ context.Context, c *cfg) {
var localOverrideDB chainbase.LocalOverrideDatabase
if nodeconfig.PersistentPolicyRules(c.appCfg).Path() == "" {
-c.log.Warn(logs.FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed)
+c.log.Warn(context.Background(), logs.FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed)
localOverrideDB = chainbase.NewInmemoryLocalOverrideDatabase()
} else {
localOverrideDB = chainbase.NewBoltLocalOverrideDatabase(
@@ -1157,7 +1157,7 @@ func initAccessPolicyEngine(_ context.Context, c *cfg) {
c.onShutdown(func() {
if err := ape.LocalOverrideDatabaseCore().Close(); err != nil {
-c.log.Warn(logs.FrostFSNodeAccessPolicyEngineClosingFailure,
+c.log.Warn(context.Background(), logs.FrostFSNodeAccessPolicyEngineClosingFailure,
zap.Error(err),
)
}
@@ -1209,7 +1209,7 @@ func (c *cfg) setContractNodeInfo(ni *netmap.NodeInfo) {
func (c *cfg) updateContractNodeInfo(epoch uint64) {
ni, err := c.netmapLocalNodeState(epoch)
if err != nil {
-c.log.Error(logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
+c.log.Error(context.Background(), logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
zap.Uint64("epoch", epoch),
zap.String("error", err.Error()))
return
@@ -1245,13 +1245,13 @@ func (c *cfg) bootstrap() error {
// switch to online except when under maintenance
st := c.cfgNetmap.state.controlNetmapStatus()
if st == control.NetmapStatus_MAINTENANCE {
-c.log.Info(logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
+c.log.Info(context.Background(), logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
return c.bootstrapWithState(func(ni *netmap.NodeInfo) {
ni.SetStatus(netmap.Maintenance)
})
}
-c.log.Info(logs.FrostFSNodeBootstrappingWithOnlineState,
+c.log.Info(context.Background(), logs.FrostFSNodeBootstrappingWithOnlineState,
zap.Stringer("previous", st),
)
@@ -1280,19 +1280,19 @@ func (c *cfg) signalWatcher(ctx context.Context) {
// signals causing application to shut down should have priority over
// reconfiguration signal
case <-ch:
-c.log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
+c.log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
c.shutdown()
-c.log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
+c.log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
return
case err := <-c.internalErr: // internal application error
-c.log.Warn(logs.FrostFSNodeInternalApplicationError,
+c.log.Warn(ctx, logs.FrostFSNodeInternalApplicationError,
zap.String("message", err.Error()))
c.shutdown()
-c.log.Info(logs.FrostFSNodeInternalErrorProcessingIsComplete)
+c.log.Info(ctx, logs.FrostFSNodeInternalErrorProcessingIsComplete)
return
default:
// block until any signal is received
@@ -1300,19 +1300,19 @@ func (c *cfg) signalWatcher(ctx context.Context) {
case <-sighupCh:
c.reloadConfig(ctx)
case <-ch:
-c.log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
+c.log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
c.shutdown()
-c.log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
+c.log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
return
case err := <-c.internalErr: // internal application error
-c.log.Warn(logs.FrostFSNodeInternalApplicationError,
+c.log.Warn(ctx, logs.FrostFSNodeInternalApplicationError,
zap.String("message", err.Error()))
c.shutdown()
-c.log.Info(logs.FrostFSNodeInternalErrorProcessingIsComplete)
+c.log.Info(ctx, logs.FrostFSNodeInternalErrorProcessingIsComplete)
return
}
}
@@ -1320,17 +1320,17 @@ func (c *cfg) signalWatcher(ctx context.Context) {
}
func (c *cfg) reloadConfig(ctx context.Context) {
-c.log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
+c.log.Info(ctx, logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
if !c.compareAndSwapHealthStatus(control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
-c.log.Info(logs.FrostFSNodeSIGHUPSkip)
+c.log.Info(ctx, logs.FrostFSNodeSIGHUPSkip)
return
}
defer c.compareAndSwapHealthStatus(control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
err := c.reloadAppConfig()
if err != nil {
-c.log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
+c.log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err))
return
}
@@ -1341,7 +1341,7 @@ func (c *cfg) reloadConfig(ctx context.Context) {
logPrm, err := c.loggerPrm()
if err != nil {
-c.log.Error(logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err))
+c.log.Error(ctx, logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err))
return
}
@@ -1362,25 +1362,25 @@ func (c *cfg) reloadConfig(ctx context.Context) {
err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg)
if err != nil {
-c.log.Error(logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err))
+c.log.Error(ctx, logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err))
return
}
for _, component := range components {
err = component.reloadFunc()
if err != nil {
-c.log.Error(logs.FrostFSNodeUpdatedConfigurationApplying,
+c.log.Error(ctx, logs.FrostFSNodeUpdatedConfigurationApplying,
zap.String("component", component.name),
zap.Error(err))
}
}
if err := c.dialerSource.Update(internalNetConfig(c.appCfg, c.metricsCollector.MultinetMetrics())); err != nil {
-c.log.Error(logs.FailedToUpdateMultinetConfiguration, zap.Error(err))
+c.log.Error(ctx, logs.FailedToUpdateMultinetConfiguration, zap.Error(err))
return
}
-c.log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
+c.log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
}
func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
@@ -1403,7 +1403,7 @@ func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
}
updated, err := tracing.Setup(ctx, *traceConfig)
if updated {
-c.log.Info(logs.FrostFSNodeTracingConfigationUpdated)
+c.log.Info(ctx, logs.FrostFSNodeTracingConfigationUpdated)
}
return err
}})
@@ -1438,7 +1438,7 @@ func (c *cfg) reloadPools() error {
func (c *cfg) reloadPool(p *ants.Pool, newSize int, name string) {
oldSize := p.Cap()
if oldSize != newSize {
-c.log.Info(logs.FrostFSNodePoolConfigurationUpdate, zap.String("field", name),
+c.log.Info(context.Background(), logs.FrostFSNodePoolConfigurationUpdate, zap.String("field", name),
zap.Int("old", oldSize), zap.Int("new", newSize))
p.Tune(newSize)
}
@@ -1477,11 +1477,11 @@ func (c *cfg) createContainerInfoProvider(ctx context.Context) container.InfoPro
func (c *cfg) shutdown() {
old := c.swapHealthStatus(control.HealthStatus_SHUTTING_DOWN)
if old == control.HealthStatus_SHUTTING_DOWN {
-c.log.Info(logs.FrostFSNodeShutdownSkip)
+c.log.Info(context.Background(), logs.FrostFSNodeShutdownSkip)
return
}
if old == control.HealthStatus_STARTING {
-c.log.Warn(logs.FrostFSNodeShutdownWhenNotReady)
+c.log.Warn(context.Background(), logs.FrostFSNodeShutdownWhenNotReady)
}
c.ctxCancel()
@@ -1491,6 +1491,6 @@ func (c *cfg) shutdown() {
}
if err := sdnotify.ClearStatus(); err != nil {
-c.log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
+c.log.Error(context.Background(), logs.FailedToReportStatusToSystemd, zap.Error(err))
}
}
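The file above also shows the rule the whole commit follows: where a context.Context is already in scope it is threaded through (ctx, as in initLocalStorage and signalWatcher), and where none exists (shutdown hooks and init paths without a context parameter) context.Background() is used as a placeholder. A hypothetical call site illustrating both cases under that convention:

// Hypothetical example of the convention; exampleComponent is not
// a real frostfs-node function.
func (c *cfg) exampleComponent(ctx context.Context) {
	// A context is in scope here, so pass it through.
	c.log.Info(ctx, "component started")

	c.onShutdown(func() {
		// No context reaches the shutdown hook, so fall back to Background().
		c.log.Info(context.Background(), "component stopped")
	})
}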

@@ -102,13 +102,13 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
} else {
// unlike removal, we expect successful receive of the container
// after successful creation, so logging can be useful
-c.log.Error(logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification,
+c.log.Error(context.Background(), logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification,
zap.Stringer("id", ev.ID),
zap.Error(err),
)
}
-c.log.Debug(logs.FrostFSNodeContainerCreationEventsReceipt,
+c.log.Debug(context.Background(), logs.FrostFSNodeContainerCreationEventsReceipt,
zap.Stringer("id", ev.ID),
)
})
@@ -116,7 +116,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
subscribeToContainerRemoval(c, func(e event.Event) {
ev := e.(containerEvent.DeleteSuccess)
containerCache.handleRemoval(ev.ID)
-c.log.Debug(logs.FrostFSNodeContainerRemovalEventsReceipt,
+c.log.Debug(context.Background(), logs.FrostFSNodeContainerRemovalEventsReceipt,
zap.Stringer("id", ev.ID),
)
})

@@ -46,7 +46,7 @@ func initControlService(c *cfg) {
lis, err := net.Listen("tcp", endpoint)
if err != nil {
-c.log.Error(logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err))
+c.log.Error(context.Background(), logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err))
return
}
@@ -60,7 +60,7 @@ func initControlService(c *cfg) {
c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) {
runAndLog(ctx, c, serviceNameControl, false, func(context.Context, *cfg) {
-c.log.Info(logs.FrostFSNodeStartListeningEndpoint,
+c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint,
zap.String("service", serviceNameControl),
zap.String("endpoint", endpoint))
fatalOnErr(c.cfgControlService.server.Serve(lis))
@@ -113,6 +113,6 @@ func (c *cfg) notifySystemd(st control.HealthStatus) {
err = sdnotify.Status(fmt.Sprintf("%v", st))
}
if err != nil {
-c.log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
+c.log.Error(context.Background(), logs.FailedToReportStatusToSystemd, zap.Error(err))
}
}

@@ -1,6 +1,7 @@
package main
import (
+"context"
"crypto/tls"
"errors"
"net"
@@ -30,7 +31,7 @@ func initGRPC(c *cfg) {
lis, err := net.Listen("tcp", sc.Endpoint())
if err != nil {
c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(sc.Endpoint())
-c.log.Error(logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
+c.log.Error(context.Background(), logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
endpointsToReconnect = append(endpointsToReconnect, sc.Endpoint())
return
}
@@ -76,19 +77,19 @@ func scheduleReconnect(endpoint string, c *cfg) {
}
func tryReconnect(endpoint string, c *cfg) bool {
-c.log.Info(logs.FrostFSNodeGRPCReconnecting, zap.String("endpoint", endpoint))
+c.log.Info(context.Background(), logs.FrostFSNodeGRPCReconnecting, zap.String("endpoint", endpoint))
serverOpts, found := getGRPCEndpointOpts(endpoint, c)
if !found {
-c.log.Warn(logs.FrostFSNodeGRPCServerConfigNotFound, zap.String("endpoint", endpoint))
+c.log.Warn(context.Background(), logs.FrostFSNodeGRPCServerConfigNotFound, zap.String("endpoint", endpoint))
return true
}
lis, err := net.Listen("tcp", endpoint)
if err != nil {
c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(endpoint)
-c.log.Error(logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
-c.log.Warn(logs.FrostFSNodeGRPCReconnectFailed, zap.Duration("next_try_in", c.cfgGRPC.reconnectTimeout))
+c.log.Error(context.Background(), logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
+c.log.Warn(context.Background(), logs.FrostFSNodeGRPCReconnectFailed, zap.Duration("next_try_in", c.cfgGRPC.reconnectTimeout))
return false
}
c.metricsCollector.GrpcServerMetrics().MarkHealthy(endpoint)
@@ -101,7 +102,7 @@ func tryReconnect(endpoint string, c *cfg) bool {
c.cfgGRPC.appendAndHandle(endpoint, lis, srv)
-c.log.Info(logs.FrostFSNodeGRPCReconnectedSuccessfully, zap.String("endpoint", endpoint))
+c.log.Info(context.Background(), logs.FrostFSNodeGRPCReconnectedSuccessfully, zap.String("endpoint", endpoint))
return true
}
@@ -143,7 +144,7 @@ func getGrpcServerOpts(c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool
if tlsCfg != nil {
cert, err := tls.LoadX509KeyPair(tlsCfg.CertificateFile(), tlsCfg.KeyFile())
if err != nil {
-c.log.Error(logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err))
+c.log.Error(context.Background(), logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err))
return nil, false
}
@@ -180,21 +181,21 @@ func serveGRPC(c *cfg) {
go func() {
defer func() {
-c.log.Info(logs.FrostFSNodeStopListeningGRPCEndpoint,
+c.log.Info(context.Background(), logs.FrostFSNodeStopListeningGRPCEndpoint,
zap.Stringer("endpoint", l.Addr()),
)
c.wg.Done()
}()
-c.log.Info(logs.FrostFSNodeStartListeningEndpoint,
+c.log.Info(context.Background(), logs.FrostFSNodeStartListeningEndpoint,
zap.String("service", "gRPC"),
zap.Stringer("endpoint", l.Addr()),
)
if err := s.Serve(l); err != nil {
c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(e)
-c.log.Error(logs.FrostFSNodeGRPCServerError, zap.Error(err))
+c.log.Error(context.Background(), logs.FrostFSNodeGRPCServerError, zap.Error(err))
c.cfgGRPC.dropConnection(e)
scheduleReconnect(e, c)
}
@@ -203,9 +204,9 @@ func serveGRPC(c *cfg) {
}
func stopGRPC(name string, s *grpc.Server, l *logger.Logger) {
-l = &logger.Logger{Logger: l.With(zap.String("name", name))}
+l = l.With(zap.String("name", name))
-l.Info(logs.FrostFSNodeStoppingGRPCServer)
+l.Info(context.Background(), logs.FrostFSNodeStoppingGRPCServer)
// GracefulStop() may freeze forever, see #1270
done := make(chan struct{})
@@ -217,9 +218,9 @@ func stopGRPC(name string, s *grpc.Server, l *logger.Logger) {
select {
case <-done:
case <-time.After(1 * time.Minute):
-l.Info(logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop)
+l.Info(context.Background(), logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop)
s.Stop()
}
-l.Info(logs.FrostFSNodeGRPCServerStoppedSuccessfully)
+l.Info(context.Background(), logs.FrostFSNodeGRPCServerStoppedSuccessfully)
}
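The stopGRPC hunk above also shows a small change to the wrapper itself: instead of rebuilding the struct around zap's With, the logger now exposes its own With returning a *logger.Logger. Continuing the sketch from the top of this page, a guess at its shape inferred only from the call site l = l.With(zap.String("name", name)); the real implementation may differ:

// Inferred from the stopGRPC call site above; not the actual implementation.
func (l *Logger) With(fields ...zap.Field) *Logger {
	return &Logger{z: l.z.With(fields...)}
}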

@@ -22,7 +22,7 @@ type httpComponent struct {
func (cmp *httpComponent) init(c *cfg) {
if !cmp.enabled {
-c.log.Info(cmp.name + " is disabled")
+c.log.Info(context.Background(), cmp.name+" is disabled")
return
}
// Init server with parameters
@@ -39,7 +39,7 @@ func (cmp *httpComponent) init(c *cfg) {
go func() {
defer c.wg.Done()
-c.log.Info(logs.FrostFSNodeStartListeningEndpoint,
+c.log.Info(context.Background(), logs.FrostFSNodeStartListeningEndpoint,
zap.String("service", cmp.name),
zap.String("endpoint", cmp.address))
fatalOnErr(srv.Serve())

@@ -73,9 +73,9 @@ func main() {
}
func initAndLog(c *cfg, name string, initializer func(*cfg)) {
-c.log.Info(fmt.Sprintf("initializing %s service...", name))
+c.log.Info(context.Background(), fmt.Sprintf("initializing %s service...", name))
initializer(c)
-c.log.Info(name + " service has been successfully initialized")
+c.log.Info(context.Background(), name+" service has been successfully initialized")
}
func initApp(ctx context.Context, c *cfg) {
@@ -120,25 +120,25 @@ func initApp(ctx context.Context, c *cfg) {
}
func runAndLog(ctx context.Context, c *cfg, name string, logSuccess bool, starter func(context.Context, *cfg)) {
-c.log.Info(fmt.Sprintf("starting %s service...", name))
+c.log.Info(ctx, fmt.Sprintf("starting %s service...", name))
starter(ctx, c)
if logSuccess {
-c.log.Info(name + " service started successfully")
+c.log.Info(ctx, name+" service started successfully")
}
}
func stopAndLog(c *cfg, name string, stopper func() error) {
-c.log.Debug(fmt.Sprintf("shutting down %s service", name))
+c.log.Debug(context.Background(), fmt.Sprintf("shutting down %s service", name))
err := stopper()
if err != nil {
-c.log.Debug(fmt.Sprintf("could not shutdown %s server", name),
+c.log.Debug(context.Background(), fmt.Sprintf("could not shutdown %s server", name),
zap.String("error", err.Error()),
)
}
-c.log.Debug(name + " service has been stopped")
+c.log.Debug(context.Background(), name+" service has been stopped")
}
func bootUp(ctx context.Context, c *cfg) {
@@ -150,7 +150,7 @@ func bootUp(ctx context.Context, c *cfg) {
}
func wait(c *cfg) {
-c.log.Info(logs.CommonApplicationStarted,
+c.log.Info(context.Background(), logs.CommonApplicationStarted,
zap.String("version", misc.Version))
<-c.done // graceful shutdown
@@ -160,12 +160,12 @@ func wait(c *cfg) {
go func() {
defer drain.Done()
for err := range c.internalErr {
-c.log.Warn(logs.FrostFSNodeInternalApplicationError,
+c.log.Warn(context.Background(), logs.FrostFSNodeInternalApplicationError,
zap.String("message", err.Error()))
}
}()
-c.log.Debug(logs.FrostFSNodeWaitingForAllProcessesToStop)
+c.log.Debug(context.Background(), logs.FrostFSNodeWaitingForAllProcessesToStop)
c.wg.Wait()

@@ -48,7 +48,7 @@ func (c *cfg) initMorphComponents(ctx context.Context) {
fatalOnErr(err)
}
-c.log.Info(logs.FrostFSNodeNotarySupport,
+c.log.Info(ctx, logs.FrostFSNodeNotarySupport,
zap.Bool("sidechain_enabled", c.cfgMorph.notaryEnabled),
)
@@ -64,7 +64,7 @@ func (c *cfg) initMorphComponents(ctx context.Context) {
msPerBlock, err := c.cfgMorph.client.MsPerBlock()
fatalOnErr(err)
c.cfgMorph.cacheTTL = time.Duration(msPerBlock) * time.Millisecond
-c.log.Debug(logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL))
+c.log.Debug(ctx, logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL))
}
if c.cfgMorph.cacheTTL < 0 {
@@ -102,7 +102,7 @@ func initMorphClient(ctx context.Context, c *cfg) {
client.WithDialerSource(c.dialerSource),
)
if err != nil {
-c.log.Info(logs.FrostFSNodeFailedToCreateNeoRPCClient,
+c.log.Info(ctx, logs.FrostFSNodeFailedToCreateNeoRPCClient,
zap.Any("endpoints", addresses),
zap.String("error", err.Error()),
)
@@ -111,12 +111,12 @@ func initMorphClient(ctx context.Context, c *cfg) {
}
c.onShutdown(func() {
-c.log.Info(logs.FrostFSNodeClosingMorphComponents)
+c.log.Info(ctx, logs.FrostFSNodeClosingMorphComponents)
cli.Close()
})
if err := cli.SetGroupSignerScope(); err != nil {
-c.log.Info(logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err))
+c.log.Info(ctx, logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err))
}
c.cfgMorph.client = cli
@@ -136,7 +136,7 @@ func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) {
// non-error deposit with an empty TX hash means
// that the deposit has already been made; no
// need to wait it.
-c.log.Info(logs.FrostFSNodeNotaryDepositHasAlreadyBeenMade)
+c.log.Info(ctx, logs.FrostFSNodeNotaryDepositHasAlreadyBeenMade)
return
}
@@ -202,7 +202,7 @@ func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256, vub uint32)
return fmt.Errorf("could not wait for notary deposit persists in chain: %w", err)
}
if res.Execution.VMState.HasFlag(vmstate.Halt) {
-c.log.Info(logs.ClientNotaryDepositTransactionWasSuccessfullyPersisted)
+c.log.Info(ctx, logs.ClientNotaryDepositTransactionWasSuccessfullyPersisted)
return nil
}
return errNotaryDepositFail
@@ -217,7 +217,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey)
if err != nil {
fromSideChainBlock = 0
-c.log.Warn(logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
+c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
}
subs, err = subscriber.New(ctx, &subscriber.Params{
@@ -246,7 +246,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
setNetmapNotificationParser(c, newEpochNotification, func(src *state.ContainedNotificationEvent) (event.Event, error) {
res, err := netmapEvent.ParseNewEpoch(src)
if err == nil {
-c.log.Info(logs.FrostFSNodeNewEpochEventFromSidechain,
+c.log.Info(ctx, logs.FrostFSNodeNewEpochEventFromSidechain,
zap.Uint64("number", res.(netmapEvent.NewEpoch).EpochNumber()),
)
}
@@ -257,11 +257,11 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
registerNotificationHandlers(c.cfgContainer.scriptHash, lis, c.cfgContainer.parsers, c.cfgContainer.subscribers)
registerBlockHandler(lis, func(block *block.Block) {
-c.log.Debug(logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index))
+c.log.Debug(ctx, logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index))
err = c.persistate.SetUInt32(persistateSideChainLastBlockKey, block.Index)
if err != nil {
-c.log.Warn(logs.FrostFSNodeCantUpdatePersistentState,
+c.log.Warn(ctx, logs.FrostFSNodeCantUpdatePersistentState,
zap.String("chain", "side"),
zap.Uint32("block_index", block.Index))
}

@@ -189,7 +189,7 @@ func addNewEpochNotificationHandlers(c *cfg) {
}
if err := c.bootstrap(); err != nil {
-c.log.Warn(logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err))
+c.log.Warn(context.Background(), logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err))
}
})
@@ -197,7 +197,7 @@ func addNewEpochNotificationHandlers(c *cfg) {
addNewEpochAsyncNotificationHandler(c, func(_ event.Event) {
_, _, err := makeNotaryDeposit(c)
if err != nil {
-c.log.Error(logs.FrostFSNodeCouldNotMakeNotaryDeposit,
+c.log.Error(context.Background(), logs.FrostFSNodeCouldNotMakeNotaryDeposit,
zap.String("error", err.Error()),
)
}
@@ -210,7 +210,7 @@ func addNewEpochNotificationHandlers(c *cfg) {
func bootstrapNode(c *cfg) {
if c.needBootstrap() {
if c.IsMaintenance() {
-c.log.Info(logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap)
+c.log.Info(context.Background(), logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap)
return
}
err := c.bootstrap()
@@ -250,7 +250,7 @@ func initNetmapState(c *cfg) {
stateWord := nodeState(ni)
-c.log.Info(logs.FrostFSNodeInitialNetworkState,
+c.log.Info(context.Background(), logs.FrostFSNodeInitialNetworkState,
zap.Uint64("epoch", epoch),
zap.String("state", stateWord),
)
@@ -307,7 +307,7 @@ func (c *cfg) netmapInitLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error
if nmState != candidateState {
// This happens when the node was switched to maintenance without epoch tick.
// We expect it to continue staying in maintenance.
-c.log.Info(logs.CandidateStatusPriority,
+c.log.Info(context.Background(), logs.CandidateStatusPriority,
zap.String("netmap", nmState),
zap.String("candidate", candidateState))
}

@@ -58,7 +58,7 @@ type objectSvc struct {
func (c *cfg) MaxObjectSize() uint64 {
sz, err := c.cfgNetmap.wrapper.MaxObjectSize()
if err != nil {
-c.log.Error(logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
+c.log.Error(context.Background(), logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
zap.String("error", err.Error()),
)
}
@@ -223,7 +223,7 @@ func initObjectService(c *cfg) {
func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.ClientCache) {
if policerconfig.UnsafeDisable(c.appCfg) {
-c.log.Warn(logs.FrostFSNodePolicerIsDisabled)
+c.log.Warn(context.Background(), logs.FrostFSNodePolicerIsDisabled)
return
}
@@ -287,7 +287,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
_, err := ls.Inhume(ctx, inhumePrm)
if err != nil {
-c.log.Warn(logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
+c.log.Warn(ctx, logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
zap.String("error", err.Error()),
)
}

@@ -1,6 +1,7 @@
package main
import (
+"context"
"os"
"runtime/debug"
@@ -12,14 +13,14 @@ import (
func setRuntimeParameters(c *cfg) {
if len(os.Getenv("GOMEMLIMIT")) != 0 {
// default limit < yaml limit < app env limit < GOMEMLIMIT
-c.log.Warn(logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT)
+c.log.Warn(context.Background(), logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT)
return
}
memLimitBytes := runtime.GCMemoryLimitBytes(c.appCfg)
previous := debug.SetMemoryLimit(memLimitBytes)
if memLimitBytes != previous {
-c.log.Info(logs.RuntimeSoftMemoryLimitUpdated,
+c.log.Info(context.Background(), logs.RuntimeSoftMemoryLimitUpdated,
zap.Int64("new_value", memLimitBytes),
zap.Int64("old_value", previous))
}

@@ -13,12 +13,12 @@ import (
func initTracing(ctx context.Context, c *cfg) {
conf, err := tracingconfig.ToTracingConfig(c.appCfg)
if err != nil {
-c.log.Error(logs.FrostFSNodeFailedInitTracing, zap.Error(err))
+c.log.Error(ctx, logs.FrostFSNodeFailedInitTracing, zap.Error(err))
return
}
_, err = tracing.Setup(ctx, *conf)
if err != nil {
-c.log.Error(logs.FrostFSNodeFailedInitTracing, zap.Error(err))
+c.log.Error(ctx, logs.FrostFSNodeFailedInitTracing, zap.Error(err))
return
}
@@ -29,7 +29,7 @@ func initTracing(ctx context.Context, c *cfg) {
defer cancel()
err := tracing.Shutdown(ctx) // cfg context cancels before close
if err != nil {
-c.log.Error(logs.FrostFSNodeFailedShutdownTracing, zap.Error(err))
+c.log.Error(ctx, logs.FrostFSNodeFailedShutdownTracing, zap.Error(err))
}
},
})

@@ -44,7 +44,7 @@ func (c cnrSource) List() ([]cid.ID, error) {
func initTreeService(c *cfg) {
treeConfig := treeconfig.Tree(c.appCfg)
if !treeConfig.Enabled() {
-c.log.Info(logs.FrostFSNodeTreeServiceIsNotEnabledSkipInitialization)
+c.log.Info(context.Background(), logs.FrostFSNodeTreeServiceIsNotEnabledSkipInitialization)
return
}
@@ -83,7 +83,7 @@ func initTreeService(c *cfg) {
addNewEpochNotificationHandler(c, func(_ event.Event) {
err := c.treeService.SynchronizeAll()
if err != nil {
-c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
+c.log.Error(context.Background(), logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
}
})
} else {
@@ -94,7 +94,7 @@ func initTreeService(c *cfg) {
for range tick.C {
err := c.treeService.SynchronizeAll()
if err != nil {
-c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
+c.log.Error(context.Background(), logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
if errors.Is(err, tree.ErrShuttingDown) {
return
}
@@ -107,11 +107,11 @@ func initTreeService(c *cfg) {
ev := e.(containerEvent.DeleteSuccess)
// This is executed asynchronously, so we don't care about the operation taking some time.
-c.log.Debug(logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID))
+c.log.Debug(context.Background(), logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID))
err := c.treeService.DropTree(context.Background(), ev.ID, "")
if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
// Ignore pilorama.ErrTreeNotFound but other errors, including shard.ErrReadOnly, should be logged.
-c.log.Error(logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved,
+c.log.Error(context.Background(), logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved,
zap.Stringer("cid", ev.ID),
zap.String("error", err.Error()))
}