forked from TrueCloudLab/frostfs-node
[#1437] node: Use ctx for logging
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
parent 2cf30e28b8
commit 08182bd6f1
157 changed files with 764 additions and 713 deletions
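The diff below applies one mechanical change across the changed files: every logger call now takes a context.Context as its first argument, the request or application context (ctx) where one is in scope and context.Background() where it is not, and test code switches from building logger.Logger literals to logger.NewLoggerWrapper. A minimal sketch of the wrapper shape these call sites assume follows; the struct layout and exact method set of the real pkg/util/logger type are assumptions for illustration, not taken from this commit.

package logger

import (
	"context"

	"go.uber.org/zap"
)

// Logger is a thin wrapper over zap that accepts a context on every call,
// so request-scoped data (trace IDs, operation tags) can later be attached
// to each record without changing call sites again.
type Logger struct {
	z *zap.Logger
}

// NewLoggerWrapper mirrors the constructor used in the updated tests.
func NewLoggerWrapper(z *zap.Logger) *Logger { return &Logger{z: z} }

// With returns a child logger with extra fields, as in l.With(zap.String("name", name)).
func (l *Logger) With(fields ...zap.Field) *Logger { return &Logger{z: l.z.With(fields...)} }

// Info takes the context first; callers without one pass context.Background().
func (l *Logger) Info(_ context.Context, msg string, fields ...zap.Field) {
	l.z.Info(msg, fields...)
}

// Debug, Warn and Error follow the same shape.
func (l *Logger) Error(_ context.Context, msg string, fields ...zap.Field) {
	l.z.Error(msg, fields...)
}

Under that assumption, a call such as c.log.Info(ctx, logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id)) compiles unchanged while the context is, for now, simply accepted and ignored.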
@@ -1,6 +1,7 @@
 package main
 
 import (
+"context"
 "os"
 "os/signal"
 "syscall"
@@ -58,13 +59,13 @@ func watchForSignal(cancel func()) {
 // signals causing application to shut down should have priority over
 // reconfiguration signal
 case <-ch:
-log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
+log.Info(context.Background(), logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
 cancel()
 shutdown()
-log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
+log.Info(context.Background(), logs.FrostFSNodeTerminationSignalProcessingIsComplete)
 return
 case err := <-intErr: // internal application error
-log.Info(logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
+log.Info(context.Background(), logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
 cancel()
 shutdown()
 return
@@ -72,35 +73,35 @@ func watchForSignal(cancel func()) {
 // block until any signal is receieved
 select {
 case <-ch:
-log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
+log.Info(context.Background(), logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
 cancel()
 shutdown()
-log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
+log.Info(context.Background(), logs.FrostFSNodeTerminationSignalProcessingIsComplete)
 return
 case err := <-intErr: // internal application error
-log.Info(logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
+log.Info(context.Background(), logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
 cancel()
 shutdown()
 return
 case <-sighupCh:
-log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
+log.Info(context.Background(), logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
 if !innerRing.CompareAndSwapHealthStatus(control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
-log.Info(logs.FrostFSNodeSIGHUPSkip)
+log.Info(context.Background(), logs.FrostFSNodeSIGHUPSkip)
 break
 }
 err := reloadConfig()
 if err != nil {
-log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
+log.Error(context.Background(), logs.FrostFSNodeConfigurationReading, zap.Error(err))
 }
 pprofCmp.reload()
 metricsCmp.reload()
-log.Info(logs.FrostFSIRReloadExtraWallets)
+log.Info(context.Background(), logs.FrostFSIRReloadExtraWallets)
 err = innerRing.SetExtraWallets(cfg)
 if err != nil {
-log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
+log.Error(context.Background(), logs.FrostFSNodeConfigurationReading, zap.Error(err))
 }
 innerRing.CompareAndSwapHealthStatus(control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
-log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
+log.Info(context.Background(), logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
 }
 }
 }

@@ -1,6 +1,7 @@
 package main
 
 import (
+"context"
 "net/http"
 "time"
 
@@ -25,7 +26,7 @@ const (
 )
 
 func (c *httpComponent) init() {
-log.Info("init " + c.name)
+log.Info(context.Background(), "init "+c.name)
 c.enabled = cfg.GetBool(c.name + enabledKeyPostfix)
 c.address = cfg.GetString(c.name + addressKeyPostfix)
 c.shutdownDur = cfg.GetDuration(c.name + shutdownTimeoutKeyPostfix)
@@ -39,14 +40,14 @@ func (c *httpComponent) init() {
 httputil.WithShutdownTimeout(c.shutdownDur),
 )
 } else {
-log.Info(c.name + " is disabled, skip")
+log.Info(context.Background(), c.name+" is disabled, skip")
 c.srv = nil
 }
 }
 
 func (c *httpComponent) start() {
 if c.srv != nil {
-log.Info("start " + c.name)
+log.Info(context.Background(), "start "+c.name)
 wg.Add(1)
 go func() {
 defer wg.Done()
@@ -57,7 +58,7 @@ func (c *httpComponent) start() {
 
 func (c *httpComponent) shutdown() error {
 if c.srv != nil {
-log.Info("shutdown " + c.name)
+log.Info(context.Background(), "shutdown "+c.name)
 return c.srv.Shutdown()
 }
 return nil
@@ -71,11 +72,11 @@ func (c *httpComponent) needReload() bool {
 }
 
 func (c *httpComponent) reload() {
-log.Info("reload " + c.name)
+log.Info(context.Background(), "reload "+c.name)
 if c.needReload() {
-log.Info(c.name + " config updated")
+log.Info(context.Background(), c.name+" config updated")
 if err := c.shutdown(); err != nil {
-log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
+log.Debug(context.Background(), logs.FrostFSIRCouldNotShutdownHTTPServer,
 zap.String("error", err.Error()),
 )
 } else {

@@ -103,32 +103,32 @@ func main() {
 err = innerRing.Start(ctx, intErr)
 exitErr(err)
 
-log.Info(logs.CommonApplicationStarted,
+log.Info(ctx, logs.CommonApplicationStarted,
 zap.String("version", misc.Version))
 
 watchForSignal(cancel)
 
 <-ctx.Done() // graceful shutdown
-log.Debug(logs.FrostFSNodeWaitingForAllProcessesToStop)
+log.Debug(ctx, logs.FrostFSNodeWaitingForAllProcessesToStop)
 wg.Wait()
 
-log.Info(logs.FrostFSIRApplicationStopped)
+log.Info(ctx, logs.FrostFSIRApplicationStopped)
 }
 
 func shutdown() {
 innerRing.Stop()
 if err := metricsCmp.shutdown(); err != nil {
-log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
+log.Debug(context.Background(), logs.FrostFSIRCouldNotShutdownHTTPServer,
 zap.String("error", err.Error()),
 )
 }
 if err := pprofCmp.shutdown(); err != nil {
-log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
+log.Debug(context.Background(), logs.FrostFSIRCouldNotShutdownHTTPServer,
 zap.String("error", err.Error()),
 )
 }
 
 if err := sdnotify.ClearStatus(); err != nil {
-log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
+log.Error(context.Background(), logs.FailedToReportStatusToSystemd, zap.Error(err))
 }
 }

@@ -1,6 +1,7 @@
 package main
 
 import (
+"context"
 "runtime"
 
 "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -52,11 +53,11 @@ func (c *pprofComponent) needReload() bool {
 }
 
 func (c *pprofComponent) reload() {
-log.Info("reload " + c.name)
+log.Info(context.Background(), "reload "+c.name)
 if c.needReload() {
-log.Info(c.name + " config updated")
+log.Info(context.Background(), c.name+" config updated")
 if err := c.shutdown(); err != nil {
-log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
+log.Debug(context.Background(), logs.FrostFSIRCouldNotShutdownHTTPServer,
 zap.String("error", err.Error()))
 return
 }

@@ -400,13 +400,13 @@ type internals struct {
 func (c *cfg) startMaintenance() {
 c.isMaintenance.Store(true)
 c.cfgNetmap.state.setControlNetmapStatus(control.NetmapStatus_MAINTENANCE)
-c.log.Info(logs.FrostFSNodeStartedLocalNodesMaintenance)
+c.log.Info(context.Background(), logs.FrostFSNodeStartedLocalNodesMaintenance)
 }
 
 // stops node's maintenance.
 func (c *internals) stopMaintenance() {
 if c.isMaintenance.CompareAndSwap(true, false) {
-c.log.Info(logs.FrostFSNodeStoppedLocalNodesMaintenance)
+c.log.Info(context.Background(), logs.FrostFSNodeStoppedLocalNodesMaintenance)
 }
 }
 
@@ -705,7 +705,7 @@ func initCfg(appCfg *config.Config) *cfg {
 log, err := logger.NewLogger(logPrm)
 fatalOnErr(err)
 if loggerconfig.ToLokiConfig(appCfg).Enabled {
-log.Logger = log.Logger.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
+log.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
 lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(appCfg))
 return lokiCore
 }))
@@ -1103,10 +1103,10 @@ func initLocalStorage(ctx context.Context, c *cfg) {
 shard.WithTombstoneSource(c.createTombstoneSource()),
 shard.WithContainerInfoProvider(c.createContainerInfoProvider(ctx)))...)
 if err != nil {
-c.log.Error(logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err))
+c.log.Error(ctx, logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err))
 } else {
 shardsAttached++
-c.log.Info(logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id))
+c.log.Info(ctx, logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id))
 }
 }
 if shardsAttached == 0 {
@@ -1116,15 +1116,15 @@ func initLocalStorage(ctx context.Context, c *cfg) {
 c.cfgObject.cfgLocalStorage.localStorage = ls
 
 c.onShutdown(func() {
-c.log.Info(logs.FrostFSNodeClosingComponentsOfTheStorageEngine)
+c.log.Info(ctx, logs.FrostFSNodeClosingComponentsOfTheStorageEngine)
 
 err := ls.Close(context.WithoutCancel(ctx))
 if err != nil {
-c.log.Info(logs.FrostFSNodeStorageEngineClosingFailure,
+c.log.Info(ctx, logs.FrostFSNodeStorageEngineClosingFailure,
 zap.String("error", err.Error()),
 )
 } else {
-c.log.Info(logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
+c.log.Info(ctx, logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
 }
 })
 }
@@ -1132,7 +1132,7 @@ func initLocalStorage(ctx context.Context, c *cfg) {
 func initAccessPolicyEngine(_ context.Context, c *cfg) {
 var localOverrideDB chainbase.LocalOverrideDatabase
 if nodeconfig.PersistentPolicyRules(c.appCfg).Path() == "" {
-c.log.Warn(logs.FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed)
+c.log.Warn(context.Background(), logs.FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed)
 localOverrideDB = chainbase.NewInmemoryLocalOverrideDatabase()
 } else {
 localOverrideDB = chainbase.NewBoltLocalOverrideDatabase(
@@ -1157,7 +1157,7 @@ func initAccessPolicyEngine(_ context.Context, c *cfg) {
 
 c.onShutdown(func() {
 if err := ape.LocalOverrideDatabaseCore().Close(); err != nil {
-c.log.Warn(logs.FrostFSNodeAccessPolicyEngineClosingFailure,
+c.log.Warn(context.Background(), logs.FrostFSNodeAccessPolicyEngineClosingFailure,
 zap.Error(err),
 )
 }
@@ -1209,7 +1209,7 @@ func (c *cfg) setContractNodeInfo(ni *netmap.NodeInfo) {
 func (c *cfg) updateContractNodeInfo(epoch uint64) {
 ni, err := c.netmapLocalNodeState(epoch)
 if err != nil {
-c.log.Error(logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
+c.log.Error(context.Background(), logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
 zap.Uint64("epoch", epoch),
 zap.String("error", err.Error()))
 return
@@ -1245,13 +1245,13 @@ func (c *cfg) bootstrap() error {
 // switch to online except when under maintenance
 st := c.cfgNetmap.state.controlNetmapStatus()
 if st == control.NetmapStatus_MAINTENANCE {
-c.log.Info(logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
+c.log.Info(context.Background(), logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
 return c.bootstrapWithState(func(ni *netmap.NodeInfo) {
 ni.SetStatus(netmap.Maintenance)
 })
 }
 
-c.log.Info(logs.FrostFSNodeBootstrappingWithOnlineState,
+c.log.Info(context.Background(), logs.FrostFSNodeBootstrappingWithOnlineState,
 zap.Stringer("previous", st),
 )
 
@@ -1280,19 +1280,19 @@ func (c *cfg) signalWatcher(ctx context.Context) {
 // signals causing application to shut down should have priority over
 // reconfiguration signal
 case <-ch:
-c.log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
+c.log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
 
 c.shutdown()
 
-c.log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
+c.log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
 return
 case err := <-c.internalErr: // internal application error
-c.log.Warn(logs.FrostFSNodeInternalApplicationError,
+c.log.Warn(ctx, logs.FrostFSNodeInternalApplicationError,
 zap.String("message", err.Error()))
 
 c.shutdown()
 
-c.log.Info(logs.FrostFSNodeInternalErrorProcessingIsComplete)
+c.log.Info(ctx, logs.FrostFSNodeInternalErrorProcessingIsComplete)
 return
 default:
 // block until any signal is receieved
@@ -1300,19 +1300,19 @@ func (c *cfg) signalWatcher(ctx context.Context) {
 case <-sighupCh:
 c.reloadConfig(ctx)
 case <-ch:
-c.log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
+c.log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
 
 c.shutdown()
 
-c.log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
+c.log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
 return
 case err := <-c.internalErr: // internal application error
-c.log.Warn(logs.FrostFSNodeInternalApplicationError,
+c.log.Warn(ctx, logs.FrostFSNodeInternalApplicationError,
 zap.String("message", err.Error()))
 
 c.shutdown()
 
-c.log.Info(logs.FrostFSNodeInternalErrorProcessingIsComplete)
+c.log.Info(ctx, logs.FrostFSNodeInternalErrorProcessingIsComplete)
 return
 }
 }
@@ -1320,17 +1320,17 @@ func (c *cfg) signalWatcher(ctx context.Context) {
 }
 
 func (c *cfg) reloadConfig(ctx context.Context) {
-c.log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
+c.log.Info(ctx, logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
 
 if !c.compareAndSwapHealthStatus(control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
-c.log.Info(logs.FrostFSNodeSIGHUPSkip)
+c.log.Info(ctx, logs.FrostFSNodeSIGHUPSkip)
 return
 }
 defer c.compareAndSwapHealthStatus(control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
 
 err := c.reloadAppConfig()
 if err != nil {
-c.log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
+c.log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err))
 return
 }
 
@@ -1341,7 +1341,7 @@ func (c *cfg) reloadConfig(ctx context.Context) {
 
 logPrm, err := c.loggerPrm()
 if err != nil {
-c.log.Error(logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err))
+c.log.Error(ctx, logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err))
 return
 }
 
@@ -1362,25 +1362,25 @@ func (c *cfg) reloadConfig(ctx context.Context) {
 
 err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg)
 if err != nil {
-c.log.Error(logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err))
+c.log.Error(ctx, logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err))
 return
 }
 
 for _, component := range components {
 err = component.reloadFunc()
 if err != nil {
-c.log.Error(logs.FrostFSNodeUpdatedConfigurationApplying,
+c.log.Error(ctx, logs.FrostFSNodeUpdatedConfigurationApplying,
 zap.String("component", component.name),
 zap.Error(err))
 }
 }
 
 if err := c.dialerSource.Update(internalNetConfig(c.appCfg, c.metricsCollector.MultinetMetrics())); err != nil {
-c.log.Error(logs.FailedToUpdateMultinetConfiguration, zap.Error(err))
+c.log.Error(ctx, logs.FailedToUpdateMultinetConfiguration, zap.Error(err))
 return
 }
 
-c.log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
+c.log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
 }
 
 func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
@@ -1403,7 +1403,7 @@ func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
 }
 updated, err := tracing.Setup(ctx, *traceConfig)
 if updated {
-c.log.Info(logs.FrostFSNodeTracingConfigationUpdated)
+c.log.Info(ctx, logs.FrostFSNodeTracingConfigationUpdated)
 }
 return err
 }})
@@ -1438,7 +1438,7 @@ func (c *cfg) reloadPools() error {
 func (c *cfg) reloadPool(p *ants.Pool, newSize int, name string) {
 oldSize := p.Cap()
 if oldSize != newSize {
-c.log.Info(logs.FrostFSNodePoolConfigurationUpdate, zap.String("field", name),
+c.log.Info(context.Background(), logs.FrostFSNodePoolConfigurationUpdate, zap.String("field", name),
 zap.Int("old", oldSize), zap.Int("new", newSize))
 p.Tune(newSize)
 }
@@ -1477,11 +1477,11 @@ func (c *cfg) createContainerInfoProvider(ctx context.Context) container.InfoPro
 func (c *cfg) shutdown() {
 old := c.swapHealthStatus(control.HealthStatus_SHUTTING_DOWN)
 if old == control.HealthStatus_SHUTTING_DOWN {
-c.log.Info(logs.FrostFSNodeShutdownSkip)
+c.log.Info(context.Background(), logs.FrostFSNodeShutdownSkip)
 return
 }
 if old == control.HealthStatus_STARTING {
-c.log.Warn(logs.FrostFSNodeShutdownWhenNotReady)
+c.log.Warn(context.Background(), logs.FrostFSNodeShutdownWhenNotReady)
 }
 
 c.ctxCancel()
@@ -1491,6 +1491,6 @@ func (c *cfg) shutdown() {
 }
 
 if err := sdnotify.ClearStatus(); err != nil {
-c.log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
+c.log.Error(context.Background(), logs.FailedToReportStatusToSystemd, zap.Error(err))
 }
 }

@@ -102,13 +102,13 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
 } else {
 // unlike removal, we expect successful receive of the container
 // after successful creation, so logging can be useful
-c.log.Error(logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification,
+c.log.Error(context.Background(), logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification,
 zap.Stringer("id", ev.ID),
 zap.Error(err),
 )
 }
 
-c.log.Debug(logs.FrostFSNodeContainerCreationEventsReceipt,
+c.log.Debug(context.Background(), logs.FrostFSNodeContainerCreationEventsReceipt,
 zap.Stringer("id", ev.ID),
 )
 })
@@ -116,7 +116,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
 subscribeToContainerRemoval(c, func(e event.Event) {
 ev := e.(containerEvent.DeleteSuccess)
 containerCache.handleRemoval(ev.ID)
-c.log.Debug(logs.FrostFSNodeContainerRemovalEventsReceipt,
+c.log.Debug(context.Background(), logs.FrostFSNodeContainerRemovalEventsReceipt,
 zap.Stringer("id", ev.ID),
 )
 })

@@ -46,7 +46,7 @@ func initControlService(c *cfg) {
 
 lis, err := net.Listen("tcp", endpoint)
 if err != nil {
-c.log.Error(logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err))
+c.log.Error(context.Background(), logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err))
 return
 }
 
@@ -60,7 +60,7 @@ func initControlService(c *cfg) {
 
 c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) {
 runAndLog(ctx, c, serviceNameControl, false, func(context.Context, *cfg) {
-c.log.Info(logs.FrostFSNodeStartListeningEndpoint,
+c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint,
 zap.String("service", serviceNameControl),
 zap.String("endpoint", endpoint))
 fatalOnErr(c.cfgControlService.server.Serve(lis))
@@ -113,6 +113,6 @@ func (c *cfg) notifySystemd(st control.HealthStatus) {
 err = sdnotify.Status(fmt.Sprintf("%v", st))
 }
 if err != nil {
-c.log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
+c.log.Error(context.Background(), logs.FailedToReportStatusToSystemd, zap.Error(err))
 }
 }

@ -1,6 +1,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"net"
|
||||
|
@ -30,7 +31,7 @@ func initGRPC(c *cfg) {
|
|||
lis, err := net.Listen("tcp", sc.Endpoint())
|
||||
if err != nil {
|
||||
c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(sc.Endpoint())
|
||||
c.log.Error(logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
|
||||
c.log.Error(context.Background(), logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
|
||||
endpointsToReconnect = append(endpointsToReconnect, sc.Endpoint())
|
||||
return
|
||||
}
|
||||
|
@ -76,19 +77,19 @@ func scheduleReconnect(endpoint string, c *cfg) {
|
|||
}
|
||||
|
||||
func tryReconnect(endpoint string, c *cfg) bool {
|
||||
c.log.Info(logs.FrostFSNodeGRPCReconnecting, zap.String("endpoint", endpoint))
|
||||
c.log.Info(context.Background(), logs.FrostFSNodeGRPCReconnecting, zap.String("endpoint", endpoint))
|
||||
|
||||
serverOpts, found := getGRPCEndpointOpts(endpoint, c)
|
||||
if !found {
|
||||
c.log.Warn(logs.FrostFSNodeGRPCServerConfigNotFound, zap.String("endpoint", endpoint))
|
||||
c.log.Warn(context.Background(), logs.FrostFSNodeGRPCServerConfigNotFound, zap.String("endpoint", endpoint))
|
||||
return true
|
||||
}
|
||||
|
||||
lis, err := net.Listen("tcp", endpoint)
|
||||
if err != nil {
|
||||
c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(endpoint)
|
||||
c.log.Error(logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
|
||||
c.log.Warn(logs.FrostFSNodeGRPCReconnectFailed, zap.Duration("next_try_in", c.cfgGRPC.reconnectTimeout))
|
||||
c.log.Error(context.Background(), logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
|
||||
c.log.Warn(context.Background(), logs.FrostFSNodeGRPCReconnectFailed, zap.Duration("next_try_in", c.cfgGRPC.reconnectTimeout))
|
||||
return false
|
||||
}
|
||||
c.metricsCollector.GrpcServerMetrics().MarkHealthy(endpoint)
|
||||
|
@ -101,7 +102,7 @@ func tryReconnect(endpoint string, c *cfg) bool {
|
|||
|
||||
c.cfgGRPC.appendAndHandle(endpoint, lis, srv)
|
||||
|
||||
c.log.Info(logs.FrostFSNodeGRPCReconnectedSuccessfully, zap.String("endpoint", endpoint))
|
||||
c.log.Info(context.Background(), logs.FrostFSNodeGRPCReconnectedSuccessfully, zap.String("endpoint", endpoint))
|
||||
return true
|
||||
}
|
||||
|
||||
|
@ -143,7 +144,7 @@ func getGrpcServerOpts(c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool
|
|||
if tlsCfg != nil {
|
||||
cert, err := tls.LoadX509KeyPair(tlsCfg.CertificateFile(), tlsCfg.KeyFile())
|
||||
if err != nil {
|
||||
c.log.Error(logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err))
|
||||
c.log.Error(context.Background(), logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err))
|
||||
return nil, false
|
||||
}
|
||||
|
||||
|
@ -180,21 +181,21 @@ func serveGRPC(c *cfg) {
|
|||
|
||||
go func() {
|
||||
defer func() {
|
||||
c.log.Info(logs.FrostFSNodeStopListeningGRPCEndpoint,
|
||||
c.log.Info(context.Background(), logs.FrostFSNodeStopListeningGRPCEndpoint,
|
||||
zap.Stringer("endpoint", l.Addr()),
|
||||
)
|
||||
|
||||
c.wg.Done()
|
||||
}()
|
||||
|
||||
c.log.Info(logs.FrostFSNodeStartListeningEndpoint,
|
||||
c.log.Info(context.Background(), logs.FrostFSNodeStartListeningEndpoint,
|
||||
zap.String("service", "gRPC"),
|
||||
zap.Stringer("endpoint", l.Addr()),
|
||||
)
|
||||
|
||||
if err := s.Serve(l); err != nil {
|
||||
c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(e)
|
||||
c.log.Error(logs.FrostFSNodeGRPCServerError, zap.Error(err))
|
||||
c.log.Error(context.Background(), logs.FrostFSNodeGRPCServerError, zap.Error(err))
|
||||
c.cfgGRPC.dropConnection(e)
|
||||
scheduleReconnect(e, c)
|
||||
}
|
||||
|
@ -203,9 +204,9 @@ func serveGRPC(c *cfg) {
|
|||
}
|
||||
|
||||
func stopGRPC(name string, s *grpc.Server, l *logger.Logger) {
|
||||
l = &logger.Logger{Logger: l.With(zap.String("name", name))}
|
||||
l = l.With(zap.String("name", name))
|
||||
|
||||
l.Info(logs.FrostFSNodeStoppingGRPCServer)
|
||||
l.Info(context.Background(), logs.FrostFSNodeStoppingGRPCServer)
|
||||
|
||||
// GracefulStop() may freeze forever, see #1270
|
||||
done := make(chan struct{})
|
||||
|
@ -217,9 +218,9 @@ func stopGRPC(name string, s *grpc.Server, l *logger.Logger) {
|
|||
select {
|
||||
case <-done:
|
||||
case <-time.After(1 * time.Minute):
|
||||
l.Info(logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop)
|
||||
l.Info(context.Background(), logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop)
|
||||
s.Stop()
|
||||
}
|
||||
|
||||
l.Info(logs.FrostFSNodeGRPCServerStoppedSuccessfully)
|
||||
l.Info(context.Background(), logs.FrostFSNodeGRPCServerStoppedSuccessfully)
|
||||
}
|
||||
|
|
|
@ -22,7 +22,7 @@ type httpComponent struct {
|
|||
|
||||
func (cmp *httpComponent) init(c *cfg) {
|
||||
if !cmp.enabled {
|
||||
c.log.Info(cmp.name + " is disabled")
|
||||
c.log.Info(context.Background(), cmp.name+" is disabled")
|
||||
return
|
||||
}
|
||||
// Init server with parameters
|
||||
|
@ -39,7 +39,7 @@ func (cmp *httpComponent) init(c *cfg) {
|
|||
go func() {
|
||||
defer c.wg.Done()
|
||||
|
||||
c.log.Info(logs.FrostFSNodeStartListeningEndpoint,
|
||||
c.log.Info(context.Background(), logs.FrostFSNodeStartListeningEndpoint,
|
||||
zap.String("service", cmp.name),
|
||||
zap.String("endpoint", cmp.address))
|
||||
fatalOnErr(srv.Serve())
|
||||
|
|
|
@ -73,9 +73,9 @@ func main() {
|
|||
}
|
||||
|
||||
func initAndLog(c *cfg, name string, initializer func(*cfg)) {
|
||||
c.log.Info(fmt.Sprintf("initializing %s service...", name))
|
||||
c.log.Info(context.Background(), fmt.Sprintf("initializing %s service...", name))
|
||||
initializer(c)
|
||||
c.log.Info(name + " service has been successfully initialized")
|
||||
c.log.Info(context.Background(), name+" service has been successfully initialized")
|
||||
}
|
||||
|
||||
func initApp(ctx context.Context, c *cfg) {
|
||||
|
@ -120,25 +120,25 @@ func initApp(ctx context.Context, c *cfg) {
|
|||
}
|
||||
|
||||
func runAndLog(ctx context.Context, c *cfg, name string, logSuccess bool, starter func(context.Context, *cfg)) {
|
||||
c.log.Info(fmt.Sprintf("starting %s service...", name))
|
||||
c.log.Info(ctx, fmt.Sprintf("starting %s service...", name))
|
||||
starter(ctx, c)
|
||||
|
||||
if logSuccess {
|
||||
c.log.Info(name + " service started successfully")
|
||||
c.log.Info(ctx, name+" service started successfully")
|
||||
}
|
||||
}
|
||||
|
||||
func stopAndLog(c *cfg, name string, stopper func() error) {
|
||||
c.log.Debug(fmt.Sprintf("shutting down %s service", name))
|
||||
c.log.Debug(context.Background(), fmt.Sprintf("shutting down %s service", name))
|
||||
|
||||
err := stopper()
|
||||
if err != nil {
|
||||
c.log.Debug(fmt.Sprintf("could not shutdown %s server", name),
|
||||
c.log.Debug(context.Background(), fmt.Sprintf("could not shutdown %s server", name),
|
||||
zap.String("error", err.Error()),
|
||||
)
|
||||
}
|
||||
|
||||
c.log.Debug(name + " service has been stopped")
|
||||
c.log.Debug(context.Background(), name+" service has been stopped")
|
||||
}
|
||||
|
||||
func bootUp(ctx context.Context, c *cfg) {
|
||||
|
@ -150,7 +150,7 @@ func bootUp(ctx context.Context, c *cfg) {
|
|||
}
|
||||
|
||||
func wait(c *cfg) {
|
||||
c.log.Info(logs.CommonApplicationStarted,
|
||||
c.log.Info(context.Background(), logs.CommonApplicationStarted,
|
||||
zap.String("version", misc.Version))
|
||||
|
||||
<-c.done // graceful shutdown
|
||||
|
@ -160,12 +160,12 @@ func wait(c *cfg) {
|
|||
go func() {
|
||||
defer drain.Done()
|
||||
for err := range c.internalErr {
|
||||
c.log.Warn(logs.FrostFSNodeInternalApplicationError,
|
||||
c.log.Warn(context.Background(), logs.FrostFSNodeInternalApplicationError,
|
||||
zap.String("message", err.Error()))
|
||||
}
|
||||
}()
|
||||
|
||||
c.log.Debug(logs.FrostFSNodeWaitingForAllProcessesToStop)
|
||||
c.log.Debug(context.Background(), logs.FrostFSNodeWaitingForAllProcessesToStop)
|
||||
|
||||
c.wg.Wait()
|
||||
|
||||
|
|
|
@ -48,7 +48,7 @@ func (c *cfg) initMorphComponents(ctx context.Context) {
|
|||
fatalOnErr(err)
|
||||
}
|
||||
|
||||
c.log.Info(logs.FrostFSNodeNotarySupport,
|
||||
c.log.Info(ctx, logs.FrostFSNodeNotarySupport,
|
||||
zap.Bool("sidechain_enabled", c.cfgMorph.notaryEnabled),
|
||||
)
|
||||
|
||||
|
@ -64,7 +64,7 @@ func (c *cfg) initMorphComponents(ctx context.Context) {
|
|||
msPerBlock, err := c.cfgMorph.client.MsPerBlock()
|
||||
fatalOnErr(err)
|
||||
c.cfgMorph.cacheTTL = time.Duration(msPerBlock) * time.Millisecond
|
||||
c.log.Debug(logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL))
|
||||
c.log.Debug(ctx, logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL))
|
||||
}
|
||||
|
||||
if c.cfgMorph.cacheTTL < 0 {
|
||||
|
@ -102,7 +102,7 @@ func initMorphClient(ctx context.Context, c *cfg) {
|
|||
client.WithDialerSource(c.dialerSource),
|
||||
)
|
||||
if err != nil {
|
||||
c.log.Info(logs.FrostFSNodeFailedToCreateNeoRPCClient,
|
||||
c.log.Info(ctx, logs.FrostFSNodeFailedToCreateNeoRPCClient,
|
||||
zap.Any("endpoints", addresses),
|
||||
zap.String("error", err.Error()),
|
||||
)
|
||||
|
@ -111,12 +111,12 @@ func initMorphClient(ctx context.Context, c *cfg) {
|
|||
}
|
||||
|
||||
c.onShutdown(func() {
|
||||
c.log.Info(logs.FrostFSNodeClosingMorphComponents)
|
||||
c.log.Info(ctx, logs.FrostFSNodeClosingMorphComponents)
|
||||
cli.Close()
|
||||
})
|
||||
|
||||
if err := cli.SetGroupSignerScope(); err != nil {
|
||||
c.log.Info(logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err))
|
||||
c.log.Info(ctx, logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err))
|
||||
}
|
||||
|
||||
c.cfgMorph.client = cli
|
||||
|
@ -136,7 +136,7 @@ func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) {
|
|||
// non-error deposit with an empty TX hash means
|
||||
// that the deposit has already been made; no
|
||||
// need to wait it.
|
||||
c.log.Info(logs.FrostFSNodeNotaryDepositHasAlreadyBeenMade)
|
||||
c.log.Info(ctx, logs.FrostFSNodeNotaryDepositHasAlreadyBeenMade)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -202,7 +202,7 @@ func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256, vub uint32)
|
|||
return fmt.Errorf("could not wait for notary deposit persists in chain: %w", err)
|
||||
}
|
||||
if res.Execution.VMState.HasFlag(vmstate.Halt) {
|
||||
c.log.Info(logs.ClientNotaryDepositTransactionWasSuccessfullyPersisted)
|
||||
c.log.Info(ctx, logs.ClientNotaryDepositTransactionWasSuccessfullyPersisted)
|
||||
return nil
|
||||
}
|
||||
return errNotaryDepositFail
|
||||
|
@ -217,7 +217,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
|
|||
fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey)
|
||||
if err != nil {
|
||||
fromSideChainBlock = 0
|
||||
c.log.Warn(logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
|
||||
c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
|
||||
}
|
||||
|
||||
subs, err = subscriber.New(ctx, &subscriber.Params{
|
||||
|
@ -246,7 +246,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
|
|||
setNetmapNotificationParser(c, newEpochNotification, func(src *state.ContainedNotificationEvent) (event.Event, error) {
|
||||
res, err := netmapEvent.ParseNewEpoch(src)
|
||||
if err == nil {
|
||||
c.log.Info(logs.FrostFSNodeNewEpochEventFromSidechain,
|
||||
c.log.Info(ctx, logs.FrostFSNodeNewEpochEventFromSidechain,
|
||||
zap.Uint64("number", res.(netmapEvent.NewEpoch).EpochNumber()),
|
||||
)
|
||||
}
|
||||
|
@ -257,11 +257,11 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
|
|||
registerNotificationHandlers(c.cfgContainer.scriptHash, lis, c.cfgContainer.parsers, c.cfgContainer.subscribers)
|
||||
|
||||
registerBlockHandler(lis, func(block *block.Block) {
|
||||
c.log.Debug(logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index))
|
||||
c.log.Debug(ctx, logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index))
|
||||
|
||||
err = c.persistate.SetUInt32(persistateSideChainLastBlockKey, block.Index)
|
||||
if err != nil {
|
||||
c.log.Warn(logs.FrostFSNodeCantUpdatePersistentState,
|
||||
c.log.Warn(ctx, logs.FrostFSNodeCantUpdatePersistentState,
|
||||
zap.String("chain", "side"),
|
||||
zap.Uint32("block_index", block.Index))
|
||||
}
|
||||
|
|
|
@ -189,7 +189,7 @@ func addNewEpochNotificationHandlers(c *cfg) {
|
|||
}
|
||||
|
||||
if err := c.bootstrap(); err != nil {
|
||||
c.log.Warn(logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err))
|
||||
c.log.Warn(context.Background(), logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err))
|
||||
}
|
||||
})
|
||||
|
||||
|
@ -197,7 +197,7 @@ func addNewEpochNotificationHandlers(c *cfg) {
|
|||
addNewEpochAsyncNotificationHandler(c, func(_ event.Event) {
|
||||
_, _, err := makeNotaryDeposit(c)
|
||||
if err != nil {
|
||||
c.log.Error(logs.FrostFSNodeCouldNotMakeNotaryDeposit,
|
||||
c.log.Error(context.Background(), logs.FrostFSNodeCouldNotMakeNotaryDeposit,
|
||||
zap.String("error", err.Error()),
|
||||
)
|
||||
}
|
||||
|
@ -210,7 +210,7 @@ func addNewEpochNotificationHandlers(c *cfg) {
|
|||
func bootstrapNode(c *cfg) {
|
||||
if c.needBootstrap() {
|
||||
if c.IsMaintenance() {
|
||||
c.log.Info(logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap)
|
||||
c.log.Info(context.Background(), logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap)
|
||||
return
|
||||
}
|
||||
err := c.bootstrap()
|
||||
|
@ -250,7 +250,7 @@ func initNetmapState(c *cfg) {
|
|||
|
||||
stateWord := nodeState(ni)
|
||||
|
||||
c.log.Info(logs.FrostFSNodeInitialNetworkState,
|
||||
c.log.Info(context.Background(), logs.FrostFSNodeInitialNetworkState,
|
||||
zap.Uint64("epoch", epoch),
|
||||
zap.String("state", stateWord),
|
||||
)
|
||||
|
@ -307,7 +307,7 @@ func (c *cfg) netmapInitLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error
|
|||
if nmState != candidateState {
|
||||
// This happens when the node was switched to maintenance without epoch tick.
|
||||
// We expect it to continue staying in maintenance.
|
||||
c.log.Info(logs.CandidateStatusPriority,
|
||||
c.log.Info(context.Background(), logs.CandidateStatusPriority,
|
||||
zap.String("netmap", nmState),
|
||||
zap.String("candidate", candidateState))
|
||||
}
|
||||
|
|
|
@ -58,7 +58,7 @@ type objectSvc struct {
|
|||
func (c *cfg) MaxObjectSize() uint64 {
|
||||
sz, err := c.cfgNetmap.wrapper.MaxObjectSize()
|
||||
if err != nil {
|
||||
c.log.Error(logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
|
||||
c.log.Error(context.Background(), logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
|
||||
zap.String("error", err.Error()),
|
||||
)
|
||||
}
|
||||
|
@ -223,7 +223,7 @@ func initObjectService(c *cfg) {
|
|||
|
||||
func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.ClientCache) {
|
||||
if policerconfig.UnsafeDisable(c.appCfg) {
|
||||
c.log.Warn(logs.FrostFSNodePolicerIsDisabled)
|
||||
c.log.Warn(context.Background(), logs.FrostFSNodePolicerIsDisabled)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -287,7 +287,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
|
|||
|
||||
_, err := ls.Inhume(ctx, inhumePrm)
|
||||
if err != nil {
|
||||
c.log.Warn(logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
|
||||
c.log.Warn(ctx, logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
|
||||
zap.String("error", err.Error()),
|
||||
)
|
||||
}
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"runtime/debug"
|
||||
|
||||
|
@ -12,14 +13,14 @@ import (
|
|||
func setRuntimeParameters(c *cfg) {
|
||||
if len(os.Getenv("GOMEMLIMIT")) != 0 {
|
||||
// default limit < yaml limit < app env limit < GOMEMLIMIT
|
||||
c.log.Warn(logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT)
|
||||
c.log.Warn(context.Background(), logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT)
|
||||
return
|
||||
}
|
||||
|
||||
memLimitBytes := runtime.GCMemoryLimitBytes(c.appCfg)
|
||||
previous := debug.SetMemoryLimit(memLimitBytes)
|
||||
if memLimitBytes != previous {
|
||||
c.log.Info(logs.RuntimeSoftMemoryLimitUpdated,
|
||||
c.log.Info(context.Background(), logs.RuntimeSoftMemoryLimitUpdated,
|
||||
zap.Int64("new_value", memLimitBytes),
|
||||
zap.Int64("old_value", previous))
|
||||
}
|
||||
|
|
|
@ -13,12 +13,12 @@ import (
|
|||
func initTracing(ctx context.Context, c *cfg) {
|
||||
conf, err := tracingconfig.ToTracingConfig(c.appCfg)
|
||||
if err != nil {
|
||||
c.log.Error(logs.FrostFSNodeFailedInitTracing, zap.Error(err))
|
||||
c.log.Error(ctx, logs.FrostFSNodeFailedInitTracing, zap.Error(err))
|
||||
return
|
||||
}
|
||||
_, err = tracing.Setup(ctx, *conf)
|
||||
if err != nil {
|
||||
c.log.Error(logs.FrostFSNodeFailedInitTracing, zap.Error(err))
|
||||
c.log.Error(ctx, logs.FrostFSNodeFailedInitTracing, zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -29,7 +29,7 @@ func initTracing(ctx context.Context, c *cfg) {
|
|||
defer cancel()
|
||||
err := tracing.Shutdown(ctx) // cfg context cancels before close
|
||||
if err != nil {
|
||||
c.log.Error(logs.FrostFSNodeFailedShutdownTracing, zap.Error(err))
|
||||
c.log.Error(ctx, logs.FrostFSNodeFailedShutdownTracing, zap.Error(err))
|
||||
}
|
||||
},
|
||||
})
|
||||
|
|
|
@ -44,7 +44,7 @@ func (c cnrSource) List() ([]cid.ID, error) {
|
|||
func initTreeService(c *cfg) {
|
||||
treeConfig := treeconfig.Tree(c.appCfg)
|
||||
if !treeConfig.Enabled() {
|
||||
c.log.Info(logs.FrostFSNodeTreeServiceIsNotEnabledSkipInitialization)
|
||||
c.log.Info(context.Background(), logs.FrostFSNodeTreeServiceIsNotEnabledSkipInitialization)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -83,7 +83,7 @@ func initTreeService(c *cfg) {
|
|||
addNewEpochNotificationHandler(c, func(_ event.Event) {
|
||||
err := c.treeService.SynchronizeAll()
|
||||
if err != nil {
|
||||
c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
|
||||
c.log.Error(context.Background(), logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
|
||||
}
|
||||
})
|
||||
} else {
|
||||
|
@ -94,7 +94,7 @@ func initTreeService(c *cfg) {
|
|||
for range tick.C {
|
||||
err := c.treeService.SynchronizeAll()
|
||||
if err != nil {
|
||||
c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
|
||||
c.log.Error(context.Background(), logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
|
||||
if errors.Is(err, tree.ErrShuttingDown) {
|
||||
return
|
||||
}
|
||||
|
@ -107,11 +107,11 @@ func initTreeService(c *cfg) {
|
|||
ev := e.(containerEvent.DeleteSuccess)
|
||||
|
||||
// This is executed asynchronously, so we don't care about the operation taking some time.
|
||||
c.log.Debug(logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID))
|
||||
c.log.Debug(context.Background(), logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID))
|
||||
err := c.treeService.DropTree(context.Background(), ev.ID, "")
|
||||
if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
|
||||
// Ignore pilorama.ErrTreeNotFound but other errors, including shard.ErrReadOnly, should be logged.
|
||||
c.log.Error(logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved,
|
||||
c.log.Error(context.Background(), logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved,
|
||||
zap.Stringer("cid", ev.ID),
|
||||
zap.String("error", err.Error()))
|
||||
}
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
package audit
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
crypto "git.frostfs.info/TrueCloudLab/frostfs-crypto"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
|
||||
|
@ -37,7 +39,7 @@ func LogRequestWithKey(log *logger.Logger, operation string, key []byte, target
|
|||
object = target.String()
|
||||
}
|
||||
|
||||
log.Info(logs.AuditEventLogRecord,
|
||||
log.Info(context.Background(), logs.AuditEventLogRecord,
|
||||
zap.String("operation", operation),
|
||||
zap.String("object", object),
|
||||
zap.String("subject", subject),
|
||||
|
|
|
@ -65,7 +65,7 @@ func TestFormatValidator_Validate(t *testing.T) {
|
|||
epoch: curEpoch,
|
||||
}),
|
||||
WithLockSource(ls),
|
||||
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
|
||||
WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
|
||||
)
|
||||
|
||||
ownerKey, err := keys.NewPrivateKey()
|
||||
|
@ -290,7 +290,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
|
|||
}),
|
||||
WithLockSource(ls),
|
||||
WithVerifySessionTokenIssuer(false),
|
||||
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
|
||||
WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
|
||||
)
|
||||
|
||||
tok := sessiontest.Object()
|
||||
|
@ -339,7 +339,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
|
|||
},
|
||||
},
|
||||
),
|
||||
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
|
||||
WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
|
||||
)
|
||||
|
||||
tok := sessiontest.Object()
|
||||
|
@ -417,7 +417,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
|
|||
currentEpoch: curEpoch,
|
||||
},
|
||||
),
|
||||
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
|
||||
WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
|
||||
)
|
||||
|
||||
require.NoError(t, v.Validate(context.Background(), obj, false))
|
||||
|
@ -491,7 +491,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
|
|||
currentEpoch: curEpoch,
|
||||
},
|
||||
),
|
||||
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
|
||||
WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
|
||||
)
|
||||
|
||||
require.NoError(t, v.Validate(context.Background(), obj, false))
|
||||
|
@ -567,7 +567,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
|
|||
currentEpoch: curEpoch,
|
||||
},
|
||||
),
|
||||
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
|
||||
WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
|
||||
)
|
||||
|
||||
require.Error(t, v.Validate(context.Background(), obj, false))
|
||||
|
|
|
@ -2,6 +2,7 @@ package object
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
||||
|
@ -64,7 +65,7 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ownerKeyInBytes []byte, idC
|
|||
isInnerRingNode, err := c.isInnerRingKey(ownerKeyInBytes)
|
||||
if err != nil {
|
||||
// do not throw error, try best case matching
|
||||
c.log.Debug(logs.V2CantCheckIfRequestFromInnerRing,
|
||||
c.log.Debug(context.Background(), logs.V2CantCheckIfRequestFromInnerRing,
|
||||
zap.String("error", err.Error()))
|
||||
} else if isInnerRingNode {
|
||||
return &ClassifyResult{
|
||||
|
@ -81,7 +82,7 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ownerKeyInBytes []byte, idC
|
|||
// error might happen if request has `RoleOther` key and placement
|
||||
// is not possible for previous epoch, so
|
||||
// do not throw error, try best case matching
|
||||
c.log.Debug(logs.V2CantCheckIfRequestFromContainerNode,
|
||||
c.log.Debug(context.Background(), logs.V2CantCheckIfRequestFromContainerNode,
|
||||
zap.String("error", err.Error()))
|
||||
} else if isContainerNode {
|
||||
return &ClassifyResult{
|
||||
|
|
|
@ -97,7 +97,7 @@ func (s *Server) initMainnet(ctx context.Context, cfg *viper.Viper, morphChain *
|
|||
fromMainChainBlock, err := s.persistate.UInt32(persistateMainChainLastBlockKey)
|
||||
if err != nil {
|
||||
fromMainChainBlock = 0
|
||||
s.log.Warn(logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.String("error", err.Error()))
|
||||
s.log.Warn(ctx, logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.String("error", err.Error()))
|
||||
}
|
||||
mainnetChain.from = fromMainChainBlock
|
||||
|
||||
|
@ -142,7 +142,7 @@ func (s *Server) initNotaryConfig() {
|
|||
!s.withoutMainNet && s.mainnetClient.ProbeNotary(), // if mainnet disabled then notary flag must be disabled too
|
||||
)
|
||||
|
||||
s.log.Info(logs.InnerringNotarySupport,
|
||||
s.log.Info(context.Background(), logs.InnerringNotarySupport,
|
||||
zap.Bool("sidechain_enabled", true),
|
||||
zap.Bool("mainchain_enabled", !s.mainNotaryConfig.disabled),
|
||||
)
|
||||
|
@ -153,7 +153,7 @@ func (s *Server) createAlphaSync(cfg *viper.Viper, frostfsCli *frostfsClient.Cli
|
|||
|
||||
if s.withoutMainNet || cfg.GetBool("governance.disable") {
|
||||
alphaSync = func(event.Event) {
|
||||
s.log.Debug(logs.InnerringAlphabetKeysSyncIsDisabled)
|
||||
s.log.Debug(context.Background(), logs.InnerringAlphabetKeysSyncIsDisabled)
|
||||
}
|
||||
} else {
|
||||
// create governance processor
|
||||
|
@ -307,7 +307,7 @@ func (s *Server) initFrostFSMainnetProcessor(cfg *viper.Viper) error {
|
|||
func (s *Server) initGRPCServer(cfg *viper.Viper, log *logger.Logger, audit *atomic.Bool) error {
|
||||
controlSvcEndpoint := cfg.GetString("control.grpc.endpoint")
|
||||
if controlSvcEndpoint == "" {
|
||||
s.log.Info(logs.InnerringNoControlServerEndpointSpecified)
|
||||
s.log.Info(context.Background(), logs.InnerringNoControlServerEndpointSpecified)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -446,7 +446,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
|
|||
fromSideChainBlock, err := s.persistate.UInt32(persistateSideChainLastBlockKey)
|
||||
if err != nil {
|
||||
fromSideChainBlock = 0
|
||||
s.log.Warn(logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
|
||||
s.log.Warn(ctx, logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
|
||||
}
|
||||
|
||||
morphChain := &chainParams{
|
||||
|
@ -471,7 +471,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
|
|||
return nil, err
|
||||
}
|
||||
if err := s.morphClient.SetGroupSignerScope(); err != nil {
|
||||
morphChain.log.Info(logs.InnerringFailedToSetGroupSignerScope, zap.Error(err))
|
||||
morphChain.log.Info(ctx, logs.InnerringFailedToSetGroupSignerScope, zap.Error(err))
|
||||
}
|
||||
|
||||
return morphChain, nil
|
||||
|
|
|
@ -176,7 +176,7 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
|
|||
err = s.voteForSidechainValidator(prm)
|
||||
if err != nil {
|
||||
// we don't stop inner ring execution on this error
|
||||
s.log.Warn(logs.InnerringCantVoteForPreparedValidators,
|
||||
s.log.Warn(ctx, logs.InnerringCantVoteForPreparedValidators,
|
||||
zap.String("error", err.Error()))
|
||||
}
|
||||
|
||||
|
@ -218,13 +218,13 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
|
|||
|
||||
func (s *Server) registerMorphNewBlockEventHandler() {
|
||||
s.morphListener.RegisterBlockHandler(func(b *block.Block) {
|
||||
s.log.Debug(logs.InnerringNewBlock,
|
||||
s.log.Debug(context.Background(), logs.InnerringNewBlock,
|
||||
zap.Uint32("index", b.Index),
|
||||
)
|
||||
|
||||
err := s.persistate.SetUInt32(persistateSideChainLastBlockKey, b.Index)
|
||||
if err != nil {
|
||||
s.log.Warn(logs.InnerringCantUpdatePersistentState,
|
||||
s.log.Warn(context.Background(), logs.InnerringCantUpdatePersistentState,
|
||||
zap.String("chain", "side"),
|
||||
zap.Uint32("block_index", b.Index))
|
||||
}
|
||||
|
@ -238,7 +238,7 @@ func (s *Server) registerMainnetNewBlockEventHandler() {
|
|||
s.mainnetListener.RegisterBlockHandler(func(b *block.Block) {
|
||||
err := s.persistate.SetUInt32(persistateMainChainLastBlockKey, b.Index)
|
||||
if err != nil {
|
||||
s.log.Warn(logs.InnerringCantUpdatePersistentState,
|
||||
s.log.Warn(context.Background(), logs.InnerringCantUpdatePersistentState,
|
||||
zap.String("chain", "main"),
|
||||
zap.Uint32("block_index", b.Index))
|
||||
}
|
||||
|
@ -307,7 +307,7 @@ func (s *Server) Stop() {
|
|||
|
||||
for _, c := range s.closers {
|
||||
if err := c(); err != nil {
|
||||
s.log.Warn(logs.InnerringCloserError,
|
||||
s.log.Warn(context.Background(), logs.InnerringCloserError,
|
||||
zap.String("error", err.Error()),
|
||||
)
|
||||
}
|
||||
|
@ -438,7 +438,7 @@ func createListener(ctx context.Context, cli *client.Client, p *chainParams) (ev
|
|||
}
|
||||
|
||||
listener, err := event.NewListener(event.ListenerParams{
|
||||
Logger: &logger.Logger{Logger: p.log.With(zap.String("chain", p.name))},
|
||||
Logger: p.log.With(zap.String("chain", p.name)),
|
||||
Subscriber: sub,
|
||||
})
|
||||
if err != nil {
|
||||
|
@ -602,7 +602,7 @@ func (s *Server) initConfigFromBlockchain() error {
|
|||
return err
|
||||
}
|
||||
|
||||
s.log.Debug(logs.InnerringReadConfigFromBlockchain,
|
||||
s.log.Debug(context.Background(), logs.InnerringReadConfigFromBlockchain,
|
||||
zap.Bool("active", s.IsActive()),
|
||||
zap.Bool("alphabet", s.IsAlphabet()),
|
||||
zap.Uint64("epoch", epoch),
|
||||
|
|
|
@ -54,12 +54,12 @@ func (s *Server) notaryHandler(_ event.Event) {
|
|||
if !s.mainNotaryConfig.disabled {
|
||||
_, err := s.depositMainNotary()
|
||||
if err != nil {
|
||||
s.log.Error(logs.InnerringCantMakeNotaryDepositInMainChain, zap.Error(err))
|
||||
s.log.Error(context.Background(), logs.InnerringCantMakeNotaryDepositInMainChain, zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := s.depositSideNotary(); err != nil {
|
||||
s.log.Error(logs.InnerringCantMakeNotaryDepositInSideChain, zap.Error(err))
|
||||
s.log.Error(context.Background(), logs.InnerringCantMakeNotaryDepositInSideChain, zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -81,11 +81,11 @@ func (s *Server) initNotary(ctx context.Context, deposit depositor, await awaite
|
|||
// non-error deposit with an empty TX hash means
|
||||
// that the deposit has already been made; no
|
||||
// need to wait it.
|
||||
s.log.Info(logs.InnerringNotaryDepositHasAlreadyBeenMade)
|
||||
s.log.Info(ctx, logs.InnerringNotaryDepositHasAlreadyBeenMade)
|
||||
return nil
|
||||
}
|
||||
|
||||
s.log.Info(msg)
|
||||
s.log.Info(ctx, msg)
|
||||
|
||||
return await(ctx, tx)
|
||||
}
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
package alphabet
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
|
||||
|
@ -10,14 +12,14 @@ import (
|
|||
|
||||
func (ap *Processor) HandleGasEmission(ev event.Event) {
|
||||
_ = ev.(timers.NewAlphabetEmitTick)
|
||||
ap.log.Info(logs.AlphabetTick, zap.String("type", "alphabet gas emit"))
|
||||
ap.log.Info(context.Background(), logs.AlphabetTick, zap.String("type", "alphabet gas emit"))
|
||||
|
||||
// send event to the worker pool
|
||||
|
||||
err := processors.SubmitEvent(ap.pool, ap.metrics, "alphabet_emit_gas", ap.processEmit)
|
||||
if err != nil {
|
||||
// there system can be moved into controlled degradation stage
|
||||
ap.log.Warn(logs.AlphabetAlphabetProcessorWorkerPoolDrained,
|
||||
ap.log.Warn(context.Background(), logs.AlphabetAlphabetProcessorWorkerPoolDrained,
|
||||
zap.Int("capacity", ap.pool.Cap()))
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package alphabet
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/elliptic"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
||||
|
@ -16,14 +17,14 @@ const emitMethod = "emit"
|
|||
func (ap *Processor) processEmit() bool {
|
||||
index := ap.irList.AlphabetIndex()
|
||||
if index < 0 {
|
||||
ap.log.Info(logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent)
|
||||
ap.log.Info(context.Background(), logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
contract, ok := ap.alphabetContracts.GetByIndex(index)
|
||||
if !ok {
|
||||
ap.log.Debug(logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent,
|
||||
ap.log.Debug(context.Background(), logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent,
|
||||
zap.Int("index", index))
|
||||
|
||||
return false
|
||||
|
@ -32,20 +33,20 @@ func (ap *Processor) processEmit() bool {
|
|||
// there is no signature collecting, so we don't need extra fee
|
||||
_, err := ap.morphClient.Invoke(contract, 0, emitMethod)
|
||||
if err != nil {
|
||||
ap.log.Warn(logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error()))
|
||||
ap.log.Warn(context.Background(), logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error()))
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
if ap.storageEmission == 0 {
|
||||
ap.log.Info(logs.AlphabetStorageNodeEmissionIsOff)
|
||||
ap.log.Info(context.Background(), logs.AlphabetStorageNodeEmissionIsOff)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
networkMap, err := ap.netmapClient.NetMap()
|
||||
if err != nil {
|
||||
ap.log.Warn(logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes,
|
||||
ap.log.Warn(context.Background(), logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes,
|
||||
zap.String("error", err.Error()))
|
||||
|
||||
return false
|
||||
|
@ -58,7 +59,7 @@ func (ap *Processor) processEmit() bool {
|
|||
ap.pwLock.RUnlock()
|
||||
extraLen := len(pw)
|
||||
|
||||
ap.log.Debug(logs.AlphabetGasEmission,
|
||||
ap.log.Debug(context.Background(), logs.AlphabetGasEmission,
|
||||
zap.Int("network_map", nmLen),
|
||||
zap.Int("extra_wallets", extraLen))
|
||||
|
||||
|
@ -81,7 +82,7 @@ func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerN
|
|||
|
||||
key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256())
|
||||
if err != nil {
|
||||
ap.log.Warn(logs.AlphabetCantParseNodePublicKey,
|
||||
ap.log.Warn(context.Background(), logs.AlphabetCantParseNodePublicKey,
|
||||
zap.String("error", err.Error()))
|
||||
|
||||
continue
|
||||
|
@ -89,7 +90,7 @@ func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerN
|
|||
|
||||
err = ap.morphClient.TransferGas(key.GetScriptHash(), gasPerNode)
|
||||
if err != nil {
|
||||
ap.log.Warn(logs.AlphabetCantTransferGas,
|
||||
ap.log.Warn(context.Background(), logs.AlphabetCantTransferGas,
|
||||
zap.String("receiver", key.Address()),
|
||||
zap.Int64("amount", int64(gasPerNode)),
|
||||
zap.String("error", err.Error()),
|
||||
|
@ -106,7 +107,7 @@ func (ap *Processor) transferGasToExtraNodes(pw []util.Uint160, gasPerNode fixed
|
|||
for i, addr := range pw {
|
||||
receiversLog[i] = addr.StringLE()
|
||||
}
|
||||
ap.log.Warn(logs.AlphabetCantTransferGasToWallet,
|
||||
ap.log.Warn(context.Background(), logs.AlphabetCantTransferGasToWallet,
|
||||
zap.Strings("receivers", receiversLog),
|
||||
zap.Int64("amount", int64(gasPerNode)),
|
||||
zap.String("error", err.Error()),
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package alphabet
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
@ -85,7 +86,7 @@ func New(p *Params) (*Processor, error) {
|
|||
return nil, errors.New("ir/alphabet: global state is not set")
|
||||
}
|
||||
|
||||
p.Log.Debug(logs.AlphabetAlphabetWorkerPool, zap.Int("size", p.PoolSize))
|
||||
p.Log.Debug(context.Background(), logs.AlphabetAlphabetWorkerPool, zap.Int("size", p.PoolSize))
|
||||
|
||||
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
|
||||
if err != nil {
|
||||
|
|
|
@ -1,6 +1,7 @@
package balance

import (
"context"
"encoding/hex"

"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"

@ -12,7 +13,7 @@ import (

func (bp *Processor) handleLock(ev event.Event) {
lock := ev.(balanceEvent.Lock)
bp.log.Info(logs.Notification,
bp.log.Info(context.Background(), logs.Notification,
zap.String("type", "lock"),
zap.String("value", hex.EncodeToString(lock.ID())))

@ -23,7 +24,7 @@ func (bp *Processor) handleLock(ev event.Event) {
})
if err != nil {
// there system can be moved into controlled degradation stage
bp.log.Warn(logs.BalanceBalanceWorkerPoolDrained,
bp.log.Warn(context.Background(), logs.BalanceBalanceWorkerPoolDrained,
zap.Int("capacity", bp.pool.Cap()))
}
}
@ -1,6 +1,8 @@
package balance

import (
"context"

"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
frostfsContract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance"

@ -11,7 +13,7 @@ import (
// back to the withdraw issuer.
func (bp *Processor) processLock(lock *balanceEvent.Lock) bool {
if !bp.alphabetState.IsAlphabet() {
bp.log.Info(logs.BalanceNonAlphabetModeIgnoreBalanceLock)
bp.log.Info(context.Background(), logs.BalanceNonAlphabetModeIgnoreBalanceLock)
return true
}

@ -25,7 +27,7 @@ func (bp *Processor) processLock(lock *balanceEvent.Lock) bool {

err := bp.frostfsClient.Cheque(prm)
if err != nil {
bp.log.Error(logs.BalanceCantSendLockAssetTx, zap.Error(err))
bp.log.Error(context.Background(), logs.BalanceCantSendLockAssetTx, zap.Error(err))
return false
}
@ -1,6 +1,7 @@
package balance

import (
"context"
"errors"
"fmt"

@ -68,7 +69,7 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/balance: balance precision converter is not set")
}

p.Log.Debug(logs.BalanceBalanceWorkerPool, zap.Int("size", p.PoolSize))
p.Log.Debug(context.Background(), logs.BalanceBalanceWorkerPool, zap.Int("size", p.PoolSize))

pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
@ -1,6 +1,7 @@
package container

import (
"context"
"crypto/sha256"

"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"

@ -15,7 +16,7 @@ func (cp *Processor) handlePut(ev event.Event) {
put := ev.(putEvent)

id := sha256.Sum256(put.Container())
cp.log.Info(logs.Notification,
cp.log.Info(context.Background(), logs.Notification,
zap.String("type", "container put"),
zap.String("id", base58.Encode(id[:])))

@ -26,14 +27,14 @@ func (cp *Processor) handlePut(ev event.Event) {
})
if err != nil {
// there system can be moved into controlled degradation stage
cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained,
cp.log.Warn(context.Background(), logs.ContainerContainerProcessorWorkerPoolDrained,
zap.Int("capacity", cp.pool.Cap()))
}
}

func (cp *Processor) handleDelete(ev event.Event) {
del := ev.(containerEvent.Delete)
cp.log.Info(logs.Notification,
cp.log.Info(context.Background(), logs.Notification,
zap.String("type", "container delete"),
zap.String("id", base58.Encode(del.ContainerID())))

@ -44,7 +45,7 @@ func (cp *Processor) handleDelete(ev event.Event) {
})
if err != nil {
// there system can be moved into controlled degradation stage
cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained,
cp.log.Warn(context.Background(), logs.ContainerContainerProcessorWorkerPoolDrained,
zap.Int("capacity", cp.pool.Cap()))
}
}
@ -1,6 +1,7 @@
package container

import (
"context"
"errors"
"fmt"
"strings"

@ -38,7 +39,7 @@ var errContainerAndOwnerNamespaceDontMatch = errors.New("container and owner nam
// and sending approve tx back to the morph.
func (cp *Processor) processContainerPut(put putEvent) bool {
if !cp.alphabetState.IsAlphabet() {
cp.log.Info(logs.ContainerNonAlphabetModeIgnoreContainerPut)
cp.log.Info(context.Background(), logs.ContainerNonAlphabetModeIgnoreContainerPut)
return true
}

@ -48,7 +49,7 @@ func (cp *Processor) processContainerPut(put putEvent) bool {

err := cp.checkPutContainer(ctx)
if err != nil {
cp.log.Error(logs.ContainerPutContainerCheckFailed,
cp.log.Error(context.Background(), logs.ContainerPutContainerCheckFailed,
zap.String("error", err.Error()),
)

@ -56,7 +57,7 @@ func (cp *Processor) processContainerPut(put putEvent) bool {
}

if err := cp.morphClient.NotarySignAndInvokeTX(ctx.e.NotaryRequest().MainTransaction); err != nil {
cp.log.Error(logs.ContainerCouldNotApprovePutContainer,
cp.log.Error(context.Background(), logs.ContainerCouldNotApprovePutContainer,
zap.String("error", err.Error()),
)
return false

@ -105,13 +106,13 @@ func (cp *Processor) checkPutContainer(ctx *putContainerContext) error {
// and sending approve tx back to morph.
func (cp *Processor) processContainerDelete(e containerEvent.Delete) bool {
if !cp.alphabetState.IsAlphabet() {
cp.log.Info(logs.ContainerNonAlphabetModeIgnoreContainerDelete)
cp.log.Info(context.Background(), logs.ContainerNonAlphabetModeIgnoreContainerDelete)
return true
}

err := cp.checkDeleteContainer(e)
if err != nil {
cp.log.Error(logs.ContainerDeleteContainerCheckFailed,
cp.log.Error(context.Background(), logs.ContainerDeleteContainerCheckFailed,
zap.String("error", err.Error()),
)

@ -119,7 +120,7 @@ func (cp *Processor) processContainerDelete(e containerEvent.Delete) bool {
}

if err := cp.morphClient.NotarySignAndInvokeTX(e.NotaryRequest().MainTransaction); err != nil {
cp.log.Error(logs.ContainerCouldNotApproveDeleteContainer,
cp.log.Error(context.Background(), logs.ContainerCouldNotApproveDeleteContainer,
zap.String("error", err.Error()),
)
@ -1,6 +1,7 @@
package container

import (
"context"
"errors"
"fmt"

@ -97,7 +98,7 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/container: FrostFSID client is not set")
}

p.Log.Debug(logs.ContainerContainerWorkerPool, zap.Int("size", p.PoolSize))
p.Log.Debug(context.Background(), logs.ContainerContainerWorkerPool, zap.Int("size", p.PoolSize))

pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
@ -2,6 +2,7 @@ package frostfs

import (
"bytes"
"context"
"encoding/hex"
"slices"

@ -16,7 +17,7 @@ func (np *Processor) handleDeposit(ev event.Event) {
deposit := ev.(frostfsEvent.Deposit)
depositIDBin := bytes.Clone(deposit.ID())
slices.Reverse(depositIDBin)
np.log.Info(logs.Notification,
np.log.Info(context.Background(), logs.Notification,
zap.String("type", "deposit"),
zap.String("id", hex.EncodeToString(depositIDBin)))

@ -27,7 +28,7 @@ func (np *Processor) handleDeposit(ev event.Event) {
})
if err != nil {
// there system can be moved into controlled degradation stage
np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
np.log.Warn(context.Background(), logs.FrostFSFrostfsProcessorWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}

@ -36,7 +37,7 @@ func (np *Processor) handleWithdraw(ev event.Event) {
withdraw := ev.(frostfsEvent.Withdraw)
withdrawBin := bytes.Clone(withdraw.ID())
slices.Reverse(withdrawBin)
np.log.Info(logs.Notification,
np.log.Info(context.Background(), logs.Notification,
zap.String("type", "withdraw"),
zap.String("id", hex.EncodeToString(withdrawBin)))

@ -47,14 +48,14 @@ func (np *Processor) handleWithdraw(ev event.Event) {
})
if err != nil {
// there system can be moved into controlled degradation stage
np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
np.log.Warn(context.Background(), logs.FrostFSFrostfsProcessorWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}

func (np *Processor) handleCheque(ev event.Event) {
cheque := ev.(frostfsEvent.Cheque)
np.log.Info(logs.Notification,
np.log.Info(context.Background(), logs.Notification,
zap.String("type", "cheque"),
zap.String("id", hex.EncodeToString(cheque.ID())))

@ -65,14 +66,14 @@ func (np *Processor) handleCheque(ev event.Event) {
})
if err != nil {
// there system can be moved into controlled degradation stage
np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
np.log.Warn(context.Background(), logs.FrostFSFrostfsProcessorWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}

func (np *Processor) handleConfig(ev event.Event) {
cfg := ev.(frostfsEvent.Config)
np.log.Info(logs.Notification,
np.log.Info(context.Background(), logs.Notification,
zap.String("type", "set config"),
zap.String("key", hex.EncodeToString(cfg.Key())),
zap.String("value", hex.EncodeToString(cfg.Value())))

@ -84,7 +85,7 @@ func (np *Processor) handleConfig(ev event.Event) {
})
if err != nil {
// there system can be moved into controlled degradation stage
np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
np.log.Warn(context.Background(), logs.FrostFSFrostfsProcessorWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
@ -1,6 +1,8 @@
package frostfs

import (
"context"

"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"

@ -17,7 +19,7 @@ const (
// gas in the sidechain.
func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
if !np.alphabetState.IsAlphabet() {
np.log.Info(logs.FrostFSNonAlphabetModeIgnoreDeposit)
np.log.Info(context.Background(), logs.FrostFSNonAlphabetModeIgnoreDeposit)
return true
}

@ -30,7 +32,7 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
// send transferX to a balance contract
err := np.balanceClient.Mint(prm)
if err != nil {
np.log.Error(logs.FrostFSCantTransferAssetsToBalanceContract, zap.Error(err))
np.log.Error(context.Background(), logs.FrostFSCantTransferAssetsToBalanceContract, zap.Error(err))
}

curEpoch := np.epochState.EpochCounter()

@ -44,7 +46,7 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {

val, ok := np.mintEmitCache.Get(receiver.String())
if ok && val+np.mintEmitThreshold >= curEpoch {
np.log.Warn(logs.FrostFSDoubleMintEmissionDeclined,
np.log.Warn(context.Background(), logs.FrostFSDoubleMintEmissionDeclined,
zap.Stringer("receiver", receiver),
zap.Uint64("last_emission", val),
zap.Uint64("current_epoch", curEpoch))

@ -56,12 +58,12 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
// before gas transfer check if the balance is greater than the threshold
balance, err := np.morphClient.GasBalance()
if err != nil {
np.log.Error(logs.FrostFSCantGetGasBalanceOfTheNode, zap.Error(err))
np.log.Error(context.Background(), logs.FrostFSCantGetGasBalanceOfTheNode, zap.Error(err))
return false
}

if balance < np.gasBalanceThreshold {
np.log.Warn(logs.FrostFSGasBalanceThresholdHasBeenReached,
np.log.Warn(context.Background(), logs.FrostFSGasBalanceThresholdHasBeenReached,
zap.Int64("balance", balance),
zap.Int64("threshold", np.gasBalanceThreshold))

@ -70,7 +72,7 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {

err = np.morphClient.TransferGas(receiver, np.mintEmitValue)
if err != nil {
np.log.Error(logs.FrostFSCantTransferNativeGasToReceiver,
np.log.Error(context.Background(), logs.FrostFSCantTransferNativeGasToReceiver,
zap.String("error", err.Error()))

return false

@ -84,14 +86,14 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
// Process withdraw event by locking assets in the balance account.
func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool {
if !np.alphabetState.IsAlphabet() {
np.log.Info(logs.FrostFSNonAlphabetModeIgnoreWithdraw)
np.log.Info(context.Background(), logs.FrostFSNonAlphabetModeIgnoreWithdraw)
return true
}

// create lock account
lock, err := util.Uint160DecodeBytesBE(withdraw.ID()[:util.Uint160Size])
if err != nil {
np.log.Error(logs.FrostFSCantCreateLockAccount, zap.Error(err))
np.log.Error(context.Background(), logs.FrostFSCantCreateLockAccount, zap.Error(err))
return false
}

@ -107,7 +109,7 @@ func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool {

err = np.balanceClient.Lock(prm)
if err != nil {
np.log.Error(logs.FrostFSCantLockAssetsForWithdraw, zap.Error(err))
np.log.Error(context.Background(), logs.FrostFSCantLockAssetsForWithdraw, zap.Error(err))
return false
}

@ -118,7 +120,7 @@ func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool {
// the reserve account.
func (np *Processor) processCheque(cheque frostfsEvent.Cheque) bool {
if !np.alphabetState.IsAlphabet() {
np.log.Info(logs.FrostFSNonAlphabetModeIgnoreCheque)
np.log.Info(context.Background(), logs.FrostFSNonAlphabetModeIgnoreCheque)
return true
}

@ -130,7 +132,7 @@ func (np *Processor) processCheque(cheque frostfsEvent.Cheque) bool {

err := np.balanceClient.Burn(prm)
if err != nil {
np.log.Error(logs.FrostFSCantTransferAssetsToFedContract, zap.Error(err))
np.log.Error(context.Background(), logs.FrostFSCantTransferAssetsToFedContract, zap.Error(err))
return false
}
@ -1,6 +1,8 @@
package frostfs

import (
"context"

"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"

@ -11,7 +13,7 @@ import (
// the sidechain.
func (np *Processor) processConfig(config frostfsEvent.Config) bool {
if !np.alphabetState.IsAlphabet() {
np.log.Info(logs.FrostFSNonAlphabetModeIgnoreConfig)
np.log.Info(context.Background(), logs.FrostFSNonAlphabetModeIgnoreConfig)
return true
}

@ -24,7 +26,7 @@ func (np *Processor) processConfig(config frostfsEvent.Config) bool {

err := np.netmapClient.SetConfig(prm)
if err != nil {
np.log.Error(logs.FrostFSCantRelaySetConfigEvent, zap.Error(err))
np.log.Error(context.Background(), logs.FrostFSCantRelaySetConfigEvent, zap.Error(err))
return false
}
@ -1,6 +1,7 @@
package frostfs

import (
"context"
"errors"
"fmt"
"sync"

@ -110,7 +111,7 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/frostfs: balance precision converter is not set")
}

p.Log.Debug(logs.FrostFSFrostfsWorkerPool, zap.Int("size", p.PoolSize))
p.Log.Debug(context.Background(), logs.FrostFSFrostfsWorkerPool, zap.Int("size", p.PoolSize))

pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
@ -1,6 +1,8 @@
package governance

import (
"context"

"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"

@ -32,7 +34,7 @@ func (gp *Processor) HandleAlphabetSync(e event.Event) {
return
}

gp.log.Info(logs.GovernanceNewEvent, zap.String("type", typ))
gp.log.Info(context.Background(), logs.GovernanceNewEvent, zap.String("type", typ))

// send event to the worker pool

@ -41,7 +43,7 @@ func (gp *Processor) HandleAlphabetSync(e event.Event) {
})
if err != nil {
// there system can be moved into controlled degradation stage
gp.log.Warn(logs.GovernanceGovernanceWorkerPoolDrained,
gp.log.Warn(context.Background(), logs.GovernanceGovernanceWorkerPoolDrained,
zap.Int("capacity", gp.pool.Cap()))
}
}
@ -1,6 +1,7 @@
package governance

import (
"context"
"encoding/binary"
"encoding/hex"
"sort"

@ -20,37 +21,37 @@ const (

func (gp *Processor) processAlphabetSync(txHash util.Uint256) bool {
if !gp.alphabetState.IsAlphabet() {
gp.log.Info(logs.GovernanceNonAlphabetModeIgnoreAlphabetSync)
gp.log.Info(context.Background(), logs.GovernanceNonAlphabetModeIgnoreAlphabetSync)
return true
}

mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList()
if err != nil {
gp.log.Error(logs.GovernanceCantFetchAlphabetListFromMainNet,
gp.log.Error(context.Background(), logs.GovernanceCantFetchAlphabetListFromMainNet,
zap.String("error", err.Error()))
return false
}

sidechainAlphabet, err := gp.morphClient.Committee()
if err != nil {
gp.log.Error(logs.GovernanceCantFetchAlphabetListFromSideChain,
gp.log.Error(context.Background(), logs.GovernanceCantFetchAlphabetListFromSideChain,
zap.String("error", err.Error()))
return false
}

newAlphabet, err := newAlphabetList(sidechainAlphabet, mainnetAlphabet)
if err != nil {
gp.log.Error(logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain,
gp.log.Error(context.Background(), logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain,
zap.String("error", err.Error()))
return false
}

if newAlphabet == nil {
gp.log.Info(logs.GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged)
gp.log.Info(context.Background(), logs.GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged)
return true
}

gp.log.Info(logs.GovernanceAlphabetListHasBeenChangedStartingUpdate,
gp.log.Info(context.Background(), logs.GovernanceAlphabetListHasBeenChangedStartingUpdate,
zap.String("side_chain_alphabet", prettyKeys(sidechainAlphabet)),
zap.String("new_alphabet", prettyKeys(newAlphabet)),
)

@ -63,7 +64,7 @@ func (gp *Processor) processAlphabetSync(txHash util.Uint256) bool {
// 1. Vote to sidechain committee via alphabet contracts.
err = gp.voter.VoteForSidechainValidator(votePrm)
if err != nil {
gp.log.Error(logs.GovernanceCantVoteForSideChainCommittee,
gp.log.Error(context.Background(), logs.GovernanceCantVoteForSideChainCommittee,
zap.String("error", err.Error()))
}

@ -76,7 +77,7 @@ func (gp *Processor) processAlphabetSync(txHash util.Uint256) bool {
// 4. Update FrostFS contract in the mainnet.
gp.updateFrostFSContractInMainnet(newAlphabet)

gp.log.Info(logs.GovernanceFinishedAlphabetListUpdate)
gp.log.Info(context.Background(), logs.GovernanceFinishedAlphabetListUpdate)

return true
}

@ -96,21 +97,21 @@ func prettyKeys(keys keys.PublicKeys) string {
func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) {
innerRing, err := gp.irFetcher.InnerRingKeys()
if err != nil {
gp.log.Error(logs.GovernanceCantFetchInnerRingListFromSideChain,
gp.log.Error(context.Background(), logs.GovernanceCantFetchInnerRingListFromSideChain,
zap.String("error", err.Error()))
return
}

newInnerRing, err := updateInnerRing(innerRing, sidechainAlphabet, newAlphabet)
if err != nil {
gp.log.Error(logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys,
gp.log.Error(context.Background(), logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys,
zap.String("error", err.Error()))
return
}

sort.Sort(newInnerRing)

gp.log.Info(logs.GovernanceUpdateOfTheInnerRingList,
gp.log.Info(context.Background(), logs.GovernanceUpdateOfTheInnerRingList,
zap.String("before", prettyKeys(innerRing)),
zap.String("after", prettyKeys(newInnerRing)),
)

@ -120,7 +121,7 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAl
updPrm.SetHash(txHash)

if err = gp.morphClient.UpdateNeoFSAlphabetList(updPrm); err != nil {
gp.log.Error(logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys,
gp.log.Error(context.Background(), logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys,
zap.String("error", err.Error()))
}
}

@ -133,7 +134,7 @@ func (gp *Processor) updateNotaryRoleInSidechain(newAlphabet keys.PublicKeys, tx

err := gp.morphClient.UpdateNotaryList(updPrm)
if err != nil {
gp.log.Error(logs.GovernanceCantUpdateListOfNotaryNodesInSideChain,
gp.log.Error(context.Background(), logs.GovernanceCantUpdateListOfNotaryNodesInSideChain,
zap.String("error", err.Error()))
}
}

@ -153,7 +154,7 @@ func (gp *Processor) updateFrostFSContractInMainnet(newAlphabet keys.PublicKeys)

err := gp.frostfsClient.AlphabetUpdate(prm)
if err != nil {
gp.log.Error(logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract,
gp.log.Error(context.Background(), logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract,
zap.String("error", err.Error()))
}
}
@ -1,6 +1,7 @@
package netmap

import (
"context"
"encoding/hex"

"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"

@ -13,21 +14,21 @@ import (

func (np *Processor) HandleNewEpochTick(ev event.Event) {
_ = ev.(timerEvent.NewEpochTick)
np.log.Info(logs.NetmapTick, zap.String("type", "epoch"))
np.log.Info(context.Background(), logs.NetmapTick, zap.String("type", "epoch"))

// send an event to the worker pool

err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch_tick", np.processNewEpochTick)
if err != nil {
// there system can be moved into controlled degradation stage
np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
np.log.Warn(context.Background(), logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}

func (np *Processor) handleNewEpoch(ev event.Event) {
epochEvent := ev.(netmapEvent.NewEpoch)
np.log.Info(logs.Notification,
np.log.Info(context.Background(), logs.Notification,
zap.String("type", "new epoch"),
zap.Uint64("value", epochEvent.EpochNumber()))

@ -38,7 +39,7 @@ func (np *Processor) handleNewEpoch(ev event.Event) {
})
if err != nil {
// there system can be moved into controlled degradation stage
np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
np.log.Warn(context.Background(), logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}

@ -46,7 +47,7 @@ func (np *Processor) handleNewEpoch(ev event.Event) {
func (np *Processor) handleAddPeer(ev event.Event) {
newPeer := ev.(netmapEvent.AddPeer)

np.log.Info(logs.Notification,
np.log.Info(context.Background(), logs.Notification,
zap.String("type", "add peer"),
)

@ -57,14 +58,14 @@ func (np *Processor) handleAddPeer(ev event.Event) {
})
if err != nil {
// there system can be moved into controlled degradation stage
np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
np.log.Warn(context.Background(), logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}

func (np *Processor) handleUpdateState(ev event.Event) {
updPeer := ev.(netmapEvent.UpdatePeer)
np.log.Info(logs.Notification,
np.log.Info(context.Background(), logs.Notification,
zap.String("type", "update peer state"),
zap.String("key", hex.EncodeToString(updPeer.PublicKey().Bytes())))

@ -75,21 +76,21 @@ func (np *Processor) handleUpdateState(ev event.Event) {
})
if err != nil {
// there system can be moved into controlled degradation stage
np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
np.log.Warn(context.Background(), logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}

func (np *Processor) handleCleanupTick(ev event.Event) {
if !np.netmapSnapshot.enabled {
np.log.Debug(logs.NetmapNetmapCleanUpRoutineIsDisabled518)
np.log.Debug(context.Background(), logs.NetmapNetmapCleanUpRoutineIsDisabled518)

return
}

cleanup := ev.(netmapCleanupTick)

np.log.Info(logs.NetmapTick, zap.String("type", "netmap cleaner"))
np.log.Info(context.Background(), logs.NetmapTick, zap.String("type", "netmap cleaner"))

// send event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "netmap_cleanup_tick", func() bool {

@ -97,7 +98,7 @@ func (np *Processor) handleCleanupTick(ev event.Event) {
})
if err != nil {
// there system can be moved into controlled degradation stage
np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
np.log.Warn(context.Background(), logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
@ -1,6 +1,8 @@
package netmap

import (
"context"

"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
v2netmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"

@ -9,7 +11,7 @@ import (

func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool {
if !np.alphabetState.IsAlphabet() {
np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick)
np.log.Info(context.Background(), logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick)

return true
}

@ -17,13 +19,13 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool {
err := np.netmapSnapshot.forEachRemoveCandidate(ev.epoch, func(s string) error {
key, err := keys.NewPublicKeyFromString(s)
if err != nil {
np.log.Warn(logs.NetmapCantDecodePublicKeyOfNetmapNode,
np.log.Warn(context.Background(), logs.NetmapCantDecodePublicKeyOfNetmapNode,
zap.String("key", s))

return nil
}

np.log.Info(logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", s))
np.log.Info(context.Background(), logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", s))

// In notary environments we call UpdateStateIR method instead of UpdateState.
// It differs from UpdateState only by name, so we can do this in the same form.

@ -39,13 +41,13 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool {
int64(v2netmap.Offline), key.Bytes(),
)
if err != nil {
np.log.Error(logs.NetmapCantInvokeNetmapUpdateState, zap.Error(err))
np.log.Error(context.Background(), logs.NetmapCantInvokeNetmapUpdateState, zap.Error(err))
}

return nil
})
if err != nil {
np.log.Warn(logs.NetmapCantIterateOnNetmapCleanerCache,
np.log.Warn(context.Background(), logs.NetmapCantIterateOnNetmapCleanerCache,
zap.String("error", err.Error()))
return false
}
@ -1,6 +1,8 @@
package netmap

import (
"context"

"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance"
netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"

@ -14,7 +16,7 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) bool {

epochDuration, err := np.netmapClient.EpochDuration()
if err != nil {
np.log.Warn(logs.NetmapCantGetEpochDuration,
np.log.Warn(context.Background(), logs.NetmapCantGetEpochDuration,
zap.String("error", err.Error()))
} else {
np.epochState.SetEpochDuration(epochDuration)

@ -24,20 +26,20 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) bool {

h, err := np.netmapClient.MorphTxHeight(ev.TxHash())
if err != nil {
np.log.Warn(logs.NetmapCantGetTransactionHeight,
np.log.Warn(context.Background(), logs.NetmapCantGetTransactionHeight,
zap.String("hash", ev.TxHash().StringLE()),
zap.String("error", err.Error()))
}

if err := np.epochTimer.ResetEpochTimer(h); err != nil {
np.log.Warn(logs.NetmapCantResetEpochTimer,
np.log.Warn(context.Background(), logs.NetmapCantResetEpochTimer,
zap.String("error", err.Error()))
}

// get new netmap snapshot
networkMap, err := np.netmapClient.NetMap()
if err != nil {
np.log.Warn(logs.NetmapCantGetNetmapSnapshotToPerformCleanup,
np.log.Warn(context.Background(), logs.NetmapCantGetNetmapSnapshotToPerformCleanup,
zap.String("error", err.Error()))

return false

@ -54,16 +56,16 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) bool {
// Process new epoch tick by invoking new epoch method in network map contract.
func (np *Processor) processNewEpochTick() bool {
if !np.alphabetState.IsAlphabet() {
np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewEpochTick)
np.log.Info(context.Background(), logs.NetmapNonAlphabetModeIgnoreNewEpochTick)
return true
}

nextEpoch := np.epochState.EpochCounter() + 1
np.log.Debug(logs.NetmapNextEpoch, zap.Uint64("value", nextEpoch))
np.log.Debug(context.Background(), logs.NetmapNextEpoch, zap.Uint64("value", nextEpoch))

err := np.netmapClient.NewEpoch(nextEpoch)
if err != nil {
np.log.Error(logs.NetmapCantInvokeNetmapNewEpoch, zap.Error(err))
np.log.Error(context.Background(), logs.NetmapCantInvokeNetmapNewEpoch, zap.Error(err))
return false
}
@ -1,6 +1,7 @@
package netmap

import (
"context"
"encoding/hex"

"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"

@ -14,7 +15,7 @@ import (
// local epoch timer.
func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
if !np.alphabetState.IsAlphabet() {
np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewPeerNotification)
np.log.Info(context.Background(), logs.NetmapNonAlphabetModeIgnoreNewPeerNotification)
return true
}

@ -22,7 +23,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
tx := ev.NotaryRequest().MainTransaction
ok, err := np.netmapClient.MorphIsValidScript(tx.Script, tx.Signers)
if err != nil || !ok {
np.log.Warn(logs.NetmapNonhaltNotaryTransaction,
np.log.Warn(context.Background(), logs.NetmapNonhaltNotaryTransaction,
zap.String("method", "netmap.AddPeer"),
zap.String("hash", tx.Hash().StringLE()),
zap.Error(err))

@ -33,14 +34,14 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
var nodeInfo netmap.NodeInfo
if err := nodeInfo.Unmarshal(ev.Node()); err != nil {
// it will be nice to have tx id at event structure to log it
np.log.Warn(logs.NetmapCantParseNetworkMapCandidate)
np.log.Warn(context.Background(), logs.NetmapCantParseNetworkMapCandidate)
return false
}

// validate and update node info
err = np.nodeValidator.VerifyAndUpdate(&nodeInfo)
if err != nil {
np.log.Warn(logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate,
np.log.Warn(context.Background(), logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate,
zap.String("error", err.Error()),
)

@ -63,7 +64,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
// That is why we need to perform `addPeerIR` only in case when node is online,
// because in scope of this method, contract set state `ONLINE` for the node.
if updated && nodeInfo.Status().IsOnline() {
np.log.Info(logs.NetmapApprovingNetworkMapCandidate,
np.log.Info(context.Background(), logs.NetmapApprovingNetworkMapCandidate,
zap.String("key", keyString))

prm := netmapclient.AddPeerPrm{}

@ -84,7 +85,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
nodeInfoBinary,
)
if err != nil {
np.log.Error(logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err))
np.log.Error(context.Background(), logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err))
return false
}
}

@ -95,7 +96,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
// Process update peer notification by sending approval tx to the smart contract.
func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool {
if !np.alphabetState.IsAlphabet() {
np.log.Info(logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification)
np.log.Info(context.Background(), logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification)
return true
}

@ -108,7 +109,7 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool {
if ev.Maintenance() {
err = np.nodeStateSettings.MaintenanceModeAllowed()
if err != nil {
np.log.Info(logs.NetmapPreventSwitchingNodeToMaintenanceState,
np.log.Info(context.Background(), logs.NetmapPreventSwitchingNodeToMaintenanceState,
zap.Error(err),
)

@ -117,7 +118,7 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool {
}

if err = np.netmapClient.MorphNotarySignAndInvokeTX(ev.NotaryRequest().MainTransaction); err != nil {
np.log.Error(logs.NetmapCantInvokeNetmapUpdatePeer, zap.Error(err))
np.log.Error(context.Background(), logs.NetmapCantInvokeNetmapUpdatePeer, zap.Error(err))
return false
}
@ -1,6 +1,7 @@
package netmap

import (
"context"
"errors"
"fmt"

@ -132,7 +133,7 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/netmap: node state settings is not set")
}

p.Log.Debug(logs.NetmapNetmapWorkerPool, zap.Int("size", p.PoolSize))
p.Log.Debug(context.Background(), logs.NetmapNetmapWorkerPool, zap.Int("size", p.PoolSize))

pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
@ -1,6 +1,7 @@
package innerring

import (
"context"
"fmt"
"sort"

@ -61,7 +62,7 @@ func (s *Server) IsAlphabet() bool {
func (s *Server) InnerRingIndex() int {
index, err := s.statusIndex.InnerRingIndex()
if err != nil {
s.log.Error(logs.InnerringCantGetInnerRingIndex, zap.String("error", err.Error()))
s.log.Error(context.Background(), logs.InnerringCantGetInnerRingIndex, zap.String("error", err.Error()))
return -1
}

@ -73,7 +74,7 @@ func (s *Server) InnerRingIndex() int {
func (s *Server) InnerRingSize() int {
size, err := s.statusIndex.InnerRingSize()
if err != nil {
s.log.Error(logs.InnerringCantGetInnerRingSize, zap.String("error", err.Error()))
s.log.Error(context.Background(), logs.InnerringCantGetInnerRingSize, zap.String("error", err.Error()))
return 0
}

@ -85,7 +86,7 @@ func (s *Server) InnerRingSize() int {
func (s *Server) AlphabetIndex() int {
index, err := s.statusIndex.AlphabetIndex()
if err != nil {
s.log.Error(logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error()))
s.log.Error(context.Background(), logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error()))
return -1
}

@ -97,13 +98,13 @@ func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) erro

index := s.InnerRingIndex()
if s.contracts.alphabet.indexOutOfRange(index) {
s.log.Info(logs.InnerringIgnoreValidatorVoteNodeNotInAlphabetRange)
s.log.Info(context.Background(), logs.InnerringIgnoreValidatorVoteNodeNotInAlphabetRange)

return nil
}

if len(validators) == 0 {
s.log.Info(logs.InnerringIgnoreValidatorVoteEmptyValidatorsList)
s.log.Info(context.Background(), logs.InnerringIgnoreValidatorVoteEmptyValidatorsList)

return nil
}

@ -128,7 +129,7 @@ func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) erro
s.contracts.alphabet.iterate(func(letter GlagoliticLetter, contract util.Uint160) {
_, err := s.morphClient.NotaryInvoke(contract, s.feeConfig.SideChainFee(), nonce, vubP, voteMethod, epoch, validators)
if err != nil {
s.log.Warn(logs.InnerringCantInvokeVoteMethodInAlphabetContract,
s.log.Warn(context.Background(), logs.InnerringCantInvokeVoteMethodInAlphabetContract,
zap.Int8("alphabet_index", int8(letter)),
zap.Uint64("epoch", epoch),
zap.String("error", err.Error()))

@ -202,6 +203,6 @@ func (s *Server) notifySystemd(st control.HealthStatus) {
err = sdnotify.Status(fmt.Sprintf("%v", st))
}
if err != nil {
s.log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
s.log.Error(context.Background(), logs.FailedToReportStatusToSystemd, zap.Error(err))
}
}
@ -58,7 +58,7 @@ func defaultCfg(c *cfg) {
},
fullSizeLimit: 1 << 30, // 1GB
objSizeLimit: 1 << 20, // 1MB
log: &logger.Logger{Logger: zap.L()},
log: logger.NewLoggerWrapper(zap.L()),
metrics: &NoopMetrics{},
}
}

@ -110,7 +110,7 @@ func WithFullSizeLimit(lim uint64) Option {
// WithLogger returns an option to specify Blobovnicza's logger.
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
c.log = &logger.Logger{Logger: l.With(zap.String("component", "Blobovnicza"))}
c.log = l.With(zap.String("component", "Blobovnicza"))
}
}
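The two hunks above stop building the logger as a raw struct literal (`&logger.Logger{Logger: zap.L()}`) and instead call `logger.NewLoggerWrapper(zap.L())`, with `With` returning the wrapper type directly. The wrapper itself is not part of this diff, so the following is only a hypothetical sketch, assuming a thin struct over `*zap.Logger` whose level methods accept a context; the real frostfs-node logger package may differ in fields and method set.

```go
// Hypothetical sketch of a context-aware logger wrapper (assumption, not the
// actual frostfs-node package).
package logger

import (
	"context"

	"go.uber.org/zap"
)

// Logger wraps a zap logger; methods take a context so request-scoped data
// (for example a trace ID) can later be attached to every record.
type Logger struct {
	z *zap.Logger
}

// NewLoggerWrapper builds a Logger from an existing *zap.Logger.
func NewLoggerWrapper(z *zap.Logger) *Logger {
	return &Logger{z: z}
}

// With returns a child logger with extra fields, mirroring zap's With.
func (l *Logger) With(fields ...zap.Field) *Logger {
	return &Logger{z: l.z.With(fields...)}
}

// Debug logs at debug level; the context is accepted now so call sites do not
// have to change again once it is actually used for enrichment.
func (l *Logger) Debug(_ context.Context, msg string, fields ...zap.Field) {
	l.z.Debug(msg, fields...)
}

// Info, Warn and Error follow the same shape.
func (l *Logger) Info(_ context.Context, msg string, fields ...zap.Field) {
	l.z.Info(msg, fields...)
}

func (l *Logger) Warn(_ context.Context, msg string, fields ...zap.Field) {
	l.z.Warn(msg, fields...)
}

func (l *Logger) Error(_ context.Context, msg string, fields ...zap.Field) {
	l.z.Error(msg, fields...)
}
```

The design choice is the usual one for such migrations: change every signature first with a no-op context so the tree keeps compiling, then wire trace or request IDs into the wrapper in a follow-up without touching call sites again.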
@ -1,6 +1,7 @@
package blobovnicza

import (
"context"
"errors"
"fmt"
"path/filepath"

@ -23,7 +24,7 @@ func (b *Blobovnicza) Open() error {
return nil
}

b.log.Debug(logs.BlobovniczaCreatingDirectoryForBoltDB,
b.log.Debug(context.Background(), logs.BlobovniczaCreatingDirectoryForBoltDB,
zap.String("path", b.path),
zap.Bool("ro", b.boltOptions.ReadOnly),
)

@ -37,7 +38,7 @@ func (b *Blobovnicza) Open() error {
}
}

b.log.Debug(logs.BlobovniczaOpeningBoltDB,
b.log.Debug(context.Background(), logs.BlobovniczaOpeningBoltDB,
zap.String("path", b.path),
zap.Stringer("permissions", b.perm),
)

@ -63,7 +64,7 @@ func (b *Blobovnicza) Init() error {
return errors.New("blobovnicza is not open")
}

b.log.Debug(logs.BlobovniczaInitializing,
b.log.Debug(context.Background(), logs.BlobovniczaInitializing,
zap.Uint64("object size limit", b.objSizeLimit),
zap.Uint64("storage size limit", b.fullSizeLimit),
)

@ -71,7 +72,7 @@ func (b *Blobovnicza) Init() error {
size := b.dataSize.Load()
items := b.itemsCount.Load()
if size != 0 || items != 0 {
b.log.Debug(logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size), zap.Uint64("items", items))
b.log.Debug(context.Background(), logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size), zap.Uint64("items", items))
return nil
}

@ -81,7 +82,7 @@ func (b *Blobovnicza) Init() error {
// create size range bucket

rangeStr := stringifyBounds(lower, upper)
b.log.Debug(logs.BlobovniczaCreatingBucketForSizeRange,
b.log.Debug(context.Background(), logs.BlobovniczaCreatingBucketForSizeRange,
zap.String("range", rangeStr))

_, err := tx.CreateBucketIfNotExists(key)

@ -131,17 +132,17 @@ func (b *Blobovnicza) initializeCounters() error {
return fmt.Errorf("can't determine DB size: %w", err)
}
if (!sizeExists || !itemsCountExists) && !b.boltOptions.ReadOnly {
b.log.Debug(logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items))
b.log.Debug(context.Background(), logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items))
if err := b.boltDB.Update(func(tx *bbolt.Tx) error {
if err := saveDataSize(tx, size); err != nil {
return err
}
return saveItemsCount(tx, items)
}); err != nil {
b.log.Debug(logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items))
b.log.Debug(context.Background(), logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items))
return fmt.Errorf("can't save blobovnicza's size and items count: %w", err)
}
b.log.Debug(logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items))
b.log.Debug(context.Background(), logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items))
}

b.dataSize.Store(size)

@ -162,7 +163,7 @@ func (b *Blobovnicza) Close() error {
return nil
}

b.log.Debug(logs.BlobovniczaClosingBoltDB,
b.log.Debug(context.Background(), logs.BlobovniczaClosingBoltDB,
zap.String("path", b.path),
)
@ -91,7 +91,7 @@ func (b *Blobovnicza) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, err
}

if err == nil && found {
b.log.Debug(logs.BlobovniczaObjectWasRemovedFromBucket,
b.log.Debug(ctx, logs.BlobovniczaObjectWasRemovedFromBucket,
zap.String("binary size", stringifyByteSize(dataSize)),
zap.String("range", stringifyBounds(sizeLowerBound, sizeUpperBound)),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
@ -24,10 +24,10 @@ func (b *Blobovniczas) Open(mode mode.ComponentMode) error {
//
// Should be called exactly once.
func (b *Blobovniczas) Init() error {
b.log.Debug(logs.BlobovniczatreeInitializingBlobovniczas)
b.log.Debug(context.Background(), logs.BlobovniczatreeInitializingBlobovniczas)

if b.readOnly {
b.log.Debug(logs.BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization)
b.log.Debug(context.Background(), logs.BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization)
return nil
}

@ -60,7 +60,7 @@ func (b *Blobovniczas) initializeDBs(ctx context.Context) error {
b.deleteProtectedObjects.Add(move.Address)
}

b.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p))
b.log.Debug(egCtx, logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p))
return nil
})
return false, nil
@ -80,7 +80,7 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
res, err = b.deleteObjectFromLevel(ctx, bPrm, p)
if err != nil {
if !client.IsErrObjectNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromLevel,
b.log.Debug(ctx, logs.BlobovniczatreeCouldNotRemoveObjectFromLevel,
zap.String("level", p),
zap.String("error", err.Error()),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
@ -55,7 +55,7 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common
_, err := b.getObjectFromLevel(ctx, gPrm, p)
if err != nil {
if !client.IsErrObjectNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
zap.String("level", p),
zap.String("error", err.Error()),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
@ -67,7 +67,7 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
res, err = b.getObjectFromLevel(ctx, bPrm, p)
if err != nil {
if !client.IsErrObjectNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
zap.String("level", p),
zap.String("error", err.Error()),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
@ -69,7 +69,7 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
if err != nil {
outOfBounds := isErrOutOfRange(err)
if !outOfBounds && !client.IsErrObjectNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
zap.String("level", p),
zap.String("error", err.Error()),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
@ -42,7 +42,7 @@ func (b *Blobovniczas) Iterate(ctx context.Context, prm common.IteratePrm) (comm
data, err := b.compression.Decompress(elem.ObjectData())
if err != nil {
if prm.IgnoreErrors {
b.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
zap.Stringer("address", elem.Address()),
zap.String("err", err.Error()),
zap.String("storage_id", p),

@ -76,7 +76,7 @@ func (b *Blobovniczas) iterateBlobovniczas(ctx context.Context, ignoreErrors boo
blz, err := shBlz.Open()
if err != nil {
if ignoreErrors {
b.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
zap.String("err", err.Error()),
zap.String("storage_id", p),
zap.String("root_path", b.rootPath))
@ -1,6 +1,7 @@
package blobovniczatree

import (
"context"
"errors"
"fmt"
"os"

@ -86,7 +87,7 @@ func (b *sharedDB) Close() {
defer b.cond.L.Unlock()

if b.refCount == 0 {
b.log.Error(logs.AttemtToCloseAlreadyClosedBlobovnicza, zap.String("id", b.path))
b.log.Error(context.Background(), logs.AttemtToCloseAlreadyClosedBlobovnicza, zap.String("id", b.path))
b.cond.Broadcast()
return
}

@ -94,7 +95,7 @@ func (b *sharedDB) Close() {
if b.refCount == 1 {
b.refCount = 0
if err := b.blcza.Close(); err != nil {
b.log.Error(logs.BlobovniczatreeCouldNotCloseBlobovnicza,
b.log.Error(context.Background(), logs.BlobovniczatreeCouldNotCloseBlobovnicza,
zap.String("id", b.path),
zap.String("error", err.Error()),
)

@ -122,7 +123,7 @@ func (b *sharedDB) CloseAndRemoveFile() error {
}

if err := b.blcza.Close(); err != nil {
b.log.Error(logs.BlobovniczatreeCouldNotCloseBlobovnicza,
b.log.Error(context.Background(), logs.BlobovniczatreeCouldNotCloseBlobovnicza,
zap.String("id", b.path),
zap.String("error", err.Error()),
)
@ -47,7 +47,7 @@ const (

func initConfig(c *cfg) {
*c = cfg{
log: &logger.Logger{Logger: zap.L()},
log: logger.NewLoggerWrapper(zap.L()),
perm: defaultPerm,
openedCacheSize: defaultOpenedCacheSize,
openedCacheTTL: defaultOpenedCacheTTL,
@ -82,7 +82,7 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error)
if !isLogical(err) {
i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
} else {
i.B.log.Debug(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza,
i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza,
zap.String("error", err.Error()),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}

@ -91,7 +91,7 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error)
}

if active == nil {
i.B.log.Debug(logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath),
i.B.log.Debug(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
return false, nil
}

@ -104,7 +104,7 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error)
if !isLogical(err) {
i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
} else {
i.B.log.Debug(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza,
i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza,
zap.String("path", active.SystemPath()),
zap.String("error", err.Error()),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
@ -49,25 +49,25 @@ func (b *Blobovniczas) Rebuild(ctx context.Context, prm common.RebuildPrm) (comm

var res common.RebuildRes

b.log.Debug(logs.BlobovniczaTreeCompletingPreviousRebuild)
b.log.Debug(ctx, logs.BlobovniczaTreeCompletingPreviousRebuild)
completedPreviosMoves, err := b.completeIncompletedMove(ctx, prm.MetaStorage)
res.ObjectsMoved += completedPreviosMoves
if err != nil {
b.log.Warn(logs.BlobovniczaTreeCompletedPreviousRebuildFailed, zap.Error(err))
b.log.Warn(ctx, logs.BlobovniczaTreeCompletedPreviousRebuildFailed, zap.Error(err))
success = false
return res, err
}
b.log.Debug(logs.BlobovniczaTreeCompletedPreviousRebuildSuccess)
b.log.Debug(ctx, logs.BlobovniczaTreeCompletedPreviousRebuildSuccess)

b.log.Debug(logs.BlobovniczaTreeCollectingDBToRebuild)
b.log.Debug(ctx, logs.BlobovniczaTreeCollectingDBToRebuild)
dbsToMigrate, err := b.getDBsToRebuild(ctx, prm.FillPercent)
if err != nil {
b.log.Warn(logs.BlobovniczaTreeCollectingDBToRebuildFailed, zap.Error(err))
b.log.Warn(ctx, logs.BlobovniczaTreeCollectingDBToRebuildFailed, zap.Error(err))
success = false
return res, err
}

b.log.Info(logs.BlobovniczaTreeCollectingDBToRebuildSuccess, zap.Int("blobovniczas_to_rebuild", len(dbsToMigrate)))
b.log.Info(ctx, logs.BlobovniczaTreeCollectingDBToRebuildSuccess, zap.Int("blobovniczas_to_rebuild", len(dbsToMigrate)))
res, err = b.migrateDBs(ctx, dbsToMigrate, prm, res)
if err != nil {
success = false

@ -78,14 +78,14 @@ func (b *Blobovniczas) Rebuild(ctx context.Context, prm common.RebuildPrm) (comm
func (b *Blobovniczas) migrateDBs(ctx context.Context, dbs []string, prm common.RebuildPrm, res common.RebuildRes) (common.RebuildRes, error) {
var completedDBCount uint32
for _, db := range dbs {
b.log.Debug(logs.BlobovniczaTreeRebuildingBlobovnicza, zap.String("path", db))
b.log.Debug(ctx, logs.BlobovniczaTreeRebuildingBlobovnicza, zap.String("path", db))
movedObjects, err := b.rebuildDB(ctx, db, prm.MetaStorage, prm.WorkerLimiter)
res.ObjectsMoved += movedObjects
if err != nil {
b.log.Warn(logs.BlobovniczaTreeRebuildingBlobovniczaFailed, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects), zap.Error(err))
b.log.Warn(ctx, logs.BlobovniczaTreeRebuildingBlobovniczaFailed, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects), zap.Error(err))
return res, err
}
b.log.Debug(logs.BlobovniczaTreeRebuildingBlobovniczaSuccess, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects))
b.log.Debug(ctx, logs.BlobovniczaTreeRebuildingBlobovniczaSuccess, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects))
res.FilesRemoved++
completedDBCount++
b.metrics.SetRebuildPercent((100 * completedDBCount) / uint32(len(dbs)))

@ -233,7 +233,7 @@ func (b *Blobovniczas) addRebuildTempFile(path string) (func(), error) {
}
return func() {
if err := os.Remove(sysPath); err != nil {
b.log.Warn(logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
b.log.Warn(context.Background(), logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
}
}, nil
}

@ -389,7 +389,7 @@ func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore co
})
for _, tmp := range rebuildTempFilesToRemove {
if err := os.Remove(filepath.Join(b.rootPath, tmp)); err != nil {
b.log.Warn(logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
b.log.Warn(ctx, logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
}
}
return count, err

@ -413,14 +413,14 @@ func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blob
if client.IsErrObjectNotFound(err) {
existsInSource = false
} else {
b.log.Warn(logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err))
b.log.Warn(ctx, logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err))
return err
}
}

if !existsInSource { // object was deleted by Rebuild, need to delete move info
if err = source.DropMoveInfo(ctx, move.Address); err != nil {
b.log.Warn(logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err))
b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err))
return err
}
b.deleteProtectedObjects.Delete(move.Address)

@ -429,7 +429,7 @@ func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blob

existsInTarget, err := target.Exists(ctx, move.Address)
if err != nil {
b.log.Warn(logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err))
b.log.Warn(ctx, logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err))
return err
}

@ -439,25 +439,25 @@ func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blob
putPrm.SetMarshaledObject(gRes.Object())
_, err = target.Put(ctx, putPrm)
if err != nil {
b.log.Warn(logs.BlobovniczatreeCouldNotPutObjectToTargetDB, zap.String("path", targetDB.SystemPath()), zap.Error(err))
b.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutObjectToTargetDB, zap.String("path", targetDB.SystemPath()), zap.Error(err))
return err
}
}

if err = metaStore.UpdateStorageID(ctx, move.Address, move.TargetStorageID); err != nil {
b.log.Warn(logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", move.Address))
b.log.Warn(ctx, logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", move.Address))
return err
}

var deletePrm blobovnicza.DeletePrm
deletePrm.SetAddress(move.Address)
if _, err = source.Delete(ctx, deletePrm); err != nil {
b.log.Warn(logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", sourcePath), zap.Error(err))
b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", sourcePath), zap.Error(err))
return err
}

if err = source.DropMoveInfo(ctx, move.Address); err != nil {
b.log.Warn(logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err))
b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err))
return err
}

@ -482,13 +482,13 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool,
if !isLogical(err) {
i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
} else {
i.B.log.Warn(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, zap.Error(err))
i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, zap.Error(err))
}
return false, nil
}

if target == nil {
i.B.log.Warn(logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath))
i.B.log.Warn(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath))
return false, nil
}
defer target.Close()

@ -505,7 +505,7 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool,
if !isLogical(err) {
i.B.reportError(logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, err)
} else {
i.B.log.Warn(logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, zap.String("path", i.SourceSysPath), zap.Error(err))
i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, zap.String("path", i.SourceSysPath), zap.Error(err))
}
return true, nil
}

@ -521,13 +521,13 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool,
if !isLogical(err) {
i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
} else {
i.B.log.Warn(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", target.SystemPath()), zap.Error(err))
i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", target.SystemPath()), zap.Error(err))
}
return true, nil
}

if err = i.MetaStore.UpdateStorageID(ctx, i.Address, targetStorageID.Bytes()); err != nil {
|
||||
i.B.log.Warn(logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", i.Address))
|
||||
i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", i.Address))
|
||||
return true, nil
|
||||
}
|
||||
|
||||
|
@ -537,7 +537,7 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool,
|
|||
if !isLogical(err) {
|
||||
i.B.reportError(logs.BlobovniczatreeCouldNotDeleteFromSource, err)
|
||||
} else {
|
||||
i.B.log.Warn(logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", i.SourceSysPath), zap.Error(err))
|
||||
i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", i.SourceSysPath), zap.Error(err))
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
@ -546,7 +546,7 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool,
|
|||
if !isLogical(err) {
|
||||
i.B.reportError(logs.BlobovniczatreeCouldNotDropMoveInfo, err)
|
||||
} else {
|
||||
i.B.log.Warn(logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", i.SourceSysPath), zap.Error(err))
|
||||
i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", i.SourceSysPath), zap.Error(err))
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
|
|
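The hunks above all apply the same mechanical change: every logger call gains a leading context argument, with background jobs that have no request context passing context.Background(). A minimal compilable sketch of the calling convention this commit moves to is shown below; the wrapper type and its method signature are assumptions for illustration, not the exact frostfs-node logger API.

```go
package main

import (
	"context"

	"go.uber.org/zap"
)

// ctxLogger is a hypothetical wrapper mirroring the pattern in this diff:
// every level method takes a context first, so request-scoped data can be
// attached to the entry by the wrapper itself.
type ctxLogger struct {
	z *zap.Logger
}

func (l *ctxLogger) Debug(ctx context.Context, msg string, fields ...zap.Field) {
	// A real implementation could read values from ctx here before logging.
	l.z.Debug(msg, fields...)
}

func rebuildOne(ctx context.Context, log *ctxLogger, path string) {
	// Call sites pass whatever context they already hold.
	log.Debug(ctx, "rebuilding blobovnicza", zap.String("path", path))
}

func main() {
	rebuildOne(context.Background(), &ctxLogger{z: zap.NewExample()}, "/path/to/db")
}
```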
@ -47,7 +47,7 @@ type cfg struct {
}

func initConfig(c *cfg) {
c.log = &logger.Logger{Logger: zap.L()}
c.log = logger.NewLoggerWrapper(zap.L())
c.metrics = &noopMetrics{}
}

@ -90,7 +90,7 @@ func WithStorages(st []SubStorage) Option {
// WithLogger returns option to specify BlobStor's logger.
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
c.log = &logger.Logger{Logger: l.With(zap.String("component", "BlobStor"))}
c.log = l.With(zap.String("component", "BlobStor"))
}
}
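Besides threading ctx, these hunks stop constructing logger.Logger literals around a bare *zap.Logger and switch to logger.NewLoggerWrapper plus l.With(...). A rough sketch of why that composes nicely is below; the type name and fields are assumed for illustration and the real pkg/util/logger package may differ.

```go
package logger // hypothetical sketch, not the actual frostfs-node package

import "go.uber.org/zap"

// Logger is assumed to wrap a zap.Logger so that With can return the wrapper
// type directly, letting option functions write `c.log = l.With(...)` instead
// of re-wrapping the embedded zap.Logger by hand.
type Logger struct {
	z *zap.Logger
}

// NewLoggerWrapper builds the wrapper around an existing zap logger.
func NewLoggerWrapper(z *zap.Logger) *Logger {
	return &Logger{z: z}
}

// With returns a child logger carrying extra fields, e.g. a component name.
func (l *Logger) With(fields ...zap.Field) *Logger {
	return &Logger{z: l.z.With(fields...)}
}
```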
@ -12,7 +12,7 @@ import (

// Open opens BlobStor.
func (b *BlobStor) Open(ctx context.Context, mode mode.Mode) error {
b.log.Debug(logs.BlobstorOpening)
b.log.Debug(ctx, logs.BlobstorOpening)

b.modeMtx.Lock()
defer b.modeMtx.Unlock()

@ -51,7 +51,7 @@ var ErrInitBlobovniczas = errors.New("failure on blobovnicza initialization stag
//
// Returns wrapped ErrInitBlobovniczas on blobovnicza tree's initializaiton failure.
func (b *BlobStor) Init() error {
b.log.Debug(logs.BlobstorInitializing)
b.log.Debug(context.Background(), logs.BlobstorInitializing)

if err := b.compression.Init(); err != nil {
return err

@ -68,13 +68,13 @@ func (b *BlobStor) Init() error {

// Close releases all internal resources of BlobStor.
func (b *BlobStor) Close() error {
b.log.Debug(logs.BlobstorClosing)
b.log.Debug(context.Background(), logs.BlobstorClosing)

var firstErr error
for i := range b.storage {
err := b.storage[i].Storage.Close()
if err != nil {
b.log.Info(logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error()))
b.log.Info(context.Background(), logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error()))
if firstErr == nil {
firstErr = err
}
@ -39,7 +39,7 @@ func (b *BlobStor) Delete(ctx context.Context, prm common.DeletePrm) (common.Del
if err == nil || !client.IsErrObjectNotFound(err) {
if err == nil {
success = true
logOp(b.log, deleteOp, prm.Address, b.storage[i].Storage.Type(), prm.StorageID)
logOp(ctx, b.log, deleteOp, prm.Address, b.storage[i].Storage.Type(), prm.StorageID)
}
return res, err
}

@ -58,7 +58,7 @@ func (b *BlobStor) Delete(ctx context.Context, prm common.DeletePrm) (common.Del
res, err := st.Delete(ctx, prm)
if err == nil {
success = true
logOp(b.log, deleteOp, prm.Address, st.Type(), prm.StorageID)
logOp(ctx, b.log, deleteOp, prm.Address, st.Type(), prm.StorageID)
}

return res, err
@ -73,7 +73,7 @@ func (b *BlobStor) Exists(ctx context.Context, prm common.ExistsPrm) (common.Exi
}

for _, err := range errors[:len(errors)-1] {
b.log.Warn(logs.BlobstorErrorOccurredDuringObjectExistenceChecking,
b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringObjectExistenceChecking,
zap.Stringer("address", prm.Address),
zap.String("error", err.Error()),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
@ -87,7 +87,7 @@ func New(opts ...Option) *FSTree {
DirNameLen: DirNameLen,
metrics: &noopMetrics{},
fileCounter: &noopCounter{},
log: &logger.Logger{Logger: zap.L()},
log: logger.NewLoggerWrapper(zap.L()),
}
for i := range opts {
opts[i](f)

@ -152,7 +152,7 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr
des, err := os.ReadDir(dirPath)
if err != nil {
if prm.IgnoreErrors {
t.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
zap.String("err", err.Error()),
zap.String("directory_path", dirPath))
return nil

@ -200,7 +200,7 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr
}
if err != nil {
if prm.IgnoreErrors {
t.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
zap.Stringer("address", addr),
zap.String("err", err.Error()),
zap.String("path", path))
@ -53,6 +53,6 @@ func WithFileCounter(c FileCounter) Option {

func WithLogger(l *logger.Logger) Option {
return func(f *FSTree) {
f.log = &logger.Logger{Logger: l.With(zap.String("component", "FSTree"))}
f.log = l.With(zap.String("component", "FSTree"))
}
}
@ -42,7 +42,7 @@ func (b *BlobStor) Iterate(ctx context.Context, prm common.IteratePrm) (common.I
_, err := b.storage[i].Storage.Iterate(ctx, prm)
if err != nil {
if prm.IgnoreErrors {
b.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
zap.String("storage_path", b.storage[i].Storage.Path()),
zap.String("storage_type", b.storage[i].Storage.Type()),
zap.String("err", err.Error()))
@ -1,6 +1,8 @@
package blobstor

import (
"context"

storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"

@ -11,8 +13,8 @@ const (
putOp = "PUT"
)

func logOp(l *logger.Logger, op string, addr oid.Address, typ string, sID []byte) {
storagelog.Write(l,
func logOp(ctx context.Context, l *logger.Logger, op string, addr oid.Address, typ string, sID []byte) {
storagelog.Write(ctx, l,
storagelog.AddressField(addr),
storagelog.OpField(op),
storagelog.StorageTypeField(typ),
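The logOp signature change above is what drives the call-site updates in the blobstor Delete and Put hunks, which now call logOp(ctx, b.log, ...). A small compilable sketch of the new call shape follows; the logger and field helpers are simplified to plain zap, and the concrete argument types are assumptions rather than the real common/storagelog types.

```go
package main

import (
	"context"

	"go.uber.org/zap"
)

const (
	deleteOp = "DELETE"
	putOp    = "PUT"
)

// logOp follows the updated helper shape: ctx first, then the logger and the
// operation metadata. A real implementation could also derive fields from ctx.
func logOp(ctx context.Context, l *zap.Logger, op, addr, typ string, sID []byte) {
	l.Debug("storage operation",
		zap.String("address", addr),
		zap.String("op", op),
		zap.String("type", typ),
		zap.Binary("storage_id", sID),
	)
}

func main() {
	// Analogue of the updated call site in BlobStor.Put:
	// logOp(ctx, b.log, putOp, prm.Address, b.storage[i].Storage.Type(), res.StorageID)
	logOp(context.Background(), zap.NewExample(), putOp, "cnr/obj", "fstree", []byte("id"))
}
```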
@ -63,7 +63,7 @@ func (b *BlobStor) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, e
res, err := b.storage[i].Storage.Put(ctx, prm)
if err == nil {
success = true
logOp(b.log, putOp, prm.Address, b.storage[i].Storage.Type(), res.StorageID)
logOp(ctx, b.log, putOp, prm.Address, b.storage[i].Storage.Type(), res.StorageID)
}
return res, err
}
@ -30,7 +30,7 @@ func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, limiter Con
summary.FilesRemoved += res.FilesRemoved
summary.ObjectsMoved += res.ObjectsMoved
if err != nil {
b.log.Error(logs.BlobstorRebuildFailedToRebuildStorages,
b.log.Error(ctx, logs.BlobstorRebuildFailedToRebuildStorages,
zap.String("failed_storage_path", storage.Storage.Path()),
zap.String("failed_storage_type", storage.Storage.Type()),
zap.Error(err))

@ -38,7 +38,7 @@ func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, limiter Con
break
}
}
b.log.Info(logs.BlobstorRebuildRebuildStoragesCompleted,
b.log.Info(ctx, logs.BlobstorRebuildRebuildStoragesCompleted,
zap.Bool("success", rErr == nil),
zap.Uint64("total_files_removed", summary.FilesRemoved),
zap.Uint64("total_objects_moved", summary.ObjectsMoved))
@ -49,7 +49,7 @@ func (e *StorageEngine) open(ctx context.Context) error {

for res := range errCh {
if res.err != nil {
e.log.Error(logs.EngineCouldNotOpenShardClosingAndSkipping,
e.log.Error(ctx, logs.EngineCouldNotOpenShardClosingAndSkipping,
zap.String("id", res.id),
zap.Error(res.err))

@ -58,7 +58,7 @@ func (e *StorageEngine) open(ctx context.Context) error {

err := sh.Close()
if err != nil {
e.log.Error(logs.EngineCouldNotClosePartiallyInitializedShard,
e.log.Error(ctx, logs.EngineCouldNotClosePartiallyInitializedShard,
zap.String("id", res.id),
zap.Error(res.err))
}

@ -101,7 +101,7 @@ func (e *StorageEngine) Init(ctx context.Context) error {
for res := range errCh {
if res.err != nil {
if errors.Is(res.err, blobstor.ErrInitBlobovniczas) {
e.log.Error(logs.EngineCouldNotInitializeShardClosingAndSkipping,
e.log.Error(ctx, logs.EngineCouldNotInitializeShardClosingAndSkipping,
zap.String("id", res.id),
zap.Error(res.err))

@ -110,7 +110,7 @@ func (e *StorageEngine) Init(ctx context.Context) error {

err := sh.Close()
if err != nil {
e.log.Error(logs.EngineCouldNotClosePartiallyInitializedShard,
e.log.Error(ctx, logs.EngineCouldNotClosePartiallyInitializedShard,
zap.String("id", res.id),
zap.Error(res.err))
}

@ -165,7 +165,7 @@ func (e *StorageEngine) close(releasePools bool) error {

for id, sh := range e.shards {
if err := sh.Close(); err != nil {
e.log.Debug(logs.EngineCouldNotCloseShard,
e.log.Debug(context.Background(), logs.EngineCouldNotCloseShard,
zap.String("id", id),
zap.String("error", err.Error()),
)

@ -311,7 +311,7 @@ loop:
for _, p := range shardsToReload {
err := p.sh.Reload(ctx, p.opts...)
if err != nil {
e.log.Error(logs.EngineCouldNotReloadAShard,
e.log.Error(ctx, logs.EngineCouldNotReloadAShard,
zap.Stringer("shard id", p.sh.ID()),
zap.Error(err))
}

@ -340,7 +340,7 @@ loop:
return fmt.Errorf("could not add %s shard: %w", idStr, err)
}

e.log.Info(logs.EngineAddedNewShard, zap.String("id", idStr))
e.log.Info(ctx, logs.EngineAddedNewShard, zap.String("id", idStr))
}

return nil
@ -152,7 +152,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo
e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
res, err := sh.Select(ctx, selectPrm)
if err != nil {
e.log.Warn(logs.EngineErrorDuringSearchingForObjectChildren,
e.log.Warn(ctx, logs.EngineErrorDuringSearchingForObjectChildren,
zap.Stringer("addr", addr),
zap.String("error", err.Error()),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))

@ -164,7 +164,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo

_, err = sh.Inhume(ctx, inhumePrm)
if err != nil {
e.log.Debug(logs.EngineCouldNotInhumeObjectInShard,
e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard,
zap.Stringer("addr", addr),
zap.String("err", err.Error()),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))

@ -194,7 +194,7 @@ func (e *StorageEngine) deleteChunks(
inhumePrm.MarkAsGarbage(addr)
_, err = sh.Inhume(ctx, inhumePrm)
if err != nil {
e.log.Debug(logs.EngineCouldNotInhumeObjectInShard,
e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard,
zap.Stringer("addr", addr),
zap.String("err", err.Error()),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
@ -99,20 +99,20 @@ func (e *StorageEngine) moveToDegraded(sh *shard.Shard, errCount uint32, isMeta
if isMeta {
err := sh.SetMode(mode.DegradedReadOnly)
if err == nil {
log.Info(logs.EngineShardIsMovedInDegradedModeDueToErrorThreshold)
log.Info(context.Background(), logs.EngineShardIsMovedInDegradedModeDueToErrorThreshold)
return
}
log.Error(logs.EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly,
log.Error(context.Background(), logs.EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly,
zap.Error(err))
}

err := sh.SetMode(mode.ReadOnly)
if err != nil {
log.Error(logs.EngineFailedToMoveShardInReadonlyMode, zap.Error(err))
log.Error(context.Background(), logs.EngineFailedToMoveShardInReadonlyMode, zap.Error(err))
return
}

log.Info(logs.EngineShardIsMovedInReadonlyModeDueToErrorThreshold)
log.Info(context.Background(), logs.EngineShardIsMovedInReadonlyModeDueToErrorThreshold)
}

// reportShardErrorByID increases shard error counter and logs an error.

@ -137,7 +137,7 @@ func (e *StorageEngine) reportShardError(
fields ...zap.Field,
) {
if isLogical(err) {
e.log.Warn(msg,
e.log.Warn(context.Background(), msg,
zap.Stringer("shard_id", sh.ID()),
zap.String("error", err.Error()))
return

@ -147,7 +147,7 @@ func (e *StorageEngine) reportShardError(
e.metrics.IncErrorCounter(sh.ID().String())

sid := sh.ID()
e.log.Warn(msg, append([]zap.Field{
e.log.Warn(context.Background(), msg, append([]zap.Field{
zap.Stringer("shard_id", sid),
zap.Uint32("error count", errCount),
zap.String("error", err.Error()),

@ -168,7 +168,7 @@ func (e *StorageEngine) reportShardError(
default:
// For background workers we can have a lot of such errors,
// thus logging is done with DEBUG level.
e.log.Debug(logs.EngineModeChangeIsInProgressIgnoringSetmodeRequest,
e.log.Debug(context.Background(), logs.EngineModeChangeIsInProgressIgnoringSetmodeRequest,
zap.Stringer("shard_id", sid),
zap.Uint32("error_count", errCount))
}

@ -197,7 +197,7 @@ type cfg struct {

func defaultCfg() *cfg {
res := &cfg{
log: &logger.Logger{Logger: zap.L()},
log: logger.NewLoggerWrapper(zap.L()),
shardPoolSize: 20,
metrics: noopMetrics{},
}

@ -269,8 +269,8 @@ type containerSource struct {

func (s *containerSource) IsContainerAvailable(ctx context.Context, id cid.ID) (bool, error) {
select {
case <-ctx.Done():
return false, ctx.Err()
case <-context.Background().Done():
return false, context.Background().Err()
default:
}
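Many hunks in this commit still attach zap.String("trace_id", tracingPkg.GetTraceID(ctx)) by hand at each call site. Passing ctx into the logger is what makes it possible to move that work into the wrapper itself. The sketch below shows one way such a ctx-aware method could attach request-scoped fields; the context key and the traceIDFrom helper are assumptions standing in for the real tracing package.

```go
package main

import (
	"context"
	"fmt"

	"go.uber.org/zap"
)

type traceIDKey struct{}

// traceIDFrom is a stand-in for tracingPkg.GetTraceID; the real function
// reads the ID from the tracing SDK, not from a plain context value.
func traceIDFrom(ctx context.Context) string {
	if v, ok := ctx.Value(traceIDKey{}).(string); ok {
		return v
	}
	return ""
}

// warnCtx sketches what a ctx-aware Warn can do once every call site passes
// a context: attach trace_id once here instead of at each call site.
func warnCtx(ctx context.Context, z *zap.Logger, msg string, fields ...zap.Field) {
	if id := traceIDFrom(ctx); id != "" {
		fields = append(fields, zap.String("trace_id", id))
	}
	z.Warn(msg, fields...)
}

func main() {
	ctx := context.WithValue(context.Background(), traceIDKey{}, "abc123")
	warnCtx(ctx, zap.NewExample(), "shard error", zap.String("shard_id", "s1"))
	fmt.Println("logged with trace_id")
}
```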
@ -297,12 +297,12 @@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p
|
|||
e.evacuateLimiter.Complete(err)
|
||||
}()
|
||||
|
||||
e.log.Info(logs.EngineStartedShardsEvacuation, zap.Strings("shard_ids", shardIDs), evacuationOperationLogField,
|
||||
e.log.Info(ctx, logs.EngineStartedShardsEvacuation, zap.Strings("shard_ids", shardIDs), evacuationOperationLogField,
|
||||
zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope))
|
||||
|
||||
err = e.getTotals(ctx, prm, shardsToEvacuate, res)
|
||||
if err != nil {
|
||||
e.log.Error(logs.EngineShardsEvacuationFailedToCount, zap.Strings("shard_ids", shardIDs), zap.Error(err), evacuationOperationLogField,
|
||||
e.log.Error(ctx, logs.EngineShardsEvacuationFailedToCount, zap.Strings("shard_ids", shardIDs), zap.Error(err), evacuationOperationLogField,
|
||||
zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope))
|
||||
return err
|
||||
}
|
||||
|
@ -336,12 +336,12 @@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p
|
|||
err = errors.Join(err, fmt.Errorf("object error: %w", errObject))
|
||||
}
|
||||
if err != nil {
|
||||
e.log.Error(logs.EngineFinishedWithErrorShardsEvacuation, zap.Error(err), zap.Strings("shard_ids", shardIDs), evacuationOperationLogField,
|
||||
e.log.Error(ctx, logs.EngineFinishedWithErrorShardsEvacuation, zap.Error(err), zap.Strings("shard_ids", shardIDs), evacuationOperationLogField,
|
||||
zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope))
|
||||
return err
|
||||
}
|
||||
|
||||
e.log.Info(logs.EngineFinishedSuccessfullyShardsEvacuation,
|
||||
e.log.Info(ctx, logs.EngineFinishedSuccessfullyShardsEvacuation,
|
||||
zap.Strings("shard_ids", shardIDs),
|
||||
evacuationOperationLogField,
|
||||
zap.Uint64("total_objects", res.ObjectsTotal()),
|
||||
|
@ -494,7 +494,7 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context
|
|||
err := sh.IterateOverContainers(ctx, cntPrm)
|
||||
if err != nil {
|
||||
cancel(err)
|
||||
e.log.Error(logs.EngineShardsEvacuationFailedToListObjects, zap.String("shard_id", shardID), zap.Error(err), evacuationOperationLogField,
|
||||
e.log.Error(ctx, logs.EngineShardsEvacuationFailedToListObjects, zap.String("shard_id", shardID), zap.Error(err), evacuationOperationLogField,
|
||||
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
|
||||
}
|
||||
return err
|
||||
|
@ -551,7 +551,7 @@ func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, tree
|
|||
return err
|
||||
}
|
||||
if success {
|
||||
e.log.Debug(logs.EngineShardsEvacuationTreeEvacuatedLocal,
|
||||
e.log.Debug(ctx, logs.EngineShardsEvacuationTreeEvacuatedLocal,
|
||||
zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID),
|
||||
zap.String("from_shard_id", sh.ID().String()), zap.String("to_shard_id", shardID),
|
||||
evacuationOperationLogField, zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
|
||||
|
@ -561,26 +561,26 @@ func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, tree
|
|||
|
||||
moved, nodePK, err := e.evacuateTreeToOtherNode(ctx, sh, contTree, prm)
|
||||
if err != nil {
|
||||
e.log.Error(logs.EngineShardsEvacuationFailedToMoveTree,
|
||||
e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveTree,
|
||||
zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID),
|
||||
zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField,
|
||||
zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
|
||||
return err
|
||||
}
|
||||
if moved {
|
||||
e.log.Debug(logs.EngineShardsEvacuationTreeEvacuatedRemote,
|
||||
e.log.Debug(ctx, logs.EngineShardsEvacuationTreeEvacuatedRemote,
|
||||
zap.String("cid", contTree.CID.EncodeToString()), zap.String("treeID", contTree.TreeID),
|
||||
zap.String("from_shardID", sh.ID().String()), zap.String("to_node", nodePK),
|
||||
evacuationOperationLogField, zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
|
||||
res.trEvacuated.Add(1)
|
||||
} else if prm.IgnoreErrors {
|
||||
res.trFailed.Add(1)
|
||||
e.log.Warn(logs.EngineShardsEvacuationFailedToMoveTree,
|
||||
e.log.Warn(ctx, logs.EngineShardsEvacuationFailedToMoveTree,
|
||||
zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID),
|
||||
zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField,
|
||||
zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
|
||||
} else {
|
||||
e.log.Error(logs.EngineShardsEvacuationFailedToMoveTree,
|
||||
e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveTree,
|
||||
zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID),
|
||||
zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField,
|
||||
zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
|
||||
|
@ -770,7 +770,7 @@ func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objI
|
|||
res.objFailed.Add(1)
|
||||
return nil
|
||||
}
|
||||
e.log.Error(logs.EngineShardsEvacuationFailedToReadObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
|
||||
e.log.Error(ctx, logs.EngineShardsEvacuationFailedToReadObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
|
||||
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
|
||||
return err
|
||||
}
|
||||
|
@ -792,7 +792,7 @@ func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objI
|
|||
|
||||
moved, err := prm.ObjectsHandler(ctx, addr, getRes.Object())
|
||||
if err != nil {
|
||||
e.log.Error(logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
|
||||
e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
|
||||
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
|
||||
return err
|
||||
}
|
||||
|
@ -800,7 +800,7 @@ func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objI
|
|||
res.objEvacuated.Add(1)
|
||||
} else if prm.IgnoreErrors {
|
||||
res.objFailed.Add(1)
|
||||
e.log.Warn(logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
|
||||
e.log.Warn(ctx, logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
|
||||
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
|
||||
} else {
|
||||
return fmt.Errorf("object %s was not replicated", addr)
|
||||
|
@ -835,7 +835,7 @@ func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Add
|
|||
switch e.putToShard(ctx, shards[j].hashedShard, shards[j].pool, addr, object, container.IsIndexedContainer(cnr)).status {
|
||||
case putToShardSuccess:
|
||||
res.objEvacuated.Add(1)
|
||||
e.log.Debug(logs.EngineObjectIsMovedToAnotherShard,
|
||||
e.log.Debug(ctx, logs.EngineObjectIsMovedToAnotherShard,
|
||||
zap.Stringer("from", sh.ID()),
|
||||
zap.Stringer("to", shards[j].ID()),
|
||||
zap.Stringer("addr", addr),
|
||||
|
|
|
@ -104,7 +104,7 @@ func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) {
return GetRes{}, it.OutError
}
if it.ShardWithMeta.Shard != nil && it.MetaError != nil {
e.log.Warn(logs.ShardMetaInfoPresentButObjectNotFound,
e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound,
zap.Stringer("shard_id", it.ShardWithMeta.ID()),
zap.String("error", it.MetaError.Error()),
zap.Stringer("address", prm.addr),
@ -90,7 +90,7 @@ func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, e
|
|||
if !prm.forceRemoval {
|
||||
locked, err := e.IsLocked(ctx, prm.addrs[i])
|
||||
if err != nil {
|
||||
e.log.Warn(logs.EngineRemovingAnObjectWithoutFullLockingCheck,
|
||||
e.log.Warn(ctx, logs.EngineRemovingAnObjectWithoutFullLockingCheck,
|
||||
zap.Error(err),
|
||||
zap.Stringer("addr", prm.addrs[i]),
|
||||
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
|
||||
|
@ -264,7 +264,7 @@ func (e *StorageEngine) processExpiredLocks(ctx context.Context, epoch uint64, l
|
|||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
e.log.Info(logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(ctx.Err()))
|
||||
e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(ctx.Err()))
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
|
@ -278,7 +278,7 @@ func (e *StorageEngine) processDeletedLocks(ctx context.Context, lockers []oid.A
|
|||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
e.log.Info(logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(ctx.Err()))
|
||||
e.log.Info(ctx, logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(ctx.Err()))
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
|
@ -305,7 +305,7 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid
|
|||
e.iterateOverUnsortedShards(func(sh hashedShard) bool {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
e.log.Info(logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err()))
|
||||
e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err()))
|
||||
failed = true
|
||||
return true
|
||||
default:
|
||||
|
@ -316,7 +316,7 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid
|
|||
prm.SetContainerID(id)
|
||||
s, err := sh.ContainerSize(prm)
|
||||
if err != nil {
|
||||
e.log.Warn(logs.EngineFailedToGetContainerSize, zap.Stringer("container_id", id), zap.Error(err))
|
||||
e.log.Warn(ctx, logs.EngineFailedToGetContainerSize, zap.Stringer("container_id", id), zap.Error(err))
|
||||
failed = true
|
||||
return true
|
||||
}
|
||||
|
@ -338,7 +338,7 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid
|
|||
e.iterateOverUnsortedShards(func(sh hashedShard) bool {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
e.log.Info(logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err()))
|
||||
e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err()))
|
||||
failed = true
|
||||
return true
|
||||
default:
|
||||
|
@ -346,7 +346,7 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid
|
|||
|
||||
for id := range idMap {
|
||||
if err := sh.DeleteContainerSize(ctx, id); err != nil {
|
||||
e.log.Warn(logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err))
|
||||
e.log.Warn(ctx, logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err))
|
||||
failed = true
|
||||
return true
|
||||
}
|
||||
|
@ -383,7 +383,7 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci
|
|||
e.iterateOverUnsortedShards(func(sh hashedShard) bool {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
e.log.Info(logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err()))
|
||||
e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err()))
|
||||
failed = true
|
||||
return true
|
||||
default:
|
||||
|
@ -394,7 +394,7 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci
|
|||
prm.ContainerID = id
|
||||
s, err := sh.ContainerCount(ctx, prm)
|
||||
if err != nil {
|
||||
e.log.Warn(logs.EngineFailedToGetContainerCounters, zap.Stringer("container_id", id), zap.Error(err))
|
||||
e.log.Warn(ctx, logs.EngineFailedToGetContainerCounters, zap.Stringer("container_id", id), zap.Error(err))
|
||||
failed = true
|
||||
return true
|
||||
}
|
||||
|
@ -416,7 +416,7 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci
|
|||
e.iterateOverUnsortedShards(func(sh hashedShard) bool {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
e.log.Info(logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err()))
|
||||
e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err()))
|
||||
failed = true
|
||||
return true
|
||||
default:
|
||||
|
@ -424,7 +424,7 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci
|
|||
|
||||
for id := range idMap {
|
||||
if err := sh.DeleteContainerCount(ctx, id); err != nil {
|
||||
e.log.Warn(logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err))
|
||||
e.log.Warn(ctx, logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err))
|
||||
failed = true
|
||||
return true
|
||||
}
|
||||
|
@ -449,7 +449,7 @@ func (e *StorageEngine) selectNonExistentIDs(ctx context.Context, ids []cid.ID)
|
|||
for _, id := range ids {
|
||||
isAvailable, err := cs.IsContainerAvailable(ctx, id)
|
||||
if err != nil {
|
||||
e.log.Warn(logs.EngineFailedToCheckContainerAvailability, zap.Stringer("container_id", id), zap.Error(err))
|
||||
e.log.Warn(ctx, logs.EngineFailedToCheckContainerAvailability, zap.Stringer("container_id", id), zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
if isAvailable {
|
||||
|
|
|
@ -141,7 +141,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti
|
|||
// expired => do nothing with it
|
||||
res.status = putToShardExists
|
||||
} else {
|
||||
e.log.Warn(logs.EngineCouldNotCheckObjectExistence,
|
||||
e.log.Warn(ctx, logs.EngineCouldNotCheckObjectExistence,
|
||||
zap.Stringer("shard_id", sh.ID()),
|
||||
zap.String("error", err.Error()),
|
||||
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
|
||||
|
@ -163,14 +163,14 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti
|
|||
if err != nil {
|
||||
if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) ||
|
||||
errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) {
|
||||
e.log.Warn(logs.EngineCouldNotPutObjectToShard,
|
||||
e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard,
|
||||
zap.Stringer("shard_id", sh.ID()),
|
||||
zap.String("error", err.Error()),
|
||||
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
|
||||
return
|
||||
}
|
||||
if client.IsErrObjectAlreadyRemoved(err) {
|
||||
e.log.Warn(logs.EngineCouldNotPutObjectToShard,
|
||||
e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard,
|
||||
zap.Stringer("shard_id", sh.ID()),
|
||||
zap.String("error", err.Error()),
|
||||
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
|
||||
|
@ -185,7 +185,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti
|
|||
|
||||
res.status = putToShardSuccess
|
||||
}); err != nil {
|
||||
e.log.Warn(logs.EngineCouldNotPutObjectToShard, zap.Error(err))
|
||||
e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, zap.Error(err))
|
||||
close(exitCh)
|
||||
}
|
||||
|
||||
|
|
|
@ -116,7 +116,7 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error
return RngRes{}, it.OutError
}
if it.ShardWithMeta.Shard != nil && it.MetaError != nil {
e.log.Warn(logs.ShardMetaInfoPresentButObjectNotFound,
e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound,
zap.Stringer("shard_id", it.ShardWithMeta.ID()),
zap.String("error", it.MetaError.Error()),
zap.Stringer("address", prm.addr),
@ -43,7 +43,7 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat
prm.Concurrency = defaultRemoveDuplicatesConcurrency
}

e.log.Info(logs.EngineStartingRemovalOfLocallyredundantCopies,
e.log.Info(ctx, logs.EngineStartingRemovalOfLocallyredundantCopies,
zap.Int("concurrency", prm.Concurrency))

// The mutext must be taken for the whole duration to avoid target shard being removed

@ -55,7 +55,7 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat
// This is not currently the case, because `FreeSpace` metric used by weight sorting is always 0.
// However we could change weights in future and easily forget this function.
for _, sh := range e.shards {
e.log.Debug(logs.EngineStartedDuplicatesRemovalRoutine, zap.Stringer("shard_id", sh.ID()))
e.log.Debug(ctx, logs.EngineStartedDuplicatesRemovalRoutine, zap.Stringer("shard_id", sh.ID()))
ch := make(chan oid.Address)

errG, ctx := errgroup.WithContext(ctx)

@ -93,12 +93,12 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat
})
}
if err := errG.Wait(); err != nil {
e.log.Error(logs.EngineFinishedRemovalOfLocallyredundantCopies, zap.Error(err))
e.log.Error(ctx, logs.EngineFinishedRemovalOfLocallyredundantCopies, zap.Error(err))
return err
}
}

e.log.Info(logs.EngineFinishedRemovalOfLocallyredundantCopies)
e.log.Info(ctx, logs.EngineFinishedRemovalOfLocallyredundantCopies)
return nil
}
@ -140,7 +140,7 @@ func (e *StorageEngine) createShard(_ context.Context, opts []shard.Option) (*sh
|
|||
)...)
|
||||
|
||||
if err := sh.UpdateID(); err != nil {
|
||||
e.log.Warn(logs.FailedToUpdateShardID, zap.Stringer("shard_id", sh.ID()), zap.String("metabase_path", sh.DumpInfo().MetaBaseInfo.Path), zap.Error(err))
|
||||
e.log.Warn(context.Background(), logs.FailedToUpdateShardID, zap.Stringer("shard_id", sh.ID()), zap.String("metabase_path", sh.DumpInfo().MetaBaseInfo.Path), zap.Error(err))
|
||||
}
|
||||
|
||||
return sh, nil
|
||||
|
@ -228,7 +228,7 @@ func (e *StorageEngine) removeShards(ids ...string) {
|
|||
delete(e.shardPools, id)
|
||||
}
|
||||
|
||||
e.log.Info(logs.EngineShardHasBeenRemoved,
|
||||
e.log.Info(context.Background(), logs.EngineShardHasBeenRemoved,
|
||||
zap.String("id", id))
|
||||
}
|
||||
e.mtx.Unlock()
|
||||
|
@ -236,14 +236,14 @@ func (e *StorageEngine) removeShards(ids ...string) {
|
|||
for _, sh := range ss {
|
||||
err := sh.SetMode(mode.Disabled)
|
||||
if err != nil {
|
||||
e.log.Error(logs.EngineCouldNotChangeShardModeToDisabled,
|
||||
e.log.Error(context.Background(), logs.EngineCouldNotChangeShardModeToDisabled,
|
||||
zap.Stringer("id", sh.ID()),
|
||||
zap.Error(err),
|
||||
)
|
||||
}
|
||||
err = sh.Close()
|
||||
if err != nil {
|
||||
e.log.Error(logs.EngineCouldNotCloseRemovedShard,
|
||||
e.log.Error(context.Background(), logs.EngineCouldNotCloseRemovedShard,
|
||||
zap.Stringer("id", sh.ID()),
|
||||
zap.Error(err),
|
||||
)
|
||||
|
@ -340,7 +340,7 @@ func (e *StorageEngine) HandleNewEpoch(ctx context.Context, epoch uint64) {
|
|||
return
|
||||
case sh.NotificationChannel() <- ev:
|
||||
default:
|
||||
e.log.Debug(logs.ShardEventProcessingInProgress,
|
||||
e.log.Debug(ctx, logs.ShardEventProcessingInProgress,
|
||||
zap.Uint64("epoch", epoch), zap.Stringer("shard", sh.ID()))
|
||||
}
|
||||
}
|
||||
|
@ -369,7 +369,7 @@ func (e *StorageEngine) closeShards(deletedShards []hashedShard) error {
|
|||
eg.Go(func() error {
|
||||
err := sh.SetMode(mode.Disabled)
|
||||
if err != nil {
|
||||
e.log.Error(logs.EngineCouldNotChangeShardModeToDisabled,
|
||||
e.log.Error(context.Background(), logs.EngineCouldNotChangeShardModeToDisabled,
|
||||
zap.Stringer("id", sh.ID()),
|
||||
zap.Error(err),
|
||||
)
|
||||
|
@ -380,7 +380,7 @@ func (e *StorageEngine) closeShards(deletedShards []hashedShard) error {
|
|||
|
||||
err = sh.Close()
|
||||
if err != nil {
|
||||
e.log.Error(logs.EngineCouldNotCloseRemovedShard,
|
||||
e.log.Error(context.Background(), logs.EngineCouldNotCloseRemovedShard,
|
||||
zap.Stringer("id", sh.ID()),
|
||||
zap.Error(err),
|
||||
)
|
||||
|
@ -432,7 +432,7 @@ func (e *StorageEngine) deleteShards(ids []*shard.ID) ([]hashedShard, error) {
|
|||
delete(e.shardPools, idStr)
|
||||
}
|
||||
|
||||
e.log.Info(logs.EngineShardHasBeenRemoved,
|
||||
e.log.Info(context.Background(), logs.EngineShardHasBeenRemoved,
|
||||
zap.String("id", idStr))
|
||||
}
|
||||
|
||||
|
|
|
@ -1,14 +1,16 @@
package storagelog

import (
"context"

"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
)

// Write writes message about storage engine's operation to logger.
func Write(logger *logger.Logger, fields ...zap.Field) {
logger.Debug(logs.StorageOperation, fields...)
func Write(ctx context.Context, logger *logger.Logger, fields ...zap.Field) {
logger.Debug(ctx, logs.StorageOperation, fields...)
}

// AddressField returns logger's field for object address.
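The storagelog.Write change above is the root of the storagelog.Write(ctx, db.log, ...) call-site edits in the metabase Delete, Inhume and Put hunks that follow. A minimal, self-contained sketch of the new calling shape is shown below; the field helpers are simplified to plain zap fields rather than the real internal/log helpers.

```go
package main

import (
	"context"

	"go.uber.org/zap"
)

// write mimics the updated storagelog.Write: context first, then the logger
// and the structured fields describing the storage operation. The real helper
// forwards to a wrapper logger; here we forward straight to zap at debug level.
func write(ctx context.Context, z *zap.Logger, fields ...zap.Field) {
	z.Debug("storage operation", fields...)
}

func main() {
	z := zap.NewExample()
	// Analogue of: storagelog.Write(ctx, db.log,
	//   storagelog.AddressField(addr), storagelog.OpField("metabase PUT"))
	write(context.Background(), z,
		zap.String("address", "cnr/obj"),
		zap.String("op", "metabase PUT"),
	)
}
```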
@ -57,7 +57,7 @@ func (db *DB) openDB(mode mode.Mode) error {
return fmt.Errorf("can't create dir %s for metabase: %w", db.info.Path, err)
}

db.log.Debug(logs.MetabaseCreatedDirectoryForMetabase, zap.String("path", db.info.Path))
db.log.Debug(context.Background(), logs.MetabaseCreatedDirectoryForMetabase, zap.String("path", db.info.Path))

if db.boltOptions == nil {
opts := *bbolt.DefaultOptions

@ -78,9 +78,9 @@ func (db *DB) openBolt() error {
db.boltDB.MaxBatchDelay = db.boltBatchDelay
db.boltDB.MaxBatchSize = db.boltBatchSize

db.log.Debug(logs.MetabaseOpenedBoltDBInstanceForMetabase)
db.log.Debug(context.Background(), logs.MetabaseOpenedBoltDBInstanceForMetabase)

db.log.Debug(logs.MetabaseCheckingMetabaseVersion)
db.log.Debug(context.Background(), logs.MetabaseCheckingMetabaseVersion)
return db.boltDB.View(func(tx *bbolt.Tx) error {
// The safest way to check if the metabase is fresh is to check if it has no buckets.
// However, shard info can be present. So here we check that the number of buckets is

@ -70,7 +70,7 @@ func defaultCfg() *cfg {
},
boltBatchDelay: bbolt.DefaultMaxBatchDelay,
boltBatchSize: bbolt.DefaultMaxBatchSize,
log: &logger.Logger{Logger: zap.L()},
log: logger.NewLoggerWrapper(zap.L()),
metrics: &noopMetrics{},
}
}

@ -117,7 +117,7 @@ func (db *DB) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
if err == nil {
deleted = true
for i := range prm.addrs {
storagelog.Write(db.log,
storagelog.Write(ctx, db.log,
storagelog.AddressField(prm.addrs[i]),
storagelog.OpField("metabase DELETE"))
}

@ -205,7 +205,7 @@ func (db *DB) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
success = err == nil
if success {
for _, addr := range prm.target {
storagelog.Write(db.log,
storagelog.Write(ctx, db.log,
storagelog.AddressField(addr),
storagelog.OpField("metabase INHUME"))
}

@ -100,7 +100,7 @@ func (db *DB) Put(ctx context.Context, prm PutPrm) (res PutRes, err error) {
})
if err == nil {
success = true
storagelog.Write(db.log,
storagelog.Write(ctx, db.log,
storagelog.AddressField(objectCore.AddressOf(prm.obj)),
storagelog.OpField("metabase PUT"))
}
@ -113,7 +113,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
|
|||
})
|
||||
}
|
||||
require.NoError(t, eg.Wait())
|
||||
db.log.Info("simple objects generated")
|
||||
db.log.Info(ctx, "simple objects generated")
|
||||
eg, ctx = errgroup.WithContext(context.Background())
|
||||
eg.SetLimit(generateWorkersCount)
|
||||
// complex objects
|
||||
|
@ -137,7 +137,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
|
|||
})
|
||||
}
|
||||
require.NoError(t, eg.Wait())
|
||||
db.log.Info("complex objects generated")
|
||||
db.log.Info(ctx, "complex objects generated")
|
||||
eg, ctx = errgroup.WithContext(context.Background())
|
||||
eg.SetLimit(generateWorkersCount)
|
||||
// simple objects deleted by gc marks
|
||||
|
@ -159,7 +159,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
|
|||
})
|
||||
}
|
||||
require.NoError(t, eg.Wait())
|
||||
db.log.Info("simple objects deleted by gc marks generated")
|
||||
db.log.Info(ctx, "simple objects deleted by gc marks generated")
|
||||
eg, ctx = errgroup.WithContext(context.Background())
|
||||
eg.SetLimit(10000)
|
||||
// simple objects deleted by tombstones
|
||||
|
@ -189,7 +189,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
|
|||
})
|
||||
}
|
||||
require.NoError(t, eg.Wait())
|
||||
db.log.Info("simple objects deleted by tombstones generated")
|
||||
db.log.Info(ctx, "simple objects deleted by tombstones generated")
|
||||
eg, ctx = errgroup.WithContext(context.Background())
|
||||
eg.SetLimit(generateWorkersCount)
|
||||
// simple objects locked by locks
|
||||
|
@ -216,7 +216,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
|
|||
})
|
||||
}
|
||||
require.NoError(t, eg.Wait())
|
||||
db.log.Info("simple objects locked by locks generated")
|
||||
db.log.Info(ctx, "simple objects locked by locks generated")
|
||||
require.NoError(t, db.boltDB.Sync())
|
||||
require.NoError(t, db.Close())
|
||||
}
|
||||
|
|
|
@ -21,7 +21,7 @@ import (
|
|||
)
|
||||
|
||||
func (s *Shard) handleMetabaseFailure(stage string, err error) error {
|
||||
s.log.Error(logs.ShardMetabaseFailureSwitchingMode,
|
||||
s.log.Error(context.Background(), logs.ShardMetabaseFailureSwitchingMode,
|
||||
zap.String("stage", stage),
|
||||
zap.Stringer("mode", mode.ReadOnly),
|
||||
zap.Error(err))
|
||||
|
@ -31,7 +31,7 @@ func (s *Shard) handleMetabaseFailure(stage string, err error) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
s.log.Error(logs.ShardCantMoveShardToReadonlySwitchMode,
|
||||
s.log.Error(context.Background(), logs.ShardCantMoveShardToReadonlySwitchMode,
|
||||
zap.String("stage", stage),
|
||||
zap.Stringer("mode", mode.DegradedReadOnly),
|
||||
zap.Error(err))
|
||||
|
@ -211,7 +211,7 @@ func (s *Shard) refillMetabase(ctx context.Context) error {
|
|||
withCount := true
|
||||
totalObjects, err := s.blobStor.ObjectsCount(ctx)
|
||||
if err != nil {
|
||||
s.log.Warn(logs.EngineRefillFailedToGetObjectsCount, zap.Error(err))
|
||||
s.log.Warn(ctx, logs.EngineRefillFailedToGetObjectsCount, zap.Error(err))
|
||||
withCount = false
|
||||
}
|
||||
|
||||
|
@ -270,7 +270,7 @@ func (s *Shard) refillMetabase(ctx context.Context) error {
|
|||
func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address, descriptor []byte) error {
|
||||
obj := objectSDK.New()
|
||||
if err := obj.Unmarshal(data); err != nil {
|
||||
s.log.Warn(logs.ShardCouldNotUnmarshalObject,
|
||||
s.log.Warn(ctx, logs.ShardCouldNotUnmarshalObject,
|
||||
zap.Stringer("address", addr),
|
||||
zap.String("err", err.Error()))
|
||||
return nil
|
||||
|
@ -285,7 +285,7 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address,
|
|||
return err
|
||||
}
|
||||
if info.Removed {
|
||||
s.log.Debug(logs.ShardSkipObjectFromResyncContainerDeleted, zap.Stringer("address", addr))
|
||||
s.log.Debug(ctx, logs.ShardSkipObjectFromResyncContainerDeleted, zap.Stringer("address", addr))
|
||||
return nil
|
||||
}
|
||||
isIndexedContainer = info.Indexed
|
||||
|
@ -386,7 +386,7 @@ func (s *Shard) Close() error {
|
|||
for _, component := range components {
|
||||
if err := component.Close(); err != nil {
|
||||
lastErr = err
|
||||
s.log.Error(logs.ShardCouldNotCloseShardComponent, zap.Error(err))
|
||||
s.log.Error(context.Background(), logs.ShardCouldNotCloseShardComponent, zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -424,7 +424,7 @@ func (s *Shard) Reload(ctx context.Context, opts ...Option) error {
|
|||
ok, err := s.metaBase.Reload(c.metaOpts...)
|
||||
if err != nil {
|
||||
if errors.Is(err, meta.ErrDegradedMode) {
|
||||
s.log.Error(logs.ShardCantOpenMetabaseMoveToADegradedMode, zap.Error(err))
|
||||
s.log.Error(ctx, logs.ShardCantOpenMetabaseMoveToADegradedMode, zap.Error(err))
|
||||
_ = s.setMode(mode.DegradedReadOnly)
|
||||
}
|
||||
return err
|
||||
|
@ -440,7 +440,7 @@ func (s *Shard) Reload(ctx context.Context, opts ...Option) error {
|
|||
err = s.metaBase.Init()
|
||||
}
|
||||
if err != nil {
|
||||
s.log.Error(logs.ShardCantInitializeMetabaseMoveToADegradedreadonlyMode, zap.Error(err))
|
||||
s.log.Error(ctx, logs.ShardCantInitializeMetabaseMoveToADegradedreadonlyMode, zap.Error(err))
|
||||
_ = s.setMode(mode.DegradedReadOnly)
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -95,7 +95,7 @@ func (s *Shard) validateWritecacheDoesntContainObject(ctx context.Context, addr
|
|||
}
|
||||
_, err := s.writeCache.Head(ctx, addr)
|
||||
if err == nil {
|
||||
s.log.Warn(logs.ObjectRemovalFailureExistsInWritecache, zap.Stringer("object_address", addr))
|
||||
s.log.Warn(ctx, logs.ObjectRemovalFailureExistsInWritecache, zap.Stringer("object_address", addr))
|
||||
return fmt.Errorf("object %s must be flushed from writecache", addr)
|
||||
}
|
||||
if client.IsErrObjectNotFound(err) {
|
||||
|
@ -110,7 +110,7 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error
|
|||
|
||||
res, err := s.metaBase.StorageID(ctx, sPrm)
|
||||
if err != nil {
|
||||
s.log.Debug(logs.StorageIDRetrievalFailure,
|
||||
s.log.Debug(ctx, logs.StorageIDRetrievalFailure,
|
||||
zap.Stringer("object", addr),
|
||||
zap.String("error", err.Error()),
|
||||
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
|
||||
|
@ -130,7 +130,7 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error
|
|||
|
||||
_, err = s.blobStor.Delete(ctx, delPrm)
|
||||
if err != nil && !client.IsErrObjectNotFound(err) {
|
||||
s.log.Debug(logs.ObjectRemovalFailureBlobStor,
|
||||
s.log.Debug(ctx, logs.ObjectRemovalFailureBlobStor,
|
||||
zap.Stringer("object_address", addr),
|
||||
zap.String("error", err.Error()),
|
||||
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
|
||||
|
|
|
@ -131,7 +131,7 @@ type gcCfg struct {
|
|||
func defaultGCCfg() gcCfg {
|
||||
return gcCfg{
|
||||
removerInterval: 10 * time.Second,
|
||||
log: &logger.Logger{Logger: zap.L()},
|
||||
log: logger.NewLoggerWrapper(zap.L()),
|
||||
workerPoolInit: func(int) util.WorkerPool {
|
||||
return nil
|
||||
},
|
||||
|
@ -161,14 +161,14 @@ func (gc *gc) listenEvents(ctx context.Context) {
|
|||
for {
|
||||
select {
|
||||
case <-gc.stopChannel:
|
||||
gc.log.Warn(logs.ShardStopEventListenerByClosedStopChannel)
|
||||
gc.log.Warn(ctx, logs.ShardStopEventListenerByClosedStopChannel)
|
||||
return
|
||||
case <-ctx.Done():
|
||||
gc.log.Warn(logs.ShardStopEventListenerByContext)
|
||||
gc.log.Warn(ctx, logs.ShardStopEventListenerByContext)
|
||||
return
|
||||
case event, ok := <-gc.eventChan:
|
||||
if !ok {
|
||||
gc.log.Warn(logs.ShardStopEventListenerByClosedEventChannel)
|
||||
gc.log.Warn(ctx, logs.ShardStopEventListenerByClosedEventChannel)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -204,7 +204,7 @@ func (gc *gc) handleEvent(ctx context.Context, event Event) {
|
|||
h(runCtx, event)
|
||||
})
|
||||
if err != nil {
|
||||
gc.log.Warn(logs.ShardCouldNotSubmitGCJobToWorkerPool,
|
||||
gc.log.Warn(ctx, logs.ShardCouldNotSubmitGCJobToWorkerPool,
|
||||
zap.String("error", err.Error()),
|
||||
)
|
||||
|
||||
|
@ -222,7 +222,7 @@ func (gc *gc) releaseResources() {
|
|||
// because it is possible that we are close it earlier than stop writing.
|
||||
// It is ok to keep it opened.
|
||||
|
||||
gc.log.Debug(logs.ShardGCIsStopped)
|
||||
gc.log.Debug(context.Background(), logs.ShardGCIsStopped)
|
||||
}
|
||||
|
||||
func (gc *gc) tickRemover(ctx context.Context) {
|
||||
|
@ -263,7 +263,7 @@ func (gc *gc) stop() {
|
|||
close(gc.stopChannel)
|
||||
})
|
||||
|
||||
gc.log.Info(logs.ShardWaitingForGCWorkersToStop)
|
||||
gc.log.Info(context.Background(), logs.ShardWaitingForGCWorkersToStop)
|
||||
gc.wg.Wait()
|
||||
}
|
||||
|
||||
|
@ -286,8 +286,8 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) {
|
|||
return
|
||||
}
|
||||
|
||||
s.log.Debug(logs.ShardGCRemoveGarbageStarted)
|
||||
defer s.log.Debug(logs.ShardGCRemoveGarbageCompleted)
|
||||
s.log.Debug(ctx, logs.ShardGCRemoveGarbageStarted)
|
||||
defer s.log.Debug(ctx, logs.ShardGCRemoveGarbageCompleted)
|
||||
|
||||
buf := make([]oid.Address, 0, s.rmBatchSize)
|
||||
|
||||
|
@ -312,7 +312,7 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) {
|
|||
// (no more than s.rmBatchSize objects)
|
||||
err := s.metaBase.IterateOverGarbage(ctx, iterPrm)
|
||||
if err != nil {
|
||||
s.log.Warn(logs.ShardIteratorOverMetabaseGraveyardFailed,
|
||||
s.log.Warn(ctx, logs.ShardIteratorOverMetabaseGraveyardFailed,
|
||||
zap.String("error", err.Error()),
|
||||
)
|
||||
|
||||
|
@ -333,7 +333,7 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) {
|
|||
result.success = true
|
||||
|
||||
if err != nil {
|
||||
s.log.Warn(logs.ShardCouldNotDeleteTheObjects,
|
||||
s.log.Warn(ctx, logs.ShardCouldNotDeleteTheObjects,
|
||||
zap.String("error", err.Error()),
|
||||
)
|
||||
result.success = false
|
||||
|
@ -356,8 +356,8 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) {
|
|||
s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeRegular)
|
||||
}()
|
||||
|
||||
s.log.Debug(logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", e.(newEpoch).epoch))
|
||||
defer s.log.Debug(logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", e.(newEpoch).epoch))
|
||||
s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", e.(newEpoch).epoch))
|
||||
defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", e.(newEpoch).epoch))
|
||||
|
||||
workersCount, batchSize := s.getExpiredObjectsParameters()
|
||||
|
||||
|
@ -396,7 +396,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) {
|
|||
})
|
||||
|
||||
 	if err = errGroup.Wait(); err != nil {
-		s.log.Warn(logs.ShardIteratorOverExpiredObjectsFailed, zap.String("error", err.Error()))
+		s.log.Warn(ctx, logs.ShardIteratorOverExpiredObjectsFailed, zap.String("error", err.Error()))
 	}
 }

@@ -416,7 +416,7 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address)
 	expired, err := s.getExpiredWithLinked(ctx, expired)
 	if err != nil {
-		s.log.Warn(logs.ShardGCFailedToGetExpiredWithLinked, zap.Error(err))
+		s.log.Warn(ctx, logs.ShardGCFailedToGetExpiredWithLinked, zap.Error(err))
 		return
 	}

@@ -428,7 +428,7 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address)
 	// inhume the collected objects
 	res, err := s.metaBase.Inhume(ctx, inhumePrm)
 	if err != nil {
-		s.log.Warn(logs.ShardCouldNotInhumeTheObjects,
+		s.log.Warn(ctx, logs.ShardCouldNotInhumeTheObjects,
 			zap.String("error", err.Error()),
 		)

@@ -473,8 +473,8 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
 	epoch := e.(newEpoch).epoch
 	log := s.log.With(zap.Uint64("epoch", epoch))

-	log.Debug(logs.ShardStartedExpiredTombstonesHandling)
-	defer log.Debug(logs.ShardFinishedExpiredTombstonesHandling)
+	log.Debug(ctx, logs.ShardStartedExpiredTombstonesHandling)
+	defer log.Debug(ctx, logs.ShardFinishedExpiredTombstonesHandling)

 	const tssDeleteBatch = 50
 	tss := make([]meta.TombstonedObject, 0, tssDeleteBatch)

@@ -492,12 +492,12 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
 	})

 	for {
-		log.Debug(logs.ShardIteratingTombstones)
+		log.Debug(ctx, logs.ShardIteratingTombstones)

 		s.m.RLock()

 		if s.info.Mode.NoMetabase() {
-			s.log.Debug(logs.ShardShardIsInADegradedModeSkipCollectingExpiredTombstones)
+			s.log.Debug(ctx, logs.ShardShardIsInADegradedModeSkipCollectingExpiredTombstones)
 			s.m.RUnlock()

 			return

@@ -505,7 +505,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {

 		err = s.metaBase.IterateOverGraveyard(ctx, iterPrm)
 		if err != nil {
-			log.Error(logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
+			log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
 			s.m.RUnlock()

 			return

@@ -524,7 +524,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
 			}
 		}

-		log.Debug(logs.ShardHandlingExpiredTombstonesBatch, zap.Int("number", len(tssExp)))
+		log.Debug(ctx, logs.ShardHandlingExpiredTombstonesBatch, zap.Int("number", len(tssExp)))
 		if len(tssExp) > 0 {
 			s.expiredTombstonesCallback(ctx, tssExp)
 		}

@@ -543,8 +543,8 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) {
 		s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeLock)
 	}()

-	s.log.Debug(logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", e.(newEpoch).epoch))
-	defer s.log.Debug(logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", e.(newEpoch).epoch))
+	s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", e.(newEpoch).epoch))
+	defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", e.(newEpoch).epoch))

 	workersCount, batchSize := s.getExpiredObjectsParameters()

@@ -584,7 +584,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) {
 	})

 	if err = errGroup.Wait(); err != nil {
-		s.log.Warn(logs.ShardIteratorOverExpiredLocksFailed, zap.String("error", err.Error()))
+		s.log.Warn(ctx, logs.ShardIteratorOverExpiredLocksFailed, zap.String("error", err.Error()))
 	}
 }

@@ -645,7 +645,7 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.Tombston
 	// inhume tombstones
 	res, err := s.metaBase.Inhume(ctx, pInhume)
 	if err != nil {
-		s.log.Warn(logs.ShardCouldNotMarkTombstonesAsGarbage,
+		s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage,
 			zap.String("error", err.Error()),
 		)

@@ -668,7 +668,7 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.Tombston
 	// from graveyard
 	err = s.metaBase.DropGraves(ctx, tss)
 	if err != nil {
-		s.log.Warn(logs.ShardCouldNotDropExpiredGraveRecords, zap.Error(err))
+		s.log.Warn(ctx, logs.ShardCouldNotDropExpiredGraveRecords, zap.Error(err))
 	}
 }

@@ -680,7 +680,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
 	}
 	unlocked, err := s.metaBase.FreeLockedBy(lockers)
 	if err != nil {
-		s.log.Warn(logs.ShardFailureToUnlockObjects,
+		s.log.Warn(ctx, logs.ShardFailureToUnlockObjects,
 			zap.String("error", err.Error()),
 		)

@@ -693,7 +693,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []

 	res, err := s.metaBase.Inhume(ctx, pInhume)
 	if err != nil {
-		s.log.Warn(logs.ShardFailureToMarkLockersAsGarbage,
+		s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage,
 			zap.String("error", err.Error()),
 		)

@@ -718,7 +718,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
 func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unlocked []oid.Address) {
 	expiredUnlocked, err := s.selectExpired(ctx, epoch, unlocked)
 	if err != nil {
-		s.log.Warn(logs.ShardFailureToGetExpiredUnlockedObjects, zap.Error(err))
+		s.log.Warn(ctx, logs.ShardFailureToGetExpiredUnlockedObjects, zap.Error(err))
 		return
 	}

@@ -737,7 +737,7 @@ func (s *Shard) HandleDeletedLocks(lockers []oid.Address) {

 	_, err := s.metaBase.FreeLockedBy(lockers)
 	if err != nil {
-		s.log.Warn(logs.ShardFailureToUnlockObjects,
+		s.log.Warn(context.Background(), logs.ShardFailureToUnlockObjects,
 			zap.String("error", err.Error()),
 		)

@@ -756,8 +756,8 @@ func (s *Shard) collectExpiredMetrics(ctx context.Context, e Event) {

 	epoch := e.(newEpoch).epoch

-	s.log.Debug(logs.ShardGCCollectingExpiredMetricsStarted, zap.Uint64("epoch", epoch))
-	defer s.log.Debug(logs.ShardGCCollectingExpiredMetricsCompleted, zap.Uint64("epoch", epoch))
+	s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsStarted, zap.Uint64("epoch", epoch))
+	defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsCompleted, zap.Uint64("epoch", epoch))

 	s.collectExpiredContainerSizeMetrics(ctx, epoch)
 	s.collectExpiredContainerCountMetrics(ctx, epoch)

@@ -766,7 +766,7 @@ func (s *Shard) collectExpiredMetrics(ctx context.Context, e Event) {
 func (s *Shard) collectExpiredContainerSizeMetrics(ctx context.Context, epoch uint64) {
 	ids, err := s.metaBase.ZeroSizeContainers(ctx)
 	if err != nil {
-		s.log.Warn(logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err))
+		s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err))
 		return
 	}
 	if len(ids) == 0 {

@@ -778,7 +778,7 @@ func (s *Shard) collectExpiredContainerSizeMetrics(ctx context.Context, epoch ui
 func (s *Shard) collectExpiredContainerCountMetrics(ctx context.Context, epoch uint64) {
 	ids, err := s.metaBase.ZeroCountContainers(ctx)
 	if err != nil {
-		s.log.Warn(logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err))
+		s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err))
 		return
 	}
 	if len(ids) == 0 {
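Note (not part of the diff): the change above is mechanical — every shard GC log call gains a context as its first argument, and HandleDeletedLocks, which has no caller context, passes context.Background(). A minimal sketch of what such a ctx-aware wrapper around zap could look like is shown below; the internal field, the fieldsFromContext helper, and the trace-ID handling are assumptions for illustration only, not the actual frostfs-node logger implementation.

// Illustrative sketch only: a zap wrapper whose methods accept a context so
// that request-scoped fields (for example a trace ID) can be attached at
// every call site. Internals are assumed, not taken from the repository.
package logger

import (
    "context"

    "go.uber.org/zap"
)

type Logger struct {
    z *zap.Logger // assumed internal layout
}

// Info logs msg together with any fields derived from ctx.
func (l *Logger) Info(ctx context.Context, msg string, fields ...zap.Field) {
    l.z.Info(msg, append(fieldsFromContext(ctx), fields...)...)
}

// Warn does the same at the warning level; Debug and Error follow the same pattern.
func (l *Logger) Warn(ctx context.Context, msg string, fields ...zap.Field) {
    l.z.Warn(msg, append(fieldsFromContext(ctx), fields...)...)
}

// fieldsFromContext is a hypothetical helper: a real implementation would
// read request-scoped values (such as a trace ID) out of ctx and convert
// them into zap fields.
func fieldsFromContext(ctx context.Context) []zap.Field {
    // For example:
    //   if id, ok := ctx.Value(traceIDKey{}).(string); ok {
    //       return []zap.Field{zap.String("trace_id", id)}
    //   }
    _ = ctx
    return nil
}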
@@ -144,7 +144,7 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta
 			return nil, false, logicerr.Wrap(new(apistatus.ObjectNotFound))
 		}
 	} else {
-		s.log.Warn(logs.ShardFetchingObjectWithoutMeta, zap.Stringer("addr", addr))
+		s.log.Warn(ctx, logs.ShardFetchingObjectWithoutMeta, zap.Stringer("addr", addr))
 	}

 	if s.hasWriteCache() {

@@ -153,12 +153,12 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta
 		return res, false, err
 	}
 	if client.IsErrObjectNotFound(err) {
-		s.log.Debug(logs.ShardObjectIsMissingInWritecache,
+		s.log.Debug(ctx, logs.ShardObjectIsMissingInWritecache,
 			zap.Stringer("addr", addr),
 			zap.Bool("skip_meta", skipMeta),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 	} else {
-		s.log.Error(logs.ShardFailedToFetchObjectFromWritecache,
+		s.log.Error(ctx, logs.ShardFailedToFetchObjectFromWritecache,
 			zap.Error(err),
 			zap.Stringer("addr", addr),
 			zap.Bool("skip_meta", skipMeta),
@@ -5,7 +5,6 @@ import (
 	"fmt"

 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
 	"github.com/mr-tron/base58"
 	"go.uber.org/zap"
 )

@@ -50,7 +49,7 @@ func (s *Shard) UpdateID() (err error) {
 		s.writeCache.GetMetrics().SetShardID(shardID)
 	}

-	s.log = &logger.Logger{Logger: s.log.With(zap.Stringer("shard_id", s.info.ID))}
+	s.log = s.log.With(zap.Stringer("shard_id", s.info.ID))
 	s.metaBase.SetLogger(s.log)
 	s.blobStor.SetLogger(s.log)
 	if s.hasWriteCache() {
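Note (not part of the diff): in UpdateID the re-wrapping of the zap logger disappears and the result of With is assigned to s.log directly, which implies the wrapper's With now returns the wrapper type itself. A possible shape, continuing the hypothetical Logger sketch above (same package and imports; an assumption, not the repository code):

// Assumed shape of With for the hypothetical wrapper sketched earlier:
// it returns *Logger, so call sites can write `s.log = s.log.With(...)`
// instead of rebuilding the struct by hand.
func (l *Logger) With(fields ...zap.Field) *Logger {
    return &Logger{z: l.z.With(fields...)}
}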
@@ -109,7 +109,7 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
 		return InhumeRes{}, ErrLockObjectRemoval
 	}

-	s.log.Debug(logs.ShardCouldNotMarkObjectToDeleteInMetabase,
+	s.log.Debug(ctx, logs.ShardCouldNotMarkObjectToDeleteInMetabase,
 		zap.String("error", err.Error()),
 		zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
 	)
@@ -122,7 +122,7 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) {

 		sRes, err := s.metaBase.Select(ctx, sPrm) // consider making List in metabase
 		if err != nil {
-			s.log.Debug(logs.ShardCantSelectAllObjects,
+			s.log.Debug(ctx, logs.ShardCantSelectAllObjects,
 				zap.Stringer("cid", lst[i]),
 				zap.String("error", err.Error()),
 				zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
@@ -30,7 +30,7 @@ func TestShard_Lock(t *testing.T) {
 	rootPath := t.TempDir()
 	opts := []Option{
 		WithID(NewIDFromBytes([]byte{})),
-		WithLogger(&logger.Logger{Logger: zap.NewNop()}),
+		WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
 		WithBlobStorOptions(
 			blobstor.WithStorages([]blobstor.SubStorage{
 				{
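Note (not part of the diff): call sites that used to construct the wrapper with a struct literal now go through a constructor. Its definition is not shown in this diff; a minimal version consistent with the sketch above might look like the following (hypothetical):

// Assumed constructor: it hides the wrapper's internal field so tests and
// default configurations no longer depend on the struct layout.
func NewLoggerWrapper(z *zap.Logger) *Logger {
    return &Logger{z: z}
}

Used as in the test above: WithLogger(logger.NewLoggerWrapper(zap.NewNop())).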
@@ -1,6 +1,8 @@
 package shard

 import (
+	"context"
+
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"

@@ -26,7 +28,7 @@ func (s *Shard) SetMode(m mode.Mode) error {
 }

 func (s *Shard) setMode(m mode.Mode) error {
-	s.log.Info(logs.ShardSettingShardMode,
+	s.log.Info(context.Background(), logs.ShardSettingShardMode,
 		zap.Stringer("old_mode", s.info.Mode),
 		zap.Stringer("new_mode", m))

@@ -67,7 +69,7 @@ func (s *Shard) setMode(m mode.Mode) error {
 	s.info.Mode = m
 	s.metricsWriter.SetMode(s.info.Mode)

-	s.log.Info(logs.ShardShardModeSetSuccessfully,
+	s.log.Info(context.Background(), logs.ShardShardModeSetSuccessfully,
 		zap.Stringer("mode", s.info.Mode))
 	return nil
 }
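Note (not part of the diff): setMode logs with context.Background() because SetMode carries no context parameter of its own; the same fallback appears elsewhere in this diff (for example rebuilder.Stop, reportFlushError, HandleDeletedLocks). The snippet illustrates the two call styles against the hypothetical wrapper from the sketches above; the function and message strings are illustrative only.

// Illustration only: request-scoped paths thread the caller's ctx through,
// while context-free paths fall back to context.Background(), which simply
// carries no request-scoped fields.
func illustrateLogging(ctx context.Context, log *Logger) {
    log.Info(ctx, "shard mode set successfully")         // caller ctx available
    log.Info(context.Background(), "setting shard mode") // no caller ctx
}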
@@ -75,7 +75,7 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
 	}
 	if err != nil || !tryCache {
 		if err != nil {
-			s.log.Debug(logs.ShardCantPutObjectToTheWritecacheTryingBlobstor,
+			s.log.Debug(ctx, logs.ShardCantPutObjectToTheWritecacheTryingBlobstor,
 				zap.String("err", err.Error()))
 		}
@@ -102,11 +102,11 @@ func runRebuild(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *lo
 		return
 	default:
 	}
-	log.Info(logs.BlobstoreRebuildStarted)
+	log.Info(ctx, logs.BlobstoreRebuildStarted)
 	if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, limiter, fillPercent); err != nil {
-		log.Warn(logs.FailedToRebuildBlobstore, zap.Error(err))
+		log.Warn(ctx, logs.FailedToRebuildBlobstore, zap.Error(err))
 	} else {
-		log.Info(logs.BlobstoreRebuildCompletedSuccessfully)
+		log.Info(ctx, logs.BlobstoreRebuildCompletedSuccessfully)
 	}
 }

@@ -138,7 +138,7 @@ func (r *rebuilder) Stop(log *logger.Logger) {
 	r.wg.Wait()
 	r.cancel = nil
 	r.done = nil
-	log.Info(logs.BlobstoreRebuildStopped)
+	log.Info(context.Background(), logs.BlobstoreRebuildStopped)
 }

 var errMBIsNotAvailable = errors.New("metabase is not available")
@@ -103,7 +103,7 @@ type cfg struct {
 func defaultCfg() *cfg {
 	return &cfg{
 		rmBatchSize: 100,
-		log: &logger.Logger{Logger: zap.L()},
+		log: logger.NewLoggerWrapper(zap.L()),
 		gcCfg: defaultGCCfg(),
 		reportErrorFunc: func(string, string, error) {},
 		zeroSizeContainersCallback: func(context.Context, []cid.ID) {},

@@ -401,7 +401,7 @@ func (s *Shard) updateMetrics(ctx context.Context) {

 	cc, err := s.metaBase.ObjectCounters()
 	if err != nil {
-		s.log.Warn(logs.ShardMetaObjectCounterRead,
+		s.log.Warn(ctx, logs.ShardMetaObjectCounterRead,
 			zap.Error(err),
 		)

@@ -414,7 +414,7 @@ func (s *Shard) updateMetrics(ctx context.Context) {

 	cnrList, err := s.metaBase.Containers(ctx)
 	if err != nil {
-		s.log.Warn(logs.ShardMetaCantReadContainerList, zap.Error(err))
+		s.log.Warn(ctx, logs.ShardMetaCantReadContainerList, zap.Error(err))
 		return
 	}

@@ -423,7 +423,7 @@ func (s *Shard) updateMetrics(ctx context.Context) {
 	for i := range cnrList {
 		size, err := s.metaBase.ContainerSize(cnrList[i])
 		if err != nil {
-			s.log.Warn(logs.ShardMetaCantReadContainerSize,
+			s.log.Warn(ctx, logs.ShardMetaCantReadContainerSize,
 				zap.String("cid", cnrList[i].EncodeToString()),
 				zap.Error(err))
 			continue

@@ -436,7 +436,7 @@ func (s *Shard) updateMetrics(ctx context.Context) {

 	contCount, err := s.metaBase.ContainerCounters(ctx)
 	if err != nil {
-		s.log.Warn(logs.FailedToGetContainerCounters, zap.Error(err))
+		s.log.Warn(ctx, logs.FailedToGetContainerCounters, zap.Error(err))
 		return
 	}
 	for contID, count := range contCount.Counts {
@@ -124,12 +124,12 @@ func (s *Shard) SealWriteCache(ctx context.Context, p SealWriteCachePrm) error {
 		close(started)
 		defer cleanup()

-		s.log.Info(logs.StartedWritecacheSealAsync)
+		s.log.Info(ctx, logs.StartedWritecacheSealAsync)
 		if err := s.writeCache.Seal(ctx, prm); err != nil {
-			s.log.Warn(logs.FailedToSealWritecacheAsync, zap.Error(err))
+			s.log.Warn(ctx, logs.FailedToSealWritecacheAsync, zap.Error(err))
 			return
 		}
-		s.log.Info(logs.WritecacheSealCompletedAsync)
+		s.log.Info(ctx, logs.WritecacheSealCompletedAsync)
 	}()
 	select {
 	case <-ctx.Done():
@@ -55,7 +55,7 @@ func New(opts ...Option) Cache {
 		counter: fstree.NewSimpleCounter(),

 		options: options{
-			log: &logger.Logger{Logger: zap.NewNop()},
+			log: logger.NewLoggerWrapper(zap.NewNop()),
 			maxObjectSize: defaultMaxObjectSize,
 			workersCount: defaultFlushWorkersCount,
 			maxCacheSize: defaultMaxCacheSize,
@@ -46,7 +46,7 @@ func (c *cache) Delete(ctx context.Context, addr oid.Address) error {
 		storageType = StorageTypeFSTree
 		_, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr})
 		if err == nil {
-			storagelog.Write(c.log,
+			storagelog.Write(ctx, c.log,
 				storagelog.AddressField(addr.EncodeToString()),
 				storagelog.StorageTypeField(wcStorageType),
 				storagelog.OpField("fstree DELETE"),
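Note (not part of the diff): storagelog.Write now takes the context as its first argument as well, so write-cache operations carry the same request-scoped fields as ordinary log lines. The helper's body is not shown in this diff; an assumed shape consistent with the call site above (message text, level, and extra fields are guesses):

// Assumed sketch of the ctx-threading storage log helper: it only forwards
// the context and the supplied fields to the ctx-aware logger. The actual
// storagelog package may differ.
package storagelog

import (
    "context"

    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
    "go.uber.org/zap"
)

// Write records a storage-level event through the shared logger.
func Write(ctx context.Context, l *logger.Logger, fields ...zap.Field) {
    l.Debug(ctx, "storage event", fields...) // message text is illustrative
}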
@@ -80,7 +80,7 @@ func (c *cache) pushToFlushQueue(ctx context.Context, fl *flushLimiter) {
 			}
 		})
 		if err != nil {
-			c.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration, zap.Error(err))
+			c.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, zap.Error(err))
 		}

 		c.modeMtx.RUnlock()

@@ -130,7 +130,7 @@ func (c *cache) reportFlushError(msg string, addr string, err error) {
 	if c.reportError != nil {
 		c.reportError(msg, err)
 	} else {
-		c.log.Error(msg,
+		c.log.Error(context.Background(), msg,
 			zap.String("address", addr),
 			zap.Error(err))
 	}
@@ -40,7 +40,7 @@ func TestFlush(t *testing.T) {
 	cnt := &atomic.Uint32{}
 	return WithReportErrorFunc(func(msg string, err error) {
 		cnt.Add(1)
-		testlogger.Warn(msg, zap.Uint32("error_count", cnt.Load()), zap.Error(err))
+		testlogger.Warn(context.Background(), msg, zap.Uint32("error_count", cnt.Load()), zap.Error(err))
 	}), cnt
 }
Some files were not shown because too many files have changed in this diff.