Logger with context #1437

Merged
fyrchik merged 8 commits from dstepanov-yadro/frostfs-node:feat/log_with_ctx into master 2024-11-20 15:43:53 +00:00
302 changed files with 2074 additions and 1924 deletions

View file

@ -1,6 +1,7 @@
package main
import (
"context"
"os"
"os/signal"
"syscall"
@ -46,7 +47,7 @@ func reloadConfig() error {
return logPrm.Reload()
}
func watchForSignal(cancel func()) {
func watchForSignal(ctx context.Context, cancel func()) {
ch := make(chan os.Signal, 1)
signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
@ -58,49 +59,49 @@ func watchForSignal(cancel func()) {
// signals causing application to shut down should have priority over
// reconfiguration signal
case <-ch:
log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
cancel()
shutdown()
log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
shutdown(ctx)
log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
return
case err := <-intErr: // internal application error
log.Info(logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
log.Info(ctx, logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
cancel()
shutdown()
shutdown(ctx)
return
default:
// block until any signal is received
select {
case <-ch:
log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
cancel()
shutdown()
log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
shutdown(ctx)
log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
return
case err := <-intErr: // internal application error
log.Info(logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
log.Info(ctx, logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
cancel()
shutdown()
shutdown(ctx)
return
case <-sighupCh:
log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
if !innerRing.CompareAndSwapHealthStatus(control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
log.Info(logs.FrostFSNodeSIGHUPSkip)
log.Info(ctx, logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
if !innerRing.CompareAndSwapHealthStatus(ctx, control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
log.Info(ctx, logs.FrostFSNodeSIGHUPSkip)
break
}
err := reloadConfig()
if err != nil {
log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err))
}
pprofCmp.reload()
metricsCmp.reload()
log.Info(logs.FrostFSIRReloadExtraWallets)
pprofCmp.reload(ctx)
metricsCmp.reload(ctx)
log.Info(ctx, logs.FrostFSIRReloadExtraWallets)
err = innerRing.SetExtraWallets(cfg)
if err != nil {
log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err))
}
innerRing.CompareAndSwapHealthStatus(control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
innerRing.CompareAndSwapHealthStatus(ctx, control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
}
}
}
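
Every log call above gains a leading context.Context, so request-scoped data (a trace ID in particular) can travel into the log entry. A minimal sketch of the wrapper shape these call sites assume, including the NewLoggerWrapper constructor that the tests further down rely on; the real implementation lives in pkg/util/logger and may differ:

package logger

import (
	"context"

	"go.uber.org/zap"
)

// Logger is a thin wrapper over *zap.Logger whose methods take a context
// as the first argument (assumed shape, not the actual implementation).
type Logger struct {
	z *zap.Logger
}

// NewLoggerWrapper wraps an existing *zap.Logger, e.g. zaptest.NewLogger(t).
func NewLoggerWrapper(z *zap.Logger) *Logger {
	return &Logger{z: z}
}

// With returns a logger with the given fields attached to every entry.
func (l *Logger) With(fields ...zap.Field) *Logger {
	return &Logger{z: l.z.With(fields...)}
}

// Info logs at Info level, appending any fields derived from ctx
// (for example a trace ID) via a hypothetical fieldsFromContext helper.
func (l *Logger) Info(ctx context.Context, msg string, fields ...zap.Field) {
	l.z.Info(msg, append(fields, fieldsFromContext(ctx)...)...)
}

func fieldsFromContext(ctx context.Context) []zap.Field {
	if id, ok := ctx.Value(traceIDKey{}).(string); ok {
		return []zap.Field{zap.String("trace_id", id)}
	}
	return nil
}

type traceIDKey struct{}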

View file

@ -1,6 +1,7 @@
package main
import (
"context"
"net/http"
"time"
@ -24,8 +25,8 @@ const (
shutdownTimeoutKeyPostfix = ".shutdown_timeout"
)
func (c *httpComponent) init() {
log.Info("init " + c.name)
func (c *httpComponent) init(ctx context.Context) {
log.Info(ctx, "init "+c.name)
c.enabled = cfg.GetBool(c.name + enabledKeyPostfix)
c.address = cfg.GetString(c.name + addressKeyPostfix)
c.shutdownDur = cfg.GetDuration(c.name + shutdownTimeoutKeyPostfix)
@ -39,14 +40,14 @@ func (c *httpComponent) init() {
httputil.WithShutdownTimeout(c.shutdownDur),
)
} else {
log.Info(c.name + " is disabled, skip")
log.Info(ctx, c.name+" is disabled, skip")
c.srv = nil
}
}
func (c *httpComponent) start() {
func (c *httpComponent) start(ctx context.Context) {
if c.srv != nil {
log.Info("start " + c.name)
log.Info(ctx, "start "+c.name)
wg.Add(1)
go func() {
defer wg.Done()
@ -55,10 +56,10 @@ func (c *httpComponent) start() {
}
}
func (c *httpComponent) shutdown() error {
func (c *httpComponent) shutdown(ctx context.Context) error {
if c.srv != nil {
log.Info("shutdown " + c.name)
return c.srv.Shutdown()
log.Info(ctx, "shutdown "+c.name)
return c.srv.Shutdown(ctx)
}
return nil
}
@ -70,17 +71,17 @@ func (c *httpComponent) needReload() bool {
return enabled != c.enabled || enabled && (address != c.address || dur != c.shutdownDur)
}
func (c *httpComponent) reload() {
log.Info("reload " + c.name)
func (c *httpComponent) reload(ctx context.Context) {
log.Info(ctx, "reload "+c.name)
if c.needReload() {
log.Info(c.name + " config updated")
if err := c.shutdown(); err != nil {
log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
log.Info(ctx, c.name+" config updated")
if err := c.shutdown(ctx); err != nil {
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
zap.String("error", err.Error()),
)
} else {
c.init()
c.start()
c.init(ctx)
c.start(ctx)
}
}
}
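
shutdown now receives the caller's context and forwards it to the HTTP server, so cancelling that context bounds the graceful shutdown. A small sketch of the same idea with the standard library (the component itself wraps pkg/util/http, whose Shutdown accepts a context in the same way):

package main

import (
	"context"
	"log"
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{Addr: "127.0.0.1:9090"}
	go func() { _ = srv.ListenAndServe() }()

	// Bound graceful shutdown by the caller's context; once the deadline
	// passes, Shutdown returns and remaining connections are dropped.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	if err := srv.Shutdown(ctx); err != nil {
		log.Printf("could not shutdown HTTP server: %v", err)
	}
}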

View file

@ -87,48 +87,48 @@ func main() {
ctx, cancel := context.WithCancel(context.Background())
pprofCmp = newPprofComponent()
pprofCmp.init()
pprofCmp.init(ctx)
metricsCmp = newMetricsComponent()
metricsCmp.init()
metricsCmp.init(ctx)
audit.Store(cfg.GetBool("audit.enabled"))
innerRing, err = innerring.New(ctx, log, cfg, intErr, metrics, cmode, audit)
exitErr(err)
pprofCmp.start()
metricsCmp.start()
pprofCmp.start(ctx)
metricsCmp.start(ctx)
// start inner ring
err = innerRing.Start(ctx, intErr)
exitErr(err)
log.Info(logs.CommonApplicationStarted,
log.Info(ctx, logs.CommonApplicationStarted,
zap.String("version", misc.Version))
watchForSignal(cancel)
watchForSignal(ctx, cancel)
<-ctx.Done() // graceful shutdown
log.Debug(logs.FrostFSNodeWaitingForAllProcessesToStop)
log.Debug(ctx, logs.FrostFSNodeWaitingForAllProcessesToStop)
wg.Wait()
log.Info(logs.FrostFSIRApplicationStopped)
log.Info(ctx, logs.FrostFSIRApplicationStopped)
}
func shutdown() {
innerRing.Stop()
if err := metricsCmp.shutdown(); err != nil {
log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
func shutdown(ctx context.Context) {
innerRing.Stop(ctx)
if err := metricsCmp.shutdown(ctx); err != nil {
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
zap.String("error", err.Error()),
)
}
if err := pprofCmp.shutdown(); err != nil {
log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
if err := pprofCmp.shutdown(ctx); err != nil {
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
zap.String("error", err.Error()),
)
}
if err := sdnotify.ClearStatus(); err != nil {
log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err))
}
}

View file

@ -1,6 +1,7 @@
package main
import (
"context"
"runtime"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@ -28,8 +29,8 @@ func newPprofComponent() *pprofComponent {
}
}
func (c *pprofComponent) init() {
c.httpComponent.init()
func (c *pprofComponent) init(ctx context.Context) {
c.httpComponent.init(ctx)
if c.enabled {
c.blockRate = cfg.GetInt(pprofBlockRateKey)
@ -51,17 +52,17 @@ func (c *pprofComponent) needReload() bool {
c.enabled && (c.blockRate != blockRate || c.mutexRate != mutexRate)
}
func (c *pprofComponent) reload() {
log.Info("reload " + c.name)
func (c *pprofComponent) reload(ctx context.Context) {
log.Info(ctx, "reload "+c.name)
if c.needReload() {
log.Info(c.name + " config updated")
if err := c.shutdown(); err != nil {
log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
log.Info(ctx, c.name+" config updated")
if err := c.shutdown(ctx); err != nil {
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
zap.String("error", err.Error()))
return
}
c.init()
c.start()
c.init(ctx)
c.start(ctx)
}
}

View file

@ -28,7 +28,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) {
common.ExitOnErr(cmd, common.Errf("invalid address argument: %w", err))
blz := openBlobovnicza(cmd)
defer blz.Close()
defer blz.Close(cmd.Context())
var prm blobovnicza.GetPrm
prm.SetAddress(addr)

View file

@ -32,7 +32,7 @@ func listFunc(cmd *cobra.Command, _ []string) {
}
blz := openBlobovnicza(cmd)
defer blz.Close()
defer blz.Close(cmd.Context())
err := blobovnicza.IterateAddresses(context.Background(), blz, wAddr)
common.ExitOnErr(cmd, common.Errf("blobovnicza iterator failure: %w", err))

View file

@ -27,7 +27,7 @@ func openBlobovnicza(cmd *cobra.Command) *blobovnicza.Blobovnicza {
blobovnicza.WithPath(vPath),
blobovnicza.WithReadOnly(true),
)
common.ExitOnErr(cmd, blz.Open())
common.ExitOnErr(cmd, blz.Open(cmd.Context()))
return blz
}
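
In these CLI commands the context comes from cobra: the root command can be run with ExecuteContext, and cmd.Context() hands that context to Open/Close above. A hedged sketch of the wiring (command name and the open helper are illustrative):

package main

import (
	"context"
	"os"
	"os/signal"

	"github.com/spf13/cobra"
)

// open stands in for blz.Open(cmd.Context()) / db.Close(cmd.Context()).
func open(ctx context.Context) error {
	_ = ctx
	return nil
}

func main() {
	cmd := &cobra.Command{
		Use: "inspect",
		RunE: func(cmd *cobra.Command, _ []string) error {
			return open(cmd.Context())
		},
	}

	// The root context is cancelled on Ctrl+C and reaches every RunE
	// through cmd.Context().
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
	defer stop()
	if err := cmd.ExecuteContext(ctx); err != nil {
		os.Exit(1)
	}
}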

View file

@ -31,7 +31,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) {
common.ExitOnErr(cmd, common.Errf("invalid address argument: %w", err))
db := openMeta(cmd)
defer db.Close()
defer db.Close(cmd.Context())
storageID := meta.StorageIDPrm{}
storageID.SetAddress(addr)

View file

@ -19,7 +19,7 @@ func init() {
func listGarbageFunc(cmd *cobra.Command, _ []string) {
db := openMeta(cmd)
defer db.Close()
defer db.Close(cmd.Context())
var garbPrm meta.GarbageIterationPrm
garbPrm.SetHandler(

View file

@ -19,7 +19,7 @@ func init() {
func listGraveyardFunc(cmd *cobra.Command, _ []string) {
db := openMeta(cmd)
defer db.Close()
defer db.Close(cmd.Context())
var gravePrm meta.GraveyardIterationPrm
gravePrm.SetHandler(

View file

@ -397,16 +397,16 @@ type internals struct {
}
// starts node's maintenance.
func (c *cfg) startMaintenance() {
func (c *cfg) startMaintenance(ctx context.Context) {
c.isMaintenance.Store(true)
c.cfgNetmap.state.setControlNetmapStatus(control.NetmapStatus_MAINTENANCE)
c.log.Info(logs.FrostFSNodeStartedLocalNodesMaintenance)
c.log.Info(ctx, logs.FrostFSNodeStartedLocalNodesMaintenance)
}
// stops node's maintenance.
func (c *internals) stopMaintenance() {
func (c *internals) stopMaintenance(ctx context.Context) {
if c.isMaintenance.CompareAndSwap(true, false) {
c.log.Info(logs.FrostFSNodeStoppedLocalNodesMaintenance)
c.log.Info(ctx, logs.FrostFSNodeStoppedLocalNodesMaintenance)
}
}
@ -705,7 +705,7 @@ func initCfg(appCfg *config.Config) *cfg {
log, err := logger.NewLogger(logPrm)
fatalOnErr(err)
if loggerconfig.ToLokiConfig(appCfg).Enabled {
log.Logger = log.Logger.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
log.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(appCfg))
return lokiCore
}))
@ -1089,7 +1089,7 @@ func (c *cfg) LocalAddress() network.AddressGroup {
func initLocalStorage(ctx context.Context, c *cfg) {
ls := engine.New(c.engineOpts()...)
addNewEpochAsyncNotificationHandler(c, func(ev event.Event) {
addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, ev event.Event) {
ls.HandleNewEpoch(ctx, ev.(netmap2.NewEpoch).EpochNumber())
})
@ -1103,10 +1103,10 @@ func initLocalStorage(ctx context.Context, c *cfg) {
shard.WithTombstoneSource(c.createTombstoneSource()),
shard.WithContainerInfoProvider(c.createContainerInfoProvider(ctx)))...)
if err != nil {
c.log.Error(logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err))
c.log.Error(ctx, logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err))
} else {
shardsAttached++
c.log.Info(logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id))
c.log.Info(ctx, logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id))
}
}
if shardsAttached == 0 {
@ -1116,23 +1116,23 @@ func initLocalStorage(ctx context.Context, c *cfg) {
c.cfgObject.cfgLocalStorage.localStorage = ls
c.onShutdown(func() {
c.log.Info(logs.FrostFSNodeClosingComponentsOfTheStorageEngine)
c.log.Info(ctx, logs.FrostFSNodeClosingComponentsOfTheStorageEngine)
err := ls.Close(context.WithoutCancel(ctx))
if err != nil {
c.log.Info(logs.FrostFSNodeStorageEngineClosingFailure,
c.log.Info(ctx, logs.FrostFSNodeStorageEngineClosingFailure,
zap.String("error", err.Error()),
)
} else {
c.log.Info(logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
c.log.Info(ctx, logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
}
})
}
func initAccessPolicyEngine(_ context.Context, c *cfg) {
func initAccessPolicyEngine(ctx context.Context, c *cfg) {
var localOverrideDB chainbase.LocalOverrideDatabase
if nodeconfig.PersistentPolicyRules(c.appCfg).Path() == "" {
c.log.Warn(logs.FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed)
c.log.Warn(ctx, logs.FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed)
localOverrideDB = chainbase.NewInmemoryLocalOverrideDatabase()
} else {
localOverrideDB = chainbase.NewBoltLocalOverrideDatabase(
@ -1157,7 +1157,7 @@ func initAccessPolicyEngine(_ context.Context, c *cfg) {
c.onShutdown(func() {
if err := ape.LocalOverrideDatabaseCore().Close(); err != nil {
c.log.Warn(logs.FrostFSNodeAccessPolicyEngineClosingFailure,
c.log.Warn(ctx, logs.FrostFSNodeAccessPolicyEngineClosingFailure,
zap.Error(err),
)
}
@ -1206,10 +1206,10 @@ func (c *cfg) setContractNodeInfo(ni *netmap.NodeInfo) {
c.cfgNetmap.state.setNodeInfo(ni)
}
func (c *cfg) updateContractNodeInfo(epoch uint64) {
func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) {
ni, err := c.netmapLocalNodeState(epoch)
if err != nil {
c.log.Error(logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
zap.Uint64("epoch", epoch),
zap.String("error", err.Error()))
return
@ -1221,19 +1221,19 @@ func (c *cfg) updateContractNodeInfo(epoch uint64) {
// bootstrapWithState calls "addPeer" method of the Sidechain Netmap contract
// with the binary-encoded information from the current node's configuration.
// The state is set using the provided setter which MUST NOT be nil.
func (c *cfg) bootstrapWithState(stateSetter func(*netmap.NodeInfo)) error {
func (c *cfg) bootstrapWithState(ctx context.Context, stateSetter func(*netmap.NodeInfo)) error {
ni := c.cfgNodeInfo.localInfo
stateSetter(&ni)
prm := nmClient.AddPeerPrm{}
prm.SetNodeInfo(ni)
return c.cfgNetmap.wrapper.AddPeer(prm)
return c.cfgNetmap.wrapper.AddPeer(ctx, prm)
}
// bootstrapOnline calls cfg.bootstrapWithState with "online" state.
func bootstrapOnline(c *cfg) error {
return c.bootstrapWithState(func(ni *netmap.NodeInfo) {
func bootstrapOnline(ctx context.Context, c *cfg) error {
return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) {
ni.SetStatus(netmap.Online)
})
}
@ -1241,21 +1241,21 @@ func bootstrapOnline(c *cfg) error {
// bootstrap calls bootstrapWithState with:
// - "maintenance" state if maintenance is in progress on the current node
// - "online", otherwise
func (c *cfg) bootstrap() error {
func (c *cfg) bootstrap(ctx context.Context) error {
// switch to online except when under maintenance
st := c.cfgNetmap.state.controlNetmapStatus()
if st == control.NetmapStatus_MAINTENANCE {
c.log.Info(logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
return c.bootstrapWithState(func(ni *netmap.NodeInfo) {
c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) {
ni.SetStatus(netmap.Maintenance)
})
}
c.log.Info(logs.FrostFSNodeBootstrappingWithOnlineState,
c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithOnlineState,
zap.Stringer("previous", st),
)
return bootstrapOnline(c)
return bootstrapOnline(ctx, c)
}
// needBootstrap checks if local node should be registered in network on bootup.
@ -1280,19 +1280,19 @@ func (c *cfg) signalWatcher(ctx context.Context) {
// signals causing application to shut down should have priority over
// reconfiguration signal
case <-ch:
c.log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
c.log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
c.shutdown()
c.shutdown(ctx)
c.log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
c.log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
return
case err := <-c.internalErr: // internal application error
c.log.Warn(logs.FrostFSNodeInternalApplicationError,
c.log.Warn(ctx, logs.FrostFSNodeInternalApplicationError,
zap.String("message", err.Error()))
c.shutdown()
c.shutdown(ctx)
c.log.Info(logs.FrostFSNodeInternalErrorProcessingIsComplete)
c.log.Info(ctx, logs.FrostFSNodeInternalErrorProcessingIsComplete)
return
default:
// block until any signal is received
@ -1300,19 +1300,19 @@ func (c *cfg) signalWatcher(ctx context.Context) {
case <-sighupCh:
c.reloadConfig(ctx)
case <-ch:
c.log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
c.log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
c.shutdown()
c.shutdown(ctx)
c.log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
c.log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
return
case err := <-c.internalErr: // internal application error
c.log.Warn(logs.FrostFSNodeInternalApplicationError,
c.log.Warn(ctx, logs.FrostFSNodeInternalApplicationError,
zap.String("message", err.Error()))
c.shutdown()
c.shutdown(ctx)
c.log.Info(logs.FrostFSNodeInternalErrorProcessingIsComplete)
c.log.Info(ctx, logs.FrostFSNodeInternalErrorProcessingIsComplete)
return
}
}
@ -1320,17 +1320,17 @@ func (c *cfg) signalWatcher(ctx context.Context) {
}
func (c *cfg) reloadConfig(ctx context.Context) {
c.log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
c.log.Info(ctx, logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
if !c.compareAndSwapHealthStatus(control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
c.log.Info(logs.FrostFSNodeSIGHUPSkip)
if !c.compareAndSwapHealthStatus(ctx, control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
c.log.Info(ctx, logs.FrostFSNodeSIGHUPSkip)
return
}
defer c.compareAndSwapHealthStatus(control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
defer c.compareAndSwapHealthStatus(ctx, control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
err := c.reloadAppConfig()
if err != nil {
c.log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
c.log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err))
return
}
@ -1341,7 +1341,7 @@ func (c *cfg) reloadConfig(ctx context.Context) {
logPrm, err := c.loggerPrm()
if err != nil {
c.log.Error(logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err))
c.log.Error(ctx, logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err))
return
}
@ -1362,25 +1362,25 @@ func (c *cfg) reloadConfig(ctx context.Context) {
err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg)
if err != nil {
c.log.Error(logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err))
c.log.Error(ctx, logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err))
return
}
for _, component := range components {
err = component.reloadFunc()
if err != nil {
c.log.Error(logs.FrostFSNodeUpdatedConfigurationApplying,
c.log.Error(ctx, logs.FrostFSNodeUpdatedConfigurationApplying,
zap.String("component", component.name),
zap.Error(err))
}
}
if err := c.dialerSource.Update(internalNetConfig(c.appCfg, c.metricsCollector.MultinetMetrics())); err != nil {
c.log.Error(logs.FailedToUpdateMultinetConfiguration, zap.Error(err))
c.log.Error(ctx, logs.FailedToUpdateMultinetConfiguration, zap.Error(err))
return
}
c.log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
c.log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
}
func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
@ -1388,7 +1388,7 @@ func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
components = append(components, dCmp{"logger", logPrm.Reload})
components = append(components, dCmp{"runtime", func() error {
setRuntimeParameters(c)
setRuntimeParameters(ctx, c)
return nil
}})
components = append(components, dCmp{"audit", func() error {
@ -1403,7 +1403,7 @@ func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
}
updated, err := tracing.Setup(ctx, *traceConfig)
if updated {
c.log.Info(logs.FrostFSNodeTracingConfigationUpdated)
c.log.Info(ctx, logs.FrostFSNodeTracingConfigationUpdated)
}
return err
}})
@ -1438,7 +1438,7 @@ func (c *cfg) reloadPools() error {
func (c *cfg) reloadPool(p *ants.Pool, newSize int, name string) {
oldSize := p.Cap()
if oldSize != newSize {
c.log.Info(logs.FrostFSNodePoolConfigurationUpdate, zap.String("field", name),
c.log.Info(context.Background(), logs.FrostFSNodePoolConfigurationUpdate, zap.String("field", name),
zap.Int("old", oldSize), zap.Int("new", newSize))
p.Tune(newSize)
}
@ -1474,14 +1474,14 @@ func (c *cfg) createContainerInfoProvider(ctx context.Context) container.InfoPro
})
}
func (c *cfg) shutdown() {
old := c.swapHealthStatus(control.HealthStatus_SHUTTING_DOWN)
func (c *cfg) shutdown(ctx context.Context) {
old := c.swapHealthStatus(ctx, control.HealthStatus_SHUTTING_DOWN)
if old == control.HealthStatus_SHUTTING_DOWN {
c.log.Info(logs.FrostFSNodeShutdownSkip)
c.log.Info(ctx, logs.FrostFSNodeShutdownSkip)
return
}
if old == control.HealthStatus_STARTING {
c.log.Warn(logs.FrostFSNodeShutdownWhenNotReady)
c.log.Warn(ctx, logs.FrostFSNodeShutdownWhenNotReady)
}
c.ctxCancel()
@ -1491,6 +1491,6 @@ func (c *cfg) shutdown() {
}
if err := sdnotify.ClearStatus(); err != nil {
c.log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
c.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err))
}
}
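
The Loki hookup above now calls WithOptions on the wrapper directly instead of reaching into its embedded zap.Logger. The underlying mechanism is zap's WrapCore option, which swaps or decorates the logger's core; a minimal sketch with plain zap (lokicore.New is the node's own core constructor, assumed from the diff):

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	base, _ := zap.NewProduction()
	defer func() { _ = base.Sync() }()

	// WrapCore replaces the logger's core with whatever the callback
	// returns; the node wraps its core with a Loki-forwarding core the
	// same way (lokicore.New(core, cfg)).
	wrapped := base.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
		return zapcore.NewTee(core) // decorate or replace the core here
	}))

	wrapped.Info("entries now pass through the wrapped core")
}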

View file

@ -89,7 +89,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
if c.cfgMorph.containerCacheSize > 0 {
containerCache := newCachedContainerStorage(cnrSrc, c.cfgMorph.cacheTTL, c.cfgMorph.containerCacheSize)
subscribeToContainerCreation(c, func(e event.Event) {
subscribeToContainerCreation(c, func(ctx context.Context, e event.Event) {
ev := e.(containerEvent.PutSuccess)
// read owner of the created container in order to update the reading cache.
@ -102,21 +102,21 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
} else {
// unlike removal, we expect successful receive of the container
// after successful creation, so logging can be useful
c.log.Error(logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification,
c.log.Error(ctx, logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification,
zap.Stringer("id", ev.ID),
zap.Error(err),
)
}
c.log.Debug(logs.FrostFSNodeContainerCreationEventsReceipt,
c.log.Debug(ctx, logs.FrostFSNodeContainerCreationEventsReceipt,
zap.Stringer("id", ev.ID),
)
})
subscribeToContainerRemoval(c, func(e event.Event) {
subscribeToContainerRemoval(c, func(ctx context.Context, e event.Event) {
ev := e.(containerEvent.DeleteSuccess)
containerCache.handleRemoval(ev.ID)
c.log.Debug(logs.FrostFSNodeContainerRemovalEventsReceipt,
c.log.Debug(ctx, logs.FrostFSNodeContainerRemovalEventsReceipt,
zap.Stringer("id", ev.ID),
)
})
@ -237,10 +237,10 @@ type morphContainerWriter struct {
neoClient *cntClient.Client
}
func (m morphContainerWriter) Put(cnr containerCore.Container) (*cid.ID, error) {
return cntClient.Put(m.neoClient, cnr)
func (m morphContainerWriter) Put(ctx context.Context, cnr containerCore.Container) (*cid.ID, error) {
return cntClient.Put(ctx, m.neoClient, cnr)
}
func (m morphContainerWriter) Delete(witness containerCore.RemovalWitness) error {
return cntClient.Delete(m.neoClient, witness)
func (m morphContainerWriter) Delete(ctx context.Context, witness containerCore.RemovalWitness) error {
return cntClient.Delete(ctx, m.neoClient, witness)
}

View file

@ -16,7 +16,7 @@ import (
const serviceNameControl = "control"
func initControlService(c *cfg) {
func initControlService(ctx context.Context, c *cfg) {
endpoint := controlconfig.GRPC(c.appCfg).Endpoint()
if endpoint == controlconfig.GRPCEndpointDefault {
return
@ -46,21 +46,21 @@ func initControlService(c *cfg) {
lis, err := net.Listen("tcp", endpoint)
if err != nil {
c.log.Error(logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err))
c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err))
return
}
c.cfgControlService.server = grpc.NewServer()
c.onShutdown(func() {
stopGRPC("FrostFS Control API", c.cfgControlService.server, c.log)
stopGRPC(ctx, "FrostFS Control API", c.cfgControlService.server, c.log)
})
control.RegisterControlServiceServer(c.cfgControlService.server, ctlSvc)
c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) {
runAndLog(ctx, c, serviceNameControl, false, func(context.Context, *cfg) {
c.log.Info(logs.FrostFSNodeStartListeningEndpoint,
c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint,
zap.String("service", serviceNameControl),
zap.String("endpoint", endpoint))
fatalOnErr(c.cfgControlService.server.Serve(lis))
@ -72,23 +72,23 @@ func (c *cfg) NetmapStatus() control.NetmapStatus {
return c.cfgNetmap.state.controlNetmapStatus()
}
func (c *cfg) setHealthStatus(st control.HealthStatus) {
c.notifySystemd(st)
func (c *cfg) setHealthStatus(ctx context.Context, st control.HealthStatus) {
c.notifySystemd(ctx, st)
c.healthStatus.Store(int32(st))
c.metricsCollector.State().SetHealth(int32(st))
}
func (c *cfg) compareAndSwapHealthStatus(oldSt, newSt control.HealthStatus) (swapped bool) {
func (c *cfg) compareAndSwapHealthStatus(ctx context.Context, oldSt, newSt control.HealthStatus) (swapped bool) {
if swapped = c.healthStatus.CompareAndSwap(int32(oldSt), int32(newSt)); swapped {
c.notifySystemd(newSt)
c.notifySystemd(ctx, newSt)
c.metricsCollector.State().SetHealth(int32(newSt))
}
return
}
func (c *cfg) swapHealthStatus(st control.HealthStatus) (old control.HealthStatus) {
func (c *cfg) swapHealthStatus(ctx context.Context, st control.HealthStatus) (old control.HealthStatus) {
old = control.HealthStatus(c.healthStatus.Swap(int32(st)))
c.notifySystemd(st)
c.notifySystemd(ctx, st)
c.metricsCollector.State().SetHealth(int32(st))
return
}
@ -97,7 +97,7 @@ func (c *cfg) HealthStatus() control.HealthStatus {
return control.HealthStatus(c.healthStatus.Load())
}
func (c *cfg) notifySystemd(st control.HealthStatus) {
func (c *cfg) notifySystemd(ctx context.Context, st control.HealthStatus) {
if !c.sdNotify {
return
}
@ -113,6 +113,6 @@ func (c *cfg) notifySystemd(st control.HealthStatus) {
err = sdnotify.Status(fmt.Sprintf("%v", st))
}
if err != nil {
c.log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
c.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err))
}
}

View file

@ -1,6 +1,7 @@
package main
import (
"context"
"crypto/tls"
"errors"
"net"
@ -18,11 +19,11 @@ import (
const maxRecvMsgSize = 256 << 20
func initGRPC(c *cfg) {
func initGRPC(ctx context.Context, c *cfg) {
var endpointsToReconnect []string
var successCount int
grpcconfig.IterateEndpoints(c.appCfg, func(sc *grpcconfig.Config) {
serverOpts, ok := getGrpcServerOpts(c, sc)
serverOpts, ok := getGrpcServerOpts(ctx, c, sc)
if !ok {
return
}
@ -30,7 +31,7 @@ func initGRPC(c *cfg) {
lis, err := net.Listen("tcp", sc.Endpoint())
if err != nil {
c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(sc.Endpoint())
c.log.Error(logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
endpointsToReconnect = append(endpointsToReconnect, sc.Endpoint())
return
}
@ -39,7 +40,7 @@ func initGRPC(c *cfg) {
srv := grpc.NewServer(serverOpts...)
c.onShutdown(func() {
stopGRPC("FrostFS Public API", srv, c.log)
stopGRPC(ctx, "FrostFS Public API", srv, c.log)
})
c.cfgGRPC.append(sc.Endpoint(), lis, srv)
@ -52,11 +53,11 @@ func initGRPC(c *cfg) {
c.cfgGRPC.reconnectTimeout = grpcconfig.ReconnectTimeout(c.appCfg)
for _, endpoint := range endpointsToReconnect {
scheduleReconnect(endpoint, c)
scheduleReconnect(ctx, endpoint, c)
}
}
func scheduleReconnect(endpoint string, c *cfg) {
func scheduleReconnect(ctx context.Context, endpoint string, c *cfg) {
c.wg.Add(1)
go func() {
defer c.wg.Done()
@ -65,7 +66,7 @@ func scheduleReconnect(endpoint string, c *cfg) {
for {
select {
case <-t.C:
if tryReconnect(endpoint, c) {
if tryReconnect(ctx, endpoint, c) {
return
}
case <-c.done:
@ -75,20 +76,20 @@ func scheduleReconnect(endpoint string, c *cfg) {
}()
}
func tryReconnect(endpoint string, c *cfg) bool {
c.log.Info(logs.FrostFSNodeGRPCReconnecting, zap.String("endpoint", endpoint))
func tryReconnect(ctx context.Context, endpoint string, c *cfg) bool {
c.log.Info(ctx, logs.FrostFSNodeGRPCReconnecting, zap.String("endpoint", endpoint))
serverOpts, found := getGRPCEndpointOpts(endpoint, c)
serverOpts, found := getGRPCEndpointOpts(ctx, endpoint, c)
if !found {
c.log.Warn(logs.FrostFSNodeGRPCServerConfigNotFound, zap.String("endpoint", endpoint))
c.log.Warn(ctx, logs.FrostFSNodeGRPCServerConfigNotFound, zap.String("endpoint", endpoint))
return true
}
lis, err := net.Listen("tcp", endpoint)
if err != nil {
c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(endpoint)
c.log.Error(logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
c.log.Warn(logs.FrostFSNodeGRPCReconnectFailed, zap.Duration("next_try_in", c.cfgGRPC.reconnectTimeout))
c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
c.log.Warn(ctx, logs.FrostFSNodeGRPCReconnectFailed, zap.Duration("next_try_in", c.cfgGRPC.reconnectTimeout))
return false
}
c.metricsCollector.GrpcServerMetrics().MarkHealthy(endpoint)
@ -96,16 +97,16 @@ func tryReconnect(endpoint string, c *cfg) bool {
srv := grpc.NewServer(serverOpts...)
c.onShutdown(func() {
stopGRPC("FrostFS Public API", srv, c.log)
stopGRPC(ctx, "FrostFS Public API", srv, c.log)
})
c.cfgGRPC.appendAndHandle(endpoint, lis, srv)
c.log.Info(logs.FrostFSNodeGRPCReconnectedSuccessfully, zap.String("endpoint", endpoint))
c.log.Info(ctx, logs.FrostFSNodeGRPCReconnectedSuccessfully, zap.String("endpoint", endpoint))
return true
}
func getGRPCEndpointOpts(endpoint string, c *cfg) (result []grpc.ServerOption, found bool) {
func getGRPCEndpointOpts(ctx context.Context, endpoint string, c *cfg) (result []grpc.ServerOption, found bool) {
unlock := c.LockAppConfigShared()
defer unlock()
grpcconfig.IterateEndpoints(c.appCfg, func(sc *grpcconfig.Config) {
@ -116,7 +117,7 @@ func getGRPCEndpointOpts(endpoint string, c *cfg) (result []grpc.ServerOption, f
return
}
var ok bool
result, ok = getGrpcServerOpts(c, sc)
result, ok = getGrpcServerOpts(ctx, c, sc)
if !ok {
return
}
@ -125,7 +126,7 @@ func getGRPCEndpointOpts(endpoint string, c *cfg) (result []grpc.ServerOption, f
return
}
func getGrpcServerOpts(c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool) {
func getGrpcServerOpts(ctx context.Context, c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool) {
serverOpts := []grpc.ServerOption{
grpc.MaxRecvMsgSize(maxRecvMsgSize),
grpc.ChainUnaryInterceptor(
@ -143,7 +144,7 @@ func getGrpcServerOpts(c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool
if tlsCfg != nil {
cert, err := tls.LoadX509KeyPair(tlsCfg.CertificateFile(), tlsCfg.KeyFile())
if err != nil {
c.log.Error(logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err))
c.log.Error(ctx, logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err))
return nil, false
}
@ -174,38 +175,38 @@ func getGrpcServerOpts(c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool
return serverOpts, true
}
func serveGRPC(c *cfg) {
func serveGRPC(ctx context.Context, c *cfg) {
c.cfgGRPC.performAndSave(func(e string, l net.Listener, s *grpc.Server) {
c.wg.Add(1)
go func() {
defer func() {
c.log.Info(logs.FrostFSNodeStopListeningGRPCEndpoint,
c.log.Info(ctx, logs.FrostFSNodeStopListeningGRPCEndpoint,
zap.Stringer("endpoint", l.Addr()),
)
c.wg.Done()
}()
c.log.Info(logs.FrostFSNodeStartListeningEndpoint,
c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint,
zap.String("service", "gRPC"),
zap.Stringer("endpoint", l.Addr()),
)
if err := s.Serve(l); err != nil {
c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(e)
c.log.Error(logs.FrostFSNodeGRPCServerError, zap.Error(err))
c.log.Error(ctx, logs.FrostFSNodeGRPCServerError, zap.Error(err))
c.cfgGRPC.dropConnection(e)
scheduleReconnect(e, c)
scheduleReconnect(ctx, e, c)
}
}()
})
}
func stopGRPC(name string, s *grpc.Server, l *logger.Logger) {
l = &logger.Logger{Logger: l.With(zap.String("name", name))}
func stopGRPC(ctx context.Context, name string, s *grpc.Server, l *logger.Logger) {
l = l.With(zap.String("name", name))
l.Info(logs.FrostFSNodeStoppingGRPCServer)
l.Info(ctx, logs.FrostFSNodeStoppingGRPCServer)
// GracefulStop() may freeze forever, see #1270
done := make(chan struct{})
@ -217,9 +218,9 @@ func stopGRPC(name string, s *grpc.Server, l *logger.Logger) {
select {
case <-done:
case <-time.After(1 * time.Minute):
l.Info(logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop)
l.Info(ctx, logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop)
s.Stop()
}
l.Info(logs.FrostFSNodeGRPCServerStoppedSuccessfully)
l.Info(ctx, logs.FrostFSNodeGRPCServerStoppedSuccessfully)
}
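
stopGRPC keeps the existing guard against GracefulStop hanging forever (#1270), now with the context threaded into its log lines. The stop-with-deadline pattern itself, in isolation:

package main

import (
	"net"
	"time"

	"google.golang.org/grpc"
)

// stopWithDeadline tries a graceful stop first and falls back to a hard
// Stop if in-flight RPCs do not drain within d.
func stopWithDeadline(s *grpc.Server, d time.Duration) {
	done := make(chan struct{})
	go func() {
		s.GracefulStop()
		close(done)
	}()

	select {
	case <-done:
	case <-time.After(d):
		s.Stop()
	}
}

func main() {
	srv := grpc.NewServer()
	lis, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	go func() { _ = srv.Serve(lis) }()

	stopWithDeadline(srv, time.Minute)
}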

View file

@ -20,9 +20,9 @@ type httpComponent struct {
preReload func(c *cfg)
}
func (cmp *httpComponent) init(c *cfg) {
func (cmp *httpComponent) init(ctx context.Context, c *cfg) {
if !cmp.enabled {
c.log.Info(cmp.name + " is disabled")
c.log.Info(ctx, cmp.name+" is disabled")
return
}
// Init server with parameters
@ -39,14 +39,14 @@ func (cmp *httpComponent) init(c *cfg) {
go func() {
defer c.wg.Done()
c.log.Info(logs.FrostFSNodeStartListeningEndpoint,
c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint,
zap.String("service", cmp.name),
zap.String("endpoint", cmp.address))
fatalOnErr(srv.Serve())
}()
c.closers = append(c.closers, closer{
cmp.name,
func() { stopAndLog(c, cmp.name, srv.Shutdown) },
func() { stopAndLog(ctx, c, cmp.name, srv.Shutdown) },
})
}
@ -62,7 +62,7 @@ func (cmp *httpComponent) reload(ctx context.Context) error {
// Cleanup
delCloser(cmp.cfg, cmp.name)
// Init server with new parameters
cmp.init(cmp.cfg)
cmp.init(ctx, cmp.cfg)
// Start worker
if cmp.enabled {
startWorker(ctx, cmp.cfg, *getWorker(cmp.cfg, cmp.name))

View file

@ -61,21 +61,21 @@ func main() {
var ctx context.Context
ctx, c.ctxCancel = context.WithCancel(context.Background())
c.setHealthStatus(control.HealthStatus_STARTING)
c.setHealthStatus(ctx, control.HealthStatus_STARTING)
initApp(ctx, c)
bootUp(ctx, c)
c.compareAndSwapHealthStatus(control.HealthStatus_STARTING, control.HealthStatus_READY)
c.compareAndSwapHealthStatus(ctx, control.HealthStatus_STARTING, control.HealthStatus_READY)
wait(c)
}
func initAndLog(c *cfg, name string, initializer func(*cfg)) {
c.log.Info(fmt.Sprintf("initializing %s service...", name))
func initAndLog(ctx context.Context, c *cfg, name string, initializer func(*cfg)) {
c.log.Info(ctx, fmt.Sprintf("initializing %s service...", name))
initializer(c)
c.log.Info(name + " service has been successfully initialized")
c.log.Info(ctx, name+" service has been successfully initialized")
}
func initApp(ctx context.Context, c *cfg) {
@ -85,72 +85,72 @@ func initApp(ctx context.Context, c *cfg) {
c.wg.Done()
}()
setRuntimeParameters(c)
setRuntimeParameters(ctx, c)
metrics, _ := metricsComponent(c)
initAndLog(c, "profiler", initProfilerService)
initAndLog(c, metrics.name, metrics.init)
initAndLog(ctx, c, "profiler", func(c *cfg) { initProfilerService(ctx, c) })
initAndLog(ctx, c, metrics.name, func(c *cfg) { metrics.init(ctx, c) })
initAndLog(c, "tracing", func(c *cfg) { initTracing(ctx, c) })
initAndLog(ctx, c, "tracing", func(c *cfg) { initTracing(ctx, c) })
initLocalStorage(ctx, c)
initAndLog(c, "storage engine", func(c *cfg) {
initAndLog(ctx, c, "storage engine", func(c *cfg) {
fatalOnErr(c.cfgObject.cfgLocalStorage.localStorage.Open(ctx))
fatalOnErr(c.cfgObject.cfgLocalStorage.localStorage.Init(ctx))
})
initAndLog(c, "gRPC", initGRPC)
initAndLog(c, "netmap", func(c *cfg) { initNetmapService(ctx, c) })
initAndLog(ctx, c, "gRPC", func(c *cfg) { initGRPC(ctx, c) })
initAndLog(ctx, c, "netmap", func(c *cfg) { initNetmapService(ctx, c) })
initAccessPolicyEngine(ctx, c)
initAndLog(c, "access policy engine", func(c *cfg) {
initAndLog(ctx, c, "access policy engine", func(c *cfg) {
fatalOnErr(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalOverrideDatabaseCore().Open(ctx))
fatalOnErr(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalOverrideDatabaseCore().Init())
})
initAndLog(c, "accounting", func(c *cfg) { initAccountingService(ctx, c) })
initAndLog(c, "container", func(c *cfg) { initContainerService(ctx, c) })
initAndLog(c, "session", initSessionService)
initAndLog(c, "object", initObjectService)
initAndLog(c, "tree", initTreeService)
initAndLog(c, "apemanager", initAPEManagerService)
initAndLog(c, "control", initControlService)
initAndLog(ctx, c, "accounting", func(c *cfg) { initAccountingService(ctx, c) })
initAndLog(ctx, c, "container", func(c *cfg) { initContainerService(ctx, c) })
initAndLog(ctx, c, "session", initSessionService)
initAndLog(ctx, c, "object", initObjectService)
initAndLog(ctx, c, "tree", initTreeService)
initAndLog(ctx, c, "apemanager", initAPEManagerService)
initAndLog(ctx, c, "control", func(c *cfg) { initControlService(ctx, c) })
initAndLog(c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) })
initAndLog(ctx, c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) })
}
func runAndLog(ctx context.Context, c *cfg, name string, logSuccess bool, starter func(context.Context, *cfg)) {
c.log.Info(fmt.Sprintf("starting %s service...", name))
c.log.Info(ctx, fmt.Sprintf("starting %s service...", name))
starter(ctx, c)
if logSuccess {
c.log.Info(name + " service started successfully")
c.log.Info(ctx, name+" service started successfully")
}
}
func stopAndLog(c *cfg, name string, stopper func() error) {
c.log.Debug(fmt.Sprintf("shutting down %s service", name))
func stopAndLog(ctx context.Context, c *cfg, name string, stopper func(context.Context) error) {
c.log.Debug(ctx, fmt.Sprintf("shutting down %s service", name))
err := stopper()
err := stopper(ctx)
if err != nil {
c.log.Debug(fmt.Sprintf("could not shutdown %s server", name),
c.log.Debug(ctx, fmt.Sprintf("could not shutdown %s server", name),
zap.String("error", err.Error()),
)
}
c.log.Debug(name + " service has been stopped")
c.log.Debug(ctx, name+" service has been stopped")
}
func bootUp(ctx context.Context, c *cfg) {
runAndLog(ctx, c, "gRPC", false, func(_ context.Context, c *cfg) { serveGRPC(c) })
runAndLog(ctx, c, "gRPC", false, func(_ context.Context, c *cfg) { serveGRPC(ctx, c) })
runAndLog(ctx, c, "notary", true, makeAndWaitNotaryDeposit)
bootstrapNode(c)
bootstrapNode(ctx, c)
startWorkers(ctx, c)
}
func wait(c *cfg) {
c.log.Info(logs.CommonApplicationStarted,
c.log.Info(context.Background(), logs.CommonApplicationStarted,
zap.String("version", misc.Version))
<-c.done // graceful shutdown
@ -160,12 +160,12 @@ func wait(c *cfg) {
go func() {
defer drain.Done()
for err := range c.internalErr {
c.log.Warn(logs.FrostFSNodeInternalApplicationError,
c.log.Warn(context.Background(), logs.FrostFSNodeInternalApplicationError,
zap.String("message", err.Error()))
}
}()
c.log.Debug(logs.FrostFSNodeWaitingForAllProcessesToStop)
c.log.Debug(context.Background(), logs.FrostFSNodeWaitingForAllProcessesToStop)
c.wg.Wait()
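
initAndLog still accepts a plain func(*cfg), so initializers that now need a context are adapted with closures capturing ctx, as above. A condensed sketch of that adapter pattern (cfg and the initializers here are stand-ins):

package main

import (
	"context"
	"fmt"
)

type cfg struct{}

func initAndLog(ctx context.Context, c *cfg, name string, initializer func(*cfg)) {
	fmt.Printf("initializing %s service...\n", name)
	initializer(c)
	fmt.Printf("%s service has been successfully initialized\n", name)
}

// initGRPC needs the context; initSessionService does not.
func initGRPC(ctx context.Context, c *cfg) {}
func initSessionService(c *cfg)            {}

func main() {
	ctx := context.Background()
	c := &cfg{}

	// Context-aware initializers are wrapped in closures capturing ctx...
	initAndLog(ctx, c, "gRPC", func(c *cfg) { initGRPC(ctx, c) })
	// ...while the rest are passed directly.
	initAndLog(ctx, c, "session", initSessionService)
}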

View file

@ -48,7 +48,7 @@ func (c *cfg) initMorphComponents(ctx context.Context) {
fatalOnErr(err)
}
c.log.Info(logs.FrostFSNodeNotarySupport,
c.log.Info(ctx, logs.FrostFSNodeNotarySupport,
zap.Bool("sidechain_enabled", c.cfgMorph.notaryEnabled),
)
@ -64,7 +64,7 @@ func (c *cfg) initMorphComponents(ctx context.Context) {
msPerBlock, err := c.cfgMorph.client.MsPerBlock()
fatalOnErr(err)
c.cfgMorph.cacheTTL = time.Duration(msPerBlock) * time.Millisecond
c.log.Debug(logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL))
c.log.Debug(ctx, logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL))
}
if c.cfgMorph.cacheTTL < 0 {
@ -102,7 +102,7 @@ func initMorphClient(ctx context.Context, c *cfg) {
client.WithDialerSource(c.dialerSource),
)
if err != nil {
c.log.Info(logs.FrostFSNodeFailedToCreateNeoRPCClient,
c.log.Info(ctx, logs.FrostFSNodeFailedToCreateNeoRPCClient,
zap.Any("endpoints", addresses),
zap.String("error", err.Error()),
)
@ -111,12 +111,12 @@ func initMorphClient(ctx context.Context, c *cfg) {
}
c.onShutdown(func() {
c.log.Info(logs.FrostFSNodeClosingMorphComponents)
c.log.Info(ctx, logs.FrostFSNodeClosingMorphComponents)
cli.Close()
})
if err := cli.SetGroupSignerScope(); err != nil {
c.log.Info(logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err))
c.log.Info(ctx, logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err))
}
c.cfgMorph.client = cli
@ -129,14 +129,14 @@ func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) {
return
}
tx, vub, err := makeNotaryDeposit(c)
tx, vub, err := makeNotaryDeposit(ctx, c)
fatalOnErr(err)
if tx.Equals(util.Uint256{}) {
// non-error deposit with an empty TX hash means
// that the deposit has already been made; no
// need to wait it.
c.log.Info(logs.FrostFSNodeNotaryDepositHasAlreadyBeenMade)
c.log.Info(ctx, logs.FrostFSNodeNotaryDepositHasAlreadyBeenMade)
return
}
@ -144,7 +144,7 @@ func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) {
fatalOnErr(err)
}
func makeNotaryDeposit(c *cfg) (util.Uint256, uint32, error) {
func makeNotaryDeposit(ctx context.Context, c *cfg) (util.Uint256, uint32, error) {
const (
// gasMultiplier defines how many times more the notary
// balance must be compared to the GAS balance of the node:
@ -161,7 +161,7 @@ func makeNotaryDeposit(c *cfg) (util.Uint256, uint32, error) {
return util.Uint256{}, 0, fmt.Errorf("could not calculate notary deposit: %w", err)
}
return c.cfgMorph.client.DepositEndlessNotary(depositAmount)
return c.cfgMorph.client.DepositEndlessNotary(ctx, depositAmount)
}
var (
@ -202,7 +202,7 @@ func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256, vub uint32)
return fmt.Errorf("could not wait for notary deposit persists in chain: %w", err)
}
if res.Execution.VMState.HasFlag(vmstate.Halt) {
c.log.Info(logs.ClientNotaryDepositTransactionWasSuccessfullyPersisted)
c.log.Info(ctx, logs.ClientNotaryDepositTransactionWasSuccessfullyPersisted)
return nil
}
return errNotaryDepositFail
@ -217,7 +217,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey)
if err != nil {
fromSideChainBlock = 0
c.log.Warn(logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
}
subs, err = subscriber.New(ctx, &subscriber.Params{
@ -246,7 +246,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
setNetmapNotificationParser(c, newEpochNotification, func(src *state.ContainedNotificationEvent) (event.Event, error) {
res, err := netmapEvent.ParseNewEpoch(src)
if err == nil {
c.log.Info(logs.FrostFSNodeNewEpochEventFromSidechain,
c.log.Info(ctx, logs.FrostFSNodeNewEpochEventFromSidechain,
zap.Uint64("number", res.(netmapEvent.NewEpoch).EpochNumber()),
)
}
@ -256,12 +256,12 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
registerNotificationHandlers(c.cfgNetmap.scriptHash, lis, c.cfgNetmap.parsers, c.cfgNetmap.subscribers)
registerNotificationHandlers(c.cfgContainer.scriptHash, lis, c.cfgContainer.parsers, c.cfgContainer.subscribers)
registerBlockHandler(lis, func(block *block.Block) {
c.log.Debug(logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index))
registerBlockHandler(lis, func(ctx context.Context, block *block.Block) {
c.log.Debug(ctx, logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index))
err = c.persistate.SetUInt32(persistateSideChainLastBlockKey, block.Index)
if err != nil {
c.log.Warn(logs.FrostFSNodeCantUpdatePersistentState,
c.log.Warn(ctx, logs.FrostFSNodeCantUpdatePersistentState,
zap.String("chain", "side"),
zap.Uint32("block_index", block.Index))
}

View file

@ -145,7 +145,7 @@ func initNetmapService(ctx context.Context, c *cfg) {
c.initMorphComponents(ctx)
initNetmapState(c)
initNetmapState(ctx, c)
server := netmapTransportGRPC.New(
netmapService.NewSignService(
@ -175,29 +175,29 @@ func initNetmapService(ctx context.Context, c *cfg) {
}
func addNewEpochNotificationHandlers(c *cfg) {
addNewEpochNotificationHandler(c, func(ev event.Event) {
addNewEpochNotificationHandler(c, func(_ context.Context, ev event.Event) {
c.cfgNetmap.state.setCurrentEpoch(ev.(netmapEvent.NewEpoch).EpochNumber())
})
addNewEpochAsyncNotificationHandler(c, func(ev event.Event) {
addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, ev event.Event) {
e := ev.(netmapEvent.NewEpoch).EpochNumber()
c.updateContractNodeInfo(e)
c.updateContractNodeInfo(ctx, e)
if !c.needBootstrap() || c.cfgNetmap.reBoostrapTurnedOff.Load() { // fixes #470
return
}
if err := c.bootstrap(); err != nil {
c.log.Warn(logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err))
if err := c.bootstrap(ctx); err != nil {
c.log.Warn(ctx, logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err))
}
})
if c.cfgMorph.notaryEnabled {
addNewEpochAsyncNotificationHandler(c, func(_ event.Event) {
_, _, err := makeNotaryDeposit(c)
addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, _ event.Event) {
_, _, err := makeNotaryDeposit(ctx, c)
if err != nil {
c.log.Error(logs.FrostFSNodeCouldNotMakeNotaryDeposit,
c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit,
zap.String("error", err.Error()),
)
}
@ -207,13 +207,13 @@ func addNewEpochNotificationHandlers(c *cfg) {
// bootstrapNode adds current node to the Network map.
// Must be called after initNetmapService.
func bootstrapNode(c *cfg) {
func bootstrapNode(ctx context.Context, c *cfg) {
if c.needBootstrap() {
if c.IsMaintenance() {
c.log.Info(logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap)
c.log.Info(ctx, logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap)
return
}
err := c.bootstrap()
err := c.bootstrap(ctx)
fatalOnErrDetails("bootstrap error", err)
}
}
@ -240,17 +240,17 @@ func setNetmapNotificationParser(c *cfg, sTyp string, p event.NotificationParser
// initNetmapState inits current Network map state.
// Must be called after Morph components initialization.
func initNetmapState(c *cfg) {
func initNetmapState(ctx context.Context, c *cfg) {
epoch, err := c.cfgNetmap.wrapper.Epoch()
fatalOnErrDetails("could not initialize current epoch number", err)
var ni *netmapSDK.NodeInfo
ni, err = c.netmapInitLocalNodeState(epoch)
ni, err = c.netmapInitLocalNodeState(ctx, epoch)
fatalOnErrDetails("could not init network state", err)
stateWord := nodeState(ni)
c.log.Info(logs.FrostFSNodeInitialNetworkState,
c.log.Info(ctx, logs.FrostFSNodeInitialNetworkState,
zap.Uint64("epoch", epoch),
zap.String("state", stateWord),
)
@ -279,7 +279,7 @@ func nodeState(ni *netmapSDK.NodeInfo) string {
return "undefined"
}
func (c *cfg) netmapInitLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error) {
func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) {
nmNodes, err := c.cfgNetmap.wrapper.GetCandidates()
if err != nil {
return nil, err
@ -307,7 +307,7 @@ func (c *cfg) netmapInitLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error
if nmState != candidateState {
// This happens when the node was switched to maintenance without epoch tick.
// We expect it to continue staying in maintenance.
c.log.Info(logs.CandidateStatusPriority,
c.log.Info(ctx, logs.CandidateStatusPriority,
zap.String("netmap", nmState),
zap.String("candidate", candidateState))
}
@ -353,16 +353,16 @@ func addNewEpochAsyncNotificationHandler(c *cfg, h event.Handler) {
var errRelayBootstrap = errors.New("setting netmap status is forbidden in relay mode")
func (c *cfg) SetNetmapStatus(st control.NetmapStatus) error {
func (c *cfg) SetNetmapStatus(ctx context.Context, st control.NetmapStatus) error {
switch st {
default:
return fmt.Errorf("unsupported status %v", st)
case control.NetmapStatus_MAINTENANCE:
return c.setMaintenanceStatus(false)
return c.setMaintenanceStatus(ctx, false)
case control.NetmapStatus_ONLINE, control.NetmapStatus_OFFLINE:
}
c.stopMaintenance()
c.stopMaintenance(ctx)
if !c.needBootstrap() {
return errRelayBootstrap
@ -370,12 +370,12 @@ func (c *cfg) SetNetmapStatus(st control.NetmapStatus) error {
if st == control.NetmapStatus_ONLINE {
c.cfgNetmap.reBoostrapTurnedOff.Store(false)
return bootstrapOnline(c)
return bootstrapOnline(ctx, c)
}
c.cfgNetmap.reBoostrapTurnedOff.Store(true)
return c.updateNetMapState(func(*nmClient.UpdatePeerPrm) {})
return c.updateNetMapState(ctx, func(*nmClient.UpdatePeerPrm) {})
}
func (c *cfg) GetNetmapStatus() (control.NetmapStatus, uint64, error) {
@ -387,11 +387,11 @@ func (c *cfg) GetNetmapStatus() (control.NetmapStatus, uint64, error) {
return st, epoch, nil
}
func (c *cfg) ForceMaintenance() error {
return c.setMaintenanceStatus(true)
func (c *cfg) ForceMaintenance(ctx context.Context) error {
return c.setMaintenanceStatus(ctx, true)
}
func (c *cfg) setMaintenanceStatus(force bool) error {
func (c *cfg) setMaintenanceStatus(ctx context.Context, force bool) error {
netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration()
if err != nil {
err = fmt.Errorf("read network settings to check maintenance allowance: %w", err)
@ -400,10 +400,10 @@ func (c *cfg) setMaintenanceStatus(force bool) error {
}
if err == nil || force {
c.startMaintenance()
c.startMaintenance(ctx)
if err == nil {
err = c.updateNetMapState((*nmClient.UpdatePeerPrm).SetMaintenance)
err = c.updateNetMapState(ctx, (*nmClient.UpdatePeerPrm).SetMaintenance)
}
if err != nil {
@ -416,12 +416,12 @@ func (c *cfg) setMaintenanceStatus(force bool) error {
// calls UpdatePeerState operation of Netmap contract's client for the local node.
// State setter is used to specify node state to switch to.
func (c *cfg) updateNetMapState(stateSetter func(*nmClient.UpdatePeerPrm)) error {
func (c *cfg) updateNetMapState(ctx context.Context, stateSetter func(*nmClient.UpdatePeerPrm)) error {
var prm nmClient.UpdatePeerPrm
prm.SetKey(c.key.PublicKey().Bytes())
stateSetter(&prm)
_, err := c.cfgNetmap.wrapper.UpdatePeerState(prm)
_, err := c.cfgNetmap.wrapper.UpdatePeerState(ctx, prm)
return err
}

View file

@ -58,7 +58,7 @@ type objectSvc struct {
func (c *cfg) MaxObjectSize() uint64 {
sz, err := c.cfgNetmap.wrapper.MaxObjectSize()
if err != nil {
c.log.Error(logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
c.log.Error(context.Background(), logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
zap.String("error", err.Error()),
)
}
@ -66,11 +66,11 @@ func (c *cfg) MaxObjectSize() uint64 {
return sz
}
func (s *objectSvc) Put() (objectService.PutObjectStream, error) {
func (s *objectSvc) Put(_ context.Context) (objectService.PutObjectStream, error) {
return s.put.Put()
}
func (s *objectSvc) Patch() (objectService.PatchObjectStream, error) {
func (s *objectSvc) Patch(_ context.Context) (objectService.PatchObjectStream, error) {
return s.patch.Patch()
}
@ -223,7 +223,7 @@ func initObjectService(c *cfg) {
func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.ClientCache) {
if policerconfig.UnsafeDisable(c.appCfg) {
c.log.Warn(logs.FrostFSNodePolicerIsDisabled)
c.log.Warn(context.Background(), logs.FrostFSNodePolicerIsDisabled)
return
}
@ -287,7 +287,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
_, err := ls.Inhume(ctx, inhumePrm)
if err != nil {
c.log.Warn(logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
c.log.Warn(ctx, logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
zap.String("error", err.Error()),
)
}

View file

@ -1,17 +1,18 @@
package main
import (
"context"
"runtime"
profilerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/profiler"
httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http"
)
func initProfilerService(c *cfg) {
func initProfilerService(ctx context.Context, c *cfg) {
tuneProfilers(c)
pprof, _ := pprofComponent(c)
pprof.init(c)
pprof.init(ctx, c)
}
func pprofComponent(c *cfg) (*httpComponent, bool) {

View file

@ -1,6 +1,7 @@
package main
import (
"context"
"os"
"runtime/debug"
@ -9,17 +10,17 @@ import (
"go.uber.org/zap"
)
func setRuntimeParameters(c *cfg) {
func setRuntimeParameters(ctx context.Context, c *cfg) {
if len(os.Getenv("GOMEMLIMIT")) != 0 {
// default limit < yaml limit < app env limit < GOMEMLIMIT
c.log.Warn(logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT)
c.log.Warn(ctx, logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT)
return
}
memLimitBytes := runtime.GCMemoryLimitBytes(c.appCfg)
previous := debug.SetMemoryLimit(memLimitBytes)
if memLimitBytes != previous {
c.log.Info(logs.RuntimeSoftMemoryLimitUpdated,
c.log.Info(ctx, logs.RuntimeSoftMemoryLimitUpdated,
zap.Int64("new_value", memLimitBytes),
zap.Int64("old_value", previous))
}

View file

@ -48,7 +48,7 @@ func initSessionService(c *cfg) {
_ = c.privateTokenStore.Close()
})
addNewEpochNotificationHandler(c, func(ev event.Event) {
addNewEpochNotificationHandler(c, func(_ context.Context, ev event.Event) {
c.privateTokenStore.RemoveOld(ev.(netmap.NewEpoch).EpochNumber())
})
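
Morph event handlers change signature throughout the PR: they now receive the subscription's context together with the event, so a handler can log (or be cancelled) with the right context. A self-contained sketch of that handler shape (the real event.Handler type is defined in pkg/morph/event):

package main

import (
	"context"
	"fmt"
)

// Event mirrors the morph event interface; NewEpoch is a stand-in payload.
type Event interface{ MorphEvent() }

type NewEpoch struct{ epoch uint64 }

func (NewEpoch) MorphEvent()           {}
func (e NewEpoch) EpochNumber() uint64 { return e.epoch }

// Handler is the new shape: context first, then the event.
type Handler func(context.Context, Event)

func main() {
	var h Handler = func(ctx context.Context, ev Event) {
		// ctx would be passed straight to the logger here.
		fmt.Println("new epoch:", ev.(NewEpoch).EpochNumber())
	}
	h(context.Background(), NewEpoch{epoch: 42})
}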

View file

@ -13,12 +13,12 @@ import (
func initTracing(ctx context.Context, c *cfg) {
conf, err := tracingconfig.ToTracingConfig(c.appCfg)
if err != nil {
c.log.Error(logs.FrostFSNodeFailedInitTracing, zap.Error(err))
c.log.Error(ctx, logs.FrostFSNodeFailedInitTracing, zap.Error(err))
return
}
_, err = tracing.Setup(ctx, *conf)
if err != nil {
c.log.Error(logs.FrostFSNodeFailedInitTracing, zap.Error(err))
c.log.Error(ctx, logs.FrostFSNodeFailedInitTracing, zap.Error(err))
return
}
@ -29,7 +29,7 @@ func initTracing(ctx context.Context, c *cfg) {
defer cancel()
err := tracing.Shutdown(ctx) // cfg context cancels before close
if err != nil {
c.log.Error(logs.FrostFSNodeFailedShutdownTracing, zap.Error(err))
c.log.Error(ctx, logs.FrostFSNodeFailedShutdownTracing, zap.Error(err))
}
},
})

View file

@ -44,7 +44,7 @@ func (c cnrSource) List() ([]cid.ID, error) {
func initTreeService(c *cfg) {
treeConfig := treeconfig.Tree(c.appCfg)
if !treeConfig.Enabled() {
c.log.Info(logs.FrostFSNodeTreeServiceIsNotEnabledSkipInitialization)
c.log.Info(context.Background(), logs.FrostFSNodeTreeServiceIsNotEnabledSkipInitialization)
return
}
@ -80,10 +80,10 @@ func initTreeService(c *cfg) {
}))
if d := treeConfig.SyncInterval(); d == 0 {
addNewEpochNotificationHandler(c, func(_ event.Event) {
addNewEpochNotificationHandler(c, func(ctx context.Context, _ event.Event) {
err := c.treeService.SynchronizeAll()
if err != nil {
c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
c.log.Error(ctx, logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
}
})
} else {
@ -94,7 +94,7 @@ func initTreeService(c *cfg) {
for range tick.C {
err := c.treeService.SynchronizeAll()
if err != nil {
c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
c.log.Error(context.Background(), logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
if errors.Is(err, tree.ErrShuttingDown) {
return
}
@ -103,15 +103,15 @@ func initTreeService(c *cfg) {
}()
}
subscribeToContainerRemoval(c, func(e event.Event) {
subscribeToContainerRemoval(c, func(ctx context.Context, e event.Event) {
ev := e.(containerEvent.DeleteSuccess)
// This is executed asynchronously, so we don't care about the operation taking some time.
c.log.Debug(logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID))
err := c.treeService.DropTree(context.Background(), ev.ID, "")
c.log.Debug(ctx, logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID))
err := c.treeService.DropTree(ctx, ev.ID, "")
if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
// Ignore pilorama.ErrTreeNotFound but other errors, including shard.ErrReadOnly, should be logged.
c.log.Error(logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved,
c.log.Error(ctx, logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved,
zap.Stringer("cid", ev.ID),
zap.String("error", err.Error()))
}

View file

@ -1,6 +1,8 @@
package audit
import (
"context"
crypto "git.frostfs.info/TrueCloudLab/frostfs-crypto"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
@ -17,15 +19,15 @@ type Target interface {
String() string
}
func LogRequest(log *logger.Logger, operation string, req Request, target Target, status bool) {
func LogRequest(ctx context.Context, log *logger.Logger, operation string, req Request, target Target, status bool) {
var key []byte
if req != nil {
key = req.GetVerificationHeader().GetBodySignature().GetKey()
}
LogRequestWithKey(log, operation, key, target, status)
LogRequestWithKey(ctx, log, operation, key, target, status)
}
func LogRequestWithKey(log *logger.Logger, operation string, key []byte, target Target, status bool) {
func LogRequestWithKey(ctx context.Context, log *logger.Logger, operation string, key []byte, target Target, status bool) {
object, subject := NotDefined, NotDefined
publicKey := crypto.UnmarshalPublicKey(key)
@ -37,7 +39,7 @@ func LogRequestWithKey(log *logger.Logger, operation string, key []byte, target
object = target.String()
}
log.Info(logs.AuditEventLogRecord,
log.Info(ctx, logs.AuditEventLogRecord,
zap.String("operation", operation),
zap.String("object", object),
zap.String("subject", subject),
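
The audit helpers gain a leading context parameter and forward it to the final `log.Info` call; the field set itself is unchanged. A simplified sketch of the same calling convention, using placeholder functions rather than the real audit package:

```go
package main

import (
	"context"
	"log"
)

// logRequestWithKey copies the shape of the updated helper: ctx first, then
// the pieces needed to build the audit record.
func logRequestWithKey(ctx context.Context, operation string, key []byte, status bool) {
	subject := "not defined"
	if len(key) > 0 {
		subject = "key-derived subject" // the real code resolves an owner ID from the public key
	}
	log.Printf("audit event (ctx err=%v): op=%s subject=%s success=%v",
		ctx.Err(), operation, subject, status)
}

// logRequest extracts the signature key from a request and delegates,
// forwarding the same context.
func logRequest(ctx context.Context, operation string, key []byte, status bool) {
	logRequestWithKey(ctx, operation, key, status)
}

func main() {
	logRequest(context.Background(), "ObjectPut", []byte{0x02, 0x0a}, true)
}
```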

View file

@ -117,7 +117,7 @@ func (v *FormatValidator) Validate(ctx context.Context, obj *objectSDK.Object, u
}
if !unprepared {
if err := v.validateSignatureKey(obj); err != nil {
if err := v.validateSignatureKey(ctx, obj); err != nil {
return fmt.Errorf("(%T) could not validate signature key: %w", v, err)
}
@ -134,7 +134,7 @@ func (v *FormatValidator) Validate(ctx context.Context, obj *objectSDK.Object, u
return nil
}
func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error {
func (v *FormatValidator) validateSignatureKey(ctx context.Context, obj *objectSDK.Object) error {
sig := obj.Signature()
if sig == nil {
return errMissingSignature
@ -156,7 +156,7 @@ func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error {
ownerID := obj.OwnerID()
if token == nil && obj.ECHeader() != nil {
role, err := v.isIROrContainerNode(obj, binKey)
role, err := v.isIROrContainerNode(ctx, obj, binKey)
if err != nil {
return err
}
@ -172,7 +172,7 @@ func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error {
}
if v.verifyTokenIssuer {
role, err := v.isIROrContainerNode(obj, binKey)
role, err := v.isIROrContainerNode(ctx, obj, binKey)
if err != nil {
return err
}
@ -190,7 +190,7 @@ func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error {
return nil
}
func (v *FormatValidator) isIROrContainerNode(obj *objectSDK.Object, signerKey []byte) (acl.Role, error) {
func (v *FormatValidator) isIROrContainerNode(ctx context.Context, obj *objectSDK.Object, signerKey []byte) (acl.Role, error) {
cnrID, containerIDSet := obj.ContainerID()
if !containerIDSet {
return acl.RoleOthers, errNilCID
@ -204,7 +204,7 @@ func (v *FormatValidator) isIROrContainerNode(obj *objectSDK.Object, signerKey [
return acl.RoleOthers, fmt.Errorf("failed to get container (id=%s): %w", cnrID.EncodeToString(), err)
}
res, err := v.senderClassifier.IsInnerRingOrContainerNode(signerKey, cnrID, cnr.Value)
res, err := v.senderClassifier.IsInnerRingOrContainerNode(ctx, signerKey, cnrID, cnr.Value)
if err != nil {
return acl.RoleOthers, err
}
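
These hunks only relay the context of `Validate` down through `validateSignatureKey` and `isIROrContainerNode` to the sender classifier. The sketch below shows that relay with stub functions; none of the real validation logic is reproduced.

```go
package main

import (
	"context"
	"fmt"
)

type role int

const roleOthers role = 0

// isInnerRingOrContainerNode is a placeholder for the classifier call that now
// takes ctx so its debug logs stay attached to the request.
func isInnerRingOrContainerNode(ctx context.Context, signer []byte) (role, error) {
	_ = ctx // the real classifier forwards ctx to its debug logs and lookups
	return roleOthers, nil
}

func validateSignatureKey(ctx context.Context, signer []byte) error {
	_, err := isInnerRingOrContainerNode(ctx, signer)
	return err
}

func validate(ctx context.Context, signer []byte) error {
	if err := validateSignatureKey(ctx, signer); err != nil {
		return fmt.Errorf("could not validate signature key: %w", err)
	}
	return nil
}

func main() {
	fmt.Println("validate:", validate(context.Background(), []byte{0x03}))
}
```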

View file

@ -65,7 +65,7 @@ func TestFormatValidator_Validate(t *testing.T) {
epoch: curEpoch,
}),
WithLockSource(ls),
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
)
ownerKey, err := keys.NewPrivateKey()
@ -290,7 +290,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
}),
WithLockSource(ls),
WithVerifySessionTokenIssuer(false),
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
)
tok := sessiontest.Object()
@ -339,7 +339,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
},
},
),
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
)
tok := sessiontest.Object()
@ -417,7 +417,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
currentEpoch: curEpoch,
},
),
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
)
require.NoError(t, v.Validate(context.Background(), obj, false))
@ -491,7 +491,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
currentEpoch: curEpoch,
},
),
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
)
require.NoError(t, v.Validate(context.Background(), obj, false))
@ -567,7 +567,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
currentEpoch: curEpoch,
},
),
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
)
require.Error(t, v.Validate(context.Background(), obj, false))
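
The tests switch from the struct literal `&logger.Logger{Logger: zaptest.NewLogger(t)}` to the `logger.NewLoggerWrapper` constructor. A small illustration of why a constructor is preferable (it keeps the wrapped field private and gives one place to add context handling); `loggerWrapper` below is a local stand-in, not the frostfs logger package:

```go
package main

import (
	"context"

	"go.uber.org/zap"
)

// loggerWrapper hides the underlying *zap.Logger so callers cannot bypass the
// context-aware methods.
type loggerWrapper struct {
	z *zap.Logger
}

// newLoggerWrapper mirrors the constructor style adopted by the diff.
func newLoggerWrapper(z *zap.Logger) *loggerWrapper {
	return &loggerWrapper{z: z}
}

func (l *loggerWrapper) Info(ctx context.Context, msg string, fields ...zap.Field) {
	_ = ctx // a real wrapper could append trace IDs extracted from ctx
	l.z.Info(msg, fields...)
}

func main() {
	l := newLoggerWrapper(zap.NewExample())
	l.Info(context.Background(), "wrapped logger ready", zap.String("component", "demo"))
}
```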

View file

@ -2,6 +2,7 @@ package object
import (
"bytes"
"context"
"crypto/sha256"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@ -40,6 +41,7 @@ type ClassifyResult struct {
}
func (c SenderClassifier) Classify(
ctx context.Context,
ownerID *user.ID,
ownerKey *keys.PublicKey,
idCnr cid.ID,
@ -57,14 +59,14 @@ func (c SenderClassifier) Classify(
}, nil
}
return c.IsInnerRingOrContainerNode(ownerKeyInBytes, idCnr, cnr)
return c.IsInnerRingOrContainerNode(ctx, ownerKeyInBytes, idCnr, cnr)
}
func (c SenderClassifier) IsInnerRingOrContainerNode(ownerKeyInBytes []byte, idCnr cid.ID, cnr container.Container) (*ClassifyResult, error) {
func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerKeyInBytes []byte, idCnr cid.ID, cnr container.Container) (*ClassifyResult, error) {
isInnerRingNode, err := c.isInnerRingKey(ownerKeyInBytes)
if err != nil {
// do not throw error, try best case matching
c.log.Debug(logs.V2CantCheckIfRequestFromInnerRing,
c.log.Debug(ctx, logs.V2CantCheckIfRequestFromInnerRing,
zap.String("error", err.Error()))
} else if isInnerRingNode {
return &ClassifyResult{
@ -81,7 +83,7 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ownerKeyInBytes []byte, idC
// error might happen if request has `RoleOther` key and placement
// is not possible for previous epoch, so
// do not throw error, try best case matching
c.log.Debug(logs.V2CantCheckIfRequestFromContainerNode,
c.log.Debug(ctx, logs.V2CantCheckIfRequestFromContainerNode,
zap.String("error", err.Error()))
} else if isContainerNode {
return &ClassifyResult{

View file

@ -29,7 +29,7 @@ type (
emitDuration uint32 // in blocks
}
depositor func() (util.Uint256, error)
depositor func(context.Context) (util.Uint256, error)
awaiter func(context.Context, util.Uint256) error
)
@ -66,11 +66,11 @@ func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer {
)
}
func newEmissionTimer(args *emitTimerArgs) *timer.BlockTimer {
func newEmissionTimer(ctx context.Context, args *emitTimerArgs) *timer.BlockTimer {
return timer.NewBlockTimer(
timer.StaticBlockMeter(args.emitDuration),
func() {
args.ap.HandleGasEmission(timerEvent.NewAlphabetEmitTick{})
args.ap.HandleGasEmission(ctx, timerEvent.NewAlphabetEmitTick{})
},
)
}
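
The `depositor` type now takes a context, matching `awaiter`, and `newEmissionTimer` receives the context and captures it because block-timer callbacks remain niladic. A toy version of that capture, with a placeholder `blockTimer` type:

```go
package main

import (
	"context"
	"fmt"
)

// blockTimer mimics timer.NewBlockTimer: its callback takes no arguments,
// so a context has to be captured when the timer is constructed.
type blockTimer struct{ onTick func() }

func (t *blockTimer) tick() { t.onTick() }

// handleGasEmission stands in for the processor handler that now takes ctx.
func handleGasEmission(ctx context.Context) {
	fmt.Println("gas emission tick, ctx alive:", ctx.Err() == nil)
}

// newEmissionTimer mirrors the diff: it receives ctx and closes over it.
func newEmissionTimer(ctx context.Context) *blockTimer {
	return &blockTimer{onTick: func() { handleGasEmission(ctx) }}
}

func main() {
	newEmissionTimer(context.Background()).tick()
}
```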

View file

@ -35,7 +35,7 @@ import (
"google.golang.org/grpc"
)
func (s *Server) initNetmapProcessor(cfg *viper.Viper,
func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper,
alphaSync event.Handler,
) error {
locodeValidator, err := s.newLocodeValidator(cfg)
@ -48,10 +48,13 @@ func (s *Server) initNetmapProcessor(cfg *viper.Viper,
var netMapCandidateStateValidator statevalidation.NetMapCandidateValidator
netMapCandidateStateValidator.SetNetworkSettings(netSettings)
poolSize := cfg.GetInt("workers.netmap")
s.log.Debug(ctx, logs.NetmapNetmapWorkerPool, zap.Int("size", poolSize))
s.netmapProcessor, err = netmap.New(&netmap.Params{
Log: s.log,
Metrics: s.irMetrics,
PoolSize: cfg.GetInt("workers.netmap"),
PoolSize: poolSize,
NetmapClient: netmap.NewNetmapClient(s.netmapClient),
EpochTimer: s,
EpochState: s,
@ -97,7 +100,7 @@ func (s *Server) initMainnet(ctx context.Context, cfg *viper.Viper, morphChain *
fromMainChainBlock, err := s.persistate.UInt32(persistateMainChainLastBlockKey)
if err != nil {
fromMainChainBlock = 0
s.log.Warn(logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.String("error", err.Error()))
s.log.Warn(ctx, logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.String("error", err.Error()))
}
mainnetChain.from = fromMainChainBlock
@ -137,12 +140,12 @@ func (s *Server) enableNotarySupport() error {
return nil
}
func (s *Server) initNotaryConfig() {
func (s *Server) initNotaryConfig(ctx context.Context) {
s.mainNotaryConfig = notaryConfigs(
!s.withoutMainNet && s.mainnetClient.ProbeNotary(), // if mainnet disabled then notary flag must be disabled too
)
s.log.Info(logs.InnerringNotarySupport,
s.log.Info(ctx, logs.InnerringNotarySupport,
zap.Bool("sidechain_enabled", true),
zap.Bool("mainchain_enabled", !s.mainNotaryConfig.disabled),
)
@ -152,8 +155,8 @@ func (s *Server) createAlphaSync(cfg *viper.Viper, frostfsCli *frostfsClient.Cli
var alphaSync event.Handler
if s.withoutMainNet || cfg.GetBool("governance.disable") {
alphaSync = func(event.Event) {
s.log.Debug(logs.InnerringAlphabetKeysSyncIsDisabled)
alphaSync = func(ctx context.Context, _ event.Event) {
s.log.Debug(ctx, logs.InnerringAlphabetKeysSyncIsDisabled)
}
} else {
// create governance processor
@ -196,16 +199,16 @@ func (s *Server) createIRFetcher() irFetcher {
return irf
}
func (s *Server) initTimers(cfg *viper.Viper) {
func (s *Server) initTimers(ctx context.Context, cfg *viper.Viper) {
s.epochTimer = newEpochTimer(&epochTimerArgs{
newEpochHandlers: s.newEpochTickHandlers(),
newEpochHandlers: s.newEpochTickHandlers(ctx),
epoch: s,
})
s.addBlockTimer(s.epochTimer)
// initialize emission timer
emissionTimer := newEmissionTimer(&emitTimerArgs{
emissionTimer := newEmissionTimer(ctx, &emitTimerArgs{
ap: s.alphabetProcessor,
emitDuration: cfg.GetUint32("timers.emit"),
})
@ -213,18 +216,20 @@ func (s *Server) initTimers(cfg *viper.Viper) {
s.addBlockTimer(emissionTimer)
}
func (s *Server) initAlphabetProcessor(cfg *viper.Viper) error {
func (s *Server) initAlphabetProcessor(ctx context.Context, cfg *viper.Viper) error {
parsedWallets, err := parseWalletAddressesFromStrings(cfg.GetStringSlice("emit.extra_wallets"))
if err != nil {
return err
}
poolSize := cfg.GetInt("workers.alphabet")
s.log.Debug(ctx, logs.AlphabetAlphabetWorkerPool, zap.Int("size", poolSize))
// create alphabet processor
s.alphabetProcessor, err = alphabet.New(&alphabet.Params{
ParsedWallets: parsedWallets,
Log: s.log,
Metrics: s.irMetrics,
PoolSize: cfg.GetInt("workers.alphabet"),
PoolSize: poolSize,
AlphabetContracts: s.contracts.alphabet,
NetmapClient: s.netmapClient,
MorphClient: s.morphClient,
@ -239,12 +244,14 @@ func (s *Server) initAlphabetProcessor(cfg *viper.Viper) error {
return err
}
func (s *Server) initContainerProcessor(cfg *viper.Viper, cnrClient *container.Client, frostfsIDClient *frostfsid.Client) error {
func (s *Server) initContainerProcessor(ctx context.Context, cfg *viper.Viper, cnrClient *container.Client, frostfsIDClient *frostfsid.Client) error {
poolSize := cfg.GetInt("workers.container")
s.log.Debug(ctx, logs.ContainerContainerWorkerPool, zap.Int("size", poolSize))
// container processor
containerProcessor, err := cont.New(&cont.Params{
Log: s.log,
Metrics: s.irMetrics,
PoolSize: cfg.GetInt("workers.container"),
PoolSize: poolSize,
AlphabetState: s,
ContainerClient: cnrClient,
MorphClient: cnrClient.Morph(),
@ -258,12 +265,14 @@ func (s *Server) initContainerProcessor(cfg *viper.Viper, cnrClient *container.C
return bindMorphProcessor(containerProcessor, s)
}
func (s *Server) initBalanceProcessor(cfg *viper.Viper, frostfsCli *frostfsClient.Client) error {
func (s *Server) initBalanceProcessor(ctx context.Context, cfg *viper.Viper, frostfsCli *frostfsClient.Client) error {
poolSize := cfg.GetInt("workers.balance")
s.log.Debug(ctx, logs.BalanceBalanceWorkerPool, zap.Int("size", poolSize))
// create balance processor
balanceProcessor, err := balance.New(&balance.Params{
Log: s.log,
Metrics: s.irMetrics,
PoolSize: cfg.GetInt("workers.balance"),
PoolSize: poolSize,
FrostFSClient: frostfsCli,
BalanceSC: s.contracts.balance,
AlphabetState: s,
@ -276,15 +285,17 @@ func (s *Server) initBalanceProcessor(cfg *viper.Viper, frostfsCli *frostfsClien
return bindMorphProcessor(balanceProcessor, s)
}
func (s *Server) initFrostFSMainnetProcessor(cfg *viper.Viper) error {
func (s *Server) initFrostFSMainnetProcessor(ctx context.Context, cfg *viper.Viper) error {
if s.withoutMainNet {
return nil
}
poolSize := cfg.GetInt("workers.frostfs")
s.log.Debug(ctx, logs.FrostFSFrostfsWorkerPool, zap.Int("size", poolSize))
frostfsProcessor, err := frostfs.New(&frostfs.Params{
Log: s.log,
Metrics: s.irMetrics,
PoolSize: cfg.GetInt("workers.frostfs"),
PoolSize: poolSize,
FrostFSContract: s.contracts.frostfs,
BalanceClient: s.balanceClient,
NetmapClient: s.netmapClient,
@ -304,10 +315,10 @@ func (s *Server) initFrostFSMainnetProcessor(cfg *viper.Viper) error {
return bindMainnetProcessor(frostfsProcessor, s)
}
func (s *Server) initGRPCServer(cfg *viper.Viper, log *logger.Logger, audit *atomic.Bool) error {
func (s *Server) initGRPCServer(ctx context.Context, cfg *viper.Viper, log *logger.Logger, audit *atomic.Bool) error {
controlSvcEndpoint := cfg.GetString("control.grpc.endpoint")
if controlSvcEndpoint == "" {
s.log.Info(logs.InnerringNoControlServerEndpointSpecified)
s.log.Info(ctx, logs.InnerringNoControlServerEndpointSpecified)
return nil
}
@ -403,7 +414,7 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) {
return result, nil
}
func (s *Server) initProcessors(cfg *viper.Viper, morphClients *serverMorphClients) error {
func (s *Server) initProcessors(ctx context.Context, cfg *viper.Viper, morphClients *serverMorphClients) error {
irf := s.createIRFetcher()
s.statusIndex = newInnerRingIndexer(
@ -418,27 +429,27 @@ func (s *Server) initProcessors(cfg *viper.Viper, morphClients *serverMorphClien
return err
}
err = s.initNetmapProcessor(cfg, alphaSync)
err = s.initNetmapProcessor(ctx, cfg, alphaSync)
if err != nil {
return err
}
err = s.initContainerProcessor(cfg, morphClients.CnrClient, morphClients.FrostFSIDClient)
err = s.initContainerProcessor(ctx, cfg, morphClients.CnrClient, morphClients.FrostFSIDClient)
if err != nil {
return err
}
err = s.initBalanceProcessor(cfg, morphClients.FrostFSClient)
err = s.initBalanceProcessor(ctx, cfg, morphClients.FrostFSClient)
if err != nil {
return err
}
err = s.initFrostFSMainnetProcessor(cfg)
err = s.initFrostFSMainnetProcessor(ctx, cfg)
if err != nil {
return err
}
err = s.initAlphabetProcessor(cfg)
err = s.initAlphabetProcessor(ctx, cfg)
return err
}
@ -446,7 +457,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
fromSideChainBlock, err := s.persistate.UInt32(persistateSideChainLastBlockKey)
if err != nil {
fromSideChainBlock = 0
s.log.Warn(logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
s.log.Warn(ctx, logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
}
morphChain := &chainParams{
@ -471,7 +482,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
return nil, err
}
if err := s.morphClient.SetGroupSignerScope(); err != nil {
morphChain.log.Info(logs.InnerringFailedToSetGroupSignerScope, zap.Error(err))
morphChain.log.Info(ctx, logs.InnerringFailedToSetGroupSignerScope, zap.Error(err))
}
return morphChain, nil
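
Each `init*Processor` now reads its worker-pool size once, logs it with the context at the server level, and hands the same value to the constructor; the equivalent `Debug` call inside the processor constructors is removed in the `processor.go` hunks further down. A compact sketch of that split, assuming the ants pool the diff already uses; the function names are invented:

```go
package main

import (
	"context"
	"fmt"

	"github.com/panjf2000/ants/v2"
)

// newProcessorPool is the constructor side: it no longer logs the pool size,
// it only builds the pool from the value it is given.
func newProcessorPool(size int) (*ants.Pool, error) {
	return ants.NewPool(size, ants.WithNonblocking(true))
}

// initProcessor is the server side: read the size once, log it with the
// caller's context, then pass the same value to the constructor.
func initProcessor(ctx context.Context, poolSize int) error {
	fmt.Printf("netmap worker pool (ctx err=%v): size=%d\n", ctx.Err(), poolSize)
	pool, err := newProcessorPool(poolSize)
	if err != nil {
		return err
	}
	defer pool.Release()
	return nil
}

func main() {
	if err := initProcessor(context.Background(), 10); err != nil {
		fmt.Println("init failed:", err)
	}
}
```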

View file

@ -140,10 +140,10 @@ var (
// Start runs all event providers.
func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
s.setHealthStatus(control.HealthStatus_STARTING)
s.setHealthStatus(ctx, control.HealthStatus_STARTING)
defer func() {
if err == nil {
s.setHealthStatus(control.HealthStatus_READY)
s.setHealthStatus(ctx, control.HealthStatus_READY)
}
}()
@ -152,12 +152,12 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
return err
}
err = s.initConfigFromBlockchain()
err = s.initConfigFromBlockchain(ctx)
if err != nil {
return err
}
if s.IsAlphabet() {
if s.IsAlphabet(ctx) {
err = s.initMainNotary(ctx)
if err != nil {
return err
@ -173,14 +173,14 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
prm.Validators = s.predefinedValidators
// vote for sidechain validator if it is prepared in config
err = s.voteForSidechainValidator(prm)
err = s.voteForSidechainValidator(ctx, prm)
if err != nil {
// we don't stop inner ring execution on this error
s.log.Warn(logs.InnerringCantVoteForPreparedValidators,
s.log.Warn(ctx, logs.InnerringCantVoteForPreparedValidators,
zap.String("error", err.Error()))
}
s.tickInitialExpoch()
s.tickInitialExpoch(ctx)
morphErr := make(chan error)
mainnnetErr := make(chan error)
@ -217,14 +217,14 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
}
func (s *Server) registerMorphNewBlockEventHandler() {
s.morphListener.RegisterBlockHandler(func(b *block.Block) {
s.log.Debug(logs.InnerringNewBlock,
s.morphListener.RegisterBlockHandler(func(ctx context.Context, b *block.Block) {
s.log.Debug(ctx, logs.InnerringNewBlock,
zap.Uint32("index", b.Index),
)
err := s.persistate.SetUInt32(persistateSideChainLastBlockKey, b.Index)
if err != nil {
s.log.Warn(logs.InnerringCantUpdatePersistentState,
s.log.Warn(ctx, logs.InnerringCantUpdatePersistentState,
zap.String("chain", "side"),
zap.Uint32("block_index", b.Index))
}
@ -235,10 +235,10 @@ func (s *Server) registerMorphNewBlockEventHandler() {
func (s *Server) registerMainnetNewBlockEventHandler() {
if !s.withoutMainNet {
s.mainnetListener.RegisterBlockHandler(func(b *block.Block) {
s.mainnetListener.RegisterBlockHandler(func(ctx context.Context, b *block.Block) {
err := s.persistate.SetUInt32(persistateMainChainLastBlockKey, b.Index)
if err != nil {
s.log.Warn(logs.InnerringCantUpdatePersistentState,
s.log.Warn(ctx, logs.InnerringCantUpdatePersistentState,
zap.String("chain", "main"),
zap.Uint32("block_index", b.Index))
}
@ -283,11 +283,11 @@ func (s *Server) initSideNotary(ctx context.Context) error {
)
}
func (s *Server) tickInitialExpoch() {
func (s *Server) tickInitialExpoch(ctx context.Context) {
initialEpochTicker := timer.NewOneTickTimer(
timer.StaticBlockMeter(s.initialEpochTickDelta),
func() {
s.netmapProcessor.HandleNewEpochTick(timerEvent.NewEpochTick{})
s.netmapProcessor.HandleNewEpochTick(ctx, timerEvent.NewEpochTick{})
})
s.addBlockTimer(initialEpochTicker)
}
@ -299,15 +299,15 @@ func (s *Server) startWorkers(ctx context.Context) {
}
// Stop closes all subscription channels.
func (s *Server) Stop() {
s.setHealthStatus(control.HealthStatus_SHUTTING_DOWN)
func (s *Server) Stop(ctx context.Context) {
s.setHealthStatus(ctx, control.HealthStatus_SHUTTING_DOWN)
go s.morphListener.Stop()
go s.mainnetListener.Stop()
for _, c := range s.closers {
if err := c(); err != nil {
s.log.Warn(logs.InnerringCloserError,
s.log.Warn(ctx, logs.InnerringCloserError,
zap.String("error", err.Error()),
)
}
@ -349,7 +349,7 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan
return nil, err
}
server.setHealthStatus(control.HealthStatus_HEALTH_STATUS_UNDEFINED)
server.setHealthStatus(ctx, control.HealthStatus_HEALTH_STATUS_UNDEFINED)
// parse notary support
server.feeConfig = config.NewFeeConfig(cfg)
@ -376,7 +376,7 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan
return nil, err
}
server.initNotaryConfig()
server.initNotaryConfig(ctx)
err = server.initContracts(cfg)
if err != nil {
@ -400,14 +400,14 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan
return nil, err
}
err = server.initProcessors(cfg, morphClients)
err = server.initProcessors(ctx, cfg, morphClients)
if err != nil {
return nil, err
}
server.initTimers(cfg)
server.initTimers(ctx, cfg)
err = server.initGRPCServer(cfg, log, audit)
err = server.initGRPCServer(ctx, cfg, log, audit)
if err != nil {
return nil, err
}
@ -438,7 +438,7 @@ func createListener(ctx context.Context, cli *client.Client, p *chainParams) (ev
}
listener, err := event.NewListener(event.ListenerParams{
Logger: &logger.Logger{Logger: p.log.With(zap.String("chain", p.name))},
Logger: p.log.With(zap.String("chain", p.name)),
Subscriber: sub,
})
if err != nil {
@ -573,7 +573,7 @@ func parseMultinetConfig(cfg *viper.Viper, m metrics.MultinetMetrics) internalNe
return nc
}
func (s *Server) initConfigFromBlockchain() error {
func (s *Server) initConfigFromBlockchain(ctx context.Context) error {
// get current epoch
epoch, err := s.netmapClient.Epoch()
if err != nil {
@ -602,9 +602,9 @@ func (s *Server) initConfigFromBlockchain() error {
return err
}
s.log.Debug(logs.InnerringReadConfigFromBlockchain,
zap.Bool("active", s.IsActive()),
zap.Bool("alphabet", s.IsAlphabet()),
s.log.Debug(ctx, logs.InnerringReadConfigFromBlockchain,
zap.Bool("active", s.IsActive(ctx)),
zap.Bool("alphabet", s.IsAlphabet(ctx)),
zap.Uint64("epoch", epoch),
zap.Uint32("precision", balancePrecision),
zap.Uint32("init_epoch_tick_delta", s.initialEpochTickDelta),
@ -635,17 +635,17 @@ func (s *Server) nextEpochBlockDelta() (uint32, error) {
// onlyAlphabet wrapper around event handler that executes it
// only if inner ring node is alphabet node.
func (s *Server) onlyAlphabetEventHandler(f event.Handler) event.Handler {
return func(ev event.Event) {
if s.IsAlphabet() {
f(ev)
return func(ctx context.Context, ev event.Event) {
if s.IsAlphabet(ctx) {
f(ctx, ev)
}
}
}
func (s *Server) newEpochTickHandlers() []newEpochHandler {
func (s *Server) newEpochTickHandlers(ctx context.Context) []newEpochHandler {
newEpochHandlers := []newEpochHandler{
func() {
s.netmapProcessor.HandleNewEpochTick(timerEvent.NewEpochTick{})
s.netmapProcessor.HandleNewEpochTick(ctx, timerEvent.NewEpochTick{})
},
}
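
`Start` and `Stop` now accept a context and pass it to `setHealthStatus` and to every log call made while the server winds down. A condensed lifecycle sketch with a local health-status type standing in for the control package:

```go
package main

import (
	"context"
	"errors"
	"log"
)

type healthStatus int

const (
	statusStarting healthStatus = iota
	statusReady
	statusShuttingDown
)

type server struct {
	closers []func() error
}

func (s *server) setHealthStatus(ctx context.Context, st healthStatus) {
	log.Printf("health status -> %d (ctx err=%v)", st, ctx.Err())
}

func (s *server) Start(ctx context.Context) (err error) {
	s.setHealthStatus(ctx, statusStarting)
	defer func() {
		if err == nil {
			s.setHealthStatus(ctx, statusReady)
		}
	}()
	return nil
}

func (s *server) Stop(ctx context.Context) {
	s.setHealthStatus(ctx, statusShuttingDown)
	for _, c := range s.closers {
		if err := c(); err != nil {
			log.Printf("closer error (ctx err=%v): %v", ctx.Err(), err)
		}
	}
}

func main() {
	s := &server{closers: []func() error{
		func() error { return errors.New("subscription already closed") },
	}}
	ctx := context.Background()
	_ = s.Start(ctx)
	s.Stop(ctx)
}
```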

View file

@ -28,38 +28,39 @@ const (
gasDivisor = 2
)
func (s *Server) depositMainNotary() (tx util.Uint256, err error) {
func (s *Server) depositMainNotary(ctx context.Context) (tx util.Uint256, err error) {
depositAmount, err := client.CalculateNotaryDepositAmount(s.mainnetClient, gasMultiplier, gasDivisor)
if err != nil {
return util.Uint256{}, fmt.Errorf("could not calculate main notary deposit amount: %w", err)
}
return s.mainnetClient.DepositNotary(
ctx,
depositAmount,
uint32(s.epochDuration.Load())+notaryExtraBlocks,
)
}
func (s *Server) depositSideNotary() (util.Uint256, error) {
func (s *Server) depositSideNotary(ctx context.Context) (util.Uint256, error) {
depositAmount, err := client.CalculateNotaryDepositAmount(s.morphClient, gasMultiplier, gasDivisor)
if err != nil {
return util.Uint256{}, fmt.Errorf("could not calculate side notary deposit amount: %w", err)
}
tx, _, err := s.morphClient.DepositEndlessNotary(depositAmount)
tx, _, err := s.morphClient.DepositEndlessNotary(ctx, depositAmount)
return tx, err
}
func (s *Server) notaryHandler(_ event.Event) {
func (s *Server) notaryHandler(ctx context.Context, _ event.Event) {
if !s.mainNotaryConfig.disabled {
_, err := s.depositMainNotary()
_, err := s.depositMainNotary(ctx)
if err != nil {
s.log.Error(logs.InnerringCantMakeNotaryDepositInMainChain, zap.Error(err))
s.log.Error(ctx, logs.InnerringCantMakeNotaryDepositInMainChain, zap.Error(err))
}
}
if _, err := s.depositSideNotary(); err != nil {
s.log.Error(logs.InnerringCantMakeNotaryDepositInSideChain, zap.Error(err))
if _, err := s.depositSideNotary(ctx); err != nil {
s.log.Error(ctx, logs.InnerringCantMakeNotaryDepositInSideChain, zap.Error(err))
}
}
@ -72,7 +73,7 @@ func (s *Server) awaitSideNotaryDeposit(ctx context.Context, tx util.Uint256) er
}
func (s *Server) initNotary(ctx context.Context, deposit depositor, await awaiter, msg string) error {
tx, err := deposit()
tx, err := deposit(ctx)
if err != nil {
return err
}
@ -81,11 +82,11 @@ func (s *Server) initNotary(ctx context.Context, deposit depositor, await awaite
// non-error deposit with an empty TX hash means
// that the deposit has already been made; no
// need to wait it.
s.log.Info(logs.InnerringNotaryDepositHasAlreadyBeenMade)
s.log.Info(ctx, logs.InnerringNotaryDepositHasAlreadyBeenMade)
return nil
}
s.log.Info(msg)
s.log.Info(ctx, msg)
return await(ctx, tx)
}
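
Both deposit helpers take the context and hand it to the chain client; `initNotary` forwards the same context and, as before, skips awaiting when the returned hash is empty because the deposit was already made. A hedged sketch with a placeholder hash type instead of the Neo util package:

```go
package main

import (
	"context"
	"fmt"
)

type txHash [32]byte

func (h txHash) isZero() bool { return h == txHash{} }

// deposit simulates DepositNotary / DepositEndlessNotary: an all-zero hash
// means the deposit already exists and nothing has to be awaited.
func deposit(ctx context.Context) (txHash, error) {
	_ = ctx // the real client uses ctx for the chain invocation
	return txHash{}, nil
}

func initNotary(ctx context.Context) error {
	tx, err := deposit(ctx)
	if err != nil {
		return err
	}
	if tx.isZero() {
		fmt.Println("notary deposit has already been made")
		return nil
	}
	fmt.Println("waiting for notary deposit", tx)
	return nil // the real code calls await(ctx, tx) at this point
}

func main() {
	_ = initNotary(context.Background())
}
```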

View file

@ -1,6 +1,8 @@
package alphabet
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
@ -8,16 +10,16 @@ import (
"go.uber.org/zap"
)
func (ap *Processor) HandleGasEmission(ev event.Event) {
func (ap *Processor) HandleGasEmission(ctx context.Context, ev event.Event) {
_ = ev.(timers.NewAlphabetEmitTick)
ap.log.Info(logs.AlphabetTick, zap.String("type", "alphabet gas emit"))
ap.log.Info(ctx, logs.AlphabetTick, zap.String("type", "alphabet gas emit"))
// send event to the worker pool
err := processors.SubmitEvent(ap.pool, ap.metrics, "alphabet_emit_gas", ap.processEmit)
err := processors.SubmitEvent(ap.pool, ap.metrics, "alphabet_emit_gas", func() bool { return ap.processEmit(ctx) })
if err != nil {
// there system can be moved into controlled degradation stage
ap.log.Warn(logs.AlphabetAlphabetProcessorWorkerPoolDrained,
ap.log.Warn(ctx, logs.AlphabetAlphabetProcessorWorkerPoolDrained,
zap.Int("capacity", ap.pool.Cap()))
}
}
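
`processors.SubmitEvent` still expects a plain `func() bool`, so the handler wraps the now context-aware `processEmit` in a closure: `func() bool { return ap.processEmit(ctx) }`. The sketch below reproduces that adapter with an inline toy pool instead of the real ants pool:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// submitEvent mimics processors.SubmitEvent: it only accepts a niladic
// func() bool, so the caller must capture the context in a closure.
func submitEvent(freeWorkers int, handler func() bool) error {
	if freeWorkers <= 0 {
		return errors.New("worker pool drained")
	}
	// The real ants pool runs the handler on a worker goroutine; here it is
	// executed inline to keep the sketch deterministic.
	handler()
	return nil
}

// processEmit is the context-aware worker, as in the diff.
func processEmit(ctx context.Context) bool {
	fmt.Println("processing gas emission, ctx alive:", ctx.Err() == nil)
	return true
}

func main() {
	ctx := context.Background()
	// The event's context is captured by the closure handed to the pool.
	if err := submitEvent(1, func() bool { return processEmit(ctx) }); err != nil {
		fmt.Println("warn: alphabet processor worker pool drained:", err)
	}
}
```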

View file

@ -1,6 +1,7 @@
package alphabet_test
import (
"context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
@ -60,7 +61,7 @@ func TestProcessorEmitsGasToNetmapAndAlphabet(t *testing.T) {
processor, err := alphabet.New(params)
require.NoError(t, err, "failed to create processor instance")
processor.HandleGasEmission(timers.NewAlphabetEmitTick{})
processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{})
processor.WaitPoolRunning()
@ -137,7 +138,7 @@ func TestProcessorEmitsGasToNetmapIfNoParsedWallets(t *testing.T) {
processor, err := alphabet.New(params)
require.NoError(t, err, "failed to create processor instance")
processor.HandleGasEmission(timers.NewAlphabetEmitTick{})
processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{})
processor.WaitPoolRunning()
@ -198,7 +199,7 @@ func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) {
processor, err := alphabet.New(params)
require.NoError(t, err, "failed to create processor instance")
processor.HandleGasEmission(timers.NewAlphabetEmitTick{})
processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{})
processor.WaitPoolRunning()
@ -219,7 +220,7 @@ type testIndexer struct {
index int
}
func (i *testIndexer) AlphabetIndex() int {
func (i *testIndexer) AlphabetIndex(context.Context) int {
return i.index
}
@ -246,7 +247,7 @@ type testMorphClient struct {
batchTransferedGas []batchTransferGas
}
func (c *testMorphClient) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error) {
func (c *testMorphClient) Invoke(_ context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error) {
c.invokedMethods = append(c.invokedMethods,
invokedMethod{
contract: contract,

View file

@ -1,6 +1,7 @@
package alphabet
import (
"context"
"crypto/elliptic"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@ -13,39 +14,39 @@ import (
const emitMethod = "emit"
func (ap *Processor) processEmit() bool {
index := ap.irList.AlphabetIndex()
func (ap *Processor) processEmit(ctx context.Context) bool {
index := ap.irList.AlphabetIndex(ctx)
if index < 0 {
ap.log.Info(logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent)
ap.log.Info(ctx, logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent)
return true
}
contract, ok := ap.alphabetContracts.GetByIndex(index)
if !ok {
ap.log.Debug(logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent,
ap.log.Debug(ctx, logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent,
zap.Int("index", index))
return false
}
// there is no signature collecting, so we don't need extra fee
_, err := ap.morphClient.Invoke(contract, 0, emitMethod)
_, err := ap.morphClient.Invoke(ctx, contract, 0, emitMethod)
if err != nil {
ap.log.Warn(logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error()))
ap.log.Warn(ctx, logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error()))
return false
}
if ap.storageEmission == 0 {
ap.log.Info(logs.AlphabetStorageNodeEmissionIsOff)
ap.log.Info(ctx, logs.AlphabetStorageNodeEmissionIsOff)
return true
}
networkMap, err := ap.netmapClient.NetMap()
if err != nil {
ap.log.Warn(logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes,
ap.log.Warn(ctx, logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes,
zap.String("error", err.Error()))
return false
@ -58,7 +59,7 @@ func (ap *Processor) processEmit() bool {
ap.pwLock.RUnlock()
extraLen := len(pw)
ap.log.Debug(logs.AlphabetGasEmission,
ap.log.Debug(ctx, logs.AlphabetGasEmission,
zap.Int("network_map", nmLen),
zap.Int("extra_wallets", extraLen))
@ -68,20 +69,20 @@ func (ap *Processor) processEmit() bool {
gasPerNode := fixedn.Fixed8(ap.storageEmission / uint64(nmLen+extraLen))
ap.transferGasToNetmapNodes(nmNodes, gasPerNode)
ap.transferGasToNetmapNodes(ctx, nmNodes, gasPerNode)
ap.transferGasToExtraNodes(pw, gasPerNode)
ap.transferGasToExtraNodes(ctx, pw, gasPerNode)
return true
}
func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerNode fixedn.Fixed8) {
func (ap *Processor) transferGasToNetmapNodes(ctx context.Context, nmNodes []netmap.NodeInfo, gasPerNode fixedn.Fixed8) {
for i := range nmNodes {
keyBytes := nmNodes[i].PublicKey()
key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256())
if err != nil {
ap.log.Warn(logs.AlphabetCantParseNodePublicKey,
ap.log.Warn(ctx, logs.AlphabetCantParseNodePublicKey,
zap.String("error", err.Error()))
continue
@ -89,7 +90,7 @@ func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerN
err = ap.morphClient.TransferGas(key.GetScriptHash(), gasPerNode)
if err != nil {
ap.log.Warn(logs.AlphabetCantTransferGas,
ap.log.Warn(ctx, logs.AlphabetCantTransferGas,
zap.String("receiver", key.Address()),
zap.Int64("amount", int64(gasPerNode)),
zap.String("error", err.Error()),
@ -98,7 +99,7 @@ func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerN
}
}
func (ap *Processor) transferGasToExtraNodes(pw []util.Uint160, gasPerNode fixedn.Fixed8) {
func (ap *Processor) transferGasToExtraNodes(ctx context.Context, pw []util.Uint160, gasPerNode fixedn.Fixed8) {
if len(pw) > 0 {
err := ap.morphClient.BatchTransferGas(pw, gasPerNode)
if err != nil {
@ -106,7 +107,7 @@ func (ap *Processor) transferGasToExtraNodes(pw []util.Uint160, gasPerNode fixed
for i, addr := range pw {
receiversLog[i] = addr.StringLE()
}
ap.log.Warn(logs.AlphabetCantTransferGasToWallet,
ap.log.Warn(ctx, logs.AlphabetCantTransferGasToWallet,
zap.Strings("receivers", receiversLog),
zap.Int64("amount", int64(gasPerNode)),
zap.String("error", err.Error()),

View file

@ -1,12 +1,12 @@
package alphabet
import (
"context"
"errors"
"fmt"
"sync"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
@ -14,13 +14,12 @@ import (
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
"go.uber.org/zap"
)
type (
// Indexer is a callback interface for inner ring global state.
Indexer interface {
AlphabetIndex() int
AlphabetIndex(context.Context) int
}
// Contracts is an interface of the storage
@ -40,7 +39,7 @@ type (
}
morphClient interface {
Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error)
Invoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error)
TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error
BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8) error
}
@ -85,8 +84,6 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/alphabet: global state is not set")
}
p.Log.Debug(logs.AlphabetAlphabetWorkerPool, zap.Int("size", p.PoolSize))
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
return nil, fmt.Errorf("ir/frostfs: can't create worker pool: %w", err)

View file

@ -1,6 +1,7 @@
package balance
import (
"context"
"encoding/hex"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@ -10,20 +11,20 @@ import (
"go.uber.org/zap"
)
func (bp *Processor) handleLock(ev event.Event) {
func (bp *Processor) handleLock(ctx context.Context, ev event.Event) {
lock := ev.(balanceEvent.Lock)
bp.log.Info(logs.Notification,
bp.log.Info(ctx, logs.Notification,
zap.String("type", "lock"),
zap.String("value", hex.EncodeToString(lock.ID())))
// send an event to the worker pool
err := processors.SubmitEvent(bp.pool, bp.metrics, "lock", func() bool {
return bp.processLock(&lock)
return bp.processLock(ctx, &lock)
})
if err != nil {
// there system can be moved into controlled degradation stage
bp.log.Warn(logs.BalanceBalanceWorkerPoolDrained,
bp.log.Warn(ctx, logs.BalanceBalanceWorkerPoolDrained,
zap.Int("capacity", bp.pool.Cap()))
}
}

View file

@ -1,6 +1,7 @@
package balance
import (
"context"
"testing"
"time"
@ -30,7 +31,7 @@ func TestProcessorCallsFrostFSContractForLockEvent(t *testing.T) {
})
require.NoError(t, err, "failed to create processor")
processor.handleLock(balanceEvent.Lock{})
processor.handleLock(context.Background(), balanceEvent.Lock{})
for processor.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@ -56,7 +57,7 @@ func TestProcessorDoesntCallFrostFSContractIfNotAlphabet(t *testing.T) {
})
require.NoError(t, err, "failed to create processor")
processor.handleLock(balanceEvent.Lock{})
processor.handleLock(context.Background(), balanceEvent.Lock{})
for processor.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@ -69,7 +70,7 @@ type testAlphabetState struct {
isAlphabet bool
}
func (s *testAlphabetState) IsAlphabet() bool {
func (s *testAlphabetState) IsAlphabet(context.Context) bool {
return s.isAlphabet
}
@ -83,7 +84,7 @@ type testFrostFSContractClient struct {
chequeCalls int
}
func (c *testFrostFSContractClient) Cheque(p frostfscontract.ChequePrm) error {
func (c *testFrostFSContractClient) Cheque(_ context.Context, p frostfscontract.ChequePrm) error {
c.chequeCalls++
return nil
}

View file

@ -1,6 +1,8 @@
package balance
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
frostfsContract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance"
@ -9,9 +11,9 @@ import (
// Process lock event by invoking Cheque method in main net to send assets
// back to the withdraw issuer.
func (bp *Processor) processLock(lock *balanceEvent.Lock) bool {
if !bp.alphabetState.IsAlphabet() {
bp.log.Info(logs.BalanceNonAlphabetModeIgnoreBalanceLock)
func (bp *Processor) processLock(ctx context.Context, lock *balanceEvent.Lock) bool {
if !bp.alphabetState.IsAlphabet(ctx) {
bp.log.Info(ctx, logs.BalanceNonAlphabetModeIgnoreBalanceLock)
return true
}
@ -23,9 +25,9 @@ func (bp *Processor) processLock(lock *balanceEvent.Lock) bool {
prm.SetLock(lock.LockAccount())
prm.SetHash(lock.TxHash())
err := bp.frostfsClient.Cheque(prm)
err := bp.frostfsClient.Cheque(ctx, prm)
if err != nil {
bp.log.Error(logs.BalanceCantSendLockAssetTx, zap.Error(err))
bp.log.Error(ctx, logs.BalanceCantSendLockAssetTx, zap.Error(err))
return false
}

View file

@ -1,10 +1,10 @@
package balance
import (
"context"
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
@ -12,13 +12,12 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
"go.uber.org/zap"
)
type (
// AlphabetState is a callback interface for inner ring global state.
AlphabetState interface {
IsAlphabet() bool
IsAlphabet(context.Context) bool
}
// PrecisionConverter converts balance amount values.
@ -27,7 +26,7 @@ type (
}
FrostFSClient interface {
Cheque(p frostfscontract.ChequePrm) error
Cheque(ctx context.Context, p frostfscontract.ChequePrm) error
}
// Processor of events produced by balance contract in the morphchain.
@ -68,8 +67,6 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/balance: balance precision converter is not set")
}
p.Log.Debug(logs.BalanceBalanceWorkerPool, zap.Int("size", p.PoolSize))
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
return nil, fmt.Errorf("ir/balance: can't create worker pool: %w", err)

View file

@ -1,6 +1,7 @@
package container
import (
"context"
"crypto/sha256"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@ -11,40 +12,40 @@ import (
"go.uber.org/zap"
)
func (cp *Processor) handlePut(ev event.Event) {
func (cp *Processor) handlePut(ctx context.Context, ev event.Event) {
put := ev.(putEvent)
id := sha256.Sum256(put.Container())
cp.log.Info(logs.Notification,
cp.log.Info(ctx, logs.Notification,
zap.String("type", "container put"),
zap.String("id", base58.Encode(id[:])))
// send an event to the worker pool
err := processors.SubmitEvent(cp.pool, cp.metrics, "container_put", func() bool {
return cp.processContainerPut(put)
return cp.processContainerPut(ctx, put)
})
if err != nil {
// there system can be moved into controlled degradation stage
cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained,
cp.log.Warn(ctx, logs.ContainerContainerProcessorWorkerPoolDrained,
zap.Int("capacity", cp.pool.Cap()))
}
}
func (cp *Processor) handleDelete(ev event.Event) {
func (cp *Processor) handleDelete(ctx context.Context, ev event.Event) {
del := ev.(containerEvent.Delete)
cp.log.Info(logs.Notification,
cp.log.Info(ctx, logs.Notification,
zap.String("type", "container delete"),
zap.String("id", base58.Encode(del.ContainerID())))
// send an event to the worker pool
err := processors.SubmitEvent(cp.pool, cp.metrics, "container_delete", func() bool {
return cp.processContainerDelete(del)
return cp.processContainerDelete(ctx, del)
})
if err != nil {
// there system can be moved into controlled degradation stage
cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained,
cp.log.Warn(ctx, logs.ContainerContainerProcessorWorkerPoolDrained,
zap.Int("capacity", cp.pool.Cap()))
}
}

View file

@ -1,6 +1,7 @@
package container
import (
"context"
"crypto/ecdsa"
"encoding/hex"
"testing"
@ -71,7 +72,7 @@ func TestPutEvent(t *testing.T) {
nr: nr,
}
proc.handlePut(event)
proc.handlePut(context.Background(), event)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@ -143,7 +144,7 @@ func TestDeleteEvent(t *testing.T) {
Signature: signature,
}
proc.handleDelete(ev)
proc.handleDelete(context.Background(), ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@ -160,7 +161,7 @@ type testAlphabetState struct {
isAlphabet bool
}
func (s *testAlphabetState) IsAlphabet() bool {
func (s *testAlphabetState) IsAlphabet(context.Context) bool {
return s.isAlphabet
}

View file

@ -1,6 +1,7 @@
package container
import (
"context"
"errors"
"fmt"
"strings"
@ -36,27 +37,27 @@ var errContainerAndOwnerNamespaceDontMatch = errors.New("container and owner nam
// Process a new container from the user by checking the container sanity
// and sending approve tx back to the morph.
func (cp *Processor) processContainerPut(put putEvent) bool {
if !cp.alphabetState.IsAlphabet() {
cp.log.Info(logs.ContainerNonAlphabetModeIgnoreContainerPut)
func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool {
if !cp.alphabetState.IsAlphabet(ctx) {
cp.log.Info(ctx, logs.ContainerNonAlphabetModeIgnoreContainerPut)
return true
}
ctx := &putContainerContext{
pctx := &putContainerContext{
e: put,
}
err := cp.checkPutContainer(ctx)
err := cp.checkPutContainer(pctx)
if err != nil {
cp.log.Error(logs.ContainerPutContainerCheckFailed,
cp.log.Error(ctx, logs.ContainerPutContainerCheckFailed,
zap.String("error", err.Error()),
)
return false
}
if err := cp.morphClient.NotarySignAndInvokeTX(ctx.e.NotaryRequest().MainTransaction); err != nil {
cp.log.Error(logs.ContainerCouldNotApprovePutContainer,
if err := cp.morphClient.NotarySignAndInvokeTX(pctx.e.NotaryRequest().MainTransaction); err != nil {
cp.log.Error(ctx, logs.ContainerCouldNotApprovePutContainer,
zap.String("error", err.Error()),
)
return false
@ -103,15 +104,15 @@ func (cp *Processor) checkPutContainer(ctx *putContainerContext) error {
// Process delete container operation from the user by checking container sanity
// and sending approve tx back to morph.
func (cp *Processor) processContainerDelete(e containerEvent.Delete) bool {
if !cp.alphabetState.IsAlphabet() {
cp.log.Info(logs.ContainerNonAlphabetModeIgnoreContainerDelete)
func (cp *Processor) processContainerDelete(ctx context.Context, e containerEvent.Delete) bool {
if !cp.alphabetState.IsAlphabet(ctx) {
cp.log.Info(ctx, logs.ContainerNonAlphabetModeIgnoreContainerDelete)
return true
}
err := cp.checkDeleteContainer(e)
if err != nil {
cp.log.Error(logs.ContainerDeleteContainerCheckFailed,
cp.log.Error(ctx, logs.ContainerDeleteContainerCheckFailed,
zap.String("error", err.Error()),
)
@ -119,7 +120,7 @@ func (cp *Processor) processContainerDelete(e containerEvent.Delete) bool {
}
if err := cp.morphClient.NotarySignAndInvokeTX(e.NotaryRequest().MainTransaction); err != nil {
cp.log.Error(logs.ContainerCouldNotApproveDeleteContainer,
cp.log.Error(ctx, logs.ContainerCouldNotApproveDeleteContainer,
zap.String("error", err.Error()),
)

View file

@ -1,11 +1,11 @@
package container
import (
"context"
"errors"
"fmt"
frostfsidclient "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
@ -15,13 +15,12 @@ import (
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
"go.uber.org/zap"
)
type (
// AlphabetState is a callback interface for inner ring global state.
AlphabetState interface {
IsAlphabet() bool
IsAlphabet(context.Context) bool
}
ContClient interface {
@ -97,8 +96,6 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/container: FrostFSID client is not set")
}
p.Log.Debug(logs.ContainerContainerWorkerPool, zap.Int("size", p.PoolSize))
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
return nil, fmt.Errorf("ir/container: can't create worker pool: %w", err)

View file

@ -2,6 +2,7 @@ package frostfs
import (
"bytes"
"context"
"encoding/hex"
"slices"
@ -12,67 +13,67 @@ import (
"go.uber.org/zap"
)
func (np *Processor) handleDeposit(ev event.Event) {
func (np *Processor) handleDeposit(ctx context.Context, ev event.Event) {
deposit := ev.(frostfsEvent.Deposit)
depositIDBin := bytes.Clone(deposit.ID())
slices.Reverse(depositIDBin)
np.log.Info(logs.Notification,
np.log.Info(ctx, logs.Notification,
zap.String("type", "deposit"),
zap.String("id", hex.EncodeToString(depositIDBin)))
// send event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_deposit", func() bool {
return np.processDeposit(deposit)
return np.processDeposit(ctx, deposit)
})
if err != nil {
// there system can be moved into controlled degradation stage
np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
func (np *Processor) handleWithdraw(ev event.Event) {
func (np *Processor) handleWithdraw(ctx context.Context, ev event.Event) {
withdraw := ev.(frostfsEvent.Withdraw)
withdrawBin := bytes.Clone(withdraw.ID())
slices.Reverse(withdrawBin)
np.log.Info(logs.Notification,
np.log.Info(ctx, logs.Notification,
zap.String("type", "withdraw"),
zap.String("id", hex.EncodeToString(withdrawBin)))
// send event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_withdraw", func() bool {
return np.processWithdraw(withdraw)
return np.processWithdraw(ctx, withdraw)
})
if err != nil {
// there system can be moved into controlled degradation stage
np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
func (np *Processor) handleCheque(ev event.Event) {
func (np *Processor) handleCheque(ctx context.Context, ev event.Event) {
cheque := ev.(frostfsEvent.Cheque)
np.log.Info(logs.Notification,
np.log.Info(ctx, logs.Notification,
zap.String("type", "cheque"),
zap.String("id", hex.EncodeToString(cheque.ID())))
// send event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_cheque", func() bool {
return np.processCheque(cheque)
return np.processCheque(ctx, cheque)
})
if err != nil {
// there system can be moved into controlled degradation stage
np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
func (np *Processor) handleConfig(ev event.Event) {
func (np *Processor) handleConfig(ctx context.Context, ev event.Event) {
cfg := ev.(frostfsEvent.Config)
np.log.Info(logs.Notification,
np.log.Info(ctx, logs.Notification,
zap.String("type", "set config"),
zap.String("key", hex.EncodeToString(cfg.Key())),
zap.String("value", hex.EncodeToString(cfg.Value())))
@ -80,11 +81,11 @@ func (np *Processor) handleConfig(ev event.Event) {
// send event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_config", func() bool {
return np.processConfig(cfg)
return np.processConfig(ctx, cfg)
})
if err != nil {
// there system can be moved into controlled degradation stage
np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}

View file

@ -1,6 +1,7 @@
package frostfs
import (
"context"
"testing"
"time"
@ -36,7 +37,7 @@ func TestHandleDeposit(t *testing.T) {
AmountValue: 1000,
}
proc.handleDeposit(ev)
proc.handleDeposit(context.Background(), ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@ -57,7 +58,7 @@ func TestHandleDeposit(t *testing.T) {
es.epochCounter = 109
proc.handleDeposit(ev)
proc.handleDeposit(context.Background(), ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@ -98,7 +99,7 @@ func TestHandleWithdraw(t *testing.T) {
AmountValue: 1000,
}
proc.handleWithdraw(ev)
proc.handleWithdraw(context.Background(), ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@ -139,7 +140,7 @@ func TestHandleCheque(t *testing.T) {
LockValue: util.Uint160{200},
}
proc.handleCheque(ev)
proc.handleCheque(context.Background(), ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@ -176,7 +177,7 @@ func TestHandleConfig(t *testing.T) {
TxHashValue: util.Uint256{100},
}
proc.handleConfig(ev)
proc.handleConfig(context.Background(), ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@ -225,7 +226,7 @@ type testAlphabetState struct {
isAlphabet bool
}
func (s *testAlphabetState) IsAlphabet() bool {
func (s *testAlphabetState) IsAlphabet(context.Context) bool {
return s.isAlphabet
}
@ -241,17 +242,17 @@ type testBalaceClient struct {
burn []balance.BurnPrm
}
func (c *testBalaceClient) Mint(p balance.MintPrm) error {
func (c *testBalaceClient) Mint(_ context.Context, p balance.MintPrm) error {
c.mint = append(c.mint, p)
return nil
}
func (c *testBalaceClient) Lock(p balance.LockPrm) error {
func (c *testBalaceClient) Lock(_ context.Context, p balance.LockPrm) error {
c.lock = append(c.lock, p)
return nil
}
func (c *testBalaceClient) Burn(p balance.BurnPrm) error {
func (c *testBalaceClient) Burn(_ context.Context, p balance.BurnPrm) error {
c.burn = append(c.burn, p)
return nil
}
@ -260,7 +261,7 @@ type testNetmapClient struct {
config []nmClient.SetConfigPrm
}
func (c *testNetmapClient) SetConfig(p nmClient.SetConfigPrm) error {
func (c *testNetmapClient) SetConfig(_ context.Context, p nmClient.SetConfigPrm) error {
c.config = append(c.config, p)
return nil
}
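
The handler tests adapt by passing `context.Background()` as the new first argument and then polling the pool until it drains, as the diff shows. A stand-alone sketch of that test pattern with a toy processor (none of these names are the real frostfs types):

```go
package main

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"
)

type processor struct{ running atomic.Int32 }

// handleDeposit mimics the updated handler signature: ctx first, then the event.
func (p *processor) handleDeposit(ctx context.Context, amount int64) {
	p.running.Add(1)
	go func() {
		defer p.running.Add(-1)
		fmt.Println("deposit processed:", amount, "ctx alive:", ctx.Err() == nil)
	}()
}

func main() {
	proc := &processor{}
	proc.handleDeposit(context.Background(), 1000)

	// The tests in the diff wait the same way: poll until no workers are running.
	for proc.running.Load() > 0 {
		time.Sleep(10 * time.Millisecond)
	}
}
```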

View file

@ -1,6 +1,8 @@
package frostfs
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
@ -15,9 +17,9 @@ const (
// Process deposit event by invoking a balance contract and sending native
// gas in the sidechain.
func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
if !np.alphabetState.IsAlphabet() {
np.log.Info(logs.FrostFSNonAlphabetModeIgnoreDeposit)
func (np *Processor) processDeposit(ctx context.Context, deposit frostfsEvent.Deposit) bool {
if !np.alphabetState.IsAlphabet(ctx) {
np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreDeposit)
return true
}
@ -28,9 +30,9 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
prm.SetID(deposit.ID())
// send transferX to a balance contract
err := np.balanceClient.Mint(prm)
err := np.balanceClient.Mint(ctx, prm)
if err != nil {
np.log.Error(logs.FrostFSCantTransferAssetsToBalanceContract, zap.Error(err))
np.log.Error(ctx, logs.FrostFSCantTransferAssetsToBalanceContract, zap.Error(err))
}
curEpoch := np.epochState.EpochCounter()
@ -44,7 +46,7 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
val, ok := np.mintEmitCache.Get(receiver.String())
if ok && val+np.mintEmitThreshold >= curEpoch {
np.log.Warn(logs.FrostFSDoubleMintEmissionDeclined,
np.log.Warn(ctx, logs.FrostFSDoubleMintEmissionDeclined,
zap.Stringer("receiver", receiver),
zap.Uint64("last_emission", val),
zap.Uint64("current_epoch", curEpoch))
@ -56,12 +58,12 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
// before gas transfer check if the balance is greater than the threshold
balance, err := np.morphClient.GasBalance()
if err != nil {
np.log.Error(logs.FrostFSCantGetGasBalanceOfTheNode, zap.Error(err))
np.log.Error(ctx, logs.FrostFSCantGetGasBalanceOfTheNode, zap.Error(err))
return false
}
if balance < np.gasBalanceThreshold {
np.log.Warn(logs.FrostFSGasBalanceThresholdHasBeenReached,
np.log.Warn(ctx, logs.FrostFSGasBalanceThresholdHasBeenReached,
zap.Int64("balance", balance),
zap.Int64("threshold", np.gasBalanceThreshold))
@ -70,7 +72,7 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
err = np.morphClient.TransferGas(receiver, np.mintEmitValue)
if err != nil {
np.log.Error(logs.FrostFSCantTransferNativeGasToReceiver,
np.log.Error(ctx, logs.FrostFSCantTransferNativeGasToReceiver,
zap.String("error", err.Error()))
return false
@ -82,16 +84,16 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
}
// Process withdraw event by locking assets in the balance account.
func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool {
if !np.alphabetState.IsAlphabet() {
np.log.Info(logs.FrostFSNonAlphabetModeIgnoreWithdraw)
func (np *Processor) processWithdraw(ctx context.Context, withdraw frostfsEvent.Withdraw) bool {
if !np.alphabetState.IsAlphabet(ctx) {
np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreWithdraw)
return true
}
// create lock account
lock, err := util.Uint160DecodeBytesBE(withdraw.ID()[:util.Uint160Size])
if err != nil {
np.log.Error(logs.FrostFSCantCreateLockAccount, zap.Error(err))
np.log.Error(ctx, logs.FrostFSCantCreateLockAccount, zap.Error(err))
return false
}
@ -105,9 +107,9 @@ func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool {
prm.SetAmount(np.converter.ToBalancePrecision(withdraw.Amount()))
prm.SetDueEpoch(int64(curEpoch + lockAccountLifetime))
err = np.balanceClient.Lock(prm)
err = np.balanceClient.Lock(ctx, prm)
if err != nil {
np.log.Error(logs.FrostFSCantLockAssetsForWithdraw, zap.Error(err))
np.log.Error(ctx, logs.FrostFSCantLockAssetsForWithdraw, zap.Error(err))
return false
}
@ -116,9 +118,9 @@ func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool {
// Process cheque event by transferring assets from the lock account back to
// the reserve account.
func (np *Processor) processCheque(cheque frostfsEvent.Cheque) bool {
if !np.alphabetState.IsAlphabet() {
np.log.Info(logs.FrostFSNonAlphabetModeIgnoreCheque)
func (np *Processor) processCheque(ctx context.Context, cheque frostfsEvent.Cheque) bool {
if !np.alphabetState.IsAlphabet(ctx) {
np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreCheque)
return true
}
@ -128,9 +130,9 @@ func (np *Processor) processCheque(cheque frostfsEvent.Cheque) bool {
prm.SetAmount(np.converter.ToBalancePrecision(cheque.Amount()))
prm.SetID(cheque.ID())
err := np.balanceClient.Burn(prm)
err := np.balanceClient.Burn(ctx, prm)
if err != nil {
np.log.Error(logs.FrostFSCantTransferAssetsToFedContract, zap.Error(err))
np.log.Error(ctx, logs.FrostFSCantTransferAssetsToFedContract, zap.Error(err))
return false
}

View file

@ -1,6 +1,8 @@
package frostfs
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
@ -9,9 +11,9 @@ import (
// Process config event by setting configuration value from the mainchain in
// the sidechain.
func (np *Processor) processConfig(config frostfsEvent.Config) bool {
if !np.alphabetState.IsAlphabet() {
np.log.Info(logs.FrostFSNonAlphabetModeIgnoreConfig)
func (np *Processor) processConfig(ctx context.Context, config frostfsEvent.Config) bool {
if !np.alphabetState.IsAlphabet(ctx) {
np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreConfig)
return true
}
@ -22,9 +24,9 @@ func (np *Processor) processConfig(config frostfsEvent.Config) bool {
prm.SetValue(config.Value())
prm.SetHash(config.TxHash())
err := np.netmapClient.SetConfig(prm)
err := np.netmapClient.SetConfig(ctx, prm)
if err != nil {
np.log.Error(logs.FrostFSCantRelaySetConfigEvent, zap.Error(err))
np.log.Error(ctx, logs.FrostFSCantRelaySetConfigEvent, zap.Error(err))
return false
}

View file

@ -1,11 +1,11 @@
package frostfs
import (
"context"
"errors"
"fmt"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
@ -16,7 +16,6 @@ import (
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
"go.uber.org/zap"
)
type (
@ -27,7 +26,7 @@ type (
// AlphabetState is a callback interface for inner ring global state.
AlphabetState interface {
IsAlphabet() bool
IsAlphabet(context.Context) bool
}
// PrecisionConverter converts balance amount values.
@ -36,13 +35,13 @@ type (
}
BalanceClient interface {
Mint(p balance.MintPrm) error
Lock(p balance.LockPrm) error
Burn(p balance.BurnPrm) error
Mint(ctx context.Context, p balance.MintPrm) error
Lock(ctx context.Context, p balance.LockPrm) error
Burn(ctx context.Context, p balance.BurnPrm) error
}
NetmapClient interface {
SetConfig(p nmClient.SetConfigPrm) error
SetConfig(ctx context.Context, p nmClient.SetConfigPrm) error
}
MorphClient interface {
@ -110,8 +109,6 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/frostfs: balance precision converter is not set")
}
p.Log.Debug(logs.FrostFSFrostfsWorkerPool, zap.Int("size", p.PoolSize))
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
return nil, fmt.Errorf("ir/frostfs: can't create worker pool: %w", err)

View file

@ -1,6 +1,8 @@
package governance
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
@ -11,7 +13,7 @@ import (
"go.uber.org/zap"
)
func (gp *Processor) HandleAlphabetSync(e event.Event) {
func (gp *Processor) HandleAlphabetSync(ctx context.Context, e event.Event) {
var (
typ string
hash util.Uint256
@ -32,16 +34,16 @@ func (gp *Processor) HandleAlphabetSync(e event.Event) {
return
}
gp.log.Info(logs.GovernanceNewEvent, zap.String("type", typ))
gp.log.Info(ctx, logs.GovernanceNewEvent, zap.String("type", typ))
// send event to the worker pool
err := processors.SubmitEvent(gp.pool, gp.metrics, "alphabet_sync", func() bool {
return gp.processAlphabetSync(hash)
return gp.processAlphabetSync(ctx, hash)
})
if err != nil {
// there system can be moved into controlled degradation stage
gp.log.Warn(logs.GovernanceGovernanceWorkerPoolDrained,
gp.log.Warn(ctx, logs.GovernanceGovernanceWorkerPoolDrained,
zap.Int("capacity", gp.pool.Cap()))
}
}

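Because the pool only accepts argument-free func() bool tasks, a handler can no longer pass a processing method directly once that method needs a context; it captures the context in a closure instead, as HandleAlphabetSync does above. A small illustration of the difference (submit here stands in for processors.SubmitEvent, whose signature is assumed from its call sites):

package main

import (
	"context"
	"fmt"
)

type processor struct{}

// processAlphabetSync now needs a context, so it is no longer a bare func() bool
// and cannot be handed to the pool as a method value.
func (p *processor) processAlphabetSync(ctx context.Context, hash string) bool {
	fmt.Println("sync", hash)
	return true
}

// submit stands in for processors.SubmitEvent: the pool only understands
// argument-free tasks, so the handler wraps the call in a closure.
func submit(task func() bool) error {
	task()
	return nil
}

func main() {
	p := &processor{}
	ctx := context.Background()
	hash := "0xabc" // illustrative value

	// The closure captures ctx and hash from the handler's scope.
	_ = submit(func() bool {
		return p.processAlphabetSync(ctx, hash)
	})
}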

@ -1,6 +1,7 @@
package governance
import (
"context"
"encoding/binary"
"sort"
"testing"
@ -57,7 +58,7 @@ func TestHandleAlphabetSyncEvent(t *testing.T) {
txHash: util.Uint256{100},
}
proc.HandleAlphabetSync(ev)
proc.HandleAlphabetSync(context.Background(), ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@ -133,7 +134,7 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) {
Role: noderoles.NeoFSAlphabet,
}
proc.HandleAlphabetSync(ev)
proc.HandleAlphabetSync(context.Background(), ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@ -218,7 +219,7 @@ type testAlphabetState struct {
isAlphabet bool
}
func (s *testAlphabetState) IsAlphabet() bool {
func (s *testAlphabetState) IsAlphabet(context.Context) bool {
return s.isAlphabet
}
@ -226,7 +227,7 @@ type testVoter struct {
votes []VoteValidatorPrm
}
func (v *testVoter) VoteForSidechainValidator(prm VoteValidatorPrm) error {
func (v *testVoter) VoteForSidechainValidator(_ context.Context, prm VoteValidatorPrm) error {
v.votes = append(v.votes, prm)
return nil
}
@ -250,12 +251,12 @@ func (c *testMorphClient) Committee() (res keys.PublicKeys, err error) {
return c.commiteeKeys, nil
}
func (c *testMorphClient) UpdateNeoFSAlphabetList(prm client.UpdateAlphabetListPrm) error {
func (c *testMorphClient) UpdateNeoFSAlphabetList(_ context.Context, prm client.UpdateAlphabetListPrm) error {
c.alphabetUpdates = append(c.alphabetUpdates, prm)
return nil
}
func (c *testMorphClient) UpdateNotaryList(prm client.UpdateNotaryListPrm) error {
func (c *testMorphClient) UpdateNotaryList(_ context.Context, prm client.UpdateNotaryListPrm) error {
c.notaryUpdates = append(c.notaryUpdates, prm)
return nil
}
@ -277,7 +278,7 @@ type testFrostFSClient struct {
updates []frostfscontract.AlphabetUpdatePrm
}
func (c *testFrostFSClient) AlphabetUpdate(p frostfscontract.AlphabetUpdatePrm) error {
func (c *testFrostFSClient) AlphabetUpdate(_ context.Context, p frostfscontract.AlphabetUpdatePrm) error {
c.updates = append(c.updates, p)
return nil
}

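On the test side the change is mechanical: the doubles accept the extra context and ignore it, and handlers are invoked with context.Background(). A compressed sketch of that style of test (testify's require is used as in the diff; the interface and its parameter type are stand-ins):

package sketch

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
)

// Voter is a stand-in for the context-aware voting interface.
type Voter interface {
	VoteForSidechainValidator(context.Context, []string) error
}

type testVoter struct {
	votes [][]string
}

// The double records the call; the context is accepted and ignored.
func (v *testVoter) VoteForSidechainValidator(_ context.Context, prm []string) error {
	v.votes = append(v.votes, prm)
	return nil
}

func TestVoteRecorded(t *testing.T) {
	v := &testVoter{}
	var voter Voter = v

	err := voter.VoteForSidechainValidator(context.Background(), []string{"key1", "key2"})
	require.NoError(t, err)
	require.Len(t, v.votes, 1)
}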

@ -1,6 +1,7 @@
package governance
import (
"context"
"encoding/binary"
"encoding/hex"
"sort"
@ -18,39 +19,39 @@ const (
alphabetUpdateIDPrefix = "AlphabetUpdate"
)
func (gp *Processor) processAlphabetSync(txHash util.Uint256) bool {
if !gp.alphabetState.IsAlphabet() {
gp.log.Info(logs.GovernanceNonAlphabetModeIgnoreAlphabetSync)
func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint256) bool {
if !gp.alphabetState.IsAlphabet(ctx) {
gp.log.Info(ctx, logs.GovernanceNonAlphabetModeIgnoreAlphabetSync)
return true
}
mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList()
if err != nil {
gp.log.Error(logs.GovernanceCantFetchAlphabetListFromMainNet,
gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromMainNet,
zap.String("error", err.Error()))
return false
}
sidechainAlphabet, err := gp.morphClient.Committee()
if err != nil {
gp.log.Error(logs.GovernanceCantFetchAlphabetListFromSideChain,
gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromSideChain,
zap.String("error", err.Error()))
return false
}
newAlphabet, err := newAlphabetList(sidechainAlphabet, mainnetAlphabet)
if err != nil {
gp.log.Error(logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain,
gp.log.Error(ctx, logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain,
zap.String("error", err.Error()))
return false
}
if newAlphabet == nil {
gp.log.Info(logs.GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged)
gp.log.Info(ctx, logs.GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged)
return true
}
gp.log.Info(logs.GovernanceAlphabetListHasBeenChangedStartingUpdate,
gp.log.Info(ctx, logs.GovernanceAlphabetListHasBeenChangedStartingUpdate,
zap.String("side_chain_alphabet", prettyKeys(sidechainAlphabet)),
zap.String("new_alphabet", prettyKeys(newAlphabet)),
)
@ -61,22 +62,22 @@ func (gp *Processor) processAlphabetSync(txHash util.Uint256) bool {
}
// 1. Vote to sidechain committee via alphabet contracts.
err = gp.voter.VoteForSidechainValidator(votePrm)
err = gp.voter.VoteForSidechainValidator(ctx, votePrm)
if err != nil {
gp.log.Error(logs.GovernanceCantVoteForSideChainCommittee,
gp.log.Error(ctx, logs.GovernanceCantVoteForSideChainCommittee,
zap.String("error", err.Error()))
}
// 2. Update NeoFSAlphabet role in the sidechain.
gp.updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAlphabet, txHash)
gp.updateNeoFSAlphabetRoleInSidechain(ctx, sidechainAlphabet, newAlphabet, txHash)
// 3. Update notary role in the sidechain.
gp.updateNotaryRoleInSidechain(newAlphabet, txHash)
gp.updateNotaryRoleInSidechain(ctx, newAlphabet, txHash)
// 4. Update FrostFS contract in the mainnet.
gp.updateFrostFSContractInMainnet(newAlphabet)
gp.updateFrostFSContractInMainnet(ctx, newAlphabet)
gp.log.Info(logs.GovernanceFinishedAlphabetListUpdate)
gp.log.Info(ctx, logs.GovernanceFinishedAlphabetListUpdate)
return true
}
@ -93,24 +94,24 @@ func prettyKeys(keys keys.PublicKeys) string {
return strings.TrimRight(sb.String(), delimiter)
}
func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) {
func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) {
innerRing, err := gp.irFetcher.InnerRingKeys()
if err != nil {
gp.log.Error(logs.GovernanceCantFetchInnerRingListFromSideChain,
gp.log.Error(ctx, logs.GovernanceCantFetchInnerRingListFromSideChain,
zap.String("error", err.Error()))
return
}
newInnerRing, err := updateInnerRing(innerRing, sidechainAlphabet, newAlphabet)
if err != nil {
gp.log.Error(logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys,
gp.log.Error(ctx, logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys,
zap.String("error", err.Error()))
return
}
sort.Sort(newInnerRing)
gp.log.Info(logs.GovernanceUpdateOfTheInnerRingList,
gp.log.Info(ctx, logs.GovernanceUpdateOfTheInnerRingList,
zap.String("before", prettyKeys(innerRing)),
zap.String("after", prettyKeys(newInnerRing)),
)
@ -119,26 +120,26 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAl
updPrm.SetList(newInnerRing)
updPrm.SetHash(txHash)
if err = gp.morphClient.UpdateNeoFSAlphabetList(updPrm); err != nil {
gp.log.Error(logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys,
if err = gp.morphClient.UpdateNeoFSAlphabetList(ctx, updPrm); err != nil {
gp.log.Error(ctx, logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys,
zap.String("error", err.Error()))
}
}
func (gp *Processor) updateNotaryRoleInSidechain(newAlphabet keys.PublicKeys, txHash util.Uint256) {
func (gp *Processor) updateNotaryRoleInSidechain(ctx context.Context, newAlphabet keys.PublicKeys, txHash util.Uint256) {
updPrm := client.UpdateNotaryListPrm{}
updPrm.SetList(newAlphabet)
updPrm.SetHash(txHash)
err := gp.morphClient.UpdateNotaryList(updPrm)
err := gp.morphClient.UpdateNotaryList(ctx, updPrm)
if err != nil {
gp.log.Error(logs.GovernanceCantUpdateListOfNotaryNodesInSideChain,
gp.log.Error(ctx, logs.GovernanceCantUpdateListOfNotaryNodesInSideChain,
zap.String("error", err.Error()))
}
}
func (gp *Processor) updateFrostFSContractInMainnet(newAlphabet keys.PublicKeys) {
func (gp *Processor) updateFrostFSContractInMainnet(ctx context.Context, newAlphabet keys.PublicKeys) {
epoch := gp.epochState.EpochCounter()
buf := make([]byte, 8)
@ -151,9 +152,9 @@ func (gp *Processor) updateFrostFSContractInMainnet(newAlphabet keys.PublicKeys)
prm.SetID(id)
prm.SetPubs(newAlphabet)
err := gp.frostfsClient.AlphabetUpdate(prm)
err := gp.frostfsClient.AlphabetUpdate(ctx, prm)
if err != nil {
gp.log.Error(logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract,
gp.log.Error(ctx, logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract,
zap.String("error", err.Error()))
}
}

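One detail worth noting in these hunks: some call sites attach errors as zap.String("error", err.Error()) while others use zap.Error(err). Both land under an "error" key, but zap.Error carries the error value itself into the encoder rather than only its formatted string. A tiny comparison:

package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	log, _ := zap.NewDevelopment()
	defer log.Sync()

	err := errors.New("can't fetch alphabet list")

	// Field key "error", value is the formatted string only.
	log.Warn("fetch failed", zap.String("error", err.Error()))

	// Same key, but the error value itself is passed to the encoder.
	log.Warn("fetch failed", zap.Error(err))
}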

@ -1,6 +1,7 @@
package governance
import (
"context"
"errors"
"fmt"
@ -25,7 +26,7 @@ const ProcessorPoolSize = 1
type (
// AlphabetState is a callback interface for innerring global state.
AlphabetState interface {
IsAlphabet() bool
IsAlphabet(context.Context) bool
}
)
@ -38,7 +39,7 @@ type VoteValidatorPrm struct {
// Voter is a callback interface for alphabet contract voting.
type Voter interface {
VoteForSidechainValidator(VoteValidatorPrm) error
VoteForSidechainValidator(context.Context, VoteValidatorPrm) error
}
type (
@ -55,7 +56,7 @@ type (
}
FrostFSClient interface {
AlphabetUpdate(p frostfscontract.AlphabetUpdatePrm) error
AlphabetUpdate(ctx context.Context, p frostfscontract.AlphabetUpdatePrm) error
}
NetmapClient interface {
@ -69,8 +70,8 @@ type (
MorphClient interface {
Committee() (res keys.PublicKeys, err error)
UpdateNeoFSAlphabetList(prm client.UpdateAlphabetListPrm) error
UpdateNotaryList(prm client.UpdateNotaryListPrm) error
UpdateNeoFSAlphabetList(ctx context.Context, prm client.UpdateAlphabetListPrm) error
UpdateNotaryList(ctx context.Context, prm client.UpdateNotaryListPrm) error
}
// Processor of events related to governance in the network.


@ -1,6 +1,7 @@
package netmap
import (
"context"
"encoding/hex"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@ -11,93 +12,93 @@ import (
"go.uber.org/zap"
)
func (np *Processor) HandleNewEpochTick(ev event.Event) {
func (np *Processor) HandleNewEpochTick(ctx context.Context, ev event.Event) {
_ = ev.(timerEvent.NewEpochTick)
np.log.Info(logs.NetmapTick, zap.String("type", "epoch"))
np.log.Info(ctx, logs.NetmapTick, zap.String("type", "epoch"))
// send an event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch_tick", np.processNewEpochTick)
err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch_tick", func() bool { return np.processNewEpochTick(ctx) })
if err != nil {
// the system can be moved into controlled degradation stage
np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
func (np *Processor) handleNewEpoch(ev event.Event) {
func (np *Processor) handleNewEpoch(ctx context.Context, ev event.Event) {
epochEvent := ev.(netmapEvent.NewEpoch)
np.log.Info(logs.Notification,
np.log.Info(ctx, logs.Notification,
zap.String("type", "new epoch"),
zap.Uint64("value", epochEvent.EpochNumber()))
// send an event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch", func() bool {
return np.processNewEpoch(epochEvent)
return np.processNewEpoch(ctx, epochEvent)
})
if err != nil {
// the system can be moved into controlled degradation stage
np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
func (np *Processor) handleAddPeer(ev event.Event) {
func (np *Processor) handleAddPeer(ctx context.Context, ev event.Event) {
newPeer := ev.(netmapEvent.AddPeer)
np.log.Info(logs.Notification,
np.log.Info(ctx, logs.Notification,
zap.String("type", "add peer"),
)
// send an event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "netmap_add_peer", func() bool {
return np.processAddPeer(newPeer)
return np.processAddPeer(ctx, newPeer)
})
if err != nil {
// the system can be moved into controlled degradation stage
np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
func (np *Processor) handleUpdateState(ev event.Event) {
func (np *Processor) handleUpdateState(ctx context.Context, ev event.Event) {
updPeer := ev.(netmapEvent.UpdatePeer)
np.log.Info(logs.Notification,
np.log.Info(ctx, logs.Notification,
zap.String("type", "update peer state"),
zap.String("key", hex.EncodeToString(updPeer.PublicKey().Bytes())))
// send event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "netmap_update_peer", func() bool {
return np.processUpdatePeer(updPeer)
return np.processUpdatePeer(ctx, updPeer)
})
if err != nil {
// the system can be moved into controlled degradation stage
np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
func (np *Processor) handleCleanupTick(ev event.Event) {
func (np *Processor) handleCleanupTick(ctx context.Context, ev event.Event) {
if !np.netmapSnapshot.enabled {
np.log.Debug(logs.NetmapNetmapCleanUpRoutineIsDisabled518)
np.log.Debug(ctx, logs.NetmapNetmapCleanUpRoutineIsDisabled518)
return
}
cleanup := ev.(netmapCleanupTick)
np.log.Info(logs.NetmapTick, zap.String("type", "netmap cleaner"))
np.log.Info(ctx, logs.NetmapTick, zap.String("type", "netmap cleaner"))
// send event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "netmap_cleanup_tick", func() bool {
return np.processNetmapCleanupTick(cleanup)
return np.processNetmapCleanupTick(ctx, cleanup)
})
if err != nil {
// the system can be moved into controlled degradation stage
np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}

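Each of these handlers follows the same dispatch shape: a context plus an opaque event.Event comes in, the handler asserts the concrete notification type, logs with the context, then queues the processing. Roughly (the event types below are invented for the sketch):

package main

import (
	"context"
	"log/slog"
)

// Event is the opaque notification type handlers receive.
type Event any

// NewEpoch is one concrete event kind; the real ones carry tx hashes and more.
type NewEpoch struct {
	Number uint64
}

type handler struct {
	log *slog.Logger
}

// Handle matches the context-aware handler signature introduced by the diff:
// assert the concrete type, log with the context, then hand off processing.
func (h *handler) Handle(ctx context.Context, e Event) {
	epoch, ok := e.(NewEpoch)
	if !ok {
		h.log.WarnContext(ctx, "unexpected event type")
		return
	}
	h.log.InfoContext(ctx, "notification", "type", "new epoch", "value", epoch.Number)
	// processing would be submitted to the worker pool here
}

func main() {
	h := &handler{log: slog.Default()}
	h.Handle(context.Background(), NewEpoch{Number: 101})
}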

@ -1,6 +1,7 @@
package netmap
import (
"context"
"fmt"
"testing"
"time"
@ -38,7 +39,7 @@ func TestNewEpochTick(t *testing.T) {
require.NoError(t, err, "failed to create processor")
ev := timerEvent.NewEpochTick{}
proc.HandleNewEpochTick(ev)
proc.HandleNewEpochTick(context.Background(), ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@ -90,7 +91,7 @@ func TestNewEpoch(t *testing.T) {
Num: 101,
Hash: util.Uint256{101},
}
proc.handleNewEpoch(ev)
proc.handleNewEpoch(context.Background(), ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@ -130,7 +131,7 @@ func TestAddPeer(t *testing.T) {
MainTransaction: &transaction.Transaction{},
},
}
proc.handleAddPeer(ev)
proc.handleAddPeer(context.Background(), ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@ -145,7 +146,7 @@ func TestAddPeer(t *testing.T) {
MainTransaction: &transaction.Transaction{},
},
}
proc.handleAddPeer(ev)
proc.handleAddPeer(context.Background(), ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@ -188,7 +189,7 @@ func TestUpdateState(t *testing.T) {
MainTransaction: &transaction.Transaction{},
},
}
proc.handleUpdateState(ev)
proc.handleUpdateState(context.Background(), ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@ -232,7 +233,7 @@ func TestCleanupTick(t *testing.T) {
txHash: util.Uint256{123},
}
proc.handleCleanupTick(ev)
proc.handleCleanupTick(context.Background(), ev)
for proc.pool.Running() > 0 {
time.Sleep(10 * time.Millisecond)
@ -340,7 +341,7 @@ type testAlphabetState struct {
isAlphabet bool
}
func (s *testAlphabetState) IsAlphabet() bool {
func (s *testAlphabetState) IsAlphabet(context.Context) bool {
return s.isAlphabet
}
@ -364,7 +365,7 @@ type testNetmapClient struct {
invokedTxs []*transaction.Transaction
}
func (c *testNetmapClient) MorphNotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error {
func (c *testNetmapClient) MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error {
c.notaryInvokes = append(c.notaryInvokes, notaryInvoke{
contract: contract,
fee: fee,
@ -395,7 +396,7 @@ func (c *testNetmapClient) NetMap() (*netmap.NetMap, error) {
return c.netmap, nil
}
func (c *testNetmapClient) NewEpoch(epoch uint64) error {
func (c *testNetmapClient) NewEpoch(_ context.Context, epoch uint64) error {
c.newEpochs = append(c.newEpochs, epoch)
return nil
}
@ -413,6 +414,6 @@ type testEventHandler struct {
handledEvents []event.Event
}
func (h *testEventHandler) Handle(e event.Event) {
func (h *testEventHandler) Handle(_ context.Context, e event.Event) {
h.handledEvents = append(h.handledEvents, e)
}


@ -1,15 +1,17 @@
package netmap
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
v2netmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"go.uber.org/zap"
)
func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool {
if !np.alphabetState.IsAlphabet() {
np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick)
func (np *Processor) processNetmapCleanupTick(ctx context.Context, ev netmapCleanupTick) bool {
if !np.alphabetState.IsAlphabet(ctx) {
np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick)
return true
}
@ -17,13 +19,13 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool {
err := np.netmapSnapshot.forEachRemoveCandidate(ev.epoch, func(s string) error {
key, err := keys.NewPublicKeyFromString(s)
if err != nil {
np.log.Warn(logs.NetmapCantDecodePublicKeyOfNetmapNode,
np.log.Warn(ctx, logs.NetmapCantDecodePublicKeyOfNetmapNode,
zap.String("key", s))
return nil
}
np.log.Info(logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", s))
np.log.Info(ctx, logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", s))
// In notary environments we call UpdateStateIR method instead of UpdateState.
// It differs from UpdateState only by name, so we can do this in the same form.
@ -31,6 +33,7 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool {
const methodUpdateStateNotary = "updateStateIR"
err = np.netmapClient.MorphNotaryInvoke(
ctx,
np.netmapClient.ContractAddress(),
0,
uint32(ev.epoch),
@ -39,13 +42,13 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool {
int64(v2netmap.Offline), key.Bytes(),
)
if err != nil {
np.log.Error(logs.NetmapCantInvokeNetmapUpdateState, zap.Error(err))
np.log.Error(ctx, logs.NetmapCantInvokeNetmapUpdateState, zap.Error(err))
}
return nil
})
if err != nil {
np.log.Warn(logs.NetmapCantIterateOnNetmapCleanerCache,
np.log.Warn(ctx, logs.NetmapCantIterateOnNetmapCleanerCache,
zap.String("error", err.Error()))
return false
}


@ -1,6 +1,8 @@
package netmap
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance"
netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
@ -9,12 +11,12 @@ import (
// Process new epoch notification by setting global epoch value and resetting
// local epoch timer.
func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) bool {
func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoch) bool {
epoch := ev.EpochNumber()
epochDuration, err := np.netmapClient.EpochDuration()
if err != nil {
np.log.Warn(logs.NetmapCantGetEpochDuration,
np.log.Warn(ctx, logs.NetmapCantGetEpochDuration,
zap.String("error", err.Error()))
} else {
np.epochState.SetEpochDuration(epochDuration)
@ -24,46 +26,46 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) bool {
h, err := np.netmapClient.MorphTxHeight(ev.TxHash())
if err != nil {
np.log.Warn(logs.NetmapCantGetTransactionHeight,
np.log.Warn(ctx, logs.NetmapCantGetTransactionHeight,
zap.String("hash", ev.TxHash().StringLE()),
zap.String("error", err.Error()))
}
if err := np.epochTimer.ResetEpochTimer(h); err != nil {
np.log.Warn(logs.NetmapCantResetEpochTimer,
np.log.Warn(ctx, logs.NetmapCantResetEpochTimer,
zap.String("error", err.Error()))
}
// get new netmap snapshot
networkMap, err := np.netmapClient.NetMap()
if err != nil {
np.log.Warn(logs.NetmapCantGetNetmapSnapshotToPerformCleanup,
np.log.Warn(ctx, logs.NetmapCantGetNetmapSnapshotToPerformCleanup,
zap.String("error", err.Error()))
return false
}
np.netmapSnapshot.update(*networkMap, epoch)
np.handleCleanupTick(netmapCleanupTick{epoch: epoch, txHash: ev.TxHash()})
np.handleAlphabetSync(governance.NewSyncEvent(ev.TxHash()))
np.handleNotaryDeposit(ev)
np.handleCleanupTick(ctx, netmapCleanupTick{epoch: epoch, txHash: ev.TxHash()})
np.handleAlphabetSync(ctx, governance.NewSyncEvent(ev.TxHash()))
np.handleNotaryDeposit(ctx, ev)
return true
}
// Process new epoch tick by invoking new epoch method in network map contract.
func (np *Processor) processNewEpochTick() bool {
if !np.alphabetState.IsAlphabet() {
np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewEpochTick)
func (np *Processor) processNewEpochTick(ctx context.Context) bool {
if !np.alphabetState.IsAlphabet(ctx) {
np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewEpochTick)
return true
}
nextEpoch := np.epochState.EpochCounter() + 1
np.log.Debug(logs.NetmapNextEpoch, zap.Uint64("value", nextEpoch))
np.log.Debug(ctx, logs.NetmapNextEpoch, zap.Uint64("value", nextEpoch))
err := np.netmapClient.NewEpoch(nextEpoch)
err := np.netmapClient.NewEpoch(ctx, nextEpoch)
if err != nil {
np.log.Error(logs.NetmapCantInvokeNetmapNewEpoch, zap.Error(err))
np.log.Error(ctx, logs.NetmapCantInvokeNetmapNewEpoch, zap.Error(err))
return false
}


@ -1,6 +1,7 @@
package netmap
import (
"context"
"encoding/hex"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@ -12,9 +13,9 @@ import (
// Process add peer notification by sanity check of new node
// local epoch timer.
func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
if !np.alphabetState.IsAlphabet() {
np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewPeerNotification)
func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer) bool {
if !np.alphabetState.IsAlphabet(ctx) {
np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewPeerNotification)
return true
}
@ -22,7 +23,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
tx := ev.NotaryRequest().MainTransaction
ok, err := np.netmapClient.MorphIsValidScript(tx.Script, tx.Signers)
if err != nil || !ok {
np.log.Warn(logs.NetmapNonhaltNotaryTransaction,
np.log.Warn(ctx, logs.NetmapNonhaltNotaryTransaction,
zap.String("method", "netmap.AddPeer"),
zap.String("hash", tx.Hash().StringLE()),
zap.Error(err))
@ -33,14 +34,14 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
var nodeInfo netmap.NodeInfo
if err := nodeInfo.Unmarshal(ev.Node()); err != nil {
// it will be nice to have tx id at event structure to log it
np.log.Warn(logs.NetmapCantParseNetworkMapCandidate)
np.log.Warn(ctx, logs.NetmapCantParseNetworkMapCandidate)
return false
}
// validate and update node info
err = np.nodeValidator.VerifyAndUpdate(&nodeInfo)
if err != nil {
np.log.Warn(logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate,
np.log.Warn(ctx, logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate,
zap.String("error", err.Error()),
)
@ -63,7 +64,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
// That is why we need to perform `addPeerIR` only in case when node is online,
// because in scope of this method, contract set state `ONLINE` for the node.
if updated && nodeInfo.Status().IsOnline() {
np.log.Info(logs.NetmapApprovingNetworkMapCandidate,
np.log.Info(ctx, logs.NetmapApprovingNetworkMapCandidate,
zap.String("key", keyString))
prm := netmapclient.AddPeerPrm{}
@ -76,6 +77,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
// create new notary request with the original nonce
err = np.netmapClient.MorphNotaryInvoke(
ctx,
np.netmapClient.ContractAddress(),
0,
ev.NotaryRequest().MainTransaction.Nonce,
@ -84,7 +86,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
nodeInfoBinary,
)
if err != nil {
np.log.Error(logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err))
np.log.Error(ctx, logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err))
return false
}
}
@ -93,9 +95,9 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
}
// Process update peer notification by sending approval tx to the smart contract.
func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool {
if !np.alphabetState.IsAlphabet() {
np.log.Info(logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification)
func (np *Processor) processUpdatePeer(ctx context.Context, ev netmapEvent.UpdatePeer) bool {
if !np.alphabetState.IsAlphabet(ctx) {
np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification)
return true
}
@ -108,7 +110,7 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool {
if ev.Maintenance() {
err = np.nodeStateSettings.MaintenanceModeAllowed()
if err != nil {
np.log.Info(logs.NetmapPreventSwitchingNodeToMaintenanceState,
np.log.Info(ctx, logs.NetmapPreventSwitchingNodeToMaintenanceState,
zap.Error(err),
)
@ -117,7 +119,7 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool {
}
if err = np.netmapClient.MorphNotarySignAndInvokeTX(ev.NotaryRequest().MainTransaction); err != nil {
np.log.Error(logs.NetmapCantInvokeNetmapUpdatePeer, zap.Error(err))
np.log.Error(ctx, logs.NetmapCantInvokeNetmapUpdatePeer, zap.Error(err))
return false
}


@ -1,10 +1,10 @@
package netmap
import (
"context"
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
@ -16,7 +16,6 @@ import (
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
"go.uber.org/zap"
)
type (
@ -35,7 +34,7 @@ type (
// AlphabetState is a callback interface for inner ring global state.
AlphabetState interface {
IsAlphabet() bool
IsAlphabet(context.Context) bool
}
// NodeValidator wraps basic method of checking the correctness
@ -54,12 +53,12 @@ type (
}
Client interface {
MorphNotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error
MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error
ContractAddress() util.Uint160
EpochDuration() (uint64, error)
MorphTxHeight(h util.Uint256) (res uint32, err error)
NetMap() (*netmap.NetMap, error)
NewEpoch(epoch uint64) error
NewEpoch(ctx context.Context, epoch uint64) error
MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error)
MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error
}
@ -132,8 +131,6 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/netmap: node state settings is not set")
}
p.Log.Debug(logs.NetmapNetmapWorkerPool, zap.Int("size", p.PoolSize))
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
return nil, fmt.Errorf("ir/netmap: can't create worker pool: %w", err)


@ -1,6 +1,8 @@
package netmap
import (
"context"
netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
@ -18,13 +20,13 @@ type netmapClientWrapper struct {
netmapClient *netmapclient.Client
}
func (w *netmapClientWrapper) UpdatePeerState(p netmapclient.UpdatePeerPrm) error {
_, err := w.netmapClient.UpdatePeerState(p)
func (w *netmapClientWrapper) UpdatePeerState(ctx context.Context, p netmapclient.UpdatePeerPrm) error {
_, err := w.netmapClient.UpdatePeerState(ctx, p)
return err
}
func (w *netmapClientWrapper) MorphNotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error {
_, err := w.netmapClient.Morph().NotaryInvoke(contract, fee, nonce, vub, method, args...)
func (w *netmapClientWrapper) MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error {
_, err := w.netmapClient.Morph().NotaryInvoke(ctx, contract, fee, nonce, vub, method, args...)
return err
}
@ -44,16 +46,16 @@ func (w *netmapClientWrapper) NetMap() (*netmap.NetMap, error) {
return w.netmapClient.NetMap()
}
func (w *netmapClientWrapper) NewEpoch(epoch uint64) error {
return w.netmapClient.NewEpoch(epoch)
func (w *netmapClientWrapper) NewEpoch(ctx context.Context, epoch uint64) error {
return w.netmapClient.NewEpoch(ctx, epoch)
}
func (w *netmapClientWrapper) MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error) {
return w.netmapClient.Morph().IsValidScript(script, signers)
}
func (w *netmapClientWrapper) AddPeer(p netmapclient.AddPeerPrm) error {
return w.netmapClient.AddPeer(p)
func (w *netmapClientWrapper) AddPeer(ctx context.Context, p netmapclient.AddPeerPrm) error {
return w.netmapClient.AddPeer(ctx, p)
}
func (w *netmapClientWrapper) MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error {

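The wrapper file above is purely mechanical: each adapter method gains the context and forwards it to the underlying client. In miniature:

package main

import (
	"context"
	"fmt"
)

// client is the lower-level client the wrapper delegates to (illustrative).
type client struct{}

func (c *client) NewEpoch(ctx context.Context, epoch uint64) error {
	fmt.Println("invoking newEpoch", epoch)
	return nil
}

// clientWrapper adapts the client to the narrower interface the processor
// declares; every method simply threads the context through.
type clientWrapper struct {
	c *client
}

func (w *clientWrapper) NewEpoch(ctx context.Context, epoch uint64) error {
	return w.c.NewEpoch(ctx, epoch)
}

func main() {
	w := &clientWrapper{c: &client{}}
	_ = w.NewEpoch(context.Background(), 42)
}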

@ -1,6 +1,7 @@
package innerring
import (
"context"
"fmt"
"sort"
@ -47,21 +48,21 @@ func (s *Server) SetEpochDuration(val uint64) {
}
// IsActive is a getter for a global active flag state.
func (s *Server) IsActive() bool {
return s.InnerRingIndex() >= 0
func (s *Server) IsActive(ctx context.Context) bool {
return s.InnerRingIndex(ctx) >= 0
}
// IsAlphabet is a getter for a global alphabet flag state.
func (s *Server) IsAlphabet() bool {
return s.AlphabetIndex() >= 0
func (s *Server) IsAlphabet(ctx context.Context) bool {
return s.AlphabetIndex(ctx) >= 0
}
// InnerRingIndex is a getter for a global index of node in inner ring list. Negative
// index means that node is not in the inner ring list.
func (s *Server) InnerRingIndex() int {
func (s *Server) InnerRingIndex(ctx context.Context) int {
index, err := s.statusIndex.InnerRingIndex()
if err != nil {
s.log.Error(logs.InnerringCantGetInnerRingIndex, zap.String("error", err.Error()))
s.log.Error(ctx, logs.InnerringCantGetInnerRingIndex, zap.String("error", err.Error()))
return -1
}
@ -70,10 +71,10 @@ func (s *Server) InnerRingIndex() int {
// InnerRingSize is a getter for a global size of inner ring list. This value
// paired with inner ring index.
func (s *Server) InnerRingSize() int {
func (s *Server) InnerRingSize(ctx context.Context) int {
size, err := s.statusIndex.InnerRingSize()
if err != nil {
s.log.Error(logs.InnerringCantGetInnerRingSize, zap.String("error", err.Error()))
s.log.Error(ctx, logs.InnerringCantGetInnerRingSize, zap.String("error", err.Error()))
return 0
}
@ -82,28 +83,28 @@ func (s *Server) InnerRingSize() int {
// AlphabetIndex is a getter for a global index of node in alphabet list.
// Negative index means that node is not in the alphabet list.
func (s *Server) AlphabetIndex() int {
func (s *Server) AlphabetIndex(ctx context.Context) int {
index, err := s.statusIndex.AlphabetIndex()
if err != nil {
s.log.Error(logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error()))
s.log.Error(ctx, logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error()))
return -1
}
return int(index)
}
func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) error {
func (s *Server) voteForSidechainValidator(ctx context.Context, prm governance.VoteValidatorPrm) error {
validators := prm.Validators
index := s.InnerRingIndex()
index := s.InnerRingIndex(ctx)
if s.contracts.alphabet.indexOutOfRange(index) {
s.log.Info(logs.InnerringIgnoreValidatorVoteNodeNotInAlphabetRange)
s.log.Info(ctx, logs.InnerringIgnoreValidatorVoteNodeNotInAlphabetRange)
return nil
}
if len(validators) == 0 {
s.log.Info(logs.InnerringIgnoreValidatorVoteEmptyValidatorsList)
s.log.Info(ctx, logs.InnerringIgnoreValidatorVoteEmptyValidatorsList)
return nil
}
@ -126,9 +127,9 @@ func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) erro
}
s.contracts.alphabet.iterate(func(letter GlagoliticLetter, contract util.Uint160) {
_, err := s.morphClient.NotaryInvoke(contract, s.feeConfig.SideChainFee(), nonce, vubP, voteMethod, epoch, validators)
_, err := s.morphClient.NotaryInvoke(ctx, contract, s.feeConfig.SideChainFee(), nonce, vubP, voteMethod, epoch, validators)
if err != nil {
s.log.Warn(logs.InnerringCantInvokeVoteMethodInAlphabetContract,
s.log.Warn(ctx, logs.InnerringCantInvokeVoteMethodInAlphabetContract,
zap.Int8("alphabet_index", int8(letter)),
zap.Uint64("epoch", epoch),
zap.String("error", err.Error()))
@ -140,9 +141,9 @@ func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) erro
// VoteForSidechainValidator calls vote method on alphabet contracts with
// the provided list of keys.
func (s *Server) VoteForSidechainValidator(prm governance.VoteValidatorPrm) error {
func (s *Server) VoteForSidechainValidator(ctx context.Context, prm governance.VoteValidatorPrm) error {
sort.Sort(prm.Validators)
return s.voteForSidechainValidator(prm)
return s.voteForSidechainValidator(ctx, prm)
}
// ResetEpochTimer resets the block timer that produces events to update epoch
@ -153,17 +154,17 @@ func (s *Server) ResetEpochTimer(h uint32) error {
return s.epochTimer.Reset()
}
func (s *Server) setHealthStatus(hs control.HealthStatus) {
func (s *Server) setHealthStatus(ctx context.Context, hs control.HealthStatus) {
s.healthStatus.Store(int32(hs))
s.notifySystemd(hs)
s.notifySystemd(ctx, hs)
if s.irMetrics != nil {
s.irMetrics.SetHealth(int32(hs))
}
}
func (s *Server) CompareAndSwapHealthStatus(oldSt, newSt control.HealthStatus) (swapped bool) {
func (s *Server) CompareAndSwapHealthStatus(ctx context.Context, oldSt, newSt control.HealthStatus) (swapped bool) {
if swapped = s.healthStatus.CompareAndSwap(int32(oldSt), int32(newSt)); swapped {
s.notifySystemd(newSt)
s.notifySystemd(ctx, newSt)
if s.irMetrics != nil {
s.irMetrics.SetHealth(int32(newSt))
}
@ -186,7 +187,7 @@ func initPersistentStateStorage(cfg *viper.Viper) (*state.PersistentStorage, err
return persistStorage, nil
}
func (s *Server) notifySystemd(st control.HealthStatus) {
func (s *Server) notifySystemd(ctx context.Context, st control.HealthStatus) {
if !s.sdNotify {
return
}
@ -202,6 +203,6 @@ func (s *Server) notifySystemd(st control.HealthStatus) {
err = sdnotify.Status(fmt.Sprintf("%v", st))
}
if err != nil {
s.log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
s.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err))
}
}

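CompareAndSwapHealthStatus above is a thin wrapper over an atomic compare-and-swap, which is what lets the SIGHUP branch refuse to reconfigure unless the node is currently READY. A stripped-down model of the mechanism (status constants and the notify hook are placeholders):

package main

import (
	"context"
	"fmt"
	"sync/atomic"
)

type HealthStatus int32

const (
	StatusReady HealthStatus = iota
	StatusReconfiguring
)

type server struct {
	healthStatus atomic.Int32
}

func (s *server) setHealthStatus(ctx context.Context, hs HealthStatus) {
	s.healthStatus.Store(int32(hs))
	s.notify(ctx, hs)
}

// CompareAndSwapHealthStatus only notifies when the swap actually happened,
// so concurrent reconfiguration attempts collapse into one.
func (s *server) CompareAndSwapHealthStatus(ctx context.Context, oldSt, newSt HealthStatus) bool {
	swapped := s.healthStatus.CompareAndSwap(int32(oldSt), int32(newSt))
	if swapped {
		s.notify(ctx, newSt)
	}
	return swapped
}

func (s *server) notify(_ context.Context, hs HealthStatus) {
	fmt.Println("health status:", hs) // stands in for systemd/metrics notification
}

func main() {
	s := &server{}
	ctx := context.Background()
	s.setHealthStatus(ctx, StatusReady)
	fmt.Println(s.CompareAndSwapHealthStatus(ctx, StatusReady, StatusReconfiguring)) // true
	fmt.Println(s.CompareAndSwapHealthStatus(ctx, StatusReady, StatusReconfiguring)) // false: already reconfiguring
}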

@ -1,6 +1,7 @@
package innerring
import (
"context"
"testing"
"time"
@ -42,12 +43,12 @@ func TestServerState(t *testing.T) {
require.Equal(t, epochDuration, srv.EpochDuration(), "invalid epoch duration")
var healthStatus control.HealthStatus = control.HealthStatus_READY
srv.setHealthStatus(healthStatus)
srv.setHealthStatus(context.Background(), healthStatus)
require.Equal(t, healthStatus, srv.HealthStatus(), "invalid health status")
require.True(t, srv.IsActive(), "invalid IsActive result")
require.True(t, srv.IsAlphabet(), "invalid IsAlphabet result")
require.Equal(t, 0, srv.InnerRingIndex(), "invalid IR index")
require.Equal(t, 1, srv.InnerRingSize(), "invalid IR index")
require.Equal(t, 0, srv.AlphabetIndex(), "invalid alphabet index")
require.True(t, srv.IsActive(context.Background()), "invalid IsActive result")
require.True(t, srv.IsAlphabet(context.Background()), "invalid IsAlphabet result")
require.Equal(t, 0, srv.InnerRingIndex(context.Background()), "invalid IR index")
require.Equal(t, 1, srv.InnerRingSize(context.Background()), "invalid IR index")
require.Equal(t, 0, srv.AlphabetIndex(context.Background()), "invalid alphabet index")
}


@ -58,7 +58,7 @@ func defaultCfg(c *cfg) {
},
fullSizeLimit: 1 << 30, // 1GB
objSizeLimit: 1 << 20, // 1MB
log: &logger.Logger{Logger: zap.L()},
log: logger.NewLoggerWrapper(zap.L()),
metrics: &NoopMetrics{},
}
}
@ -110,7 +110,7 @@ func WithFullSizeLimit(lim uint64) Option {
// WithLogger returns an option to specify Blobovnicza's logger.
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
c.log = &logger.Logger{Logger: l.With(zap.String("component", "Blobovnicza"))}
c.log = l.With(zap.String("component", "Blobovnicza"))
}
}

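The Blobovnicza options above switch from constructing the logger wrapper by hand to letting the logger package attach the component field. The underlying pattern is plain functional options; a sketch with a bare *zap.Logger standing in for the repository's wrapper:

package sketch

import "go.uber.org/zap"

type cfg struct {
	log *zap.Logger
}

// Option mutates the configuration before the storage is constructed.
type Option func(*cfg)

func defaultCfg() *cfg {
	return &cfg{log: zap.L()} // global no-op logger unless replaced
}

// WithLogger tags every record produced by this component, mirroring the
// "component: Blobovnicza" field added in the diff.
func WithLogger(l *zap.Logger) Option {
	return func(c *cfg) {
		c.log = l.With(zap.String("component", "Blobovnicza"))
	}
}

func newWithOptions(opts ...Option) *cfg {
	c := defaultCfg()
	for _, o := range opts {
		o(c)
	}
	return c
}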

@ -69,10 +69,10 @@ func TestBlobovnicza(t *testing.T) {
defer os.Remove(p)
// open Blobovnicza
require.NoError(t, blz.Open())
require.NoError(t, blz.Open(context.Background()))
// initialize Blobovnicza
require.NoError(t, blz.Init())
require.NoError(t, blz.Init(context.Background()))
// try to read non-existent address
testGet(t, blz, oidtest.Address(), nil, client.IsErrObjectNotFound)
@ -102,5 +102,5 @@ func TestBlobovnicza(t *testing.T) {
return err == nil
}, nil)
require.NoError(t, blz.Close())
require.NoError(t, blz.Close(context.Background()))
}


@ -1,6 +1,7 @@
package blobovnicza
import (
"context"
"errors"
"fmt"
"path/filepath"
@ -15,7 +16,7 @@ import (
//
// If the database file does not exist, it will be created automatically.
// If blobovnicza is already open, does nothing.
func (b *Blobovnicza) Open() error {
func (b *Blobovnicza) Open(ctx context.Context) error {
b.controlMtx.Lock()
defer b.controlMtx.Unlock()
@ -23,7 +24,7 @@ func (b *Blobovnicza) Open() error {
return nil
}
b.log.Debug(logs.BlobovniczaCreatingDirectoryForBoltDB,
b.log.Debug(ctx, logs.BlobovniczaCreatingDirectoryForBoltDB,
zap.String("path", b.path),
zap.Bool("ro", b.boltOptions.ReadOnly),
)
@ -37,7 +38,7 @@ func (b *Blobovnicza) Open() error {
}
}
b.log.Debug(logs.BlobovniczaOpeningBoltDB,
b.log.Debug(ctx, logs.BlobovniczaOpeningBoltDB,
zap.String("path", b.path),
zap.Stringer("permissions", b.perm),
)
@ -55,7 +56,7 @@ func (b *Blobovnicza) Open() error {
//
// If Blobovnicza is already initialized, no action is taken.
// Blobovnicza must be open, otherwise an error will return.
func (b *Blobovnicza) Init() error {
func (b *Blobovnicza) Init(ctx context.Context) error {
b.controlMtx.Lock()
defer b.controlMtx.Unlock()
@ -63,7 +64,7 @@ func (b *Blobovnicza) Init() error {
return errors.New("blobovnicza is not open")
}
b.log.Debug(logs.BlobovniczaInitializing,
b.log.Debug(ctx, logs.BlobovniczaInitializing,
zap.Uint64("object size limit", b.objSizeLimit),
zap.Uint64("storage size limit", b.fullSizeLimit),
)
@ -71,7 +72,7 @@ func (b *Blobovnicza) Init() error {
size := b.dataSize.Load()
items := b.itemsCount.Load()
if size != 0 || items != 0 {
b.log.Debug(logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size), zap.Uint64("items", items))
b.log.Debug(ctx, logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size), zap.Uint64("items", items))
return nil
}
@ -81,7 +82,7 @@ func (b *Blobovnicza) Init() error {
// create size range bucket
rangeStr := stringifyBounds(lower, upper)
b.log.Debug(logs.BlobovniczaCreatingBucketForSizeRange,
b.log.Debug(ctx, logs.BlobovniczaCreatingBucketForSizeRange,
zap.String("range", rangeStr))
_, err := tx.CreateBucketIfNotExists(key)
@ -98,14 +99,14 @@ func (b *Blobovnicza) Init() error {
}
}
return b.initializeCounters()
return b.initializeCounters(ctx)
}
func (b *Blobovnicza) ObjectsCount() uint64 {
return b.itemsCount.Load()
}
func (b *Blobovnicza) initializeCounters() error {
func (b *Blobovnicza) initializeCounters(ctx context.Context) error {
var size uint64
var items uint64
var sizeExists bool
@ -131,17 +132,17 @@ func (b *Blobovnicza) initializeCounters() error {
return fmt.Errorf("can't determine DB size: %w", err)
}
if (!sizeExists || !itemsCountExists) && !b.boltOptions.ReadOnly {
b.log.Debug(logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items))
b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items))
if err := b.boltDB.Update(func(tx *bbolt.Tx) error {
if err := saveDataSize(tx, size); err != nil {
return err
}
return saveItemsCount(tx, items)
}); err != nil {
b.log.Debug(logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items))
b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items))
return fmt.Errorf("can't save blobovnicza's size and items count: %w", err)
}
b.log.Debug(logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items))
b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items))
}
b.dataSize.Store(size)
@ -154,7 +155,7 @@ func (b *Blobovnicza) initializeCounters() error {
// Close releases all internal database resources.
//
// If blobovnicza is already closed, does nothing.
func (b *Blobovnicza) Close() error {
func (b *Blobovnicza) Close(ctx context.Context) error {
b.controlMtx.Lock()
defer b.controlMtx.Unlock()
@ -162,7 +163,7 @@ func (b *Blobovnicza) Close() error {
return nil
}
b.log.Debug(logs.BlobovniczaClosingBoltDB,
b.log.Debug(ctx, logs.BlobovniczaClosingBoltDB,
zap.String("path", b.path),
)

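The control file above threads the context through the whole Open/Init/Close lifecycle so the startup and shutdown logs are traceable. The discipline itself is a mutex-guarded, idempotent state machine; schematically (bbolt specifics omitted, names illustrative):

package main

import (
	"context"
	"errors"
	"log/slog"
	"sync"
)

type store struct {
	controlMtx sync.Mutex
	opened     bool
	log        *slog.Logger
}

// Open is a no-op when the store is already open.
func (s *store) Open(ctx context.Context) error {
	s.controlMtx.Lock()
	defer s.controlMtx.Unlock()
	if s.opened {
		return nil
	}
	s.log.DebugContext(ctx, "opening store")
	s.opened = true
	return nil
}

// Init requires Open to have been called first.
func (s *store) Init(ctx context.Context) error {
	s.controlMtx.Lock()
	defer s.controlMtx.Unlock()
	if !s.opened {
		return errors.New("store is not open")
	}
	s.log.DebugContext(ctx, "initializing store")
	return nil
}

// Close is a no-op when the store is already closed.
func (s *store) Close(ctx context.Context) error {
	s.controlMtx.Lock()
	defer s.controlMtx.Unlock()
	if !s.opened {
		return nil
	}
	s.log.DebugContext(ctx, "closing store")
	s.opened = false
	return nil
}

func main() {
	s := &store{log: slog.Default()}
	ctx := context.Background()
	_ = s.Open(ctx)
	_ = s.Init(ctx)
	defer s.Close(ctx)
}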

@ -91,7 +91,7 @@ func (b *Blobovnicza) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, err
}
if err == nil && found {
b.log.Debug(logs.BlobovniczaObjectWasRemovedFromBucket,
b.log.Debug(ctx, logs.BlobovniczaObjectWasRemovedFromBucket,
zap.String("binary size", stringifyByteSize(dataSize)),
zap.String("range", stringifyBounds(sizeLowerBound, sizeUpperBound)),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)),


@ -14,11 +14,11 @@ func TestBlobovnicza_Get(t *testing.T) {
filename := filepath.Join(t.TempDir(), "blob")
var blz *Blobovnicza
defer func() { require.NoError(t, blz.Close()) }()
defer func() { require.NoError(t, blz.Close(context.Background())) }()
fnInit := func(szLimit uint64) {
if blz != nil {
require.NoError(t, blz.Close())
require.NoError(t, blz.Close(context.Background()))
}
blz = New(
@ -26,8 +26,8 @@ func TestBlobovnicza_Get(t *testing.T) {
WithObjectSizeLimit(szLimit),
)
require.NoError(t, blz.Open())
require.NoError(t, blz.Init())
require.NoError(t, blz.Open(context.Background()))
require.NoError(t, blz.Init(context.Background()))
}
// initial distribution: [0:32K] (32K:64K]


@ -15,8 +15,8 @@ import (
func TestBlobovniczaIterate(t *testing.T) {
filename := filepath.Join(t.TempDir(), "blob")
b := New(WithPath(filename))
require.NoError(t, b.Open())
require.NoError(t, b.Init())
require.NoError(t, b.Open(context.Background()))
require.NoError(t, b.Init(context.Background()))
data := [][]byte{{0, 1, 2, 3}, {5, 6, 7, 8}}
addr := oidtest.Address()


@ -1,6 +1,7 @@
package blobovniczatree
import (
"context"
"path/filepath"
"sync"
@ -17,8 +18,8 @@ func (db *activeDB) Blobovnicza() *blobovnicza.Blobovnicza {
return db.blz
}
func (db *activeDB) Close() {
db.shDB.Close()
func (db *activeDB) Close(ctx context.Context) {
db.shDB.Close(ctx)
}
func (db *activeDB) SystemPath() string {
@ -53,8 +54,8 @@ func newActiveDBManager(dbManager *dbManager, rootPath string) *activeDBManager
// GetOpenedActiveDBForLevel returns active DB for level.
// DB must be closed after use.
func (m *activeDBManager) GetOpenedActiveDBForLevel(lvlPath string) (*activeDB, error) {
activeDB, err := m.getCurrentActiveIfOk(lvlPath)
func (m *activeDBManager) GetOpenedActiveDBForLevel(ctx context.Context, lvlPath string) (*activeDB, error) {
activeDB, err := m.getCurrentActiveIfOk(ctx, lvlPath)
if err != nil {
return nil, err
}
@ -62,7 +63,7 @@ func (m *activeDBManager) GetOpenedActiveDBForLevel(lvlPath string) (*activeDB,
return activeDB, nil
}
return m.updateAndGetActive(lvlPath)
return m.updateAndGetActive(ctx, lvlPath)
}
func (m *activeDBManager) Open() {
@ -72,18 +73,18 @@ func (m *activeDBManager) Open() {
m.closed = false
}
func (m *activeDBManager) Close() {
func (m *activeDBManager) Close(ctx context.Context) {
m.levelToActiveDBGuard.Lock()
defer m.levelToActiveDBGuard.Unlock()
for _, db := range m.levelToActiveDB {
db.Close()
db.Close(ctx)
}
m.levelToActiveDB = make(map[string]*sharedDB)
m.closed = true
}
func (m *activeDBManager) getCurrentActiveIfOk(lvlPath string) (*activeDB, error) {
func (m *activeDBManager) getCurrentActiveIfOk(ctx context.Context, lvlPath string) (*activeDB, error) {
m.levelToActiveDBGuard.RLock()
defer m.levelToActiveDBGuard.RUnlock()
@ -96,13 +97,13 @@ func (m *activeDBManager) getCurrentActiveIfOk(lvlPath string) (*activeDB, error
return nil, nil
}
blz, err := db.Open() // open db for usage, will be closed on activeDB.Close()
blz, err := db.Open(ctx) // open db for usage, will be closed on activeDB.Close()
if err != nil {
return nil, err
}
if blz.IsFull() {
db.Close()
db.Close(ctx)
return nil, nil
}
@ -112,11 +113,11 @@ func (m *activeDBManager) getCurrentActiveIfOk(lvlPath string) (*activeDB, error
}, nil
}
func (m *activeDBManager) updateAndGetActive(lvlPath string) (*activeDB, error) {
func (m *activeDBManager) updateAndGetActive(ctx context.Context, lvlPath string) (*activeDB, error) {
m.levelLock.Lock(lvlPath)
defer m.levelLock.Unlock(lvlPath)
current, err := m.getCurrentActiveIfOk(lvlPath)
current, err := m.getCurrentActiveIfOk(ctx, lvlPath)
if err != nil {
return nil, err
}
@ -124,7 +125,7 @@ func (m *activeDBManager) updateAndGetActive(lvlPath string) (*activeDB, error)
return current, nil
}
nextShDB, err := m.getNextSharedDB(lvlPath)
nextShDB, err := m.getNextSharedDB(ctx, lvlPath)
if err != nil {
return nil, err
}
@ -133,7 +134,7 @@ func (m *activeDBManager) updateAndGetActive(lvlPath string) (*activeDB, error)
return nil, nil
}
blz, err := nextShDB.Open() // open db for client, client must call Close() after usage
blz, err := nextShDB.Open(ctx) // open db for client, client must call Close() after usage
if err != nil {
return nil, err
}
@ -143,7 +144,7 @@ func (m *activeDBManager) updateAndGetActive(lvlPath string) (*activeDB, error)
}, nil
}
func (m *activeDBManager) getNextSharedDB(lvlPath string) (*sharedDB, error) {
func (m *activeDBManager) getNextSharedDB(ctx context.Context, lvlPath string) (*sharedDB, error) {
var nextActiveDBIdx uint64
hasActive, currentIdx := m.hasActiveDB(lvlPath)
if hasActive {
@ -160,17 +161,17 @@ func (m *activeDBManager) getNextSharedDB(lvlPath string) (*sharedDB, error) {
path := filepath.Join(lvlPath, u64ToHexStringExt(nextActiveDBIdx))
next := m.dbManager.GetByPath(path)
_, err := next.Open() // open db to hold active DB open, will be closed if db is full, after m.replace or by activeDBManager.Close()
_, err := next.Open(ctx) // open db to hold active DB open, will be closed if db is full, after m.replace or by activeDBManager.Close()
if err != nil {
return nil, err
}
previous, updated := m.replace(lvlPath, next)
if !updated && next != nil {
next.Close() // manager is closed, so don't hold active DB open
next.Close(ctx) // manager is closed, so don't hold active DB open
}
if updated && previous != nil {
previous.Close()
previous.Close(ctx)
}
return next, nil
}

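activeDBManager, and the sharedDB it hands out, implement a borrow discipline: every Open(ctx) must be paired with a Close(ctx), and the underlying database is really closed only when the reference count drops to zero. A reduced model of that discipline:

package main

import (
	"context"
	"fmt"
	"sync"
)

type sharedResource struct {
	mu       sync.Mutex
	refCount int
	open     bool
}

// Open gives the caller a reference; the first reference opens the resource.
func (r *sharedResource) Open(ctx context.Context) error {
	r.mu.Lock()
	defer r.mu.Unlock()
	if !r.open {
		fmt.Println("really opening") // e.g. blobovnicza Open + Init in the real code
		r.open = true
	}
	r.refCount++
	return nil
}

// Close drops one reference; the last reference really closes the resource.
func (r *sharedResource) Close(ctx context.Context) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.refCount == 0 {
		fmt.Println("close of an already closed resource") // logged as an error in the diff
		return
	}
	r.refCount--
	if r.refCount == 0 {
		fmt.Println("really closing")
		r.open = false
	}
}

func main() {
	ctx := context.Background()
	r := &sharedResource{}
	_ = r.Open(ctx) // held by the active DB manager
	_ = r.Open(ctx) // borrowed by a request
	r.Close(ctx)    // request done: still open
	r.Close(ctx)    // manager done: really closed
}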

@ -167,7 +167,7 @@ func (b *Blobovniczas) Compressor() *compression.Config {
}
// SetReportErrorFunc implements common.Storage.
func (b *Blobovniczas) SetReportErrorFunc(f func(string, error)) {
func (b *Blobovniczas) SetReportErrorFunc(f func(context.Context, string, error)) {
b.reportError = f
}


@ -32,7 +32,7 @@ func newDBCache(parentCtx context.Context, size int,
ch := cache.NewCache[string, *sharedDB]().
WithTTL(ttl).WithLRU().WithMaxKeys(size).
WithOnEvicted(func(_ string, db *sharedDB) {
db.Close()
db.Close(parentCtx)
})
ctx, cancel := context.WithCancel(parentCtx)
res := &dbCache{
@ -81,12 +81,12 @@ func (c *dbCache) Close() {
c.closed = true
}
func (c *dbCache) GetOrCreate(path string) *sharedDB {
func (c *dbCache) GetOrCreate(ctx context.Context, path string) *sharedDB {
value := c.getExisted(path)
if value != nil {
return value
}
return c.create(path)
return c.create(ctx, path)
}
func (c *dbCache) EvictAndMarkNonCached(path string) {
@ -122,7 +122,7 @@ func (c *dbCache) getExisted(path string) *sharedDB {
return nil
}
func (c *dbCache) create(path string) *sharedDB {
func (c *dbCache) create(ctx context.Context, path string) *sharedDB {
c.pathLock.Lock(path)
defer c.pathLock.Unlock(path)
@ -133,12 +133,12 @@ func (c *dbCache) create(path string) *sharedDB {
value = c.dbManager.GetByPath(path)
_, err := value.Open() // open db to hold reference, closed by evictedDB.Close() or if cache closed
_, err := value.Open(ctx) // open db to hold reference, closed by evictedDB.Close() or if cache closed
if err != nil {
return value
}
if added := c.put(path, value); !added {
value.Close()
value.Close(ctx)
}
return value
}

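The dbCache hunk shows why a context still has to be captured somewhere even after most methods take one explicitly: the cache's eviction hook has no context parameter, so the constructor's parentCtx is closed over. A compact illustration of that capture (the cache here is a bare map standing in for the TTL/LRU cache used by the tree):

package main

import (
	"context"
	"fmt"
)

type db struct{ path string }

func (d *db) Close(ctx context.Context) { fmt.Println("closing", d.path) }

type dbCache struct {
	items     map[string]*db
	onEvicted func(string, *db) // no context in the hook's signature
}

// newDBCache captures parentCtx in the eviction callback, because that is the
// only context available when an entry is dropped asynchronously later on.
func newDBCache(parentCtx context.Context) *dbCache {
	return &dbCache{
		items: make(map[string]*db),
		onEvicted: func(_ string, d *db) {
			d.Close(parentCtx)
		},
	}
}

func (c *dbCache) put(path string, d *db) {
	if old, ok := c.items[path]; ok {
		c.onEvicted(path, old)
	}
	c.items[path] = d
}

func main() {
	c := newDBCache(context.Background())
	c.put("blob/00", &db{path: "blob/00"})
	c.put("blob/00", &db{path: "blob/00 (new)"}) // evicts and closes the old entry
}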

@ -27,7 +27,7 @@ func TestBlobovniczaTree_Concurrency(t *testing.T) {
require.NoError(t, st.Open(mode.ComponentReadWrite))
require.NoError(t, st.Init())
defer func() {
require.NoError(t, st.Close())
require.NoError(t, st.Close(context.Background()))
}()
objGen := &testutil.SeqObjGenerator{ObjSize: 1}


@ -24,10 +24,10 @@ func (b *Blobovniczas) Open(mode mode.ComponentMode) error {
//
// Should be called exactly once.
func (b *Blobovniczas) Init() error {
b.log.Debug(logs.BlobovniczatreeInitializingBlobovniczas)
b.log.Debug(context.Background(), logs.BlobovniczatreeInitializingBlobovniczas)
if b.readOnly {
b.log.Debug(logs.BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization)
b.log.Debug(context.Background(), logs.BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization)
return nil
}
@ -46,11 +46,11 @@ func (b *Blobovniczas) initializeDBs(ctx context.Context) error {
eg.Go(func() error {
p = strings.TrimSuffix(p, rebuildSuffix)
shBlz := b.getBlobovniczaWithoutCaching(p)
blz, err := shBlz.Open()
blz, err := shBlz.Open(egCtx)
if err != nil {
return err
}
defer shBlz.Close()
defer shBlz.Close(egCtx)
moveInfo, err := blz.ListMoveInfo(egCtx)
if err != nil {
@ -60,7 +60,7 @@ func (b *Blobovniczas) initializeDBs(ctx context.Context) error {
b.deleteProtectedObjects.Add(move.Address)
}
b.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p))
b.log.Debug(egCtx, logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p))
return nil
})
return false, nil
@ -80,9 +80,9 @@ func (b *Blobovniczas) openManagers() {
}
// Close implements common.Storage.
func (b *Blobovniczas) Close() error {
func (b *Blobovniczas) Close(ctx context.Context) error {
b.dbCache.Close() // order important
b.activeDBManager.Close()
b.activeDBManager.Close(ctx)
b.commondbManager.Close()
return nil
@ -91,8 +91,8 @@ func (b *Blobovniczas) Close() error {
// returns blobovnicza with path p
//
// If blobovnicza is already cached, instance from cache is returned w/o changes.
func (b *Blobovniczas) getBlobovnicza(p string) *sharedDB {
return b.dbCache.GetOrCreate(p)
func (b *Blobovniczas) getBlobovnicza(ctx context.Context, p string) *sharedDB {
return b.dbCache.GetOrCreate(ctx, p)
}
func (b *Blobovniczas) getBlobovniczaWithoutCaching(p string) *sharedDB {


@ -51,7 +51,7 @@ func TestObjectsAvailableAfterDepthAndWidthEdit(t *testing.T) {
require.NoError(t, err)
require.EqualValues(t, obj35, gRes.Object)
require.NoError(t, blz.Close())
require.NoError(t, blz.Close(context.Background()))
// change depth and width
blz = NewBlobovniczaTree(
@ -89,7 +89,7 @@ func TestObjectsAvailableAfterDepthAndWidthEdit(t *testing.T) {
})
require.NoError(t, err)
require.NoError(t, blz.Close())
require.NoError(t, blz.Close(context.Background()))
// change depth and width back
blz = NewBlobovniczaTree(
@ -127,5 +127,5 @@ func TestObjectsAvailableAfterDepthAndWidthEdit(t *testing.T) {
require.NoError(t, err)
require.EqualValues(t, obj52, gRes.Object)
require.NoError(t, blz.Close())
require.NoError(t, blz.Close(context.Background()))
}


@ -16,17 +16,17 @@ func (b *Blobovniczas) ObjectsCount(ctx context.Context) (uint64, error) {
b.metrics.ObjectsCount(time.Since(startedAt), success)
}()
_, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.ObjectsCount")
ctx, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.ObjectsCount")
defer span.End()
var result uint64
err := b.iterateExistingDBPaths(ctx, func(p string) (bool, error) {
shDB := b.getBlobovniczaWithoutCaching(p)
blz, err := shDB.Open()
blz, err := shDB.Open(ctx)
if err != nil {
return true, err
}
defer shDB.Close()
defer shDB.Close(ctx)
result += blz.ObjectsCount()
return false, nil


@ -61,12 +61,12 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
if prm.StorageID != nil {
id := NewIDFromBytes(prm.StorageID)
shBlz := b.getBlobovnicza(id.Path())
blz, err := shBlz.Open()
shBlz := b.getBlobovnicza(ctx, id.Path())
blz, err := shBlz.Open(ctx)
if err != nil {
return res, err
}
defer shBlz.Close()
defer shBlz.Close(ctx)
if res, err = b.deleteObject(ctx, blz, bPrm); err == nil {
success = true
@ -80,7 +80,7 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
res, err = b.deleteObjectFromLevel(ctx, bPrm, p)
if err != nil {
if !client.IsErrObjectNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromLevel,
b.log.Debug(ctx, logs.BlobovniczatreeCouldNotRemoveObjectFromLevel,
zap.String("level", p),
zap.String("error", err.Error()),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
@ -109,12 +109,12 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
//
// returns no error if object was removed from some blobovnicza of the same level.
func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicza.DeletePrm, blzPath string) (common.DeleteRes, error) {
shBlz := b.getBlobovnicza(blzPath)
blz, err := shBlz.Open()
shBlz := b.getBlobovnicza(ctx, blzPath)
blz, err := shBlz.Open(ctx)
if err != nil {
return common.DeleteRes{}, err
}
defer shBlz.Close()
defer shBlz.Close(ctx)
return b.deleteObject(ctx, blz, prm)
}


@ -37,12 +37,12 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common
if prm.StorageID != nil {
id := NewIDFromBytes(prm.StorageID)
shBlz := b.getBlobovnicza(id.Path())
blz, err := shBlz.Open()
shBlz := b.getBlobovnicza(ctx, id.Path())
blz, err := shBlz.Open(ctx)
if err != nil {
return common.ExistsRes{}, err
}
defer shBlz.Close()
defer shBlz.Close(ctx)
exists, err := blz.Exists(ctx, prm.Address)
return common.ExistsRes{Exists: exists}, err
@ -55,7 +55,7 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common
_, err := b.getObjectFromLevel(ctx, gPrm, p)
if err != nil {
if !client.IsErrObjectNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
zap.String("level", p),
zap.String("error", err.Error()),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))


@ -27,7 +27,7 @@ func TestExistsInvalidStorageID(t *testing.T) {
WithBlobovniczaSize(1<<20))
require.NoError(t, b.Open(mode.ComponentReadWrite))
require.NoError(t, b.Init())
defer func() { require.NoError(t, b.Close()) }()
defer func() { require.NoError(t, b.Close(context.Background())) }()
obj := blobstortest.NewObject(1024)
addr := object.AddressOf(obj)


@ -48,12 +48,12 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
if prm.StorageID != nil {
id := NewIDFromBytes(prm.StorageID)
shBlz := b.getBlobovnicza(id.Path())
blz, err := shBlz.Open()
shBlz := b.getBlobovnicza(ctx, id.Path())
blz, err := shBlz.Open(ctx)
if err != nil {
return res, err
}
defer shBlz.Close()
defer shBlz.Close(ctx)
res, err = b.getObject(ctx, blz, bPrm)
if err == nil {
@ -67,7 +67,7 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
res, err = b.getObjectFromLevel(ctx, bPrm, p)
if err != nil {
if !client.IsErrObjectNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
zap.String("level", p),
zap.String("error", err.Error()),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
@ -95,12 +95,12 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
// returns error if object could not be read from any blobovnicza of the same level.
func (b *Blobovniczas) getObjectFromLevel(ctx context.Context, prm blobovnicza.GetPrm, blzPath string) (common.GetRes, error) {
// open blobovnicza (cached inside)
shBlz := b.getBlobovnicza(blzPath)
blz, err := shBlz.Open()
shBlz := b.getBlobovnicza(ctx, blzPath)
blz, err := shBlz.Open(ctx)
if err != nil {
return common.GetRes{}, err
}
defer shBlz.Close()
defer shBlz.Close(ctx)
return b.getObject(ctx, blz, prm)
}


@ -47,12 +47,12 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
if prm.StorageID != nil {
id := NewIDFromBytes(prm.StorageID)
shBlz := b.getBlobovnicza(id.Path())
blz, err := shBlz.Open()
shBlz := b.getBlobovnicza(ctx, id.Path())
blz, err := shBlz.Open(ctx)
if err != nil {
return common.GetRangeRes{}, err
}
defer shBlz.Close()
defer shBlz.Close(ctx)
res, err := b.getObjectRange(ctx, blz, prm)
if err == nil {
@ -69,7 +69,7 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
if err != nil {
outOfBounds := isErrOutOfRange(err)
if !outOfBounds && !client.IsErrObjectNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
zap.String("level", p),
zap.String("error", err.Error()),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
@ -103,12 +103,12 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
// returns error if object could not be read from any blobovnicza of the same level.
func (b *Blobovniczas) getRangeFromLevel(ctx context.Context, prm common.GetRangePrm, blzPath string) (common.GetRangeRes, error) {
// open blobovnicza (cached inside)
shBlz := b.getBlobovnicza(blzPath)
blz, err := shBlz.Open()
shBlz := b.getBlobovnicza(ctx, blzPath)
blz, err := shBlz.Open(ctx)
if err != nil {
return common.GetRangeRes{}, err
}
defer shBlz.Close()
defer shBlz.Close(ctx)
return b.getObjectRange(ctx, blz, prm)
}

View file

@ -42,7 +42,7 @@ func (b *Blobovniczas) Iterate(ctx context.Context, prm common.IteratePrm) (comm
data, err := b.compression.Decompress(elem.ObjectData())
if err != nil {
if prm.IgnoreErrors {
b.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
zap.Stringer("address", elem.Address()),
zap.String("err", err.Error()),
zap.String("storage_id", p),
@ -72,11 +72,11 @@ func (b *Blobovniczas) Iterate(ctx context.Context, prm common.IteratePrm) (comm
// iterator over all Blobovniczas in unsorted order. Break on f's error return.
func (b *Blobovniczas) iterateBlobovniczas(ctx context.Context, ignoreErrors bool, f func(string, *blobovnicza.Blobovnicza) error) error {
return b.iterateExistingDBPaths(ctx, func(p string) (bool, error) {
shBlz := b.getBlobovnicza(p)
blz, err := shBlz.Open()
shBlz := b.getBlobovnicza(ctx, p)
blz, err := shBlz.Open(ctx)
if err != nil {
if ignoreErrors {
b.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
zap.String("err", err.Error()),
zap.String("storage_id", p),
zap.String("root_path", b.rootPath))
@ -84,7 +84,7 @@ func (b *Blobovniczas) iterateBlobovniczas(ctx context.Context, ignoreErrors boo
}
return false, fmt.Errorf("could not open blobovnicza %s: %w", p, err)
}
defer shBlz.Close()
defer shBlz.Close(ctx)
err = f(p, blz)

View file

@ -1,6 +1,7 @@
package blobovniczatree
import (
"context"
"errors"
"fmt"
"os"
@ -48,7 +49,7 @@ func newSharedDB(options []blobovnicza.Option, path string, readOnly bool,
}
}
func (b *sharedDB) Open() (*blobovnicza.Blobovnicza, error) {
func (b *sharedDB) Open(ctx context.Context) (*blobovnicza.Blobovnicza, error) {
if b.closedFlag.Load() {
return nil, errClosed
}
@ -67,10 +68,10 @@ func (b *sharedDB) Open() (*blobovnicza.Blobovnicza, error) {
blobovnicza.WithMetrics(b.metrics),
)...)
if err := blz.Open(); err != nil {
if err := blz.Open(ctx); err != nil {
return nil, fmt.Errorf("could not open blobovnicza %s: %w", b.path, err)
}
if err := blz.Init(); err != nil {
if err := blz.Init(ctx); err != nil {
return nil, fmt.Errorf("could not init blobovnicza %s: %w", b.path, err)
}
@ -81,20 +82,20 @@ func (b *sharedDB) Open() (*blobovnicza.Blobovnicza, error) {
return blz, nil
}
func (b *sharedDB) Close() {
func (b *sharedDB) Close(ctx context.Context) {
b.cond.L.Lock()
defer b.cond.L.Unlock()
if b.refCount == 0 {
b.log.Error(logs.AttemtToCloseAlreadyClosedBlobovnicza, zap.String("id", b.path))
b.log.Error(ctx, logs.AttemtToCloseAlreadyClosedBlobovnicza, zap.String("id", b.path))
b.cond.Broadcast()
return
}
if b.refCount == 1 {
b.refCount = 0
if err := b.blcza.Close(); err != nil {
b.log.Error(logs.BlobovniczatreeCouldNotCloseBlobovnicza,
if err := b.blcza.Close(ctx); err != nil {
b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza,
zap.String("id", b.path),
zap.String("error", err.Error()),
)
@ -110,7 +111,7 @@ func (b *sharedDB) Close() {
}
}
func (b *sharedDB) CloseAndRemoveFile() error {
func (b *sharedDB) CloseAndRemoveFile(ctx context.Context) error {
b.cond.L.Lock()
if b.refCount > 1 {
b.cond.Wait()
@ -121,8 +122,8 @@ func (b *sharedDB) CloseAndRemoveFile() error {
return errClosingClosedBlobovnicza
}
if err := b.blcza.Close(); err != nil {
b.log.Error(logs.BlobovniczatreeCouldNotCloseBlobovnicza,
if err := b.blcza.Close(ctx); err != nil {
b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza,
zap.String("id", b.path),
zap.String("error", err.Error()),
)
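For orientation, the calling convention that Get, GetRange and Exists now follow against this sharedDB API can be reduced to the sketch below. The helper name withBlobovnicza is illustrative and not part of the change; the usual package imports are assumed.

// withBlobovnicza opens a shared blobovnicza with the caller's context,
// runs f against it, and closes it with the same context so that the
// open/close log records carry the request's trace information.
func (b *Blobovniczas) withBlobovnicza(ctx context.Context, path string, f func(*blobovnicza.Blobovnicza) error) error {
	shBlz := b.getBlobovnicza(ctx, path)
	blz, err := shBlz.Open(ctx)
	if err != nil {
		return err
	}
	defer shBlz.Close(ctx)

	return f(blz)
}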

View file

@ -1,6 +1,7 @@
package blobovniczatree
import (
"context"
"io/fs"
"time"
@ -20,7 +21,7 @@ type cfg struct {
blzShallowWidth uint64
compression *compression.Config
blzOpts []blobovnicza.Option
reportError func(string, error) // reportError is the function called when encountering disk errors.
reportError func(context.Context, string, error) // reportError is the function called when encountering disk errors.
metrics Metrics
waitBeforeDropDB time.Duration
blzInitWorkerCount int
@ -47,14 +48,14 @@ const (
func initConfig(c *cfg) {
*c = cfg{
log: &logger.Logger{Logger: zap.L()},
log: logger.NewLoggerWrapper(zap.L()),
perm: defaultPerm,
openedCacheSize: defaultOpenedCacheSize,
openedCacheTTL: defaultOpenedCacheTTL,
openedCacheExpInterval: defaultOpenedCacheInterval,
blzShallowDepth: defaultBlzShallowDepth,
blzShallowWidth: defaultBlzShallowWidth,
reportError: func(string, error) {},
reportError: func(context.Context, string, error) {},
metrics: &noopMetrics{},
waitBeforeDropDB: defaultWaitBeforeDropDB,
blzInitWorkerCount: defaultBlzInitWorkerCount,
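The default above reflects how loggers are constructed throughout the PR: the struct literal &logger.Logger{Logger: zap.L()} is replaced by a wrapper constructor, and every log call takes the request context as its first argument. A minimal sketch of the pattern, assuming the usual package imports (the function and variable names are illustrative):

func newTreeLogger(ctx context.Context) *logger.Logger {
	// Wrap the global zap logger instead of building the struct literal by hand.
	lg := logger.NewLoggerWrapper(zap.L())

	// With derives a component-scoped *logger.Logger, as the WithLogger options do.
	treeLog := lg.With(zap.String("component", "BlobovniczaTree"))

	// Every Debug/Info/Warn/Error call now receives the context first.
	treeLog.Debug(ctx, logs.BlobovniczaTreeCollectingDBToRebuild)
	return treeLog
}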

View file

@ -77,12 +77,12 @@ type putIterator struct {
}
func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error) {
active, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(lvlPath)
active, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(ctx, lvlPath)
if err != nil {
if !isLogical(err) {
i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
} else {
i.B.log.Debug(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza,
i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza,
zap.String("error", err.Error()),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
@ -91,20 +91,20 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error)
}
if active == nil {
i.B.log.Debug(logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath),
i.B.log.Debug(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
return false, nil
}
defer active.Close()
defer active.Close(ctx)
i.AllFull = false
_, err = active.Blobovnicza().Put(ctx, i.PutPrm)
if err != nil {
if !isLogical(err) {
i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
} else {
i.B.log.Debug(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza,
i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza,
zap.String("path", active.SystemPath()),
zap.String("error", err.Error()),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))

View file

@ -49,25 +49,25 @@ func (b *Blobovniczas) Rebuild(ctx context.Context, prm common.RebuildPrm) (comm
var res common.RebuildRes
b.log.Debug(logs.BlobovniczaTreeCompletingPreviousRebuild)
b.log.Debug(ctx, logs.BlobovniczaTreeCompletingPreviousRebuild)
completedPreviosMoves, err := b.completeIncompletedMove(ctx, prm.MetaStorage)
res.ObjectsMoved += completedPreviosMoves
if err != nil {
b.log.Warn(logs.BlobovniczaTreeCompletedPreviousRebuildFailed, zap.Error(err))
b.log.Warn(ctx, logs.BlobovniczaTreeCompletedPreviousRebuildFailed, zap.Error(err))
success = false
return res, err
}
b.log.Debug(logs.BlobovniczaTreeCompletedPreviousRebuildSuccess)
b.log.Debug(ctx, logs.BlobovniczaTreeCompletedPreviousRebuildSuccess)
b.log.Debug(logs.BlobovniczaTreeCollectingDBToRebuild)
b.log.Debug(ctx, logs.BlobovniczaTreeCollectingDBToRebuild)
dbsToMigrate, err := b.getDBsToRebuild(ctx, prm.FillPercent)
if err != nil {
b.log.Warn(logs.BlobovniczaTreeCollectingDBToRebuildFailed, zap.Error(err))
b.log.Warn(ctx, logs.BlobovniczaTreeCollectingDBToRebuildFailed, zap.Error(err))
success = false
return res, err
}
b.log.Info(logs.BlobovniczaTreeCollectingDBToRebuildSuccess, zap.Int("blobovniczas_to_rebuild", len(dbsToMigrate)))
b.log.Info(ctx, logs.BlobovniczaTreeCollectingDBToRebuildSuccess, zap.Int("blobovniczas_to_rebuild", len(dbsToMigrate)))
res, err = b.migrateDBs(ctx, dbsToMigrate, prm, res)
if err != nil {
success = false
@ -78,14 +78,14 @@ func (b *Blobovniczas) Rebuild(ctx context.Context, prm common.RebuildPrm) (comm
func (b *Blobovniczas) migrateDBs(ctx context.Context, dbs []string, prm common.RebuildPrm, res common.RebuildRes) (common.RebuildRes, error) {
var completedDBCount uint32
for _, db := range dbs {
b.log.Debug(logs.BlobovniczaTreeRebuildingBlobovnicza, zap.String("path", db))
b.log.Debug(ctx, logs.BlobovniczaTreeRebuildingBlobovnicza, zap.String("path", db))
movedObjects, err := b.rebuildDB(ctx, db, prm.MetaStorage, prm.WorkerLimiter)
res.ObjectsMoved += movedObjects
if err != nil {
b.log.Warn(logs.BlobovniczaTreeRebuildingBlobovniczaFailed, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects), zap.Error(err))
b.log.Warn(ctx, logs.BlobovniczaTreeRebuildingBlobovniczaFailed, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects), zap.Error(err))
return res, err
}
b.log.Debug(logs.BlobovniczaTreeRebuildingBlobovniczaSuccess, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects))
b.log.Debug(ctx, logs.BlobovniczaTreeRebuildingBlobovniczaSuccess, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects))
res.FilesRemoved++
completedDBCount++
b.metrics.SetRebuildPercent((100 * completedDBCount) / uint32(len(dbs)))
@ -165,7 +165,7 @@ func (b *Blobovniczas) selectDBsDoNotMatchFillPercent(ctx context.Context, targe
continue
}
path := filepath.Join(lvlPath, e.Name())
resettlementRequired, err := b.rebuildBySize(path, target)
resettlementRequired, err := b.rebuildBySize(ctx, path, target)
if err != nil {
return false, err
}
@ -180,13 +180,13 @@ func (b *Blobovniczas) selectDBsDoNotMatchFillPercent(ctx context.Context, targe
return result, nil
}
func (b *Blobovniczas) rebuildBySize(path string, targetFillPercent int) (bool, error) {
shDB := b.getBlobovnicza(path)
blz, err := shDB.Open()
func (b *Blobovniczas) rebuildBySize(ctx context.Context, path string, targetFillPercent int) (bool, error) {
shDB := b.getBlobovnicza(ctx, path)
blz, err := shDB.Open(ctx)
if err != nil {
return false, err
}
defer shDB.Close()
defer shDB.Close(ctx)
fp := blz.FillPercent()
// the accepted fill percent range is defined as
// |----|+++++++++++++++++|+++++++++++++++++|---------------
@ -196,8 +196,8 @@ func (b *Blobovniczas) rebuildBySize(path string, targetFillPercent int) (bool,
}
func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.MetaStorage, limiter common.ConcurrentWorkersLimiter) (uint64, error) {
shDB := b.getBlobovnicza(path)
blz, err := shDB.Open()
shDB := b.getBlobovnicza(ctx, path)
blz, err := shDB.Open(ctx)
if err != nil {
return 0, err
}
@ -206,9 +206,9 @@ func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.M
if shDBClosed {
return
}
shDB.Close()
shDB.Close(ctx)
}()
dropTempFile, err := b.addRebuildTempFile(path)
dropTempFile, err := b.addRebuildTempFile(ctx, path)
if err != nil {
return 0, err
}
@ -224,7 +224,7 @@ func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.M
return migratedObjects, err
}
func (b *Blobovniczas) addRebuildTempFile(path string) (func(), error) {
func (b *Blobovniczas) addRebuildTempFile(ctx context.Context, path string) (func(), error) {
sysPath := filepath.Join(b.rootPath, path)
sysPath = sysPath + rebuildSuffix
_, err := os.OpenFile(sysPath, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, b.perm)
@ -233,7 +233,7 @@ func (b *Blobovniczas) addRebuildTempFile(path string) (func(), error) {
}
return func() {
if err := os.Remove(sysPath); err != nil {
b.log.Warn(logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
b.log.Warn(ctx, logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
}
}, nil
}
@ -330,7 +330,7 @@ func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDb *sharedDB)
b.dbFilesGuard.Lock()
defer b.dbFilesGuard.Unlock()
if err := shDb.CloseAndRemoveFile(); err != nil {
if err := shDb.CloseAndRemoveFile(ctx); err != nil {
return false, err
}
b.commondbManager.CleanResources(path)
@ -365,12 +365,12 @@ func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore co
err := b.iterateIncompletedRebuildDBPaths(ctx, func(s string) (bool, error) {
rebuildTmpFilePath := s
s = strings.TrimSuffix(s, rebuildSuffix)
shDB := b.getBlobovnicza(s)
blz, err := shDB.Open()
shDB := b.getBlobovnicza(ctx, s)
blz, err := shDB.Open(ctx)
if err != nil {
return true, err
}
defer shDB.Close()
defer shDB.Close(ctx)
incompletedMoves, err := blz.ListMoveInfo(ctx)
if err != nil {
@ -389,7 +389,7 @@ func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore co
})
for _, tmp := range rebuildTempFilesToRemove {
if err := os.Remove(filepath.Join(b.rootPath, tmp)); err != nil {
b.log.Warn(logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
b.log.Warn(ctx, logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
}
}
return count, err
@ -398,12 +398,12 @@ func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore co
func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blobovnicza, sourcePath string,
move blobovnicza.MoveInfo, metaStore common.MetaStorage,
) error {
targetDB := b.getBlobovnicza(NewIDFromBytes(move.TargetStorageID).Path())
target, err := targetDB.Open()
targetDB := b.getBlobovnicza(ctx, NewIDFromBytes(move.TargetStorageID).Path())
target, err := targetDB.Open(ctx)
if err != nil {
return err
}
defer targetDB.Close()
defer targetDB.Close(ctx)
existsInSource := true
var gPrm blobovnicza.GetPrm
@ -413,14 +413,14 @@ func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blob
if client.IsErrObjectNotFound(err) {
existsInSource = false
} else {
b.log.Warn(logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err))
b.log.Warn(ctx, logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err))
return err
}
}
if !existsInSource { // object was deleted by Rebuild, need to delete move info
if err = source.DropMoveInfo(ctx, move.Address); err != nil {
b.log.Warn(logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err))
b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err))
return err
}
b.deleteProtectedObjects.Delete(move.Address)
@ -429,7 +429,7 @@ func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blob
existsInTarget, err := target.Exists(ctx, move.Address)
if err != nil {
b.log.Warn(logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err))
b.log.Warn(ctx, logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err))
return err
}
@ -439,25 +439,25 @@ func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blob
putPrm.SetMarshaledObject(gRes.Object())
_, err = target.Put(ctx, putPrm)
if err != nil {
b.log.Warn(logs.BlobovniczatreeCouldNotPutObjectToTargetDB, zap.String("path", targetDB.SystemPath()), zap.Error(err))
b.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutObjectToTargetDB, zap.String("path", targetDB.SystemPath()), zap.Error(err))
return err
}
}
if err = metaStore.UpdateStorageID(ctx, move.Address, move.TargetStorageID); err != nil {
b.log.Warn(logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", move.Address))
b.log.Warn(ctx, logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", move.Address))
return err
}
var deletePrm blobovnicza.DeletePrm
deletePrm.SetAddress(move.Address)
if _, err = source.Delete(ctx, deletePrm); err != nil {
b.log.Warn(logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", sourcePath), zap.Error(err))
b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", sourcePath), zap.Error(err))
return err
}
if err = source.DropMoveInfo(ctx, move.Address); err != nil {
b.log.Warn(logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err))
b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err))
return err
}
@ -477,21 +477,21 @@ type moveIterator struct {
}
func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool, error) {
target, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(lvlPath)
target, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(ctx, lvlPath)
if err != nil {
if !isLogical(err) {
i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
} else {
i.B.log.Warn(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, zap.Error(err))
i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, zap.Error(err))
}
return false, nil
}
if target == nil {
i.B.log.Warn(logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath))
i.B.log.Warn(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath))
return false, nil
}
defer target.Close()
defer target.Close(ctx)
i.AllFull = false
@ -503,9 +503,9 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool,
TargetStorageID: targetStorageID.Bytes(),
}); err != nil {
if !isLogical(err) {
i.B.reportError(logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, err)
i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, err)
} else {
i.B.log.Warn(logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, zap.String("path", i.SourceSysPath), zap.Error(err))
i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, zap.String("path", i.SourceSysPath), zap.Error(err))
}
return true, nil
}
@ -519,15 +519,15 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool,
_, err = target.Blobovnicza().Put(ctx, putPrm)
if err != nil {
if !isLogical(err) {
i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
} else {
i.B.log.Warn(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", target.SystemPath()), zap.Error(err))
i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", target.SystemPath()), zap.Error(err))
}
return true, nil
}
if err = i.MetaStore.UpdateStorageID(ctx, i.Address, targetStorageID.Bytes()); err != nil {
i.B.log.Warn(logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", i.Address))
i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", i.Address))
return true, nil
}
@ -535,18 +535,18 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool,
deletePrm.SetAddress(i.Address)
if _, err = i.Source.Delete(ctx, deletePrm); err != nil {
if !isLogical(err) {
i.B.reportError(logs.BlobovniczatreeCouldNotDeleteFromSource, err)
i.B.reportError(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, err)
} else {
i.B.log.Warn(logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", i.SourceSysPath), zap.Error(err))
i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", i.SourceSysPath), zap.Error(err))
}
return true, nil
}
if err = i.Source.DropMoveInfo(ctx, i.Address); err != nil {
if !isLogical(err) {
i.B.reportError(logs.BlobovniczatreeCouldNotDropMoveInfo, err)
i.B.reportError(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, err)
} else {
i.B.log.Warn(logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", i.SourceSysPath), zap.Error(err))
i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", i.SourceSysPath), zap.Error(err))
}
return true, nil
}
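One detail worth noting in this file's changes: the cleanup closure returned by addRebuildTempFile captures the rebuild context, so a failed removal is logged with the same trace ID as the rebuild that created the file. Reduced to its essentials (the helper name is illustrative, not part of the change):

// tempFileCleanup returns a closure that removes the temp file and,
// on failure, warns using the context captured when the rebuild started.
func (b *Blobovniczas) tempFileCleanup(ctx context.Context, sysPath string) func() {
	return func() {
		if err := os.Remove(sysPath); err != nil {
			b.log.Warn(ctx, logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
		}
	}
}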

View file

@ -35,8 +35,8 @@ func testRebuildFailoverOnlyMoveInfoSaved(t *testing.T) {
dir := t.TempDir()
blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
require.NoError(t, blz.Open())
require.NoError(t, blz.Init())
require.NoError(t, blz.Open(context.Background()))
require.NoError(t, blz.Init(context.Background()))
obj := blobstortest.NewObject(1024)
data, err := obj.Marshal()
@ -53,7 +53,7 @@ func testRebuildFailoverOnlyMoveInfoSaved(t *testing.T) {
TargetStorageID: []byte("0/0/0"),
}))
require.NoError(t, blz.Close())
require.NoError(t, blz.Close(context.Background()))
_, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm)
require.NoError(t, err)
@ -65,8 +65,8 @@ func testRebuildFailoverObjectSavedToTarget(t *testing.T) {
dir := t.TempDir()
blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
require.NoError(t, blz.Open())
require.NoError(t, blz.Init())
require.NoError(t, blz.Open(context.Background()))
require.NoError(t, blz.Init(context.Background()))
obj := blobstortest.NewObject(1024)
data, err := obj.Marshal()
@ -83,19 +83,19 @@ func testRebuildFailoverObjectSavedToTarget(t *testing.T) {
TargetStorageID: []byte("0/0/0"),
}))
require.NoError(t, blz.Close())
require.NoError(t, blz.Close(context.Background()))
_, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm)
require.NoError(t, err)
blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db")))
require.NoError(t, blz.Open())
require.NoError(t, blz.Init())
require.NoError(t, blz.Open(context.Background()))
require.NoError(t, blz.Init(context.Background()))
_, err = blz.Put(context.Background(), pPrm)
require.NoError(t, err)
require.NoError(t, blz.Close())
require.NoError(t, blz.Close(context.Background()))
testRebuildFailoverValidate(t, dir, obj, true)
}
@ -105,8 +105,8 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) {
dir := t.TempDir()
blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
require.NoError(t, blz.Open())
require.NoError(t, blz.Init())
require.NoError(t, blz.Open(context.Background()))
require.NoError(t, blz.Init(context.Background()))
obj := blobstortest.NewObject(1024)
data, err := obj.Marshal()
@ -117,14 +117,14 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) {
TargetStorageID: []byte("0/0/0"),
}))
require.NoError(t, blz.Close())
require.NoError(t, blz.Close(context.Background()))
_, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm)
require.NoError(t, err)
blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db")))
require.NoError(t, blz.Open())
require.NoError(t, blz.Init())
require.NoError(t, blz.Open(context.Background()))
require.NoError(t, blz.Init(context.Background()))
var pPrm blobovnicza.PutPrm
pPrm.SetAddress(object.AddressOf(obj))
@ -132,7 +132,7 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) {
_, err = blz.Put(context.Background(), pPrm)
require.NoError(t, err)
require.NoError(t, blz.Close())
require.NoError(t, blz.Close(context.Background()))
testRebuildFailoverValidate(t, dir, obj, false)
}
@ -170,11 +170,11 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object
require.Equal(t, uint64(1), rRes.ObjectsMoved)
require.Equal(t, uint64(0), rRes.FilesRemoved)
require.NoError(t, b.Close())
require.NoError(t, b.Close(context.Background()))
blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
require.NoError(t, blz.Open())
require.NoError(t, blz.Init())
require.NoError(t, blz.Open(context.Background()))
require.NoError(t, blz.Init(context.Background()))
moveInfo, err := blz.ListMoveInfo(context.Background())
require.NoError(t, err)
@ -185,11 +185,11 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object
_, err = blz.Get(context.Background(), gPrm)
require.True(t, client.IsErrObjectNotFound(err))
require.NoError(t, blz.Close())
require.NoError(t, blz.Close(context.Background()))
blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db")))
require.NoError(t, blz.Open())
require.NoError(t, blz.Init())
require.NoError(t, blz.Open(context.Background()))
require.NoError(t, blz.Init(context.Background()))
moveInfo, err = blz.ListMoveInfo(context.Background())
require.NoError(t, err)
@ -203,7 +203,7 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object
require.True(t, bytes.Equal([]byte("0/0/0"), metaStub.storageIDs[object.AddressOf(obj)]))
}
require.NoError(t, blz.Close())
require.NoError(t, blz.Close(context.Background()))
_, err = os.Stat(filepath.Join(dir, "0", "0", "1.db.rebuild"))
require.True(t, os.IsNotExist(err))

View file

@ -93,7 +93,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
require.NoError(t, err)
}
require.NoError(t, b.Close())
require.NoError(t, b.Close(context.Background()))
})
t.Run("no rebuild single db", func(t *testing.T) {
@ -145,7 +145,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
require.NoError(t, err)
}
require.NoError(t, b.Close())
require.NoError(t, b.Close(context.Background()))
})
t.Run("rebuild by fill percent", func(t *testing.T) {
@ -214,7 +214,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
require.NoError(t, err)
}
require.NoError(t, b.Close())
require.NoError(t, b.Close(context.Background()))
})
t.Run("rebuild by overflow", func(t *testing.T) {
@ -251,7 +251,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
storageIDs: storageIDs,
guard: &sync.Mutex{},
}
require.NoError(t, b.Close())
require.NoError(t, b.Close(context.Background()))
b = NewBlobovniczaTree(
context.Background(),
WithLogger(test.NewLogger(t)),
@ -284,7 +284,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
require.NoError(t, err)
}
require.NoError(t, b.Close())
require.NoError(t, b.Close(context.Background()))
})
}
@ -318,7 +318,7 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
storageIDs := make(map[oid.Address][]byte)
storageIDs[prm.Address] = res.StorageID
require.NoError(t, b.Close())
require.NoError(t, b.Close(context.Background()))
b = NewBlobovniczaTree(
context.Background(),
@ -355,7 +355,7 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
require.NoError(t, err)
}
require.NoError(t, b.Close())
require.NoError(t, b.Close(context.Background()))
}
func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, targetDepth, targetWidth uint64, shouldMigrate bool) {
@ -399,7 +399,7 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta
}
require.NoError(t, eg.Wait())
require.NoError(t, b.Close())
require.NoError(t, b.Close(context.Background()))
b = NewBlobovniczaTree(
context.Background(),
@ -444,7 +444,7 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta
require.NoError(t, err)
}
require.NoError(t, b.Close())
require.NoError(t, b.Close(context.Background()))
}
type storageIDUpdateStub struct {

View file

@ -1,6 +1,7 @@
package blobstor
import (
"context"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@ -47,7 +48,7 @@ type cfg struct {
}
func initConfig(c *cfg) {
c.log = &logger.Logger{Logger: zap.L()}
c.log = logger.NewLoggerWrapper(zap.L())
c.metrics = &noopMetrics{}
}
@ -90,7 +91,7 @@ func WithStorages(st []SubStorage) Option {
// WithLogger returns option to specify BlobStor's logger.
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
c.log = &logger.Logger{Logger: l.With(zap.String("component", "BlobStor"))}
c.log = l.With(zap.String("component", "BlobStor"))
}
}
@ -139,7 +140,7 @@ func WithUncompressableContentTypes(values []string) Option {
// SetReportErrorFunc allows to provide a function to be called on disk errors.
// This function MUST be called before Open.
func (b *BlobStor) SetReportErrorFunc(f func(string, error)) {
func (b *BlobStor) SetReportErrorFunc(f func(context.Context, string, error)) {
for i := range b.storage {
b.storage[i].Storage.SetReportErrorFunc(f)
}
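A hedged sketch of what a context-aware callback for this hook might look like on the caller's side; the wiring shown is illustrative, only the func(context.Context, string, error) signature comes from the change, and the usual package imports are assumed.

// setReportError wires a disk-error callback that logs with the caller's
// context, so error reports inherit the trace ID of the failing request.
func setReportError(bs *BlobStor, lg *logger.Logger) {
	bs.SetReportErrorFunc(func(ctx context.Context, msg string, err error) {
		lg.Error(ctx, msg, zap.Error(err))
	})
}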

View file

@ -54,7 +54,7 @@ func TestCompression(t *testing.T) {
WithCompressObjects(compress),
WithStorages(defaultStorages(dir, smallSizeLimit)))
require.NoError(t, bs.Open(context.Background(), mode.ReadWrite))
require.NoError(t, bs.Init())
require.NoError(t, bs.Init(context.Background()))
return bs
}
@ -91,20 +91,20 @@ func TestCompression(t *testing.T) {
blobStor := newBlobStor(t, false)
testPut(t, blobStor, 0)
testGet(t, blobStor, 0)
require.NoError(t, blobStor.Close())
require.NoError(t, blobStor.Close(context.Background()))
blobStor = newBlobStor(t, true)
testGet(t, blobStor, 0) // get uncompressed object with compress enabled
testPut(t, blobStor, 1)
testGet(t, blobStor, 1)
require.NoError(t, blobStor.Close())
require.NoError(t, blobStor.Close(context.Background()))
blobStor = newBlobStor(t, false)
testGet(t, blobStor, 0) // get old uncompressed object
testGet(t, blobStor, 1) // get compressed object with compression disabled
testPut(t, blobStor, 2)
testGet(t, blobStor, 2)
require.NoError(t, blobStor.Close())
require.NoError(t, blobStor.Close(context.Background()))
}
func TestBlobstor_needsCompression(t *testing.T) {
@ -130,7 +130,7 @@ func TestBlobstor_needsCompression(t *testing.T) {
},
}))
require.NoError(t, bs.Open(context.Background(), mode.ReadWrite))
require.NoError(t, bs.Init())
require.NoError(t, bs.Init(context.Background()))
return bs
}
@ -192,7 +192,7 @@ func TestConcurrentPut(t *testing.T) {
blobStor := New(
WithStorages(defaultStorages(dir, smallSizeLimit)))
require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite))
require.NoError(t, blobStor.Init())
require.NoError(t, blobStor.Init(context.Background()))
testGet := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) {
res, err := b.Get(context.Background(), common.GetPrm{Address: object.AddressOf(obj)})
@ -272,7 +272,7 @@ func TestConcurrentDelete(t *testing.T) {
blobStor := New(
WithStorages(defaultStorages(dir, smallSizeLimit)))
require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite))
require.NoError(t, blobStor.Init())
require.NoError(t, blobStor.Init(context.Background()))
testPut := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) {
var prm common.PutPrm

View file

@ -12,7 +12,7 @@ import (
type Storage interface {
Open(mode mode.ComponentMode) error
Init() error
Close() error
Close(context.Context) error
Type() string
Path() string
@ -23,7 +23,7 @@ type Storage interface {
// SetReportErrorFunc allows to provide a function to be called on disk errors.
// This function MUST be called before Open.
SetReportErrorFunc(f func(string, error))
SetReportErrorFunc(f func(context.Context, string, error))
SetParentID(parentID string)
Get(context.Context, GetPrm) (GetRes, error)
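For implementers of this interface, the change means Close and the error-reporting hook both accept a context. Below is a reduced stub showing only the two methods touched by this change; memstore is a hypothetical type, the remaining common.Storage methods are omitted, and the usual package imports are assumed.

// memstore is a hypothetical storage used only to illustrate the new signatures.
type memstore struct {
	reportError func(context.Context, string, error)
}

// Close now takes the caller's context.
func (s *memstore) Close(_ context.Context) error { return nil }

// SetReportErrorFunc stores the context-aware callback; implementations that
// never report disk errors this way, such as FSTree, may simply ignore it.
func (s *memstore) SetReportErrorFunc(f func(context.Context, string, error)) {
	s.reportError = f
}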

View file

@ -12,7 +12,7 @@ import (
// Open opens BlobStor.
func (b *BlobStor) Open(ctx context.Context, mode mode.Mode) error {
b.log.Debug(logs.BlobstorOpening)
b.log.Debug(ctx, logs.BlobstorOpening)
b.modeMtx.Lock()
defer b.modeMtx.Unlock()
@ -50,8 +50,8 @@ var ErrInitBlobovniczas = errors.New("failure on blobovnicza initialization stag
// If BlobStor is already initialized, no action is taken.
//
// Returns wrapped ErrInitBlobovniczas on blobovnicza tree's initialization failure.
func (b *BlobStor) Init() error {
b.log.Debug(logs.BlobstorInitializing)
func (b *BlobStor) Init(ctx context.Context) error {
b.log.Debug(ctx, logs.BlobstorInitializing)
if err := b.compression.Init(); err != nil {
return err
@ -67,14 +67,14 @@ func (b *BlobStor) Init() error {
}
// Close releases all internal resources of BlobStor.
func (b *BlobStor) Close() error {
b.log.Debug(logs.BlobstorClosing)
func (b *BlobStor) Close(ctx context.Context) error {
b.log.Debug(ctx, logs.BlobstorClosing)
var firstErr error
for i := range b.storage {
err := b.storage[i].Storage.Close()
err := b.storage[i].Storage.Close(ctx)
if err != nil {
b.log.Info(logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error()))
b.log.Info(ctx, logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error()))
if firstErr == nil {
firstErr = err
}
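Taken together with Open, the BlobStor lifecycle now threads a single context through every phase. A minimal usage sketch mirroring what the tests in this PR do (error handling shortened; the usual package imports are assumed):

func runBlobStor(ctx context.Context, bs *BlobStor) error {
	// Open, Init and Close all receive the same context,
	// so their debug logs share the caller's trace information.
	if err := bs.Open(ctx, mode.ReadWrite); err != nil {
		return err
	}
	if err := bs.Init(ctx); err != nil {
		return err
	}
	defer func() { _ = bs.Close(ctx) }()

	// ... use bs.Put / bs.Get / bs.Delete with the same ctx ...
	return nil
}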

View file

@ -39,7 +39,7 @@ func (b *BlobStor) Delete(ctx context.Context, prm common.DeletePrm) (common.Del
if err == nil || !client.IsErrObjectNotFound(err) {
if err == nil {
success = true
logOp(b.log, deleteOp, prm.Address, b.storage[i].Storage.Type(), prm.StorageID)
logOp(ctx, b.log, deleteOp, prm.Address, b.storage[i].Storage.Type(), prm.StorageID)
}
return res, err
}
@ -58,7 +58,7 @@ func (b *BlobStor) Delete(ctx context.Context, prm common.DeletePrm) (common.Del
res, err := st.Delete(ctx, prm)
if err == nil {
success = true
logOp(b.log, deleteOp, prm.Address, st.Type(), prm.StorageID)
logOp(ctx, b.log, deleteOp, prm.Address, st.Type(), prm.StorageID)
}
return res, err

View file

@ -73,7 +73,7 @@ func (b *BlobStor) Exists(ctx context.Context, prm common.ExistsPrm) (common.Exi
}
for _, err := range errors[:len(errors)-1] {
b.log.Warn(logs.BlobstorErrorOccurredDuringObjectExistenceChecking,
b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringObjectExistenceChecking,
zap.Stringer("address", prm.Address),
zap.String("error", err.Error()),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))

View file

@ -22,7 +22,7 @@ func TestExists(t *testing.T) {
b := New(WithStorages(storages))
require.NoError(t, b.Open(context.Background(), mode.ReadWrite))
require.NoError(t, b.Init())
require.NoError(t, b.Init(context.Background()))
objects := []*objectSDK.Object{
testObject(smallSizeLimit / 2),

View file

@ -1,6 +1,8 @@
package fstree
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
)
@ -28,7 +30,7 @@ func (t *FSTree) Init() error {
}
// Close implements common.Storage.
func (t *FSTree) Close() error {
func (t *FSTree) Close(_ context.Context) error {
t.metrics.Close()
return nil
}

View file

@ -87,7 +87,7 @@ func New(opts ...Option) *FSTree {
DirNameLen: DirNameLen,
metrics: &noopMetrics{},
fileCounter: &noopCounter{},
log: &logger.Logger{Logger: zap.L()},
log: logger.NewLoggerWrapper(zap.L()),
}
for i := range opts {
opts[i](f)
@ -152,7 +152,7 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr
des, err := os.ReadDir(dirPath)
if err != nil {
if prm.IgnoreErrors {
t.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
zap.String("err", err.Error()),
zap.String("directory_path", dirPath))
return nil
@ -200,7 +200,7 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr
}
if err != nil {
if prm.IgnoreErrors {
t.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
zap.Stringer("address", addr),
zap.String("err", err.Error()),
zap.String("path", path))
@ -606,7 +606,7 @@ func (t *FSTree) Compressor() *compression.Config {
}
// SetReportErrorFunc implements common.Storage.
func (t *FSTree) SetReportErrorFunc(_ func(string, error)) {
func (t *FSTree) SetReportErrorFunc(_ func(context.Context, string, error)) {
// Do nothing, FSTree can encounter only one error which is returned.
}

View file

@ -52,7 +52,7 @@ func TestObjectCounter(t *testing.T) {
require.Equal(t, uint64(0), size)
defer func() {
require.NoError(t, fst.Close())
require.NoError(t, fst.Close(context.Background()))
}()
addr := oidtest.Address()

View file

@ -53,6 +53,6 @@ func WithFileCounter(c FileCounter) Option {
func WithLogger(l *logger.Logger) Option {
return func(f *FSTree) {
f.log = &logger.Logger{Logger: l.With(zap.String("component", "FSTree"))}
f.log = l.With(zap.String("component", "FSTree"))
}
}

View file

@ -19,7 +19,7 @@ func TestControl(t *testing.T, cons Constructor, minSize, maxSize uint64) {
require.NoError(t, s.Init())
objects := prepare(t, 10, s, minSize, maxSize)
require.NoError(t, s.Close())
require.NoError(t, s.Close(context.Background()))
require.NoError(t, s.Open(mode.ComponentReadOnly))
for i := range objects {

View file

@ -15,7 +15,7 @@ func TestDelete(t *testing.T, cons Constructor, minSize, maxSize uint64) {
s := cons(t)
require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
defer func() { require.NoError(t, s.Close()) }()
defer func() { require.NoError(t, s.Close(context.Background())) }()
objects := prepare(t, 4, s, minSize, maxSize)

Some files were not shown because too many files have changed in this diff.