forked from TrueCloudLab/frostfs-node
Compare commits: 6e283295a2...ff1e7b531f (8 commits)

Commits:
- ff1e7b531f
- 5ffd1541a8
- 56f723e4fe
- 4440eeb0e1
- efa22b6ded
- 3c50022aba
- 0cf5d70177
- 1bb4eb1ec0

302 changed files with 2073 additions and 1923 deletions
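Note: nearly every hunk below applies one pattern: a context.Context is threaded into logging calls (log.Info(msg, ...) becomes log.Info(ctx, msg, ...)) and into component init/start/shutdown/reload paths. As a minimal sketch of that call-shape change, assuming a zap-backed wrapper (the Logger type here is illustrative, not the actual frostfs-node logger package):

    package main

    import (
        "context"

        "go.uber.org/zap"
    )

    // Logger is a hypothetical zap wrapper mirroring the new call shape
    // used throughout the diff: every level method takes a ctx first.
    type Logger struct{ z *zap.Logger }

    func (l *Logger) Info(ctx context.Context, msg string, fields ...zap.Field) {
        // A context-aware logger can pull request-scoped values (for
        // example a trace ID) out of ctx and attach them as fields; this
        // sketch only demonstrates the signature change, so ctx is unused.
        _ = ctx
        l.z.Info(msg, fields...)
    }

    func main() {
        log := &Logger{z: zap.NewExample()}
        log.Info(context.Background(), "application started", zap.String("version", "dev"))
    }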
@@ -1,6 +1,7 @@
 package main
 
 import (
+    "context"
     "os"
     "os/signal"
     "syscall"
@@ -46,7 +47,7 @@ func reloadConfig() error {
     return logPrm.Reload()
 }
 
-func watchForSignal(cancel func()) {
+func watchForSignal(ctx context.Context, cancel func()) {
     ch := make(chan os.Signal, 1)
     signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
 
@@ -58,49 +59,49 @@ func watchForSignal(cancel func()) {
         // signals causing application to shut down should have priority over
         // reconfiguration signal
         case <-ch:
-            log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
+            log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
             cancel()
-            shutdown()
+            shutdown(ctx)
-            log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
+            log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
             return
         case err := <-intErr: // internal application error
-            log.Info(logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
+            log.Info(ctx, logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
             cancel()
-            shutdown()
+            shutdown(ctx)
             return
         default:
             // block until any signal is receieved
             select {
             case <-ch:
-                log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
+                log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
                 cancel()
-                shutdown()
+                shutdown(ctx)
-                log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
+                log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
                 return
             case err := <-intErr: // internal application error
-                log.Info(logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
+                log.Info(ctx, logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
                 cancel()
-                shutdown()
+                shutdown(ctx)
                 return
             case <-sighupCh:
-                log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
+                log.Info(ctx, logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
-                if !innerRing.CompareAndSwapHealthStatus(control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
+                if !innerRing.CompareAndSwapHealthStatus(ctx, control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
-                    log.Info(logs.FrostFSNodeSIGHUPSkip)
+                    log.Info(ctx, logs.FrostFSNodeSIGHUPSkip)
                     break
                 }
                 err := reloadConfig()
                 if err != nil {
-                    log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
+                    log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err))
                 }
-                pprofCmp.reload()
+                pprofCmp.reload(ctx)
-                metricsCmp.reload()
+                metricsCmp.reload(ctx)
-                log.Info(logs.FrostFSIRReloadExtraWallets)
+                log.Info(ctx, logs.FrostFSIRReloadExtraWallets)
                 err = innerRing.SetExtraWallets(cfg)
                 if err != nil {
-                    log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
+                    log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err))
                 }
-                innerRing.CompareAndSwapHealthStatus(control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
+                innerRing.CompareAndSwapHealthStatus(ctx, control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
-                log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
+                log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
             }
         }
     }
@@ -1,6 +1,7 @@
 package main
 
 import (
+    "context"
     "net/http"
     "time"
 
@@ -24,8 +25,8 @@ const (
     shutdownTimeoutKeyPostfix = ".shutdown_timeout"
 )
 
-func (c *httpComponent) init() {
-    log.Info("init " + c.name)
+func (c *httpComponent) init(ctx context.Context) {
+    log.Info(ctx, "init "+c.name)
     c.enabled = cfg.GetBool(c.name + enabledKeyPostfix)
     c.address = cfg.GetString(c.name + addressKeyPostfix)
     c.shutdownDur = cfg.GetDuration(c.name + shutdownTimeoutKeyPostfix)
@@ -39,14 +40,14 @@ func (c *httpComponent) init() {
             httputil.WithShutdownTimeout(c.shutdownDur),
         )
     } else {
-        log.Info(c.name + " is disabled, skip")
+        log.Info(ctx, c.name+" is disabled, skip")
         c.srv = nil
     }
 }
 
-func (c *httpComponent) start() {
+func (c *httpComponent) start(ctx context.Context) {
     if c.srv != nil {
-        log.Info("start " + c.name)
+        log.Info(ctx, "start "+c.name)
         wg.Add(1)
         go func() {
             defer wg.Done()
@@ -55,10 +56,10 @@ func (c *httpComponent) start() {
     }
 }
 
-func (c *httpComponent) shutdown() error {
+func (c *httpComponent) shutdown(ctx context.Context) error {
     if c.srv != nil {
-        log.Info("shutdown " + c.name)
-        return c.srv.Shutdown()
+        log.Info(ctx, "shutdown "+c.name)
+        return c.srv.Shutdown(ctx)
     }
     return nil
 }
@@ -70,17 +71,17 @@ func (c *httpComponent) needReload() bool {
     return enabled != c.enabled || enabled && (address != c.address || dur != c.shutdownDur)
 }
 
-func (c *httpComponent) reload() {
-    log.Info("reload " + c.name)
+func (c *httpComponent) reload(ctx context.Context) {
+    log.Info(ctx, "reload "+c.name)
     if c.needReload() {
-        log.Info(c.name + " config updated")
-        if err := c.shutdown(); err != nil {
-            log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
+        log.Info(ctx, c.name+" config updated")
+        if err := c.shutdown(ctx); err != nil {
+            log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
                 zap.String("error", err.Error()),
             )
         } else {
-            c.init()
-            c.start()
+            c.init(ctx)
+            c.start(ctx)
         }
     }
 }
@@ -87,48 +87,48 @@ func main() {
     ctx, cancel := context.WithCancel(context.Background())
 
     pprofCmp = newPprofComponent()
-    pprofCmp.init()
+    pprofCmp.init(ctx)
 
     metricsCmp = newMetricsComponent()
-    metricsCmp.init()
+    metricsCmp.init(ctx)
     audit.Store(cfg.GetBool("audit.enabled"))
 
     innerRing, err = innerring.New(ctx, log, cfg, intErr, metrics, cmode, audit)
     exitErr(err)
 
-    pprofCmp.start()
-    metricsCmp.start()
+    pprofCmp.start(ctx)
+    metricsCmp.start(ctx)
 
     // start inner ring
     err = innerRing.Start(ctx, intErr)
     exitErr(err)
 
-    log.Info(logs.CommonApplicationStarted,
+    log.Info(ctx, logs.CommonApplicationStarted,
         zap.String("version", misc.Version))
 
-    watchForSignal(cancel)
+    watchForSignal(ctx, cancel)
 
     <-ctx.Done() // graceful shutdown
-    log.Debug(logs.FrostFSNodeWaitingForAllProcessesToStop)
+    log.Debug(ctx, logs.FrostFSNodeWaitingForAllProcessesToStop)
     wg.Wait()
 
-    log.Info(logs.FrostFSIRApplicationStopped)
+    log.Info(ctx, logs.FrostFSIRApplicationStopped)
 }
 
-func shutdown() {
-    innerRing.Stop()
-    if err := metricsCmp.shutdown(); err != nil {
-        log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
+func shutdown(ctx context.Context) {
+    innerRing.Stop(ctx)
+    if err := metricsCmp.shutdown(ctx); err != nil {
+        log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
             zap.String("error", err.Error()),
         )
     }
-    if err := pprofCmp.shutdown(); err != nil {
-        log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
+    if err := pprofCmp.shutdown(ctx); err != nil {
+        log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
             zap.String("error", err.Error()),
         )
     }
 
     if err := sdnotify.ClearStatus(); err != nil {
-        log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
+        log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err))
     }
 }
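Note: the main()/shutdown() flow above reduces to a standard context-cancellation lifecycle: a cancellable root context is created once, passed into every component phase, and cancellation (from a signal or internal error) drives graceful shutdown. A minimal, self-contained sketch of that skeleton, with hypothetical placeholders rather than the frostfs-ir code:

    package main

    import (
        "context"
        "os"
        "os/signal"
        "sync"
        "syscall"
    )

    func main() {
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        var wg sync.WaitGroup

        // init(ctx)/start(ctx) of pprof, metrics, inner ring would happen
        // here; each long-running component registers itself on wg.

        go func() {
            ch := make(chan os.Signal, 1)
            signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
            <-ch     // termination signal received
            cancel() // unblocks <-ctx.Done() below
        }()

        <-ctx.Done() // graceful shutdown
        wg.Wait()    // wait for all component goroutines to stop
    }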
@ -1,6 +1,7 @@
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"runtime"
|
"runtime"
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
||||||
|
@ -28,8 +29,8 @@ func newPprofComponent() *pprofComponent {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *pprofComponent) init() {
|
func (c *pprofComponent) init(ctx context.Context) {
|
||||||
c.httpComponent.init()
|
c.httpComponent.init(ctx)
|
||||||
|
|
||||||
if c.enabled {
|
if c.enabled {
|
||||||
c.blockRate = cfg.GetInt(pprofBlockRateKey)
|
c.blockRate = cfg.GetInt(pprofBlockRateKey)
|
||||||
|
@ -51,17 +52,17 @@ func (c *pprofComponent) needReload() bool {
|
||||||
c.enabled && (c.blockRate != blockRate || c.mutexRate != mutexRate)
|
c.enabled && (c.blockRate != blockRate || c.mutexRate != mutexRate)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *pprofComponent) reload() {
|
func (c *pprofComponent) reload(ctx context.Context) {
|
||||||
log.Info("reload " + c.name)
|
log.Info(ctx, "reload "+c.name)
|
||||||
if c.needReload() {
|
if c.needReload() {
|
||||||
log.Info(c.name + " config updated")
|
log.Info(ctx, c.name+" config updated")
|
||||||
if err := c.shutdown(); err != nil {
|
if err := c.shutdown(ctx); err != nil {
|
||||||
log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
|
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
|
||||||
zap.String("error", err.Error()))
|
zap.String("error", err.Error()))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
c.init()
|
c.init(ctx)
|
||||||
c.start()
|
c.start(ctx)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
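Note: the pprof component's blockRate/mutexRate settings map onto two standard-library knobs. For reference, a tiny standalone example (the rate values are illustrative, not frostfs defaults):

    package main

    import "runtime"

    func main() {
        // Sample on average one blocking event per `rate` nanoseconds
        // spent blocked (0 disables block profiling entirely).
        runtime.SetBlockProfileRate(10000)

        // Report on average 1 out of every `rate` mutex contention
        // events; the previous setting is returned.
        prev := runtime.SetMutexProfileFraction(10)
        _ = prev
    }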
@@ -28,7 +28,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) {
     common.ExitOnErr(cmd, common.Errf("invalid address argument: %w", err))
 
     blz := openBlobovnicza(cmd)
-    defer blz.Close()
+    defer blz.Close(cmd.Context())
 
     var prm blobovnicza.GetPrm
     prm.SetAddress(addr)
@@ -32,7 +32,7 @@ func listFunc(cmd *cobra.Command, _ []string) {
     }
 
     blz := openBlobovnicza(cmd)
-    defer blz.Close()
+    defer blz.Close(cmd.Context())
 
     err := blobovnicza.IterateAddresses(context.Background(), blz, wAddr)
     common.ExitOnErr(cmd, common.Errf("blobovnicza iterator failure: %w", err))
@@ -27,7 +27,7 @@ func openBlobovnicza(cmd *cobra.Command) *blobovnicza.Blobovnicza {
         blobovnicza.WithPath(vPath),
         blobovnicza.WithReadOnly(true),
     )
-    common.ExitOnErr(cmd, blz.Open())
+    common.ExitOnErr(cmd, blz.Open(cmd.Context()))
 
     return blz
 }
@@ -31,7 +31,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) {
     common.ExitOnErr(cmd, common.Errf("invalid address argument: %w", err))
 
     db := openMeta(cmd)
-    defer db.Close()
+    defer db.Close(cmd.Context())
 
     storageID := meta.StorageIDPrm{}
     storageID.SetAddress(addr)
@@ -19,7 +19,7 @@ func init() {
 
 func listGarbageFunc(cmd *cobra.Command, _ []string) {
     db := openMeta(cmd)
-    defer db.Close()
+    defer db.Close(cmd.Context())
 
     var garbPrm meta.GarbageIterationPrm
     garbPrm.SetHandler(
@@ -19,7 +19,7 @@ func init() {
 
 func listGraveyardFunc(cmd *cobra.Command, _ []string) {
     db := openMeta(cmd)
-    defer db.Close()
+    defer db.Close(cmd.Context())
 
     var gravePrm meta.GraveyardIterationPrm
     gravePrm.SetHandler(
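Note: the CLI hunks above all pass cmd.Context() into Open/Close. A minimal cobra sketch of where that context comes from (command name and handler body are hypothetical):

    package main

    import (
        "context"
        "os"
        "os/signal"

        "github.com/spf13/cobra"
    )

    func main() {
        root := &cobra.Command{
            Use: "inspect",
            RunE: func(cmd *cobra.Command, _ []string) error {
                ctx := cmd.Context() // same ctx passed to ExecuteContext below
                _ = ctx              // a real command would pass it to Open/Close
                return nil
            },
        }

        // Ctrl-C cancels ctx, which then propagates into every cmd.Context() call.
        ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
        defer stop()

        if err := root.ExecuteContext(ctx); err != nil {
            os.Exit(1)
        }
    }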
@@ -397,16 +397,16 @@ type internals struct {
 }
 
 // starts node's maintenance.
-func (c *cfg) startMaintenance() {
+func (c *cfg) startMaintenance(ctx context.Context) {
     c.isMaintenance.Store(true)
     c.cfgNetmap.state.setControlNetmapStatus(control.NetmapStatus_MAINTENANCE)
-    c.log.Info(logs.FrostFSNodeStartedLocalNodesMaintenance)
+    c.log.Info(ctx, logs.FrostFSNodeStartedLocalNodesMaintenance)
 }
 
 // stops node's maintenance.
-func (c *internals) stopMaintenance() {
+func (c *internals) stopMaintenance(ctx context.Context) {
     if c.isMaintenance.CompareAndSwap(true, false) {
-        c.log.Info(logs.FrostFSNodeStoppedLocalNodesMaintenance)
+        c.log.Info(ctx, logs.FrostFSNodeStoppedLocalNodesMaintenance)
     }
 }
@@ -707,7 +707,7 @@ func initCfg(appCfg *config.Config) *cfg {
     log, err := logger.NewLogger(logPrm)
     fatalOnErr(err)
     if loggerconfig.ToLokiConfig(appCfg).Enabled {
-        log.Logger = log.Logger.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
+        log.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
             lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(appCfg))
             return lokiCore
         }))
@@ -1091,7 +1091,7 @@ func (c *cfg) LocalAddress() network.AddressGroup {
 func initLocalStorage(ctx context.Context, c *cfg) {
     ls := engine.New(c.engineOpts()...)
 
-    addNewEpochAsyncNotificationHandler(c, func(ev event.Event) {
+    addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, ev event.Event) {
         ls.HandleNewEpoch(ctx, ev.(netmap2.NewEpoch).EpochNumber())
     })
 
@@ -1105,10 +1105,10 @@ func initLocalStorage(ctx context.Context, c *cfg) {
             shard.WithTombstoneSource(c.createTombstoneSource()),
             shard.WithContainerInfoProvider(c.createContainerInfoProvider(ctx)))...)
         if err != nil {
-            c.log.Error(logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err))
+            c.log.Error(ctx, logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err))
         } else {
             shardsAttached++
-            c.log.Info(logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id))
+            c.log.Info(ctx, logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id))
         }
     }
     if shardsAttached == 0 {
@@ -1118,23 +1118,23 @@ func initLocalStorage(ctx context.Context, c *cfg) {
     c.cfgObject.cfgLocalStorage.localStorage = ls
 
     c.onShutdown(func() {
-        c.log.Info(logs.FrostFSNodeClosingComponentsOfTheStorageEngine)
+        c.log.Info(ctx, logs.FrostFSNodeClosingComponentsOfTheStorageEngine)
 
         err := ls.Close(context.WithoutCancel(ctx))
         if err != nil {
-            c.log.Info(logs.FrostFSNodeStorageEngineClosingFailure,
+            c.log.Info(ctx, logs.FrostFSNodeStorageEngineClosingFailure,
                 zap.String("error", err.Error()),
             )
         } else {
-            c.log.Info(logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
+            c.log.Info(ctx, logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
         }
     })
 }
 
-func initAccessPolicyEngine(_ context.Context, c *cfg) {
+func initAccessPolicyEngine(ctx context.Context, c *cfg) {
     var localOverrideDB chainbase.LocalOverrideDatabase
     if nodeconfig.PersistentPolicyRules(c.appCfg).Path() == "" {
-        c.log.Warn(logs.FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed)
+        c.log.Warn(ctx, logs.FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed)
         localOverrideDB = chainbase.NewInmemoryLocalOverrideDatabase()
     } else {
         localOverrideDB = chainbase.NewBoltLocalOverrideDatabase(
@@ -1159,7 +1159,7 @@ func initAccessPolicyEngine(_ context.Context, c *cfg) {
 
     c.onShutdown(func() {
         if err := ape.LocalOverrideDatabaseCore().Close(); err != nil {
-            c.log.Warn(logs.FrostFSNodeAccessPolicyEngineClosingFailure,
+            c.log.Warn(ctx, logs.FrostFSNodeAccessPolicyEngineClosingFailure,
                 zap.Error(err),
             )
         }
@@ -1208,10 +1208,10 @@ func (c *cfg) setContractNodeInfo(ni *netmap.NodeInfo) {
     c.cfgNetmap.state.setNodeInfo(ni)
 }
 
-func (c *cfg) updateContractNodeInfo(epoch uint64) {
+func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) {
     ni, err := c.netmapLocalNodeState(epoch)
     if err != nil {
-        c.log.Error(logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
+        c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
             zap.Uint64("epoch", epoch),
             zap.String("error", err.Error()))
         return
@@ -1223,19 +1223,19 @@ func (c *cfg) updateContractNodeInfo(epoch uint64) {
 // bootstrapWithState calls "addPeer" method of the Sidechain Netmap contract
 // with the binary-encoded information from the current node's configuration.
 // The state is set using the provided setter which MUST NOT be nil.
-func (c *cfg) bootstrapWithState(stateSetter func(*netmap.NodeInfo)) error {
+func (c *cfg) bootstrapWithState(ctx context.Context, stateSetter func(*netmap.NodeInfo)) error {
     ni := c.cfgNodeInfo.localInfo
     stateSetter(&ni)
 
     prm := nmClient.AddPeerPrm{}
     prm.SetNodeInfo(ni)
 
-    return c.cfgNetmap.wrapper.AddPeer(prm)
+    return c.cfgNetmap.wrapper.AddPeer(ctx, prm)
 }
 
 // bootstrapOnline calls cfg.bootstrapWithState with "online" state.
-func bootstrapOnline(c *cfg) error {
-    return c.bootstrapWithState(func(ni *netmap.NodeInfo) {
+func bootstrapOnline(ctx context.Context, c *cfg) error {
+    return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) {
         ni.SetStatus(netmap.Online)
     })
 }
@@ -1243,21 +1243,21 @@ func bootstrapOnline(c *cfg) error {
 // bootstrap calls bootstrapWithState with:
 //   - "maintenance" state if maintenance is in progress on the current node
 //   - "online", otherwise
-func (c *cfg) bootstrap() error {
+func (c *cfg) bootstrap(ctx context.Context) error {
     // switch to online except when under maintenance
     st := c.cfgNetmap.state.controlNetmapStatus()
     if st == control.NetmapStatus_MAINTENANCE {
-        c.log.Info(logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
-        return c.bootstrapWithState(func(ni *netmap.NodeInfo) {
+        c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
+        return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) {
             ni.SetStatus(netmap.Maintenance)
         })
     }
 
-    c.log.Info(logs.FrostFSNodeBootstrappingWithOnlineState,
+    c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithOnlineState,
         zap.Stringer("previous", st),
     )
 
-    return bootstrapOnline(c)
+    return bootstrapOnline(ctx, c)
 }
 
 // needBootstrap checks if local node should be registered in network on bootup.
@@ -1282,19 +1282,19 @@ func (c *cfg) signalWatcher(ctx context.Context) {
         // signals causing application to shut down should have priority over
         // reconfiguration signal
         case <-ch:
-            c.log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
+            c.log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
 
-            c.shutdown()
+            c.shutdown(ctx)
 
-            c.log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
+            c.log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
             return
         case err := <-c.internalErr: // internal application error
-            c.log.Warn(logs.FrostFSNodeInternalApplicationError,
+            c.log.Warn(ctx, logs.FrostFSNodeInternalApplicationError,
                 zap.String("message", err.Error()))
 
-            c.shutdown()
+            c.shutdown(ctx)
 
-            c.log.Info(logs.FrostFSNodeInternalErrorProcessingIsComplete)
+            c.log.Info(ctx, logs.FrostFSNodeInternalErrorProcessingIsComplete)
             return
         default:
             // block until any signal is receieved
@@ -1302,19 +1302,19 @@ func (c *cfg) signalWatcher(ctx context.Context) {
             case <-sighupCh:
                 c.reloadConfig(ctx)
             case <-ch:
-                c.log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
+                c.log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
 
-                c.shutdown()
+                c.shutdown(ctx)
 
-                c.log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
+                c.log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
                 return
             case err := <-c.internalErr: // internal application error
-                c.log.Warn(logs.FrostFSNodeInternalApplicationError,
+                c.log.Warn(ctx, logs.FrostFSNodeInternalApplicationError,
                     zap.String("message", err.Error()))
 
-                c.shutdown()
+                c.shutdown(ctx)
 
-                c.log.Info(logs.FrostFSNodeInternalErrorProcessingIsComplete)
+                c.log.Info(ctx, logs.FrostFSNodeInternalErrorProcessingIsComplete)
                 return
             }
         }
@@ -1322,17 +1322,17 @@ func (c *cfg) signalWatcher(ctx context.Context) {
 }
 
 func (c *cfg) reloadConfig(ctx context.Context) {
-    c.log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
+    c.log.Info(ctx, logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
 
-    if !c.compareAndSwapHealthStatus(control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
-        c.log.Info(logs.FrostFSNodeSIGHUPSkip)
+    if !c.compareAndSwapHealthStatus(ctx, control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
+        c.log.Info(ctx, logs.FrostFSNodeSIGHUPSkip)
         return
     }
-    defer c.compareAndSwapHealthStatus(control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
+    defer c.compareAndSwapHealthStatus(ctx, control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
 
     err := c.reloadAppConfig()
     if err != nil {
-        c.log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
+        c.log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err))
         return
     }
 
@@ -1343,7 +1343,7 @@ func (c *cfg) reloadConfig(ctx context.Context) {
 
     logPrm, err := c.loggerPrm()
     if err != nil {
-        c.log.Error(logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err))
+        c.log.Error(ctx, logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err))
         return
     }
 
@@ -1364,25 +1364,25 @@ func (c *cfg) reloadConfig(ctx context.Context) {
 
     err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg)
     if err != nil {
-        c.log.Error(logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err))
+        c.log.Error(ctx, logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err))
        return
     }
 
     for _, component := range components {
         err = component.reloadFunc()
         if err != nil {
-            c.log.Error(logs.FrostFSNodeUpdatedConfigurationApplying,
+            c.log.Error(ctx, logs.FrostFSNodeUpdatedConfigurationApplying,
                 zap.String("component", component.name),
                 zap.Error(err))
         }
     }
 
     if err := c.dialerSource.Update(internalNetConfig(c.appCfg, c.metricsCollector.MultinetMetrics())); err != nil {
-        c.log.Error(logs.FailedToUpdateMultinetConfiguration, zap.Error(err))
+        c.log.Error(ctx, logs.FailedToUpdateMultinetConfiguration, zap.Error(err))
         return
     }
 
-    c.log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
+    c.log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
 }
 
 func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
@@ -1390,7 +1390,7 @@ func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
 
     components = append(components, dCmp{"logger", logPrm.Reload})
     components = append(components, dCmp{"runtime", func() error {
-        setRuntimeParameters(c)
+        setRuntimeParameters(ctx, c)
         return nil
     }})
     components = append(components, dCmp{"audit", func() error {
@@ -1405,7 +1405,7 @@ func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
         }
         updated, err := tracing.Setup(ctx, *traceConfig)
         if updated {
-            c.log.Info(logs.FrostFSNodeTracingConfigationUpdated)
+            c.log.Info(ctx, logs.FrostFSNodeTracingConfigationUpdated)
         }
         return err
     }})
@@ -1440,7 +1440,7 @@ func (c *cfg) reloadPools() error {
 func (c *cfg) reloadPool(p *ants.Pool, newSize int, name string) {
     oldSize := p.Cap()
     if oldSize != newSize {
-        c.log.Info(logs.FrostFSNodePoolConfigurationUpdate, zap.String("field", name),
+        c.log.Info(context.Background(), logs.FrostFSNodePoolConfigurationUpdate, zap.String("field", name),
             zap.Int("old", oldSize), zap.Int("new", newSize))
         p.Tune(newSize)
     }
@@ -1476,14 +1476,14 @@ func (c *cfg) createContainerInfoProvider(ctx context.Context) container.InfoPro
     })
 }
 
-func (c *cfg) shutdown() {
-    old := c.swapHealthStatus(control.HealthStatus_SHUTTING_DOWN)
+func (c *cfg) shutdown(ctx context.Context) {
+    old := c.swapHealthStatus(ctx, control.HealthStatus_SHUTTING_DOWN)
     if old == control.HealthStatus_SHUTTING_DOWN {
-        c.log.Info(logs.FrostFSNodeShutdownSkip)
+        c.log.Info(ctx, logs.FrostFSNodeShutdownSkip)
         return
     }
     if old == control.HealthStatus_STARTING {
-        c.log.Warn(logs.FrostFSNodeShutdownWhenNotReady)
+        c.log.Warn(ctx, logs.FrostFSNodeShutdownWhenNotReady)
     }
 
     c.ctxCancel()
@@ -1493,6 +1493,6 @@ func (c *cfg) shutdown() {
     }
 
     if err := sdnotify.ClearStatus(); err != nil {
-        c.log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
+        c.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err))
     }
 }
@@ -92,7 +92,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
     if c.cfgMorph.containerCacheSize > 0 {
         containerCache := newCachedContainerStorage(cnrSrc, c.cfgMorph.cacheTTL, c.cfgMorph.containerCacheSize)
 
-        subscribeToContainerCreation(c, func(e event.Event) {
+        subscribeToContainerCreation(c, func(ctx context.Context, e event.Event) {
             ev := e.(containerEvent.PutSuccess)
 
             // read owner of the created container in order to update the reading cache.
@@ -105,21 +105,21 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
             } else {
                 // unlike removal, we expect successful receive of the container
                 // after successful creation, so logging can be useful
-                c.log.Error(logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification,
+                c.log.Error(ctx, logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification,
                     zap.Stringer("id", ev.ID),
                     zap.Error(err),
                 )
             }
 
-            c.log.Debug(logs.FrostFSNodeContainerCreationEventsReceipt,
+            c.log.Debug(ctx, logs.FrostFSNodeContainerCreationEventsReceipt,
                 zap.Stringer("id", ev.ID),
             )
         })
 
-        subscribeToContainerRemoval(c, func(e event.Event) {
+        subscribeToContainerRemoval(c, func(ctx context.Context, e event.Event) {
             ev := e.(containerEvent.DeleteSuccess)
             containerCache.handleRemoval(ev.ID)
-            c.log.Debug(logs.FrostFSNodeContainerRemovalEventsReceipt,
+            c.log.Debug(ctx, logs.FrostFSNodeContainerRemovalEventsReceipt,
                 zap.Stringer("id", ev.ID),
             )
         })
@@ -250,10 +250,10 @@ type morphContainerWriter struct {
     neoClient *cntClient.Client
 }
 
-func (m morphContainerWriter) Put(cnr containerCore.Container) (*cid.ID, error) {
-    return cntClient.Put(m.neoClient, cnr)
+func (m morphContainerWriter) Put(ctx context.Context, cnr containerCore.Container) (*cid.ID, error) {
+    return cntClient.Put(ctx, m.neoClient, cnr)
 }
 
-func (m morphContainerWriter) Delete(witness containerCore.RemovalWitness) error {
-    return cntClient.Delete(m.neoClient, witness)
+func (m morphContainerWriter) Delete(ctx context.Context, witness containerCore.RemovalWitness) error {
+    return cntClient.Delete(ctx, m.neoClient, witness)
 }
@@ -16,7 +16,7 @@ import (
 
 const serviceNameControl = "control"
 
-func initControlService(c *cfg) {
+func initControlService(ctx context.Context, c *cfg) {
     endpoint := controlconfig.GRPC(c.appCfg).Endpoint()
     if endpoint == controlconfig.GRPCEndpointDefault {
         return
@@ -46,21 +46,21 @@ func initControlService(c *cfg) {
 
     lis, err := net.Listen("tcp", endpoint)
     if err != nil {
-        c.log.Error(logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err))
+        c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err))
         return
     }
 
     c.cfgControlService.server = grpc.NewServer()
 
     c.onShutdown(func() {
-        stopGRPC("FrostFS Control API", c.cfgControlService.server, c.log)
+        stopGRPC(ctx, "FrostFS Control API", c.cfgControlService.server, c.log)
     })
 
     control.RegisterControlServiceServer(c.cfgControlService.server, ctlSvc)
 
     c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) {
         runAndLog(ctx, c, serviceNameControl, false, func(context.Context, *cfg) {
-            c.log.Info(logs.FrostFSNodeStartListeningEndpoint,
+            c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint,
                 zap.String("service", serviceNameControl),
                 zap.String("endpoint", endpoint))
             fatalOnErr(c.cfgControlService.server.Serve(lis))
@@ -72,23 +72,23 @@ func (c *cfg) NetmapStatus() control.NetmapStatus {
     return c.cfgNetmap.state.controlNetmapStatus()
 }
 
-func (c *cfg) setHealthStatus(st control.HealthStatus) {
-    c.notifySystemd(st)
+func (c *cfg) setHealthStatus(ctx context.Context, st control.HealthStatus) {
+    c.notifySystemd(ctx, st)
     c.healthStatus.Store(int32(st))
     c.metricsCollector.State().SetHealth(int32(st))
 }
 
-func (c *cfg) compareAndSwapHealthStatus(oldSt, newSt control.HealthStatus) (swapped bool) {
+func (c *cfg) compareAndSwapHealthStatus(ctx context.Context, oldSt, newSt control.HealthStatus) (swapped bool) {
     if swapped = c.healthStatus.CompareAndSwap(int32(oldSt), int32(newSt)); swapped {
-        c.notifySystemd(newSt)
+        c.notifySystemd(ctx, newSt)
         c.metricsCollector.State().SetHealth(int32(newSt))
     }
     return
 }
 
-func (c *cfg) swapHealthStatus(st control.HealthStatus) (old control.HealthStatus) {
+func (c *cfg) swapHealthStatus(ctx context.Context, st control.HealthStatus) (old control.HealthStatus) {
     old = control.HealthStatus(c.healthStatus.Swap(int32(st)))
-    c.notifySystemd(st)
+    c.notifySystemd(ctx, st)
     c.metricsCollector.State().SetHealth(int32(st))
     return
 }
@@ -97,7 +97,7 @@ func (c *cfg) HealthStatus() control.HealthStatus {
     return control.HealthStatus(c.healthStatus.Load())
 }
 
-func (c *cfg) notifySystemd(st control.HealthStatus) {
+func (c *cfg) notifySystemd(ctx context.Context, st control.HealthStatus) {
     if !c.sdNotify {
         return
     }
@@ -113,6 +113,6 @@ func (c *cfg) notifySystemd(st control.HealthStatus) {
         err = sdnotify.Status(fmt.Sprintf("%v", st))
     }
     if err != nil {
-        c.log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
+        c.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err))
     }
 }
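Note: the health-status helpers above are thin wrappers around an atomic int32, which is what makes the SIGHUP guard in reloadConfig race-free. A standalone, simplified sketch of the compare-and-swap gate (no metrics or systemd notification; status names are illustrative):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    type healthStatus int32

    const (
        statusReady healthStatus = iota
        statusReconfiguring
    )

    var health atomic.Int32

    // compareAndSwapHealth lets only one reconfiguration run at a time:
    // a second SIGHUP arriving while the status is already RECONFIGURING
    // fails the CAS and the reload is skipped.
    func compareAndSwapHealth(oldSt, newSt healthStatus) bool {
        return health.CompareAndSwap(int32(oldSt), int32(newSt))
    }

    func main() {
        fmt.Println(compareAndSwapHealth(statusReady, statusReconfiguring)) // true
        fmt.Println(compareAndSwapHealth(statusReady, statusReconfiguring)) // false: already reconfiguring
    }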
@@ -1,6 +1,7 @@
 package main
 
 import (
+    "context"
     "crypto/tls"
     "errors"
     "net"
@@ -18,11 +19,11 @@ import (
 
 const maxRecvMsgSize = 256 << 20
 
-func initGRPC(c *cfg) {
+func initGRPC(ctx context.Context, c *cfg) {
     var endpointsToReconnect []string
     var successCount int
     grpcconfig.IterateEndpoints(c.appCfg, func(sc *grpcconfig.Config) {
-        serverOpts, ok := getGrpcServerOpts(c, sc)
+        serverOpts, ok := getGrpcServerOpts(ctx, c, sc)
         if !ok {
             return
         }
@@ -30,7 +31,7 @@ func initGRPC(c *cfg) {
         lis, err := net.Listen("tcp", sc.Endpoint())
         if err != nil {
             c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(sc.Endpoint())
-            c.log.Error(logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
+            c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
             endpointsToReconnect = append(endpointsToReconnect, sc.Endpoint())
             return
         }
@@ -39,7 +40,7 @@ func initGRPC(c *cfg) {
         srv := grpc.NewServer(serverOpts...)
 
         c.onShutdown(func() {
-            stopGRPC("FrostFS Public API", srv, c.log)
+            stopGRPC(ctx, "FrostFS Public API", srv, c.log)
         })
 
         c.cfgGRPC.append(sc.Endpoint(), lis, srv)
@@ -52,11 +53,11 @@ func initGRPC(c *cfg) {
     c.cfgGRPC.reconnectTimeout = grpcconfig.ReconnectTimeout(c.appCfg)
 
     for _, endpoint := range endpointsToReconnect {
-        scheduleReconnect(endpoint, c)
+        scheduleReconnect(ctx, endpoint, c)
     }
 }
 
-func scheduleReconnect(endpoint string, c *cfg) {
+func scheduleReconnect(ctx context.Context, endpoint string, c *cfg) {
     c.wg.Add(1)
     go func() {
         defer c.wg.Done()
@@ -65,7 +66,7 @@ func scheduleReconnect(endpoint string, c *cfg) {
         for {
             select {
             case <-t.C:
-                if tryReconnect(endpoint, c) {
+                if tryReconnect(ctx, endpoint, c) {
                     return
                 }
             case <-c.done:
@@ -75,20 +76,20 @@ func scheduleReconnect(endpoint string, c *cfg) {
     }()
 }
 
-func tryReconnect(endpoint string, c *cfg) bool {
-    c.log.Info(logs.FrostFSNodeGRPCReconnecting, zap.String("endpoint", endpoint))
+func tryReconnect(ctx context.Context, endpoint string, c *cfg) bool {
+    c.log.Info(ctx, logs.FrostFSNodeGRPCReconnecting, zap.String("endpoint", endpoint))
 
-    serverOpts, found := getGRPCEndpointOpts(endpoint, c)
+    serverOpts, found := getGRPCEndpointOpts(ctx, endpoint, c)
     if !found {
-        c.log.Warn(logs.FrostFSNodeGRPCServerConfigNotFound, zap.String("endpoint", endpoint))
+        c.log.Warn(ctx, logs.FrostFSNodeGRPCServerConfigNotFound, zap.String("endpoint", endpoint))
         return true
     }
 
     lis, err := net.Listen("tcp", endpoint)
     if err != nil {
         c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(endpoint)
-        c.log.Error(logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
-        c.log.Warn(logs.FrostFSNodeGRPCReconnectFailed, zap.Duration("next_try_in", c.cfgGRPC.reconnectTimeout))
+        c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
+        c.log.Warn(ctx, logs.FrostFSNodeGRPCReconnectFailed, zap.Duration("next_try_in", c.cfgGRPC.reconnectTimeout))
         return false
     }
     c.metricsCollector.GrpcServerMetrics().MarkHealthy(endpoint)
@@ -96,16 +97,16 @@ func tryReconnect(endpoint string, c *cfg) bool {
     srv := grpc.NewServer(serverOpts...)
 
     c.onShutdown(func() {
-        stopGRPC("FrostFS Public API", srv, c.log)
+        stopGRPC(ctx, "FrostFS Public API", srv, c.log)
     })
 
     c.cfgGRPC.appendAndHandle(endpoint, lis, srv)
 
-    c.log.Info(logs.FrostFSNodeGRPCReconnectedSuccessfully, zap.String("endpoint", endpoint))
+    c.log.Info(ctx, logs.FrostFSNodeGRPCReconnectedSuccessfully, zap.String("endpoint", endpoint))
     return true
 }
 
-func getGRPCEndpointOpts(endpoint string, c *cfg) (result []grpc.ServerOption, found bool) {
+func getGRPCEndpointOpts(ctx context.Context, endpoint string, c *cfg) (result []grpc.ServerOption, found bool) {
     unlock := c.LockAppConfigShared()
     defer unlock()
     grpcconfig.IterateEndpoints(c.appCfg, func(sc *grpcconfig.Config) {
@@ -116,7 +117,7 @@ func getGRPCEndpointOpts(endpoint string, c *cfg) (result []grpc.ServerOption, f
             return
         }
         var ok bool
-        result, ok = getGrpcServerOpts(c, sc)
+        result, ok = getGrpcServerOpts(ctx, c, sc)
         if !ok {
             return
         }
@@ -125,7 +126,7 @@ func getGRPCEndpointOpts(endpoint string, c *cfg) (result []grpc.ServerOption, f
     return
 }
 
-func getGrpcServerOpts(c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool) {
+func getGrpcServerOpts(ctx context.Context, c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool) {
     serverOpts := []grpc.ServerOption{
         grpc.MaxRecvMsgSize(maxRecvMsgSize),
         grpc.ChainUnaryInterceptor(
@@ -143,7 +144,7 @@ func getGrpcServerOpts(c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool
     if tlsCfg != nil {
         cert, err := tls.LoadX509KeyPair(tlsCfg.CertificateFile(), tlsCfg.KeyFile())
         if err != nil {
-            c.log.Error(logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err))
+            c.log.Error(ctx, logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err))
             return nil, false
         }
 
@@ -174,38 +175,38 @@ func getGrpcServerOpts(c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool
     return serverOpts, true
 }
 
-func serveGRPC(c *cfg) {
+func serveGRPC(ctx context.Context, c *cfg) {
     c.cfgGRPC.performAndSave(func(e string, l net.Listener, s *grpc.Server) {
         c.wg.Add(1)
 
         go func() {
             defer func() {
-                c.log.Info(logs.FrostFSNodeStopListeningGRPCEndpoint,
+                c.log.Info(ctx, logs.FrostFSNodeStopListeningGRPCEndpoint,
                     zap.Stringer("endpoint", l.Addr()),
                 )
 
                 c.wg.Done()
             }()
 
-            c.log.Info(logs.FrostFSNodeStartListeningEndpoint,
+            c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint,
                 zap.String("service", "gRPC"),
                 zap.Stringer("endpoint", l.Addr()),
             )
 
             if err := s.Serve(l); err != nil {
                 c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(e)
-                c.log.Error(logs.FrostFSNodeGRPCServerError, zap.Error(err))
+                c.log.Error(ctx, logs.FrostFSNodeGRPCServerError, zap.Error(err))
                 c.cfgGRPC.dropConnection(e)
-                scheduleReconnect(e, c)
+                scheduleReconnect(ctx, e, c)
             }
         }()
     })
 }
 
-func stopGRPC(name string, s *grpc.Server, l *logger.Logger) {
-    l = &logger.Logger{Logger: l.With(zap.String("name", name))}
+func stopGRPC(ctx context.Context, name string, s *grpc.Server, l *logger.Logger) {
+    l = l.With(zap.String("name", name))
 
-    l.Info(logs.FrostFSNodeStoppingGRPCServer)
+    l.Info(ctx, logs.FrostFSNodeStoppingGRPCServer)
 
     // GracefulStop() may freeze forever, see #1270
     done := make(chan struct{})
@@ -217,9 +218,9 @@ func stopGRPC(name string, s *grpc.Server, l *logger.Logger) {
     select {
     case <-done:
     case <-time.After(1 * time.Minute):
-        l.Info(logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop)
+        l.Info(ctx, logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop)
        s.Stop()
     }
 
-    l.Info(logs.FrostFSNodeGRPCServerStoppedSuccessfully)
+    l.Info(ctx, logs.FrostFSNodeGRPCServerStoppedSuccessfully)
 }
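Note: stopGRPC above guards against GracefulStop hanging on stuck in-flight RPCs. A standalone sketch of that timeout pattern with the standard grpc-go API (the one-minute deadline mirrors the diff; helper name is illustrative):

    package main

    import (
        "time"

        "google.golang.org/grpc"
    )

    // stopWithTimeout tries a graceful stop first; if pending RPCs do not
    // drain within the deadline, it falls back to a hard Stop.
    func stopWithTimeout(s *grpc.Server, d time.Duration) {
        done := make(chan struct{})
        go func() {
            s.GracefulStop() // waits for pending RPCs; may block indefinitely
            close(done)
        }()

        select {
        case <-done:
        case <-time.After(d):
            s.Stop() // force-close listeners and connections
        }
    }

    func main() {
        srv := grpc.NewServer()
        stopWithTimeout(srv, time.Minute)
    }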
@@ -20,9 +20,9 @@ type httpComponent struct {
     preReload func(c *cfg)
 }
 
-func (cmp *httpComponent) init(c *cfg) {
+func (cmp *httpComponent) init(ctx context.Context, c *cfg) {
     if !cmp.enabled {
-        c.log.Info(cmp.name + " is disabled")
+        c.log.Info(ctx, cmp.name+" is disabled")
         return
     }
     // Init server with parameters
@@ -39,14 +39,14 @@ func (cmp *httpComponent) init(c *cfg) {
     go func() {
         defer c.wg.Done()
 
-        c.log.Info(logs.FrostFSNodeStartListeningEndpoint,
+        c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint,
             zap.String("service", cmp.name),
             zap.String("endpoint", cmp.address))
         fatalOnErr(srv.Serve())
     }()
     c.closers = append(c.closers, closer{
         cmp.name,
-        func() { stopAndLog(c, cmp.name, srv.Shutdown) },
+        func() { stopAndLog(ctx, c, cmp.name, srv.Shutdown) },
     })
 }
 
@@ -62,7 +62,7 @@ func (cmp *httpComponent) reload(ctx context.Context) error {
     // Cleanup
     delCloser(cmp.cfg, cmp.name)
     // Init server with new parameters
-    cmp.init(cmp.cfg)
+    cmp.init(ctx, cmp.cfg)
     // Start worker
     if cmp.enabled {
         startWorker(ctx, cmp.cfg, *getWorker(cmp.cfg, cmp.name))
@ -61,21 +61,21 @@ func main() {
var ctx context.Context
ctx, c.ctxCancel = context.WithCancel(context.Background())

c.setHealthStatus(control.HealthStatus_STARTING)
c.setHealthStatus(ctx, control.HealthStatus_STARTING)

initApp(ctx, c)

bootUp(ctx, c)

c.compareAndSwapHealthStatus(control.HealthStatus_STARTING, control.HealthStatus_READY)
c.compareAndSwapHealthStatus(ctx, control.HealthStatus_STARTING, control.HealthStatus_READY)

wait(c)
}

func initAndLog(c *cfg, name string, initializer func(*cfg)) {
func initAndLog(ctx context.Context, c *cfg, name string, initializer func(*cfg)) {
c.log.Info(fmt.Sprintf("initializing %s service...", name))
c.log.Info(ctx, fmt.Sprintf("initializing %s service...", name))
initializer(c)
c.log.Info(name + " service has been successfully initialized")
c.log.Info(ctx, name+" service has been successfully initialized")
}

func initApp(ctx context.Context, c *cfg) {

@ -85,72 +85,72 @@ func initApp(ctx context.Context, c *cfg) {
c.wg.Done()
}()

setRuntimeParameters(c)
setRuntimeParameters(ctx, c)
metrics, _ := metricsComponent(c)
initAndLog(c, "profiler", initProfilerService)
initAndLog(ctx, c, "profiler", func(c *cfg) { initProfilerService(ctx, c) })
initAndLog(c, metrics.name, metrics.init)
initAndLog(ctx, c, metrics.name, func(c *cfg) { metrics.init(ctx, c) })

initAndLog(c, "tracing", func(c *cfg) { initTracing(ctx, c) })
initAndLog(ctx, c, "tracing", func(c *cfg) { initTracing(ctx, c) })

initLocalStorage(ctx, c)

initAndLog(c, "storage engine", func(c *cfg) {
initAndLog(ctx, c, "storage engine", func(c *cfg) {
fatalOnErr(c.cfgObject.cfgLocalStorage.localStorage.Open(ctx))
fatalOnErr(c.cfgObject.cfgLocalStorage.localStorage.Init(ctx))
})

initAndLog(c, "gRPC", initGRPC)
initAndLog(ctx, c, "gRPC", func(c *cfg) { initGRPC(ctx, c) })
initAndLog(c, "netmap", func(c *cfg) { initNetmapService(ctx, c) })
initAndLog(ctx, c, "netmap", func(c *cfg) { initNetmapService(ctx, c) })

initAccessPolicyEngine(ctx, c)
initAndLog(c, "access policy engine", func(c *cfg) {
initAndLog(ctx, c, "access policy engine", func(c *cfg) {
fatalOnErr(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalOverrideDatabaseCore().Open(ctx))
fatalOnErr(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalOverrideDatabaseCore().Init())
})

initAndLog(c, "accounting", func(c *cfg) { initAccountingService(ctx, c) })
initAndLog(ctx, c, "accounting", func(c *cfg) { initAccountingService(ctx, c) })
initAndLog(c, "container", func(c *cfg) { initContainerService(ctx, c) })
initAndLog(ctx, c, "container", func(c *cfg) { initContainerService(ctx, c) })
initAndLog(c, "session", initSessionService)
initAndLog(ctx, c, "session", initSessionService)
initAndLog(c, "object", initObjectService)
initAndLog(ctx, c, "object", initObjectService)
initAndLog(c, "tree", initTreeService)
initAndLog(ctx, c, "tree", initTreeService)
initAndLog(c, "apemanager", initAPEManagerService)
initAndLog(ctx, c, "apemanager", initAPEManagerService)
initAndLog(c, "control", initControlService)
initAndLog(ctx, c, "control", func(c *cfg) { initControlService(ctx, c) })

initAndLog(c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) })
initAndLog(ctx, c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) })
}

func runAndLog(ctx context.Context, c *cfg, name string, logSuccess bool, starter func(context.Context, *cfg)) {
c.log.Info(fmt.Sprintf("starting %s service...", name))
c.log.Info(ctx, fmt.Sprintf("starting %s service...", name))
starter(ctx, c)

if logSuccess {
c.log.Info(name + " service started successfully")
c.log.Info(ctx, name+" service started successfully")
}
}

func stopAndLog(c *cfg, name string, stopper func() error) {
func stopAndLog(ctx context.Context, c *cfg, name string, stopper func(context.Context) error) {
c.log.Debug(fmt.Sprintf("shutting down %s service", name))
c.log.Debug(ctx, fmt.Sprintf("shutting down %s service", name))

err := stopper()
err := stopper(ctx)
if err != nil {
c.log.Debug(fmt.Sprintf("could not shutdown %s server", name),
c.log.Debug(ctx, fmt.Sprintf("could not shutdown %s server", name),
zap.String("error", err.Error()),
)
}

c.log.Debug(name + " service has been stopped")
c.log.Debug(ctx, name+" service has been stopped")
}
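
Reviewer note: the repeated pattern in this hunk is leaving initAndLog's initializer type as func(*cfg) and capturing ctx in a closure at each call site, instead of widening every initializer signature at once. A minimal self-contained sketch of that adapter (the names below are illustrative, not the project's real ones):

package main

import (
	"context"
	"fmt"
)

type cfg struct{}

// initAndLog keeps its narrow func(*cfg) initializer type; callers that
// need a context capture it in a closure instead.
func initAndLog(ctx context.Context, c *cfg, name string, initializer func(*cfg)) {
	fmt.Printf("initializing %s service...\n", name)
	initializer(c)
	fmt.Printf("%s service has been successfully initialized\n", name)
}

// initGRPC is a stand-in for an initializer that was migrated to accept ctx.
func initGRPC(ctx context.Context, c *cfg) { _ = ctx }

func main() {
	ctx := context.Background()
	c := &cfg{}
	// ctx is captured by the closure, so existing call sites keep compiling.
	initAndLog(ctx, c, "gRPC", func(c *cfg) { initGRPC(ctx, c) })
}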

func bootUp(ctx context.Context, c *cfg) {
runAndLog(ctx, c, "gRPC", false, func(_ context.Context, c *cfg) { serveGRPC(c) })
runAndLog(ctx, c, "gRPC", false, func(_ context.Context, c *cfg) { serveGRPC(ctx, c) })
runAndLog(ctx, c, "notary", true, makeAndWaitNotaryDeposit)

bootstrapNode(c)
bootstrapNode(ctx, c)
startWorkers(ctx, c)
}

func wait(c *cfg) {
c.log.Info(logs.CommonApplicationStarted,
c.log.Info(context.Background(), logs.CommonApplicationStarted,
zap.String("version", misc.Version))

<-c.done // graceful shutdown

@ -160,12 +160,12 @@ func wait(c *cfg) {
go func() {
defer drain.Done()
for err := range c.internalErr {
c.log.Warn(logs.FrostFSNodeInternalApplicationError,
c.log.Warn(context.Background(), logs.FrostFSNodeInternalApplicationError,
zap.String("message", err.Error()))
}
}()

c.log.Debug(logs.FrostFSNodeWaitingForAllProcessesToStop)
c.log.Debug(context.Background(), logs.FrostFSNodeWaitingForAllProcessesToStop)

c.wg.Wait()

@ -48,7 +48,7 @@ func (c *cfg) initMorphComponents(ctx context.Context) {
fatalOnErr(err)
}

c.log.Info(logs.FrostFSNodeNotarySupport,
c.log.Info(ctx, logs.FrostFSNodeNotarySupport,
zap.Bool("sidechain_enabled", c.cfgMorph.notaryEnabled),
)

@ -64,7 +64,7 @@ func (c *cfg) initMorphComponents(ctx context.Context) {
msPerBlock, err := c.cfgMorph.client.MsPerBlock()
fatalOnErr(err)
c.cfgMorph.cacheTTL = time.Duration(msPerBlock) * time.Millisecond
c.log.Debug(logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL))
c.log.Debug(ctx, logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL))
}

if c.cfgMorph.cacheTTL < 0 {

@ -102,7 +102,7 @@ func initMorphClient(ctx context.Context, c *cfg) {
client.WithDialerSource(c.dialerSource),
)
if err != nil {
c.log.Info(logs.FrostFSNodeFailedToCreateNeoRPCClient,
c.log.Info(ctx, logs.FrostFSNodeFailedToCreateNeoRPCClient,
zap.Any("endpoints", addresses),
zap.String("error", err.Error()),
)

@ -111,12 +111,12 @@ func initMorphClient(ctx context.Context, c *cfg) {
}

c.onShutdown(func() {
c.log.Info(logs.FrostFSNodeClosingMorphComponents)
c.log.Info(ctx, logs.FrostFSNodeClosingMorphComponents)
cli.Close()
})

if err := cli.SetGroupSignerScope(); err != nil {
c.log.Info(logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err))
c.log.Info(ctx, logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err))
}

c.cfgMorph.client = cli

@ -129,14 +129,14 @@ func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) {
return
}

tx, vub, err := makeNotaryDeposit(c)
tx, vub, err := makeNotaryDeposit(ctx, c)
fatalOnErr(err)

if tx.Equals(util.Uint256{}) {
// non-error deposit with an empty TX hash means
// that the deposit has already been made; no
// need to wait it.
c.log.Info(logs.FrostFSNodeNotaryDepositHasAlreadyBeenMade)
c.log.Info(ctx, logs.FrostFSNodeNotaryDepositHasAlreadyBeenMade)
return
}

@ -144,7 +144,7 @@ func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) {
fatalOnErr(err)
}

func makeNotaryDeposit(c *cfg) (util.Uint256, uint32, error) {
func makeNotaryDeposit(ctx context.Context, c *cfg) (util.Uint256, uint32, error) {
const (
// gasMultiplier defines how many times more the notary
// balance must be compared to the GAS balance of the node:

@ -161,7 +161,7 @@ func makeNotaryDeposit(c *cfg) (util.Uint256, uint32, error) {
return util.Uint256{}, 0, fmt.Errorf("could not calculate notary deposit: %w", err)
}

return c.cfgMorph.client.DepositEndlessNotary(depositAmount)
return c.cfgMorph.client.DepositEndlessNotary(ctx, depositAmount)
}

var (

@ -202,7 +202,7 @@ func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256, vub uint32)
return fmt.Errorf("could not wait for notary deposit persists in chain: %w", err)
}
if res.Execution.VMState.HasFlag(vmstate.Halt) {
c.log.Info(logs.ClientNotaryDepositTransactionWasSuccessfullyPersisted)
c.log.Info(ctx, logs.ClientNotaryDepositTransactionWasSuccessfullyPersisted)
return nil
}
return errNotaryDepositFail
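
Reviewer note: makeAndWaitNotaryDeposit treats a zero-valued transaction hash as "deposit already made" and skips the wait. A hedged sketch of that control flow with the morph client stubbed out (uint256 here is a stand-in for util.Uint256, not the real type):

package main

import (
	"context"
	"fmt"
)

type uint256 [32]byte

func (u uint256) Equals(other uint256) bool { return u == other }

// makeNotaryDeposit is a stub: a real implementation would submit the
// deposit transaction and return its hash plus the valid-until block.
func makeNotaryDeposit(ctx context.Context) (uint256, uint32, error) {
	return uint256{}, 0, nil
}

func main() {
	tx, vub, err := makeNotaryDeposit(context.Background())
	if err != nil {
		panic(err)
	}
	if tx.Equals(uint256{}) {
		// An empty hash with no error means the deposit already exists,
		// so there is nothing to await on chain.
		fmt.Println("notary deposit has already been made")
		return
	}
	fmt.Printf("waiting for deposit tx until block %d\n", vub)
}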

@ -217,7 +217,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey)
if err != nil {
fromSideChainBlock = 0
c.log.Warn(logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
}

subs, err = subscriber.New(ctx, &subscriber.Params{

@ -246,7 +246,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
setNetmapNotificationParser(c, newEpochNotification, func(src *state.ContainedNotificationEvent) (event.Event, error) {
res, err := netmapEvent.ParseNewEpoch(src)
if err == nil {
c.log.Info(logs.FrostFSNodeNewEpochEventFromSidechain,
c.log.Info(ctx, logs.FrostFSNodeNewEpochEventFromSidechain,
zap.Uint64("number", res.(netmapEvent.NewEpoch).EpochNumber()),
)
}

@ -256,12 +256,12 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
registerNotificationHandlers(c.cfgNetmap.scriptHash, lis, c.cfgNetmap.parsers, c.cfgNetmap.subscribers)
registerNotificationHandlers(c.cfgContainer.scriptHash, lis, c.cfgContainer.parsers, c.cfgContainer.subscribers)

registerBlockHandler(lis, func(block *block.Block) {
registerBlockHandler(lis, func(ctx context.Context, block *block.Block) {
c.log.Debug(logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index))
c.log.Debug(ctx, logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index))

err = c.persistate.SetUInt32(persistateSideChainLastBlockKey, block.Index)
if err != nil {
c.log.Warn(logs.FrostFSNodeCantUpdatePersistentState,
c.log.Warn(ctx, logs.FrostFSNodeCantUpdatePersistentState,
zap.String("chain", "side"),
zap.Uint32("block_index", block.Index))
}

@ -145,7 +145,7 @@ func initNetmapService(ctx context.Context, c *cfg) {

c.initMorphComponents(ctx)

initNetmapState(c)
initNetmapState(ctx, c)

server := netmapTransportGRPC.New(
netmapService.NewSignService(

@ -172,29 +172,29 @@ func initNetmapService(ctx context.Context, c *cfg) {
}

func addNewEpochNotificationHandlers(c *cfg) {
addNewEpochNotificationHandler(c, func(ev event.Event) {
addNewEpochNotificationHandler(c, func(_ context.Context, ev event.Event) {
c.cfgNetmap.state.setCurrentEpoch(ev.(netmapEvent.NewEpoch).EpochNumber())
})

addNewEpochAsyncNotificationHandler(c, func(ev event.Event) {
addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, ev event.Event) {
e := ev.(netmapEvent.NewEpoch).EpochNumber()

c.updateContractNodeInfo(e)
c.updateContractNodeInfo(ctx, e)

if !c.needBootstrap() || c.cfgNetmap.reBoostrapTurnedOff.Load() { // fixes #470
return
}

if err := c.bootstrap(); err != nil {
if err := c.bootstrap(ctx); err != nil {
c.log.Warn(logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err))
c.log.Warn(ctx, logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err))
}
})

if c.cfgMorph.notaryEnabled {
addNewEpochAsyncNotificationHandler(c, func(_ event.Event) {
addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, _ event.Event) {
_, _, err := makeNotaryDeposit(c)
_, _, err := makeNotaryDeposit(ctx, c)
if err != nil {
c.log.Error(logs.FrostFSNodeCouldNotMakeNotaryDeposit,
c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit,
zap.String("error", err.Error()),
)
}
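
Reviewer note: every registration in this hunk widens the handler type from func(event.Event) to func(context.Context, event.Event), so the dispatch loop's context reaches the log calls and contract invocations inside handlers. A minimal sketch of the widened type (Event is reduced to an empty interface here; this is an illustration, not the project's event package):

package main

import (
	"context"
	"fmt"
)

type Event interface{}

// Handler now receives the context under which the notification is dispatched.
type Handler func(context.Context, Event)

func dispatch(ctx context.Context, h Handler, e Event) { h(ctx, e) }

func main() {
	h := func(ctx context.Context, _ Event) {
		// In the real code, ctx would flow into c.log.* and chain calls.
		fmt.Println("new epoch tick handled")
	}
	dispatch(context.Background(), h, struct{}{})
}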

@ -204,13 +204,13 @@ func addNewEpochNotificationHandlers(c *cfg) {

// bootstrapNode adds current node to the Network map.
// Must be called after initNetmapService.
func bootstrapNode(c *cfg) {
func bootstrapNode(ctx context.Context, c *cfg) {
if c.needBootstrap() {
if c.IsMaintenance() {
c.log.Info(logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap)
c.log.Info(ctx, logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap)
return
}
err := c.bootstrap()
err := c.bootstrap(ctx)
fatalOnErrDetails("bootstrap error", err)
}
}

@ -237,17 +237,17 @@ func setNetmapNotificationParser(c *cfg, sTyp string, p event.NotificationParser

// initNetmapState inits current Network map state.
// Must be called after Morph components initialization.
func initNetmapState(c *cfg) {
func initNetmapState(ctx context.Context, c *cfg) {
epoch, err := c.cfgNetmap.wrapper.Epoch()
fatalOnErrDetails("could not initialize current epoch number", err)

var ni *netmapSDK.NodeInfo
ni, err = c.netmapInitLocalNodeState(epoch)
ni, err = c.netmapInitLocalNodeState(ctx, epoch)
fatalOnErrDetails("could not init network state", err)

stateWord := nodeState(ni)

c.log.Info(logs.FrostFSNodeInitialNetworkState,
c.log.Info(ctx, logs.FrostFSNodeInitialNetworkState,
zap.Uint64("epoch", epoch),
zap.String("state", stateWord),
)

@ -276,7 +276,7 @@ func nodeState(ni *netmapSDK.NodeInfo) string {
return "undefined"
}

func (c *cfg) netmapInitLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error) {
func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) {
nmNodes, err := c.cfgNetmap.wrapper.GetCandidates()
if err != nil {
return nil, err

@ -304,7 +304,7 @@ func (c *cfg) netmapInitLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error
if nmState != candidateState {
// This happens when the node was switched to maintenance without epoch tick.
// We expect it to continue staying in maintenance.
c.log.Info(logs.CandidateStatusPriority,
c.log.Info(ctx, logs.CandidateStatusPriority,
zap.String("netmap", nmState),
zap.String("candidate", candidateState))
}

@ -350,16 +350,16 @@ func addNewEpochAsyncNotificationHandler(c *cfg, h event.Handler) {

var errRelayBootstrap = errors.New("setting netmap status is forbidden in relay mode")

func (c *cfg) SetNetmapStatus(st control.NetmapStatus) error {
func (c *cfg) SetNetmapStatus(ctx context.Context, st control.NetmapStatus) error {
switch st {
default:
return fmt.Errorf("unsupported status %v", st)
case control.NetmapStatus_MAINTENANCE:
return c.setMaintenanceStatus(false)
return c.setMaintenanceStatus(ctx, false)
case control.NetmapStatus_ONLINE, control.NetmapStatus_OFFLINE:
}

c.stopMaintenance()
c.stopMaintenance(ctx)

if !c.needBootstrap() {
return errRelayBootstrap

@ -367,12 +367,12 @@ func (c *cfg) SetNetmapStatus(st control.NetmapStatus) error {

if st == control.NetmapStatus_ONLINE {
c.cfgNetmap.reBoostrapTurnedOff.Store(false)
return bootstrapOnline(c)
return bootstrapOnline(ctx, c)
}

c.cfgNetmap.reBoostrapTurnedOff.Store(true)

return c.updateNetMapState(func(*nmClient.UpdatePeerPrm) {})
return c.updateNetMapState(ctx, func(*nmClient.UpdatePeerPrm) {})
}

func (c *cfg) GetNetmapStatus() (control.NetmapStatus, uint64, error) {

@ -384,11 +384,11 @@ func (c *cfg) GetNetmapStatus() (control.NetmapStatus, uint64, error) {
return st, epoch, nil
}

func (c *cfg) ForceMaintenance() error {
func (c *cfg) ForceMaintenance(ctx context.Context) error {
return c.setMaintenanceStatus(true)
return c.setMaintenanceStatus(ctx, true)
}

func (c *cfg) setMaintenanceStatus(force bool) error {
func (c *cfg) setMaintenanceStatus(ctx context.Context, force bool) error {
netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration()
if err != nil {
err = fmt.Errorf("read network settings to check maintenance allowance: %w", err)

@ -397,10 +397,10 @@ func (c *cfg) setMaintenanceStatus(force bool) error {
}

if err == nil || force {
c.startMaintenance()
c.startMaintenance(ctx)

if err == nil {
err = c.updateNetMapState((*nmClient.UpdatePeerPrm).SetMaintenance)
err = c.updateNetMapState(ctx, (*nmClient.UpdatePeerPrm).SetMaintenance)
}

if err != nil {

@ -413,12 +413,12 @@ func (c *cfg) setMaintenanceStatus(force bool) error {

// calls UpdatePeerState operation of Netmap contract's client for the local node.
// State setter is used to specify node state to switch to.
func (c *cfg) updateNetMapState(stateSetter func(*nmClient.UpdatePeerPrm)) error {
func (c *cfg) updateNetMapState(ctx context.Context, stateSetter func(*nmClient.UpdatePeerPrm)) error {
var prm nmClient.UpdatePeerPrm
prm.SetKey(c.key.PublicKey().Bytes())
stateSetter(&prm)

_, err := c.cfgNetmap.wrapper.UpdatePeerState(prm)
_, err := c.cfgNetmap.wrapper.UpdatePeerState(ctx, prm)
return err
}


@ -61,7 +61,7 @@ type objectSvc struct {
func (c *cfg) MaxObjectSize() uint64 {
sz, err := c.cfgNetmap.wrapper.MaxObjectSize()
if err != nil {
c.log.Error(logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
c.log.Error(context.Background(), logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
zap.String("error", err.Error()),
)
}

@ -69,11 +69,11 @@ func (c *cfg) MaxObjectSize() uint64 {
return sz
}

func (s *objectSvc) Put() (objectService.PutObjectStream, error) {
func (s *objectSvc) Put(_ context.Context) (objectService.PutObjectStream, error) {
return s.put.Put()
}

func (s *objectSvc) Patch() (objectService.PatchObjectStream, error) {
func (s *objectSvc) Patch(_ context.Context) (objectService.PatchObjectStream, error) {
return s.patch.Patch()
}

@ -223,7 +223,7 @@ func initObjectService(c *cfg) {

func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.ClientCache) {
if policerconfig.UnsafeDisable(c.appCfg) {
c.log.Warn(logs.FrostFSNodePolicerIsDisabled)
c.log.Warn(context.Background(), logs.FrostFSNodePolicerIsDisabled)
return
}

@ -287,7 +287,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl

_, err := ls.Inhume(ctx, inhumePrm)
if err != nil {
c.log.Warn(logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
c.log.Warn(ctx, logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
zap.String("error", err.Error()),
)
}

@ -1,17 +1,18 @@
package main

import (
"context"
"runtime"

profilerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/profiler"
httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http"
)

func initProfilerService(c *cfg) {
func initProfilerService(ctx context.Context, c *cfg) {
tuneProfilers(c)

pprof, _ := pprofComponent(c)
pprof.init(c)
pprof.init(ctx, c)
}

func pprofComponent(c *cfg) (*httpComponent, bool) {

@ -1,6 +1,7 @@
package main

import (
"context"
"os"
"runtime/debug"

@ -9,17 +10,17 @@ import (
"go.uber.org/zap"
)

func setRuntimeParameters(c *cfg) {
func setRuntimeParameters(ctx context.Context, c *cfg) {
if len(os.Getenv("GOMEMLIMIT")) != 0 {
// default limit < yaml limit < app env limit < GOMEMLIMIT
c.log.Warn(logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT)
c.log.Warn(ctx, logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT)
return
}

memLimitBytes := runtime.GCMemoryLimitBytes(c.appCfg)
previous := debug.SetMemoryLimit(memLimitBytes)
if memLimitBytes != previous {
c.log.Info(logs.RuntimeSoftMemoryLimitUpdated,
c.log.Info(ctx, logs.RuntimeSoftMemoryLimitUpdated,
zap.Int64("new_value", memLimitBytes),
zap.Int64("old_value", previous))
}
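
Reviewer note: setRuntimeParameters leaves the soft memory limit untouched when GOMEMLIMIT is set, because the Go runtime has already applied the env var and it should win over config values ("default limit < yaml limit < app env limit < GOMEMLIMIT"). A runnable sketch of that precedence check; the 1 GiB value stands in for the YAML-derived limit:

package main

import (
	"fmt"
	"os"
	"runtime/debug"
)

func main() {
	if os.Getenv("GOMEMLIMIT") != "" {
		// The runtime already honours GOMEMLIMIT; overriding it from
		// config would silently defeat the operator's setting.
		fmt.Println("soft memory limit defined with GOMEMLIMIT")
		return
	}
	memLimitBytes := int64(1 << 30) // assumed config value for the sketch
	previous := debug.SetMemoryLimit(memLimitBytes)
	if memLimitBytes != previous {
		fmt.Printf("soft memory limit updated: new=%d old=%d\n", memLimitBytes, previous)
	}
}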

@ -48,7 +48,7 @@ func initSessionService(c *cfg) {
_ = c.privateTokenStore.Close()
})

addNewEpochNotificationHandler(c, func(ev event.Event) {
addNewEpochNotificationHandler(c, func(_ context.Context, ev event.Event) {
c.privateTokenStore.RemoveOld(ev.(netmap.NewEpoch).EpochNumber())
})

@ -13,12 +13,12 @@ import (
func initTracing(ctx context.Context, c *cfg) {
conf, err := tracingconfig.ToTracingConfig(c.appCfg)
if err != nil {
c.log.Error(logs.FrostFSNodeFailedInitTracing, zap.Error(err))
c.log.Error(ctx, logs.FrostFSNodeFailedInitTracing, zap.Error(err))
return
}
_, err = tracing.Setup(ctx, *conf)
if err != nil {
c.log.Error(logs.FrostFSNodeFailedInitTracing, zap.Error(err))
c.log.Error(ctx, logs.FrostFSNodeFailedInitTracing, zap.Error(err))
return
}

@ -29,7 +29,7 @@ func initTracing(ctx context.Context, c *cfg) {
defer cancel()
err := tracing.Shutdown(ctx) // cfg context cancels before close
if err != nil {
c.log.Error(logs.FrostFSNodeFailedShutdownTracing, zap.Error(err))
c.log.Error(ctx, logs.FrostFSNodeFailedShutdownTracing, zap.Error(err))
}
},
})

@ -44,7 +44,7 @@ func (c cnrSource) List() ([]cid.ID, error) {
func initTreeService(c *cfg) {
treeConfig := treeconfig.Tree(c.appCfg)
if !treeConfig.Enabled() {
c.log.Info(logs.FrostFSNodeTreeServiceIsNotEnabledSkipInitialization)
c.log.Info(context.Background(), logs.FrostFSNodeTreeServiceIsNotEnabledSkipInitialization)
return
}

@ -79,10 +79,10 @@ func initTreeService(c *cfg) {
}))

if d := treeConfig.SyncInterval(); d == 0 {
addNewEpochNotificationHandler(c, func(_ event.Event) {
addNewEpochNotificationHandler(c, func(ctx context.Context, _ event.Event) {
err := c.treeService.SynchronizeAll()
if err != nil {
c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
c.log.Error(ctx, logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
}
})
} else {

@ -93,7 +93,7 @@ func initTreeService(c *cfg) {
for range tick.C {
err := c.treeService.SynchronizeAll()
if err != nil {
c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
c.log.Error(context.Background(), logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
if errors.Is(err, tree.ErrShuttingDown) {
return
}

@ -102,15 +102,15 @@ func initTreeService(c *cfg) {
}()
}

subscribeToContainerRemoval(c, func(e event.Event) {
subscribeToContainerRemoval(c, func(ctx context.Context, e event.Event) {
ev := e.(containerEvent.DeleteSuccess)

// This is executed asynchronously, so we don't care about the operation taking some time.
c.log.Debug(logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID))
c.log.Debug(ctx, logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID))
err := c.treeService.DropTree(context.Background(), ev.ID, "")
err := c.treeService.DropTree(ctx, ev.ID, "")
if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
// Ignore pilorama.ErrTreeNotFound but other errors, including shard.ErrReadOnly, should be logged.
c.log.Error(logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved,
c.log.Error(ctx, logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved,
zap.Stringer("cid", ev.ID),
zap.String("error", err.Error()))
}
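
Reviewer note: the container-removal subscription used to call DropTree with context.Background(); it now reuses the handler's ctx, so tree removal inherits cancellation and tracing from the event dispatch instead of running detached. A small sketch of the difference (dropTree below is a stand-in for treeService.DropTree, not the real method):

package main

import (
	"context"
	"fmt"
)

// dropTree is a stand-in for treeService.DropTree.
func dropTree(ctx context.Context, cid string) error {
	// A context-aware implementation can stop early on cancellation.
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
		fmt.Println("removing all trees for container", cid)
		return nil
	}
}

func main() {
	handler := func(ctx context.Context, cid string) {
		// Before: dropTree(context.Background(), cid) ran detached from dispatch.
		// After: the dispatch context is propagated.
		_ = dropTree(ctx, cid)
	}
	handler(context.Background(), "example-cid")
}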

@ -1,6 +1,8 @@
package audit

import (
"context"

crypto "git.frostfs.info/TrueCloudLab/frostfs-crypto"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"

@ -17,15 +19,15 @@ type Target interface {
String() string
}

func LogRequest(log *logger.Logger, operation string, req Request, target Target, status bool) {
func LogRequest(ctx context.Context, log *logger.Logger, operation string, req Request, target Target, status bool) {
var key []byte
if req != nil {
key = req.GetVerificationHeader().GetBodySignature().GetKey()
}
LogRequestWithKey(log, operation, key, target, status)
LogRequestWithKey(ctx, log, operation, key, target, status)
}

func LogRequestWithKey(log *logger.Logger, operation string, key []byte, target Target, status bool) {
func LogRequestWithKey(ctx context.Context, log *logger.Logger, operation string, key []byte, target Target, status bool) {
object, subject := NotDefined, NotDefined

publicKey := crypto.UnmarshalPublicKey(key)

@ -37,7 +39,7 @@ func LogRequestWithKey(log *logger.Logger, operation string, key []byte, target
object = target.String()
}

log.Info(logs.AuditEventLogRecord,
log.Info(ctx, logs.AuditEventLogRecord,
zap.String("operation", operation),
zap.String("object", object),
zap.String("subject", subject),

@ -117,7 +117,7 @@ func (v *FormatValidator) Validate(ctx context.Context, obj *objectSDK.Object, u
}

if !unprepared {
if err := v.validateSignatureKey(obj); err != nil {
if err := v.validateSignatureKey(ctx, obj); err != nil {
return fmt.Errorf("(%T) could not validate signature key: %w", v, err)
}

@ -134,7 +134,7 @@ func (v *FormatValidator) Validate(ctx context.Context, obj *objectSDK.Object, u
return nil
}

func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error {
func (v *FormatValidator) validateSignatureKey(ctx context.Context, obj *objectSDK.Object) error {
sig := obj.Signature()
if sig == nil {
return errMissingSignature

@ -156,7 +156,7 @@ func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error {
ownerID := obj.OwnerID()

if token == nil && obj.ECHeader() != nil {
role, err := v.isIROrContainerNode(obj, binKey)
role, err := v.isIROrContainerNode(ctx, obj, binKey)
if err != nil {
return err
}

@ -172,7 +172,7 @@ func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error {
}

if v.verifyTokenIssuer {
role, err := v.isIROrContainerNode(obj, binKey)
role, err := v.isIROrContainerNode(ctx, obj, binKey)
if err != nil {
return err
}

@ -190,7 +190,7 @@ func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error {
return nil
}

func (v *FormatValidator) isIROrContainerNode(obj *objectSDK.Object, signerKey []byte) (acl.Role, error) {
func (v *FormatValidator) isIROrContainerNode(ctx context.Context, obj *objectSDK.Object, signerKey []byte) (acl.Role, error) {
cnrID, containerIDSet := obj.ContainerID()
if !containerIDSet {
return acl.RoleOthers, errNilCID

@ -204,7 +204,7 @@ func (v *FormatValidator) isIROrContainerNode(obj *objectSDK.Object, signerKey [
return acl.RoleOthers, fmt.Errorf("failed to get container (id=%s): %w", cnrID.EncodeToString(), err)
}

res, err := v.senderClassifier.IsInnerRingOrContainerNode(signerKey, cnrID, cnr.Value)
res, err := v.senderClassifier.IsInnerRingOrContainerNode(ctx, signerKey, cnrID, cnr.Value)
if err != nil {
return acl.RoleOthers, err
}

@ -65,7 +65,7 @@ func TestFormatValidator_Validate(t *testing.T) {
epoch: curEpoch,
}),
WithLockSource(ls),
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
)

ownerKey, err := keys.NewPrivateKey()

@ -290,7 +290,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
}),
WithLockSource(ls),
WithVerifySessionTokenIssuer(false),
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
)

tok := sessiontest.Object()

@ -339,7 +339,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
},
},
),
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
)

tok := sessiontest.Object()

@ -417,7 +417,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
currentEpoch: curEpoch,
},
),
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
)

require.NoError(t, v.Validate(context.Background(), obj, false))

@ -491,7 +491,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
currentEpoch: curEpoch,
},
),
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
)

require.NoError(t, v.Validate(context.Background(), obj, false))

@ -567,7 +567,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
currentEpoch: curEpoch,
},
),
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
)

require.Error(t, v.Validate(context.Background(), obj, false))
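
Reviewer note: the tests stop building the logger with a struct literal and go through a constructor instead, which keeps the zap field encapsulated and lets the wrapper change shape (for example, growing context-aware methods) without touching every test. A sketch of the wrapper idiom; this illustrates the pattern, not the package's exact definition:

package logger

import (
	"context"

	"go.uber.org/zap"
)

type Logger struct {
	z *zap.Logger
}

// NewLoggerWrapper hides the underlying zap.Logger behind the wrapper type.
func NewLoggerWrapper(z *zap.Logger) *Logger {
	return &Logger{z: z}
}

// Info takes a context so callers can thread request or trace data through.
func (l *Logger) Info(_ context.Context, msg string, fields ...zap.Field) {
	l.z.Info(msg, fields...)
}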

@ -2,6 +2,7 @@ package object

import (
"bytes"
"context"
"crypto/sha256"

"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"

@ -40,6 +41,7 @@ type ClassifyResult struct {
}

func (c SenderClassifier) Classify(
ctx context.Context,
ownerID *user.ID,
ownerKey *keys.PublicKey,
idCnr cid.ID,

@ -57,14 +59,14 @@ func (c SenderClassifier) Classify(
}, nil
}

return c.IsInnerRingOrContainerNode(ownerKeyInBytes, idCnr, cnr)
return c.IsInnerRingOrContainerNode(ctx, ownerKeyInBytes, idCnr, cnr)
}

func (c SenderClassifier) IsInnerRingOrContainerNode(ownerKeyInBytes []byte, idCnr cid.ID, cnr container.Container) (*ClassifyResult, error) {
func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerKeyInBytes []byte, idCnr cid.ID, cnr container.Container) (*ClassifyResult, error) {
isInnerRingNode, err := c.isInnerRingKey(ownerKeyInBytes)
if err != nil {
// do not throw error, try best case matching
c.log.Debug(logs.V2CantCheckIfRequestFromInnerRing,
c.log.Debug(ctx, logs.V2CantCheckIfRequestFromInnerRing,
zap.String("error", err.Error()))
} else if isInnerRingNode {
return &ClassifyResult{

@ -81,7 +83,7 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ownerKeyInBytes []byte, idC
// error might happen if request has `RoleOther` key and placement
// is not possible for previous epoch, so
// do not throw error, try best case matching
c.log.Debug(logs.V2CantCheckIfRequestFromContainerNode,
c.log.Debug(ctx, logs.V2CantCheckIfRequestFromContainerNode,
zap.String("error", err.Error()))
} else if isContainerNode {
return &ClassifyResult{

@ -29,7 +29,7 @@ type (
emitDuration uint32 // in blocks
}

depositor func() (util.Uint256, error)
depositor func(context.Context) (util.Uint256, error)
awaiter func(context.Context, util.Uint256) error
)

@ -66,11 +66,11 @@ func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer {
)
}

func newEmissionTimer(args *emitTimerArgs) *timer.BlockTimer {
func newEmissionTimer(ctx context.Context, args *emitTimerArgs) *timer.BlockTimer {
return timer.NewBlockTimer(
timer.StaticBlockMeter(args.emitDuration),
func() {
args.ap.HandleGasEmission(timerEvent.NewAlphabetEmitTick{})
args.ap.HandleGasEmission(ctx, timerEvent.NewAlphabetEmitTick{})
},
)
}

@ -35,7 +35,7 @@ import (
"google.golang.org/grpc"
)

func (s *Server) initNetmapProcessor(cfg *viper.Viper,
func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper,
alphaSync event.Handler,
) error {
locodeValidator, err := s.newLocodeValidator(cfg)

@ -48,10 +48,13 @@ func (s *Server) initNetmapProcessor(cfg *viper.Viper,
var netMapCandidateStateValidator statevalidation.NetMapCandidateValidator
netMapCandidateStateValidator.SetNetworkSettings(netSettings)

poolSize := cfg.GetInt("workers.netmap")
s.log.Debug(ctx, logs.NetmapNetmapWorkerPool, zap.Int("size", poolSize))

s.netmapProcessor, err = netmap.New(&netmap.Params{
Log: s.log,
Metrics: s.irMetrics,
PoolSize: cfg.GetInt("workers.netmap"),
PoolSize: poolSize,
NetmapClient: netmap.NewNetmapClient(s.netmapClient),
EpochTimer: s,
EpochState: s,
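
Reviewer note: each processor init now reads the worker-pool size into a local, logs it, and feeds the same local into the Params struct, so the logged value can never drift from the one actually configured. A runnable sketch with viper (the default value is assumed for the sketch):

package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	cfg := viper.New()
	cfg.SetDefault("workers.netmap", 10) // assumed default

	// Read once, log it, and reuse the same value below.
	poolSize := cfg.GetInt("workers.netmap")
	fmt.Printf("netmap worker pool, size=%d\n", poolSize)

	params := struct{ PoolSize int }{PoolSize: poolSize}
	fmt.Printf("processor configured with pool size %d\n", params.PoolSize)
}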

@ -97,7 +100,7 @@ func (s *Server) initMainnet(ctx context.Context, cfg *viper.Viper, morphChain *
fromMainChainBlock, err := s.persistate.UInt32(persistateMainChainLastBlockKey)
if err != nil {
fromMainChainBlock = 0
s.log.Warn(logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.String("error", err.Error()))
s.log.Warn(ctx, logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.String("error", err.Error()))
}
mainnetChain.from = fromMainChainBlock

@ -137,12 +140,12 @@ func (s *Server) enableNotarySupport() error {
return nil
}

func (s *Server) initNotaryConfig() {
func (s *Server) initNotaryConfig(ctx context.Context) {
s.mainNotaryConfig = notaryConfigs(
!s.withoutMainNet && s.mainnetClient.ProbeNotary(), // if mainnet disabled then notary flag must be disabled too
)

s.log.Info(logs.InnerringNotarySupport,
s.log.Info(ctx, logs.InnerringNotarySupport,
zap.Bool("sidechain_enabled", true),
zap.Bool("mainchain_enabled", !s.mainNotaryConfig.disabled),
)

@ -152,8 +155,8 @@ func (s *Server) createAlphaSync(cfg *viper.Viper, frostfsCli *frostfsClient.Cli
var alphaSync event.Handler

if s.withoutMainNet || cfg.GetBool("governance.disable") {
alphaSync = func(event.Event) {
alphaSync = func(ctx context.Context, _ event.Event) {
s.log.Debug(logs.InnerringAlphabetKeysSyncIsDisabled)
s.log.Debug(ctx, logs.InnerringAlphabetKeysSyncIsDisabled)
}
} else {
// create governance processor

@ -196,16 +199,16 @@ func (s *Server) createIRFetcher() irFetcher {
return irf
}

func (s *Server) initTimers(cfg *viper.Viper) {
func (s *Server) initTimers(ctx context.Context, cfg *viper.Viper) {
s.epochTimer = newEpochTimer(&epochTimerArgs{
newEpochHandlers: s.newEpochTickHandlers(),
newEpochHandlers: s.newEpochTickHandlers(ctx),
epoch: s,
})

s.addBlockTimer(s.epochTimer)

// initialize emission timer
emissionTimer := newEmissionTimer(&emitTimerArgs{
emissionTimer := newEmissionTimer(ctx, &emitTimerArgs{
ap: s.alphabetProcessor,
emitDuration: cfg.GetUint32("timers.emit"),
})

@ -213,18 +216,20 @@ func (s *Server) initTimers(cfg *viper.Viper) {
s.addBlockTimer(emissionTimer)
}

func (s *Server) initAlphabetProcessor(cfg *viper.Viper) error {
func (s *Server) initAlphabetProcessor(ctx context.Context, cfg *viper.Viper) error {
parsedWallets, err := parseWalletAddressesFromStrings(cfg.GetStringSlice("emit.extra_wallets"))
if err != nil {
return err
}

poolSize := cfg.GetInt("workers.alphabet")
s.log.Debug(ctx, logs.AlphabetAlphabetWorkerPool, zap.Int("size", poolSize))

// create alphabet processor
s.alphabetProcessor, err = alphabet.New(&alphabet.Params{
ParsedWallets: parsedWallets,
Log: s.log,
Metrics: s.irMetrics,
PoolSize: cfg.GetInt("workers.alphabet"),
PoolSize: poolSize,
AlphabetContracts: s.contracts.alphabet,
NetmapClient: s.netmapClient,
MorphClient: s.morphClient,

@ -239,12 +244,14 @@ func (s *Server) initAlphabetProcessor(cfg *viper.Viper) error {
return err
}

func (s *Server) initContainerProcessor(cfg *viper.Viper, cnrClient *container.Client, frostfsIDClient *frostfsid.Client) error {
func (s *Server) initContainerProcessor(ctx context.Context, cfg *viper.Viper, cnrClient *container.Client, frostfsIDClient *frostfsid.Client) error {
poolSize := cfg.GetInt("workers.container")
s.log.Debug(ctx, logs.ContainerContainerWorkerPool, zap.Int("size", poolSize))
// container processor
containerProcessor, err := cont.New(&cont.Params{
Log: s.log,
Metrics: s.irMetrics,
PoolSize: cfg.GetInt("workers.container"),
PoolSize: poolSize,
AlphabetState: s,
ContainerClient: cnrClient,
MorphClient: cnrClient.Morph(),

@ -258,12 +265,14 @@ func (s *Server) initContainerProcessor(cfg *viper.Viper, cnrClient *container.C
return bindMorphProcessor(containerProcessor, s)
}

func (s *Server) initBalanceProcessor(cfg *viper.Viper, frostfsCli *frostfsClient.Client) error {
func (s *Server) initBalanceProcessor(ctx context.Context, cfg *viper.Viper, frostfsCli *frostfsClient.Client) error {
poolSize := cfg.GetInt("workers.balance")
s.log.Debug(ctx, logs.BalanceBalanceWorkerPool, zap.Int("size", poolSize))
// create balance processor
balanceProcessor, err := balance.New(&balance.Params{
Log: s.log,
Metrics: s.irMetrics,
PoolSize: cfg.GetInt("workers.balance"),
PoolSize: poolSize,
FrostFSClient: frostfsCli,
BalanceSC: s.contracts.balance,
AlphabetState: s,

@ -276,15 +285,17 @@ func (s *Server) initBalanceProcessor(cfg *viper.Viper, frostfsCli *frostfsClien
return bindMorphProcessor(balanceProcessor, s)
}

func (s *Server) initFrostFSMainnetProcessor(cfg *viper.Viper) error {
func (s *Server) initFrostFSMainnetProcessor(ctx context.Context, cfg *viper.Viper) error {
if s.withoutMainNet {
return nil
}
poolSize := cfg.GetInt("workers.frostfs")
s.log.Debug(ctx, logs.FrostFSFrostfsWorkerPool, zap.Int("size", poolSize))

frostfsProcessor, err := frostfs.New(&frostfs.Params{
Log: s.log,
Metrics: s.irMetrics,
PoolSize: cfg.GetInt("workers.frostfs"),
PoolSize: poolSize,
FrostFSContract: s.contracts.frostfs,
BalanceClient: s.balanceClient,
NetmapClient: s.netmapClient,

@ -304,10 +315,10 @@ func (s *Server) initFrostFSMainnetProcessor(cfg *viper.Viper) error {
return bindMainnetProcessor(frostfsProcessor, s)
}

func (s *Server) initGRPCServer(cfg *viper.Viper, log *logger.Logger, audit *atomic.Bool) error {
func (s *Server) initGRPCServer(ctx context.Context, cfg *viper.Viper, log *logger.Logger, audit *atomic.Bool) error {
controlSvcEndpoint := cfg.GetString("control.grpc.endpoint")
if controlSvcEndpoint == "" {
s.log.Info(logs.InnerringNoControlServerEndpointSpecified)
s.log.Info(ctx, logs.InnerringNoControlServerEndpointSpecified)
return nil
}

@ -403,7 +414,7 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) {
return result, nil
}

func (s *Server) initProcessors(cfg *viper.Viper, morphClients *serverMorphClients) error {
func (s *Server) initProcessors(ctx context.Context, cfg *viper.Viper, morphClients *serverMorphClients) error {
irf := s.createIRFetcher()

s.statusIndex = newInnerRingIndexer(

@ -418,27 +429,27 @@ func (s *Server) initProcessors(cfg *viper.Viper, morphClients *serverMorphClien
return err
}

err = s.initNetmapProcessor(cfg, alphaSync)
err = s.initNetmapProcessor(ctx, cfg, alphaSync)
if err != nil {
return err
}

err = s.initContainerProcessor(cfg, morphClients.CnrClient, morphClients.FrostFSIDClient)
err = s.initContainerProcessor(ctx, cfg, morphClients.CnrClient, morphClients.FrostFSIDClient)
if err != nil {
return err
}

err = s.initBalanceProcessor(cfg, morphClients.FrostFSClient)
err = s.initBalanceProcessor(ctx, cfg, morphClients.FrostFSClient)
if err != nil {
return err
}

err = s.initFrostFSMainnetProcessor(cfg)
err = s.initFrostFSMainnetProcessor(ctx, cfg)
if err != nil {
return err
}

err = s.initAlphabetProcessor(cfg)
err = s.initAlphabetProcessor(ctx, cfg)
return err
}

@ -446,7 +457,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
|
||||||
fromSideChainBlock, err := s.persistate.UInt32(persistateSideChainLastBlockKey)
|
fromSideChainBlock, err := s.persistate.UInt32(persistateSideChainLastBlockKey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fromSideChainBlock = 0
|
fromSideChainBlock = 0
|
||||||
s.log.Warn(logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
|
s.log.Warn(ctx, logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
|
||||||
}
|
}
|
||||||
|
|
||||||
morphChain := &chainParams{
|
morphChain := &chainParams{
|
||||||
|
@ -471,7 +482,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if err := s.morphClient.SetGroupSignerScope(); err != nil {
|
if err := s.morphClient.SetGroupSignerScope(); err != nil {
|
||||||
morphChain.log.Info(logs.InnerringFailedToSetGroupSignerScope, zap.Error(err))
|
morphChain.log.Info(ctx, logs.InnerringFailedToSetGroupSignerScope, zap.Error(err))
|
||||||
}
|
}
|
||||||
|
|
||||||
return morphChain, nil
|
return morphChain, nil
|
||||||
|
|
|
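Note: the hunks above thread a context.Context through initialization so that every log call can carry request-scoped values. A minimal sketch of a context-aware logging facade in that style (assuming zap; the wrapper type and the trace-ID key are hypothetical illustrations, not the repository's logger package):

package logging

import (
	"context"

	"go.uber.org/zap"
)

// ctxKey is a hypothetical private key type for context values.
type ctxKey struct{}

// WithTraceID returns a child context carrying a trace identifier.
func WithTraceID(ctx context.Context, id string) context.Context {
	return context.WithValue(ctx, ctxKey{}, id)
}

// Logger sketches a context-aware wrapper over *zap.Logger.
type Logger struct{ z *zap.Logger }

// Info logs msg, attaching the trace ID if the context carries one.
func (l *Logger) Info(ctx context.Context, msg string, fields ...zap.Field) {
	if id, ok := ctx.Value(ctxKey{}).(string); ok {
		fields = append(fields, zap.String("trace_id", id))
	}
	l.z.Info(msg, fields...)
}

With such a facade, a call like s.log.Info(ctx, logs.InnerringNoControlServerEndpointSpecified) can enrich the record later without touching the call sites again.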
@@ -140,10 +140,10 @@ var (
 
 // Start runs all event providers.
 func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
-	s.setHealthStatus(control.HealthStatus_STARTING)
+	s.setHealthStatus(ctx, control.HealthStatus_STARTING)
 	defer func() {
 		if err == nil {
-			s.setHealthStatus(control.HealthStatus_READY)
+			s.setHealthStatus(ctx, control.HealthStatus_READY)
 		}
 	}()
 

@@ -152,12 +152,12 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
 		return err
 	}
 
-	err = s.initConfigFromBlockchain()
+	err = s.initConfigFromBlockchain(ctx)
 	if err != nil {
 		return err
 	}
 
-	if s.IsAlphabet() {
+	if s.IsAlphabet(ctx) {
 		err = s.initMainNotary(ctx)
 		if err != nil {
 			return err

@@ -173,14 +173,14 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
 		prm.Validators = s.predefinedValidators
 
 		// vote for sidechain validator if it is prepared in config
-		err = s.voteForSidechainValidator(prm)
+		err = s.voteForSidechainValidator(ctx, prm)
 		if err != nil {
 			// we don't stop inner ring execution on this error
-			s.log.Warn(logs.InnerringCantVoteForPreparedValidators,
+			s.log.Warn(ctx, logs.InnerringCantVoteForPreparedValidators,
 				zap.String("error", err.Error()))
 		}
 
-	s.tickInitialExpoch()
+	s.tickInitialExpoch(ctx)
 
 	morphErr := make(chan error)
 	mainnnetErr := make(chan error)

@@ -217,14 +217,14 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
 }
 
 func (s *Server) registerMorphNewBlockEventHandler() {
-	s.morphListener.RegisterBlockHandler(func(b *block.Block) {
-		s.log.Debug(logs.InnerringNewBlock,
+	s.morphListener.RegisterBlockHandler(func(ctx context.Context, b *block.Block) {
+		s.log.Debug(ctx, logs.InnerringNewBlock,
 			zap.Uint32("index", b.Index),
 		)
 
 		err := s.persistate.SetUInt32(persistateSideChainLastBlockKey, b.Index)
 		if err != nil {
-			s.log.Warn(logs.InnerringCantUpdatePersistentState,
+			s.log.Warn(ctx, logs.InnerringCantUpdatePersistentState,
 				zap.String("chain", "side"),
 				zap.Uint32("block_index", b.Index))
 		}

@@ -235,10 +235,10 @@ func (s *Server) registerMorphNewBlockEventHandler() {
 
 func (s *Server) registerMainnetNewBlockEventHandler() {
 	if !s.withoutMainNet {
-		s.mainnetListener.RegisterBlockHandler(func(b *block.Block) {
+		s.mainnetListener.RegisterBlockHandler(func(ctx context.Context, b *block.Block) {
 			err := s.persistate.SetUInt32(persistateMainChainLastBlockKey, b.Index)
 			if err != nil {
-				s.log.Warn(logs.InnerringCantUpdatePersistentState,
+				s.log.Warn(ctx, logs.InnerringCantUpdatePersistentState,
 					zap.String("chain", "main"),
 					zap.Uint32("block_index", b.Index))
 			}

@@ -283,11 +283,11 @@ func (s *Server) initSideNotary(ctx context.Context) error {
 	)
 }
 
-func (s *Server) tickInitialExpoch() {
+func (s *Server) tickInitialExpoch(ctx context.Context) {
 	initialEpochTicker := timer.NewOneTickTimer(
 		timer.StaticBlockMeter(s.initialEpochTickDelta),
 		func() {
-			s.netmapProcessor.HandleNewEpochTick(timerEvent.NewEpochTick{})
+			s.netmapProcessor.HandleNewEpochTick(ctx, timerEvent.NewEpochTick{})
 		})
 	s.addBlockTimer(initialEpochTicker)
 }

@@ -299,15 +299,15 @@ func (s *Server) startWorkers(ctx context.Context) {
 }
 
 // Stop closes all subscription channels.
-func (s *Server) Stop() {
-	s.setHealthStatus(control.HealthStatus_SHUTTING_DOWN)
+func (s *Server) Stop(ctx context.Context) {
+	s.setHealthStatus(ctx, control.HealthStatus_SHUTTING_DOWN)
 
 	go s.morphListener.Stop()
 	go s.mainnetListener.Stop()
 
 	for _, c := range s.closers {
 		if err := c(); err != nil {
-			s.log.Warn(logs.InnerringCloserError,
+			s.log.Warn(ctx, logs.InnerringCloserError,
 				zap.String("error", err.Error()),
 			)
 		}

@@ -349,7 +349,7 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan
 		return nil, err
 	}
 
-	server.setHealthStatus(control.HealthStatus_HEALTH_STATUS_UNDEFINED)
+	server.setHealthStatus(ctx, control.HealthStatus_HEALTH_STATUS_UNDEFINED)
 
 	// parse notary support
 	server.feeConfig = config.NewFeeConfig(cfg)

@@ -376,7 +376,7 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan
 		return nil, err
 	}
 
-	server.initNotaryConfig()
+	server.initNotaryConfig(ctx)
 
 	err = server.initContracts(cfg)
 	if err != nil {

@@ -400,14 +400,14 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan
 		return nil, err
 	}
 
-	err = server.initProcessors(cfg, morphClients)
+	err = server.initProcessors(ctx, cfg, morphClients)
 	if err != nil {
 		return nil, err
 	}
 
-	server.initTimers(cfg)
+	server.initTimers(ctx, cfg)
 
-	err = server.initGRPCServer(cfg, log, audit)
+	err = server.initGRPCServer(ctx, cfg, log, audit)
 	if err != nil {
 		return nil, err
 	}

@@ -438,7 +438,7 @@ func createListener(ctx context.Context, cli *client.Client, p *chainParams) (ev
 	}
 
 	listener, err := event.NewListener(event.ListenerParams{
-		Logger:     &logger.Logger{Logger: p.log.With(zap.String("chain", p.name))},
+		Logger:     p.log.With(zap.String("chain", p.name)),
 		Subscriber: sub,
 	})
 	if err != nil {

@@ -573,7 +573,7 @@ func parseMultinetConfig(cfg *viper.Viper, m metrics.MultinetMetrics) internalNe
 	return nc
 }
 
-func (s *Server) initConfigFromBlockchain() error {
+func (s *Server) initConfigFromBlockchain(ctx context.Context) error {
 	// get current epoch
 	epoch, err := s.netmapClient.Epoch()
 	if err != nil {

@@ -602,9 +602,9 @@ func (s *Server) initConfigFromBlockchain() error {
 		return err
 	}
 
-	s.log.Debug(logs.InnerringReadConfigFromBlockchain,
-		zap.Bool("active", s.IsActive()),
-		zap.Bool("alphabet", s.IsAlphabet()),
+	s.log.Debug(ctx, logs.InnerringReadConfigFromBlockchain,
+		zap.Bool("active", s.IsActive(ctx)),
+		zap.Bool("alphabet", s.IsAlphabet(ctx)),
 		zap.Uint64("epoch", epoch),
 		zap.Uint32("precision", balancePrecision),
 		zap.Uint32("init_epoch_tick_delta", s.initialEpochTickDelta),

@@ -635,17 +635,17 @@ func (s *Server) nextEpochBlockDelta() (uint32, error) {
 // onlyAlphabet wrapper around event handler that executes it
 // only if inner ring node is alphabet node.
 func (s *Server) onlyAlphabetEventHandler(f event.Handler) event.Handler {
-	return func(ev event.Event) {
-		if s.IsAlphabet() {
-			f(ev)
+	return func(ctx context.Context, ev event.Event) {
+		if s.IsAlphabet(ctx) {
+			f(ctx, ev)
 		}
 	}
 }
 
-func (s *Server) newEpochTickHandlers() []newEpochHandler {
+func (s *Server) newEpochTickHandlers(ctx context.Context) []newEpochHandler {
 	newEpochHandlers := []newEpochHandler{
 		func() {
-			s.netmapProcessor.HandleNewEpochTick(timerEvent.NewEpochTick{})
+			s.netmapProcessor.HandleNewEpochTick(ctx, timerEvent.NewEpochTick{})
 		},
 	}
 
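With event.Handler now taking a context as its first argument, wrappers such as onlyAlphabetEventHandler become plain higher-order functions over the new signature. A minimal sketch of the same gating pattern (Handler and Event here are local stand-ins, not the repository's event package):

package main

import (
	"context"
	"fmt"
)

// Event and Handler are local stand-ins for the morph event package types.
type Event any
type Handler func(ctx context.Context, ev Event)

// gate returns a handler that invokes f only when allow(ctx) reports true,
// mirroring the shape of onlyAlphabetEventHandler in the hunk above.
func gate(allow func(context.Context) bool, f Handler) Handler {
	return func(ctx context.Context, ev Event) {
		if allow(ctx) {
			f(ctx, ev)
		}
	}
}

func main() {
	h := gate(
		func(context.Context) bool { return true }, // stand-in for s.IsAlphabet
		func(_ context.Context, ev Event) { fmt.Println("handled:", ev) },
	)
	h(context.Background(), "new epoch tick")
}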
@@ -28,38 +28,39 @@ const (
 	gasDivisor = 2
 )
 
-func (s *Server) depositMainNotary() (tx util.Uint256, err error) {
+func (s *Server) depositMainNotary(ctx context.Context) (tx util.Uint256, err error) {
 	depositAmount, err := client.CalculateNotaryDepositAmount(s.mainnetClient, gasMultiplier, gasDivisor)
 	if err != nil {
 		return util.Uint256{}, fmt.Errorf("could not calculate main notary deposit amount: %w", err)
 	}
 
 	return s.mainnetClient.DepositNotary(
+		ctx,
 		depositAmount,
 		uint32(s.epochDuration.Load())+notaryExtraBlocks,
 	)
 }
 
-func (s *Server) depositSideNotary() (util.Uint256, error) {
+func (s *Server) depositSideNotary(ctx context.Context) (util.Uint256, error) {
 	depositAmount, err := client.CalculateNotaryDepositAmount(s.morphClient, gasMultiplier, gasDivisor)
 	if err != nil {
 		return util.Uint256{}, fmt.Errorf("could not calculate side notary deposit amount: %w", err)
 	}
 
-	tx, _, err := s.morphClient.DepositEndlessNotary(depositAmount)
+	tx, _, err := s.morphClient.DepositEndlessNotary(ctx, depositAmount)
 	return tx, err
 }
 
-func (s *Server) notaryHandler(_ event.Event) {
+func (s *Server) notaryHandler(ctx context.Context, _ event.Event) {
 	if !s.mainNotaryConfig.disabled {
-		_, err := s.depositMainNotary()
+		_, err := s.depositMainNotary(ctx)
 		if err != nil {
-			s.log.Error(logs.InnerringCantMakeNotaryDepositInMainChain, zap.Error(err))
+			s.log.Error(ctx, logs.InnerringCantMakeNotaryDepositInMainChain, zap.Error(err))
 		}
 	}
 
-	if _, err := s.depositSideNotary(); err != nil {
-		s.log.Error(logs.InnerringCantMakeNotaryDepositInSideChain, zap.Error(err))
+	if _, err := s.depositSideNotary(ctx); err != nil {
+		s.log.Error(ctx, logs.InnerringCantMakeNotaryDepositInSideChain, zap.Error(err))
 	}
 }
 

@@ -72,7 +73,7 @@ func (s *Server) awaitSideNotaryDeposit(ctx context.Context, tx util.Uint256) er
 }
 
 func (s *Server) initNotary(ctx context.Context, deposit depositor, await awaiter, msg string) error {
-	tx, err := deposit()
+	tx, err := deposit(ctx)
 	if err != nil {
 		return err
 	}

@@ -81,11 +82,11 @@ func (s *Server) initNotary(ctx context.Context, deposit depositor, await awaite
 		// non-error deposit with an empty TX hash means
 		// that the deposit has already been made; no
 		// need to wait it.
-		s.log.Info(logs.InnerringNotaryDepositHasAlreadyBeenMade)
+		s.log.Info(ctx, logs.InnerringNotaryDepositHasAlreadyBeenMade)
 		return nil
 	}
 
-	s.log.Info(msg)
+	s.log.Info(ctx, msg)
 
 	return await(ctx, tx)
 }
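The deposit(ctx) and await(ctx, tx) call sites imply that the depositor and awaiter function types now take a context. A sketch of the shapes these types would have after this change (inferred from the call sites above; the declarations themselves lie outside this diff):

package notary

import (
	"context"

	"github.com/nspcc-dev/neo-go/pkg/util"
)

// depositor makes a notary deposit and returns the transaction hash;
// per the comment in initNotary, an empty hash with a nil error means
// the deposit has already been made.
type depositor func(ctx context.Context) (util.Uint256, error)

// awaiter blocks until the given transaction is accepted or ctx is done.
type awaiter func(ctx context.Context, tx util.Uint256) error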
@@ -1,6 +1,8 @@
 package alphabet
 
 import (
+	"context"
+
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"

@@ -8,16 +10,16 @@ import (
 	"go.uber.org/zap"
 )
 
-func (ap *Processor) HandleGasEmission(ev event.Event) {
+func (ap *Processor) HandleGasEmission(ctx context.Context, ev event.Event) {
 	_ = ev.(timers.NewAlphabetEmitTick)
-	ap.log.Info(logs.AlphabetTick, zap.String("type", "alphabet gas emit"))
+	ap.log.Info(ctx, logs.AlphabetTick, zap.String("type", "alphabet gas emit"))
 
 	// send event to the worker pool
 
-	err := processors.SubmitEvent(ap.pool, ap.metrics, "alphabet_emit_gas", ap.processEmit)
+	err := processors.SubmitEvent(ap.pool, ap.metrics, "alphabet_emit_gas", func() bool { return ap.processEmit(ctx) })
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		ap.log.Warn(logs.AlphabetAlphabetProcessorWorkerPoolDrained,
+		ap.log.Warn(ctx, logs.AlphabetAlphabetProcessorWorkerPoolDrained,
 			zap.Int("capacity", ap.pool.Cap()))
 	}
 }
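SubmitEvent accepts a parameterless func() bool, so the handler passes the context into the worker task by closure: func() bool { return ap.processEmit(ctx) }. A minimal sketch of that adapter pattern (submit and process are stand-ins for the pool plumbing, not the processors package):

package main

import (
	"context"
	"fmt"
)

// submit is a stand-in for processors.SubmitEvent: the pool only accepts
// parameterless func() bool tasks, so it cannot forward a context itself.
func submit(task func() bool) bool {
	return task()
}

// process stands in for a ctx-taking processor method such as processEmit.
func process(ctx context.Context) bool {
	select {
	case <-ctx.Done():
		return false // a captured context lets a long task observe shutdown
	default:
		fmt.Println("processing emission")
		return true
	}
}

func main() {
	ctx := context.Background()
	// The closure captures ctx, adapting process to the pool's task shape.
	ok := submit(func() bool { return process(ctx) })
	fmt.Println("handled:", ok)
}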
@@ -1,6 +1,7 @@
 package alphabet_test
 
 import (
+	"context"
 	"testing"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"

@@ -60,7 +61,7 @@ func TestProcessorEmitsGasToNetmapAndAlphabet(t *testing.T) {
 	processor, err := alphabet.New(params)
 	require.NoError(t, err, "failed to create processor instance")
 
-	processor.HandleGasEmission(timers.NewAlphabetEmitTick{})
+	processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{})
 
 	processor.WaitPoolRunning()
 

@@ -137,7 +138,7 @@ func TestProcessorEmitsGasToNetmapIfNoParsedWallets(t *testing.T) {
 	processor, err := alphabet.New(params)
 	require.NoError(t, err, "failed to create processor instance")
 
-	processor.HandleGasEmission(timers.NewAlphabetEmitTick{})
+	processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{})
 
 	processor.WaitPoolRunning()
 

@@ -198,7 +199,7 @@ func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) {
 	processor, err := alphabet.New(params)
 	require.NoError(t, err, "failed to create processor instance")
 
-	processor.HandleGasEmission(timers.NewAlphabetEmitTick{})
+	processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{})
 
 	processor.WaitPoolRunning()
 

@@ -219,7 +220,7 @@ type testIndexer struct {
 	index int
 }
 
-func (i *testIndexer) AlphabetIndex() int {
+func (i *testIndexer) AlphabetIndex(context.Context) int {
 	return i.index
 }
 

@@ -246,7 +247,7 @@ type testMorphClient struct {
 	batchTransferedGas []batchTransferGas
 }
 
-func (c *testMorphClient) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error) {
+func (c *testMorphClient) Invoke(_ context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error) {
 	c.invokedMethods = append(c.invokedMethods,
 		invokedMethod{
 			contract: contract,
@@ -1,6 +1,7 @@
 package alphabet
 
 import (
+	"context"
 	"crypto/elliptic"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"

@@ -13,39 +14,39 @@ import (
 
 const emitMethod = "emit"
 
-func (ap *Processor) processEmit() bool {
-	index := ap.irList.AlphabetIndex()
+func (ap *Processor) processEmit(ctx context.Context) bool {
+	index := ap.irList.AlphabetIndex(ctx)
 	if index < 0 {
-		ap.log.Info(logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent)
+		ap.log.Info(ctx, logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent)
 
 		return true
 	}
 
 	contract, ok := ap.alphabetContracts.GetByIndex(index)
 	if !ok {
-		ap.log.Debug(logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent,
+		ap.log.Debug(ctx, logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent,
 			zap.Int("index", index))
 
 		return false
 	}
 
 	// there is no signature collecting, so we don't need extra fee
-	_, err := ap.morphClient.Invoke(contract, 0, emitMethod)
+	_, err := ap.morphClient.Invoke(ctx, contract, 0, emitMethod)
 	if err != nil {
-		ap.log.Warn(logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error()))
+		ap.log.Warn(ctx, logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error()))
 
 		return false
 	}
 
 	if ap.storageEmission == 0 {
-		ap.log.Info(logs.AlphabetStorageNodeEmissionIsOff)
+		ap.log.Info(ctx, logs.AlphabetStorageNodeEmissionIsOff)
 
 		return true
 	}
 
 	networkMap, err := ap.netmapClient.NetMap()
 	if err != nil {
-		ap.log.Warn(logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes,
+		ap.log.Warn(ctx, logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes,
 			zap.String("error", err.Error()))
 
 		return false

@@ -58,7 +59,7 @@ func (ap *Processor) processEmit() bool {
 	ap.pwLock.RUnlock()
 	extraLen := len(pw)
 
-	ap.log.Debug(logs.AlphabetGasEmission,
+	ap.log.Debug(ctx, logs.AlphabetGasEmission,
 		zap.Int("network_map", nmLen),
 		zap.Int("extra_wallets", extraLen))
 

@@ -68,20 +69,20 @@ func (ap *Processor) processEmit() bool {
 
 	gasPerNode := fixedn.Fixed8(ap.storageEmission / uint64(nmLen+extraLen))
 
-	ap.transferGasToNetmapNodes(nmNodes, gasPerNode)
+	ap.transferGasToNetmapNodes(ctx, nmNodes, gasPerNode)
 
-	ap.transferGasToExtraNodes(pw, gasPerNode)
+	ap.transferGasToExtraNodes(ctx, pw, gasPerNode)
 
 	return true
 }
 
-func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerNode fixedn.Fixed8) {
+func (ap *Processor) transferGasToNetmapNodes(ctx context.Context, nmNodes []netmap.NodeInfo, gasPerNode fixedn.Fixed8) {
 	for i := range nmNodes {
 		keyBytes := nmNodes[i].PublicKey()
 
 		key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256())
 		if err != nil {
-			ap.log.Warn(logs.AlphabetCantParseNodePublicKey,
+			ap.log.Warn(ctx, logs.AlphabetCantParseNodePublicKey,
 				zap.String("error", err.Error()))
 
 			continue

@@ -89,7 +90,7 @@ func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerN
 
 		err = ap.morphClient.TransferGas(key.GetScriptHash(), gasPerNode)
 		if err != nil {
-			ap.log.Warn(logs.AlphabetCantTransferGas,
+			ap.log.Warn(ctx, logs.AlphabetCantTransferGas,
 				zap.String("receiver", key.Address()),
 				zap.Int64("amount", int64(gasPerNode)),
 				zap.String("error", err.Error()),

@@ -98,7 +99,7 @@ func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerN
 	}
 }
 
-func (ap *Processor) transferGasToExtraNodes(pw []util.Uint160, gasPerNode fixedn.Fixed8) {
+func (ap *Processor) transferGasToExtraNodes(ctx context.Context, pw []util.Uint160, gasPerNode fixedn.Fixed8) {
 	if len(pw) > 0 {
 		err := ap.morphClient.BatchTransferGas(pw, gasPerNode)
 		if err != nil {

@@ -106,7 +107,7 @@ func (ap *Processor) transferGasToExtraNodes(pw []util.Uint160, gasPerNode fixed
 			for i, addr := range pw {
 				receiversLog[i] = addr.StringLE()
 			}
-			ap.log.Warn(logs.AlphabetCantTransferGasToWallet,
+			ap.log.Warn(ctx, logs.AlphabetCantTransferGasToWallet,
 				zap.Strings("receivers", receiversLog),
 				zap.Int64("amount", int64(gasPerNode)),
 				zap.String("error", err.Error()),
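processEmit splits the configured storage emission evenly across receivers: gasPerNode = storageEmission / (nmLen + extraLen), in Fixed8 units with integer truncation. A small worked sketch of that arithmetic (the amounts are illustrative only, not taken from any configuration):

package main

import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
)

func main() {
	// Illustrative values: 100 GAS of storage emission in Fixed8 units
	// (1 GAS = 1e8), split across 6 netmap nodes plus 2 extra wallets.
	storageEmission := uint64(10_000_000_000)
	nmLen, extraLen := 6, 2

	// Same integer division as processEmit; any remainder is truncated.
	gasPerNode := fixedn.Fixed8(storageEmission / uint64(nmLen+extraLen))
	fmt.Println(gasPerNode) // prints 12.5
}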
@@ -1,12 +1,12 @@
 package alphabet
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"sync"
 	"time"
 
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"

@@ -14,13 +14,12 @@ import (
 	"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
 	"github.com/nspcc-dev/neo-go/pkg/util"
 	"github.com/panjf2000/ants/v2"
-	"go.uber.org/zap"
 )
 
 type (
 	// Indexer is a callback interface for inner ring global state.
 	Indexer interface {
-		AlphabetIndex() int
+		AlphabetIndex(context.Context) int
 	}
 
 	// Contracts is an interface of the storage

@@ -40,7 +39,7 @@ type (
 	}
 
 	morphClient interface {
-		Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error)
+		Invoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error)
 		TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error
 		BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8) error
 	}

@@ -85,8 +84,6 @@ func New(p *Params) (*Processor, error) {
 		return nil, errors.New("ir/alphabet: global state is not set")
 	}
 
-	p.Log.Debug(logs.AlphabetAlphabetWorkerPool, zap.Int("size", p.PoolSize))
-
 	pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
 	if err != nil {
 		return nil, fmt.Errorf("ir/frostfs: can't create worker pool: %w", err)
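Narrowing callback interfaces such as Indexer to AlphabetIndex(context.Context) int forces every implementation, including test doubles, to change in lockstep. A compile-time assertion is a common Go idiom for keeping such mocks honest (the stub type below is hypothetical, not the repository's test code):

package alphabet

import "context"

// Indexer mirrors the narrowed interface from the hunk above.
type Indexer interface {
	AlphabetIndex(context.Context) int
}

// stubIndexer is a hypothetical test double.
type stubIndexer struct{ index int }

func (s *stubIndexer) AlphabetIndex(context.Context) int { return s.index }

// The blank assignment fails to compile if stubIndexer drifts from Indexer.
var _ Indexer = (*stubIndexer)(nil)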
@@ -1,6 +1,7 @@
 package balance
 
 import (
+	"context"
 	"encoding/hex"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"

@@ -10,20 +11,20 @@ import (
 	"go.uber.org/zap"
 )
 
-func (bp *Processor) handleLock(ev event.Event) {
+func (bp *Processor) handleLock(ctx context.Context, ev event.Event) {
 	lock := ev.(balanceEvent.Lock)
-	bp.log.Info(logs.Notification,
+	bp.log.Info(ctx, logs.Notification,
 		zap.String("type", "lock"),
 		zap.String("value", hex.EncodeToString(lock.ID())))
 
 	// send an event to the worker pool
 
 	err := processors.SubmitEvent(bp.pool, bp.metrics, "lock", func() bool {
-		return bp.processLock(&lock)
+		return bp.processLock(ctx, &lock)
 	})
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		bp.log.Warn(logs.BalanceBalanceWorkerPoolDrained,
+		bp.log.Warn(ctx, logs.BalanceBalanceWorkerPoolDrained,
 			zap.Int("capacity", bp.pool.Cap()))
 	}
 }
@@ -1,6 +1,7 @@
 package balance
 
 import (
+	"context"
 	"testing"
 	"time"
 

@@ -30,7 +31,7 @@ func TestProcessorCallsFrostFSContractForLockEvent(t *testing.T) {
 	})
 	require.NoError(t, err, "failed to create processor")
 
-	processor.handleLock(balanceEvent.Lock{})
+	processor.handleLock(context.Background(), balanceEvent.Lock{})
 
 	for processor.pool.Running() > 0 {
 		time.Sleep(10 * time.Millisecond)

@@ -56,7 +57,7 @@ func TestProcessorDoesntCallFrostFSContractIfNotAlphabet(t *testing.T) {
 	})
 	require.NoError(t, err, "failed to create processor")
 
-	processor.handleLock(balanceEvent.Lock{})
+	processor.handleLock(context.Background(), balanceEvent.Lock{})
 
 	for processor.pool.Running() > 0 {
 		time.Sleep(10 * time.Millisecond)

@@ -69,7 +70,7 @@ type testAlphabetState struct {
 	isAlphabet bool
 }
 
-func (s *testAlphabetState) IsAlphabet() bool {
+func (s *testAlphabetState) IsAlphabet(context.Context) bool {
 	return s.isAlphabet
 }
 

@@ -83,7 +84,7 @@ type testFrostFSContractClient struct {
 	chequeCalls int
 }
 
-func (c *testFrostFSContractClient) Cheque(p frostfscontract.ChequePrm) error {
+func (c *testFrostFSContractClient) Cheque(_ context.Context, p frostfscontract.ChequePrm) error {
 	c.chequeCalls++
 	return nil
 }
@@ -1,6 +1,8 @@
 package balance
 
 import (
+	"context"
+
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	frostfsContract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
 	balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance"

@@ -9,9 +11,9 @@ import (
 
 // Process lock event by invoking Cheque method in main net to send assets
 // back to the withdraw issuer.
-func (bp *Processor) processLock(lock *balanceEvent.Lock) bool {
-	if !bp.alphabetState.IsAlphabet() {
-		bp.log.Info(logs.BalanceNonAlphabetModeIgnoreBalanceLock)
+func (bp *Processor) processLock(ctx context.Context, lock *balanceEvent.Lock) bool {
+	if !bp.alphabetState.IsAlphabet(ctx) {
+		bp.log.Info(ctx, logs.BalanceNonAlphabetModeIgnoreBalanceLock)
 		return true
 	}
 

@@ -23,9 +25,9 @@ func (bp *Processor) processLock(lock *balanceEvent.Lock) bool {
 	prm.SetLock(lock.LockAccount())
 	prm.SetHash(lock.TxHash())
 
-	err := bp.frostfsClient.Cheque(prm)
+	err := bp.frostfsClient.Cheque(ctx, prm)
 	if err != nil {
-		bp.log.Error(logs.BalanceCantSendLockAssetTx, zap.Error(err))
+		bp.log.Error(ctx, logs.BalanceCantSendLockAssetTx, zap.Error(err))
 		return false
 	}
 
@@ -1,10 +1,10 @@
 package balance
 
 import (
+	"context"
 	"errors"
 	"fmt"
 
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
 	frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"

@@ -12,13 +12,12 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
 	"github.com/nspcc-dev/neo-go/pkg/util"
 	"github.com/panjf2000/ants/v2"
-	"go.uber.org/zap"
 )
 
 type (
 	// AlphabetState is a callback interface for inner ring global state.
 	AlphabetState interface {
-		IsAlphabet() bool
+		IsAlphabet(context.Context) bool
 	}
 
 	// PrecisionConverter converts balance amount values.

@@ -27,7 +26,7 @@ type (
 	}
 
 	FrostFSClient interface {
-		Cheque(p frostfscontract.ChequePrm) error
+		Cheque(ctx context.Context, p frostfscontract.ChequePrm) error
 	}
 
 	// Processor of events produced by balance contract in the morphchain.

@@ -68,8 +67,6 @@ func New(p *Params) (*Processor, error) {
 		return nil, errors.New("ir/balance: balance precision converter is not set")
 	}
 
-	p.Log.Debug(logs.BalanceBalanceWorkerPool, zap.Int("size", p.PoolSize))
-
 	pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
 	if err != nil {
 		return nil, fmt.Errorf("ir/balance: can't create worker pool: %w", err)
@@ -1,6 +1,7 @@
 package container
 
 import (
+	"context"
 	"crypto/sha256"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"

@@ -11,40 +12,40 @@ import (
 	"go.uber.org/zap"
 )
 
-func (cp *Processor) handlePut(ev event.Event) {
+func (cp *Processor) handlePut(ctx context.Context, ev event.Event) {
 	put := ev.(putEvent)
 
 	id := sha256.Sum256(put.Container())
-	cp.log.Info(logs.Notification,
+	cp.log.Info(ctx, logs.Notification,
 		zap.String("type", "container put"),
 		zap.String("id", base58.Encode(id[:])))
 
 	// send an event to the worker pool
 
 	err := processors.SubmitEvent(cp.pool, cp.metrics, "container_put", func() bool {
-		return cp.processContainerPut(put)
+		return cp.processContainerPut(ctx, put)
 	})
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained,
+		cp.log.Warn(ctx, logs.ContainerContainerProcessorWorkerPoolDrained,
 			zap.Int("capacity", cp.pool.Cap()))
 	}
 }
 
-func (cp *Processor) handleDelete(ev event.Event) {
+func (cp *Processor) handleDelete(ctx context.Context, ev event.Event) {
 	del := ev.(containerEvent.Delete)
-	cp.log.Info(logs.Notification,
+	cp.log.Info(ctx, logs.Notification,
 		zap.String("type", "container delete"),
 		zap.String("id", base58.Encode(del.ContainerID())))
 
 	// send an event to the worker pool
 
 	err := processors.SubmitEvent(cp.pool, cp.metrics, "container_delete", func() bool {
-		return cp.processContainerDelete(del)
+		return cp.processContainerDelete(ctx, del)
 	})
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained,
+		cp.log.Warn(ctx, logs.ContainerContainerProcessorWorkerPoolDrained,
 			zap.Int("capacity", cp.pool.Cap()))
 	}
 }
@@ -1,6 +1,7 @@
 package container
 
 import (
+	"context"
 	"crypto/ecdsa"
 	"encoding/hex"
 	"testing"

@@ -71,7 +72,7 @@ func TestPutEvent(t *testing.T) {
 		nr: nr,
 	}
 
-	proc.handlePut(event)
+	proc.handlePut(context.Background(), event)
 
 	for proc.pool.Running() > 0 {
 		time.Sleep(10 * time.Millisecond)

@@ -143,7 +144,7 @@ func TestDeleteEvent(t *testing.T) {
 		Signature: signature,
 	}
 
-	proc.handleDelete(ev)
+	proc.handleDelete(context.Background(), ev)
 
 	for proc.pool.Running() > 0 {
 		time.Sleep(10 * time.Millisecond)

@@ -160,7 +161,7 @@ type testAlphabetState struct {
 	isAlphabet bool
 }
 
-func (s *testAlphabetState) IsAlphabet() bool {
+func (s *testAlphabetState) IsAlphabet(context.Context) bool {
 	return s.isAlphabet
 }
 
@@ -1,6 +1,7 @@
 package container
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"strings"

@@ -36,27 +37,27 @@ var errContainerAndOwnerNamespaceDontMatch = errors.New("container and owner nam
 
 // Process a new container from the user by checking the container sanity
 // and sending approve tx back to the morph.
-func (cp *Processor) processContainerPut(put putEvent) bool {
-	if !cp.alphabetState.IsAlphabet() {
-		cp.log.Info(logs.ContainerNonAlphabetModeIgnoreContainerPut)
+func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool {
+	if !cp.alphabetState.IsAlphabet(ctx) {
+		cp.log.Info(ctx, logs.ContainerNonAlphabetModeIgnoreContainerPut)
 		return true
 	}
 
-	ctx := &putContainerContext{
+	pctx := &putContainerContext{
 		e: put,
 	}
 
-	err := cp.checkPutContainer(ctx)
+	err := cp.checkPutContainer(pctx)
 	if err != nil {
-		cp.log.Error(logs.ContainerPutContainerCheckFailed,
+		cp.log.Error(ctx, logs.ContainerPutContainerCheckFailed,
 			zap.String("error", err.Error()),
 		)
 
 		return false
 	}
 
-	if err := cp.morphClient.NotarySignAndInvokeTX(ctx.e.NotaryRequest().MainTransaction); err != nil {
-		cp.log.Error(logs.ContainerCouldNotApprovePutContainer,
+	if err := cp.morphClient.NotarySignAndInvokeTX(pctx.e.NotaryRequest().MainTransaction); err != nil {
+		cp.log.Error(ctx, logs.ContainerCouldNotApprovePutContainer,
 			zap.String("error", err.Error()),
 		)
 		return false

@@ -103,15 +104,15 @@ func (cp *Processor) checkPutContainer(ctx *putContainerContext) error {
 
 // Process delete container operation from the user by checking container sanity
 // and sending approve tx back to morph.
-func (cp *Processor) processContainerDelete(e containerEvent.Delete) bool {
-	if !cp.alphabetState.IsAlphabet() {
-		cp.log.Info(logs.ContainerNonAlphabetModeIgnoreContainerDelete)
+func (cp *Processor) processContainerDelete(ctx context.Context, e containerEvent.Delete) bool {
+	if !cp.alphabetState.IsAlphabet(ctx) {
+		cp.log.Info(ctx, logs.ContainerNonAlphabetModeIgnoreContainerDelete)
 		return true
 	}
 
 	err := cp.checkDeleteContainer(e)
 	if err != nil {
-		cp.log.Error(logs.ContainerDeleteContainerCheckFailed,
+		cp.log.Error(ctx, logs.ContainerDeleteContainerCheckFailed,
 			zap.String("error", err.Error()),
 		)
 

@@ -119,7 +120,7 @@ func (cp *Processor) processContainerDelete(e containerEvent.Delete) bool {
 	}
 
 	if err := cp.morphClient.NotarySignAndInvokeTX(e.NotaryRequest().MainTransaction); err != nil {
-		cp.log.Error(logs.ContainerCouldNotApproveDeleteContainer,
+		cp.log.Error(ctx, logs.ContainerCouldNotApproveDeleteContainer,
 			zap.String("error", err.Error()),
 		)
 
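Note the rename inside processContainerPut: the local putContainerContext variable used to be called ctx, which would now shadow the incoming context.Context, so it becomes pctx. A minimal sketch of the shadowing hazard the rename avoids (types here are stand-ins):

package container

import "context"

// putContainerContext stands in for the event-carrying struct in the diff.
type putContainerContext struct{}

func process(ctx context.Context) {
	// Before the rename, `ctx := &putContainerContext{}` shadowed the
	// context.Context parameter, making it unreachable below that point.
	pctx := &putContainerContext{}
	_ = pctx
	_ = ctx // the real context stays available for log calls and clients
}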
@@ -1,11 +1,11 @@
 package container
 
 import (
+	"context"
 	"errors"
 	"fmt"
 
 	frostfsidclient "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"

@@ -15,13 +15,12 @@ import (
 	"github.com/nspcc-dev/neo-go/pkg/core/transaction"
 	"github.com/nspcc-dev/neo-go/pkg/util"
 	"github.com/panjf2000/ants/v2"
-	"go.uber.org/zap"
 )
 
 type (
 	// AlphabetState is a callback interface for inner ring global state.
 	AlphabetState interface {
-		IsAlphabet() bool
+		IsAlphabet(context.Context) bool
 	}
 
 	ContClient interface {

@@ -97,8 +96,6 @@ func New(p *Params) (*Processor, error) {
 		return nil, errors.New("ir/container: FrostFSID client is not set")
 	}
 
-	p.Log.Debug(logs.ContainerContainerWorkerPool, zap.Int("size", p.PoolSize))
-
 	pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
 	if err != nil {
 		return nil, fmt.Errorf("ir/container: can't create worker pool: %w", err)
@@ -2,6 +2,7 @@ package frostfs
 
 import (
 	"bytes"
+	"context"
 	"encoding/hex"
 	"slices"
 

@@ -12,67 +13,67 @@ import (
 	"go.uber.org/zap"
 )
 
-func (np *Processor) handleDeposit(ev event.Event) {
+func (np *Processor) handleDeposit(ctx context.Context, ev event.Event) {
 	deposit := ev.(frostfsEvent.Deposit)
 	depositIDBin := bytes.Clone(deposit.ID())
 	slices.Reverse(depositIDBin)
-	np.log.Info(logs.Notification,
+	np.log.Info(ctx, logs.Notification,
 		zap.String("type", "deposit"),
 		zap.String("id", hex.EncodeToString(depositIDBin)))
 
 	// send event to the worker pool
 
 	err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_deposit", func() bool {
-		return np.processDeposit(deposit)
+		return np.processDeposit(ctx, deposit)
 	})
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
+		np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained,
 			zap.Int("capacity", np.pool.Cap()))
 	}
 }
 
-func (np *Processor) handleWithdraw(ev event.Event) {
+func (np *Processor) handleWithdraw(ctx context.Context, ev event.Event) {
 	withdraw := ev.(frostfsEvent.Withdraw)
 	withdrawBin := bytes.Clone(withdraw.ID())
 	slices.Reverse(withdrawBin)
-	np.log.Info(logs.Notification,
+	np.log.Info(ctx, logs.Notification,
 		zap.String("type", "withdraw"),
 		zap.String("id", hex.EncodeToString(withdrawBin)))
 
 	// send event to the worker pool
 
 	err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_withdraw", func() bool {
-		return np.processWithdraw(withdraw)
+		return np.processWithdraw(ctx, withdraw)
 	})
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
+		np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained,
 			zap.Int("capacity", np.pool.Cap()))
 	}
 }
 
-func (np *Processor) handleCheque(ev event.Event) {
+func (np *Processor) handleCheque(ctx context.Context, ev event.Event) {
 	cheque := ev.(frostfsEvent.Cheque)
-	np.log.Info(logs.Notification,
+	np.log.Info(ctx, logs.Notification,
 		zap.String("type", "cheque"),
 		zap.String("id", hex.EncodeToString(cheque.ID())))
 
 	// send event to the worker pool
 
 	err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_cheque", func() bool {
-		return np.processCheque(cheque)
+		return np.processCheque(ctx, cheque)
 	})
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
+		np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained,
 			zap.Int("capacity", np.pool.Cap()))
 	}
 }
 
-func (np *Processor) handleConfig(ev event.Event) {
+func (np *Processor) handleConfig(ctx context.Context, ev event.Event) {
 	cfg := ev.(frostfsEvent.Config)
-	np.log.Info(logs.Notification,
+	np.log.Info(ctx, logs.Notification,
 		zap.String("type", "set config"),
 		zap.String("key", hex.EncodeToString(cfg.Key())),
 		zap.String("value", hex.EncodeToString(cfg.Value())))

@@ -80,11 +81,11 @@ func (np *Processor) handleConfig(ev event.Event) {
 	// send event to the worker pool
 
 	err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_config", func() bool {
-		return np.processConfig(cfg)
+		return np.processConfig(ctx, cfg)
 	})
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
+		np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained,
 			zap.Int("capacity", np.pool.Cap()))
 	}
 }
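handleDeposit clones the deposit ID before reversing it for display: slices.Reverse works in place, and the clone keeps the event's backing array intact. A small sketch of the pattern (the ID bytes are illustrative):

package main

import (
	"bytes"
	"encoding/hex"
	"fmt"
	"slices"
)

func main() {
	id := []byte{0x01, 0x02, 0x03, 0x04}

	// Clone first: slices.Reverse mutates its argument in place.
	idBin := bytes.Clone(id)
	slices.Reverse(idBin)

	fmt.Println(hex.EncodeToString(id))    // 01020304, original untouched
	fmt.Println(hex.EncodeToString(idBin)) // 04030201, reversed copy for the log
}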
@@ -1,6 +1,7 @@
 package frostfs
 
 import (
+	"context"
 	"testing"
 	"time"
 

@@ -36,7 +37,7 @@ func TestHandleDeposit(t *testing.T) {
 		AmountValue: 1000,
 	}
 
-	proc.handleDeposit(ev)
+	proc.handleDeposit(context.Background(), ev)
 
 	for proc.pool.Running() > 0 {
 		time.Sleep(10 * time.Millisecond)

@@ -57,7 +58,7 @@ func TestHandleDeposit(t *testing.T) {
 
 	es.epochCounter = 109
 
-	proc.handleDeposit(ev)
+	proc.handleDeposit(context.Background(), ev)
 
 	for proc.pool.Running() > 0 {
 		time.Sleep(10 * time.Millisecond)

@@ -98,7 +99,7 @@ func TestHandleWithdraw(t *testing.T) {
 		AmountValue: 1000,
 	}
 
-	proc.handleWithdraw(ev)
+	proc.handleWithdraw(context.Background(), ev)
 
 	for proc.pool.Running() > 0 {
 		time.Sleep(10 * time.Millisecond)

@@ -139,7 +140,7 @@ func TestHandleCheque(t *testing.T) {
 		LockValue: util.Uint160{200},
 	}
 
-	proc.handleCheque(ev)
+	proc.handleCheque(context.Background(), ev)
 
 	for proc.pool.Running() > 0 {
 		time.Sleep(10 * time.Millisecond)

@@ -176,7 +177,7 @@ func TestHandleConfig(t *testing.T) {
 		TxHashValue: util.Uint256{100},
 	}
 
-	proc.handleConfig(ev)
+	proc.handleConfig(context.Background(), ev)
 
 	for proc.pool.Running() > 0 {
 		time.Sleep(10 * time.Millisecond)

@@ -225,7 +226,7 @@ type testAlphabetState struct {
 	isAlphabet bool
 }
 
-func (s *testAlphabetState) IsAlphabet() bool {
+func (s *testAlphabetState) IsAlphabet(context.Context) bool {
 	return s.isAlphabet
 }
 

@@ -241,17 +242,17 @@ type testBalaceClient struct {
 	burn []balance.BurnPrm
 }
 
-func (c *testBalaceClient) Mint(p balance.MintPrm) error {
+func (c *testBalaceClient) Mint(_ context.Context, p balance.MintPrm) error {
 	c.mint = append(c.mint, p)
 	return nil
 }
 
-func (c *testBalaceClient) Lock(p balance.LockPrm) error {
+func (c *testBalaceClient) Lock(_ context.Context, p balance.LockPrm) error {
 	c.lock = append(c.lock, p)
 	return nil
 }
 
-func (c *testBalaceClient) Burn(p balance.BurnPrm) error {
+func (c *testBalaceClient) Burn(_ context.Context, p balance.BurnPrm) error {
 	c.burn = append(c.burn, p)
 	return nil
 }

@@ -260,7 +261,7 @@ type testNetmapClient struct {
 	config []nmClient.SetConfigPrm
 }
 
-func (c *testNetmapClient) SetConfig(p nmClient.SetConfigPrm) error {
+func (c *testNetmapClient) SetConfig(_ context.Context, p nmClient.SetConfigPrm) error {
 	c.config = append(c.config, p)
 	return nil
 }
@@ -1,6 +1,8 @@
 package frostfs

 import (
+	"context"
+
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
 	frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
@@ -15,9 +17,9 @@

 // Process deposit event by invoking a balance contract and sending native
 // gas in the sidechain.
-func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
-	if !np.alphabetState.IsAlphabet() {
-		np.log.Info(logs.FrostFSNonAlphabetModeIgnoreDeposit)
+func (np *Processor) processDeposit(ctx context.Context, deposit frostfsEvent.Deposit) bool {
+	if !np.alphabetState.IsAlphabet(ctx) {
+		np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreDeposit)
 		return true
 	}

@@ -28,9 +30,9 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
 	prm.SetID(deposit.ID())

 	// send transferX to a balance contract
-	err := np.balanceClient.Mint(prm)
+	err := np.balanceClient.Mint(ctx, prm)
 	if err != nil {
-		np.log.Error(logs.FrostFSCantTransferAssetsToBalanceContract, zap.Error(err))
+		np.log.Error(ctx, logs.FrostFSCantTransferAssetsToBalanceContract, zap.Error(err))
 	}

 	curEpoch := np.epochState.EpochCounter()
@@ -44,7 +46,7 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {

 	val, ok := np.mintEmitCache.Get(receiver.String())
 	if ok && val+np.mintEmitThreshold >= curEpoch {
-		np.log.Warn(logs.FrostFSDoubleMintEmissionDeclined,
+		np.log.Warn(ctx, logs.FrostFSDoubleMintEmissionDeclined,
 			zap.Stringer("receiver", receiver),
 			zap.Uint64("last_emission", val),
 			zap.Uint64("current_epoch", curEpoch))
@@ -56,12 +58,12 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
 	// before gas transfer check if the balance is greater than the threshold
 	balance, err := np.morphClient.GasBalance()
 	if err != nil {
-		np.log.Error(logs.FrostFSCantGetGasBalanceOfTheNode, zap.Error(err))
+		np.log.Error(ctx, logs.FrostFSCantGetGasBalanceOfTheNode, zap.Error(err))
 		return false
 	}

 	if balance < np.gasBalanceThreshold {
-		np.log.Warn(logs.FrostFSGasBalanceThresholdHasBeenReached,
+		np.log.Warn(ctx, logs.FrostFSGasBalanceThresholdHasBeenReached,
 			zap.Int64("balance", balance),
 			zap.Int64("threshold", np.gasBalanceThreshold))

@@ -70,7 +72,7 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {

 	err = np.morphClient.TransferGas(receiver, np.mintEmitValue)
 	if err != nil {
-		np.log.Error(logs.FrostFSCantTransferNativeGasToReceiver,
+		np.log.Error(ctx, logs.FrostFSCantTransferNativeGasToReceiver,
 			zap.String("error", err.Error()))

 		return false
@@ -82,16 +84,16 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
 }

 // Process withdraw event by locking assets in the balance account.
-func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool {
-	if !np.alphabetState.IsAlphabet() {
-		np.log.Info(logs.FrostFSNonAlphabetModeIgnoreWithdraw)
+func (np *Processor) processWithdraw(ctx context.Context, withdraw frostfsEvent.Withdraw) bool {
+	if !np.alphabetState.IsAlphabet(ctx) {
+		np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreWithdraw)
 		return true
 	}

 	// create lock account
 	lock, err := util.Uint160DecodeBytesBE(withdraw.ID()[:util.Uint160Size])
 	if err != nil {
-		np.log.Error(logs.FrostFSCantCreateLockAccount, zap.Error(err))
+		np.log.Error(ctx, logs.FrostFSCantCreateLockAccount, zap.Error(err))
 		return false
 	}

@@ -105,9 +107,9 @@ func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool {
 	prm.SetAmount(np.converter.ToBalancePrecision(withdraw.Amount()))
 	prm.SetDueEpoch(int64(curEpoch + lockAccountLifetime))

-	err = np.balanceClient.Lock(prm)
+	err = np.balanceClient.Lock(ctx, prm)
 	if err != nil {
-		np.log.Error(logs.FrostFSCantLockAssetsForWithdraw, zap.Error(err))
+		np.log.Error(ctx, logs.FrostFSCantLockAssetsForWithdraw, zap.Error(err))
 		return false
 	}

@@ -116,9 +118,9 @@ func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool {

 // Process cheque event by transferring assets from the lock account back to
 // the reserve account.
-func (np *Processor) processCheque(cheque frostfsEvent.Cheque) bool {
-	if !np.alphabetState.IsAlphabet() {
-		np.log.Info(logs.FrostFSNonAlphabetModeIgnoreCheque)
+func (np *Processor) processCheque(ctx context.Context, cheque frostfsEvent.Cheque) bool {
+	if !np.alphabetState.IsAlphabet(ctx) {
+		np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreCheque)
 		return true
 	}

@@ -128,9 +130,9 @@ func (np *Processor) processCheque(cheque frostfsEvent.Cheque) bool {
 	prm.SetAmount(np.converter.ToBalancePrecision(cheque.Amount()))
 	prm.SetID(cheque.ID())

-	err := np.balanceClient.Burn(prm)
+	err := np.balanceClient.Burn(ctx, prm)
 	if err != nil {
-		np.log.Error(logs.FrostFSCantTransferAssetsToFedContract, zap.Error(err))
+		np.log.Error(ctx, logs.FrostFSCantTransferAssetsToFedContract, zap.Error(err))
 		return false
 	}

@@ -1,6 +1,8 @@
 package frostfs

 import (
+	"context"
+
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
 	frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
@@ -9,9 +11,9 @@ import (

 // Process config event by setting configuration value from the mainchain in
 // the sidechain.
-func (np *Processor) processConfig(config frostfsEvent.Config) bool {
-	if !np.alphabetState.IsAlphabet() {
-		np.log.Info(logs.FrostFSNonAlphabetModeIgnoreConfig)
+func (np *Processor) processConfig(ctx context.Context, config frostfsEvent.Config) bool {
+	if !np.alphabetState.IsAlphabet(ctx) {
+		np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreConfig)
 		return true
 	}

@@ -22,9 +24,9 @@ func (np *Processor) processConfig(config frostfsEvent.Config) bool {
 	prm.SetValue(config.Value())
 	prm.SetHash(config.TxHash())

-	err := np.netmapClient.SetConfig(prm)
+	err := np.netmapClient.SetConfig(ctx, prm)
 	if err != nil {
-		np.log.Error(logs.FrostFSCantRelaySetConfigEvent, zap.Error(err))
+		np.log.Error(ctx, logs.FrostFSCantRelaySetConfigEvent, zap.Error(err))
 		return false
 	}

@@ -1,11 +1,11 @@
 package frostfs

 import (
+	"context"
 	"errors"
 	"fmt"
 	"sync"

-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
 	nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
@@ -16,7 +16,6 @@ import (
 	"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
 	"github.com/nspcc-dev/neo-go/pkg/util"
 	"github.com/panjf2000/ants/v2"
-	"go.uber.org/zap"
 )

 type (
@@ -27,7 +26,7 @@ type (

 	// AlphabetState is a callback interface for inner ring global state.
 	AlphabetState interface {
-		IsAlphabet() bool
+		IsAlphabet(context.Context) bool
 	}

 	// PrecisionConverter converts balance amount values.
@@ -36,13 +35,13 @@ type (
 	}

 	BalanceClient interface {
-		Mint(p balance.MintPrm) error
-		Lock(p balance.LockPrm) error
-		Burn(p balance.BurnPrm) error
+		Mint(ctx context.Context, p balance.MintPrm) error
+		Lock(ctx context.Context, p balance.LockPrm) error
+		Burn(ctx context.Context, p balance.BurnPrm) error
 	}

 	NetmapClient interface {
-		SetConfig(p nmClient.SetConfigPrm) error
+		SetConfig(ctx context.Context, p nmClient.SetConfigPrm) error
 	}

 	MorphClient interface {
@@ -110,8 +109,6 @@ func New(p *Params) (*Processor, error) {
 		return nil, errors.New("ir/frostfs: balance precision converter is not set")
 	}

-	p.Log.Debug(logs.FrostFSFrostfsWorkerPool, zap.Int("size", p.PoolSize))
-
 	pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
 	if err != nil {
 		return nil, fmt.Errorf("ir/frostfs: can't create worker pool: %w", err)
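The interface changes above are what ripple through the rest of the diff: every implementation, including test doubles, has to grow a context parameter. A small sketch of that pattern under assumed, simplified types (MintPrm here is a hypothetical stand-in for balance.MintPrm):

package main

import (
	"context"
	"fmt"
)

// MintPrm stands in for balance.MintPrm from the morph client packages.
type MintPrm struct{ Amount int64 }

// BalanceClient matches the shape of the interface after this change:
// every mutating call now takes a context as its first argument.
type BalanceClient interface {
	Mint(ctx context.Context, p MintPrm) error
}

// testBalanceClient records calls, exactly like the doubles in the
// *_test.go diffs: the context is accepted but deliberately ignored.
type testBalanceClient struct{ mint []MintPrm }

func (c *testBalanceClient) Mint(_ context.Context, p MintPrm) error {
	c.mint = append(c.mint, p)
	return nil
}

func main() {
	var bc BalanceClient = &testBalanceClient{}
	_ = bc.Mint(context.Background(), MintPrm{Amount: 1000})
	fmt.Println("recorded:", bc.(*testBalanceClient).mint)
}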
@@ -1,6 +1,8 @@
 package governance

 import (
+	"context"
+
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
@@ -11,7 +13,7 @@ import (
 	"go.uber.org/zap"
 )

-func (gp *Processor) HandleAlphabetSync(e event.Event) {
+func (gp *Processor) HandleAlphabetSync(ctx context.Context, e event.Event) {
 	var (
 		typ  string
 		hash util.Uint256
@@ -32,16 +34,16 @@ func (gp *Processor) HandleAlphabetSync(e event.Event) {
 		return
 	}

-	gp.log.Info(logs.GovernanceNewEvent, zap.String("type", typ))
+	gp.log.Info(ctx, logs.GovernanceNewEvent, zap.String("type", typ))

 	// send event to the worker pool

 	err := processors.SubmitEvent(gp.pool, gp.metrics, "alphabet_sync", func() bool {
-		return gp.processAlphabetSync(hash)
+		return gp.processAlphabetSync(ctx, hash)
 	})
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		gp.log.Warn(logs.GovernanceGovernanceWorkerPoolDrained,
+		gp.log.Warn(ctx, logs.GovernanceGovernanceWorkerPoolDrained,
 			zap.Int("capacity", gp.pool.Cap()))
 	}
 }
@@ -1,6 +1,7 @@
 package governance

 import (
+	"context"
 	"encoding/binary"
 	"sort"
 	"testing"
@@ -57,7 +58,7 @@ func TestHandleAlphabetSyncEvent(t *testing.T) {
 		txHash: util.Uint256{100},
 	}

-	proc.HandleAlphabetSync(ev)
+	proc.HandleAlphabetSync(context.Background(), ev)

 	for proc.pool.Running() > 0 {
 		time.Sleep(10 * time.Millisecond)
@@ -133,7 +134,7 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) {
 		Role: noderoles.NeoFSAlphabet,
 	}

-	proc.HandleAlphabetSync(ev)
+	proc.HandleAlphabetSync(context.Background(), ev)

 	for proc.pool.Running() > 0 {
 		time.Sleep(10 * time.Millisecond)
@@ -218,7 +219,7 @@ type testAlphabetState struct {
 	isAlphabet bool
 }

-func (s *testAlphabetState) IsAlphabet() bool {
+func (s *testAlphabetState) IsAlphabet(context.Context) bool {
 	return s.isAlphabet
 }

@@ -226,7 +227,7 @@ type testVoter struct {
 	votes []VoteValidatorPrm
 }

-func (v *testVoter) VoteForSidechainValidator(prm VoteValidatorPrm) error {
+func (v *testVoter) VoteForSidechainValidator(_ context.Context, prm VoteValidatorPrm) error {
 	v.votes = append(v.votes, prm)
 	return nil
 }
@@ -250,12 +251,12 @@ func (c *testMorphClient) Committee() (res keys.PublicKeys, err error) {
 	return c.commiteeKeys, nil
 }

-func (c *testMorphClient) UpdateNeoFSAlphabetList(prm client.UpdateAlphabetListPrm) error {
+func (c *testMorphClient) UpdateNeoFSAlphabetList(_ context.Context, prm client.UpdateAlphabetListPrm) error {
 	c.alphabetUpdates = append(c.alphabetUpdates, prm)
 	return nil
 }

-func (c *testMorphClient) UpdateNotaryList(prm client.UpdateNotaryListPrm) error {
+func (c *testMorphClient) UpdateNotaryList(_ context.Context, prm client.UpdateNotaryListPrm) error {
 	c.notaryUpdates = append(c.notaryUpdates, prm)
 	return nil
 }
@@ -277,7 +278,7 @@ type testFrostFSClient struct {
 	updates []frostfscontract.AlphabetUpdatePrm
 }

-func (c *testFrostFSClient) AlphabetUpdate(p frostfscontract.AlphabetUpdatePrm) error {
+func (c *testFrostFSClient) AlphabetUpdate(_ context.Context, p frostfscontract.AlphabetUpdatePrm) error {
 	c.updates = append(c.updates, p)
 	return nil
 }
@@ -1,6 +1,7 @@
 package governance

 import (
+	"context"
 	"encoding/binary"
 	"encoding/hex"
 	"sort"
@@ -18,39 +19,39 @@ const (
 	alphabetUpdateIDPrefix = "AlphabetUpdate"
 )

-func (gp *Processor) processAlphabetSync(txHash util.Uint256) bool {
-	if !gp.alphabetState.IsAlphabet() {
-		gp.log.Info(logs.GovernanceNonAlphabetModeIgnoreAlphabetSync)
+func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint256) bool {
+	if !gp.alphabetState.IsAlphabet(ctx) {
+		gp.log.Info(ctx, logs.GovernanceNonAlphabetModeIgnoreAlphabetSync)
 		return true
 	}

 	mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList()
 	if err != nil {
-		gp.log.Error(logs.GovernanceCantFetchAlphabetListFromMainNet,
+		gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromMainNet,
 			zap.String("error", err.Error()))
 		return false
 	}

 	sidechainAlphabet, err := gp.morphClient.Committee()
 	if err != nil {
-		gp.log.Error(logs.GovernanceCantFetchAlphabetListFromSideChain,
+		gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromSideChain,
 			zap.String("error", err.Error()))
 		return false
 	}

 	newAlphabet, err := newAlphabetList(sidechainAlphabet, mainnetAlphabet)
 	if err != nil {
-		gp.log.Error(logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain,
+		gp.log.Error(ctx, logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain,
 			zap.String("error", err.Error()))
 		return false
 	}

 	if newAlphabet == nil {
-		gp.log.Info(logs.GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged)
+		gp.log.Info(ctx, logs.GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged)
 		return true
 	}

-	gp.log.Info(logs.GovernanceAlphabetListHasBeenChangedStartingUpdate,
+	gp.log.Info(ctx, logs.GovernanceAlphabetListHasBeenChangedStartingUpdate,
 		zap.String("side_chain_alphabet", prettyKeys(sidechainAlphabet)),
 		zap.String("new_alphabet", prettyKeys(newAlphabet)),
 	)
@@ -61,22 +62,22 @@ func (gp *Processor) processAlphabetSync(txHash util.Uint256) bool {
 	}

 	// 1. Vote to sidechain committee via alphabet contracts.
-	err = gp.voter.VoteForSidechainValidator(votePrm)
+	err = gp.voter.VoteForSidechainValidator(ctx, votePrm)
 	if err != nil {
-		gp.log.Error(logs.GovernanceCantVoteForSideChainCommittee,
+		gp.log.Error(ctx, logs.GovernanceCantVoteForSideChainCommittee,
 			zap.String("error", err.Error()))
 	}

 	// 2. Update NeoFSAlphabet role in the sidechain.
-	gp.updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAlphabet, txHash)
+	gp.updateNeoFSAlphabetRoleInSidechain(ctx, sidechainAlphabet, newAlphabet, txHash)

 	// 3. Update notary role in the sidechain.
-	gp.updateNotaryRoleInSidechain(newAlphabet, txHash)
+	gp.updateNotaryRoleInSidechain(ctx, newAlphabet, txHash)

 	// 4. Update FrostFS contract in the mainnet.
-	gp.updateFrostFSContractInMainnet(newAlphabet)
+	gp.updateFrostFSContractInMainnet(ctx, newAlphabet)

-	gp.log.Info(logs.GovernanceFinishedAlphabetListUpdate)
+	gp.log.Info(ctx, logs.GovernanceFinishedAlphabetListUpdate)

 	return true
 }
@@ -93,24 +94,24 @@ func prettyKeys(keys keys.PublicKeys) string {
 	return strings.TrimRight(sb.String(), delimiter)
 }

-func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) {
+func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) {
 	innerRing, err := gp.irFetcher.InnerRingKeys()
 	if err != nil {
-		gp.log.Error(logs.GovernanceCantFetchInnerRingListFromSideChain,
+		gp.log.Error(ctx, logs.GovernanceCantFetchInnerRingListFromSideChain,
 			zap.String("error", err.Error()))
 		return
 	}

 	newInnerRing, err := updateInnerRing(innerRing, sidechainAlphabet, newAlphabet)
 	if err != nil {
-		gp.log.Error(logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys,
+		gp.log.Error(ctx, logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys,
 			zap.String("error", err.Error()))
 		return
 	}

 	sort.Sort(newInnerRing)

-	gp.log.Info(logs.GovernanceUpdateOfTheInnerRingList,
+	gp.log.Info(ctx, logs.GovernanceUpdateOfTheInnerRingList,
 		zap.String("before", prettyKeys(innerRing)),
 		zap.String("after", prettyKeys(newInnerRing)),
 	)
@@ -119,26 +120,26 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAl
 	updPrm.SetList(newInnerRing)
 	updPrm.SetHash(txHash)

-	if err = gp.morphClient.UpdateNeoFSAlphabetList(updPrm); err != nil {
-		gp.log.Error(logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys,
+	if err = gp.morphClient.UpdateNeoFSAlphabetList(ctx, updPrm); err != nil {
+		gp.log.Error(ctx, logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys,
 			zap.String("error", err.Error()))
 	}
 }

-func (gp *Processor) updateNotaryRoleInSidechain(newAlphabet keys.PublicKeys, txHash util.Uint256) {
+func (gp *Processor) updateNotaryRoleInSidechain(ctx context.Context, newAlphabet keys.PublicKeys, txHash util.Uint256) {
 	updPrm := client.UpdateNotaryListPrm{}

 	updPrm.SetList(newAlphabet)
 	updPrm.SetHash(txHash)

-	err := gp.morphClient.UpdateNotaryList(updPrm)
+	err := gp.morphClient.UpdateNotaryList(ctx, updPrm)
 	if err != nil {
-		gp.log.Error(logs.GovernanceCantUpdateListOfNotaryNodesInSideChain,
+		gp.log.Error(ctx, logs.GovernanceCantUpdateListOfNotaryNodesInSideChain,
 			zap.String("error", err.Error()))
 	}
 }

-func (gp *Processor) updateFrostFSContractInMainnet(newAlphabet keys.PublicKeys) {
+func (gp *Processor) updateFrostFSContractInMainnet(ctx context.Context, newAlphabet keys.PublicKeys) {
 	epoch := gp.epochState.EpochCounter()

 	buf := make([]byte, 8)
@@ -151,9 +152,9 @@ func (gp *Processor) updateFrostFSContractInMainnet(newAlphabet keys.PublicKeys)
 	prm.SetID(id)
 	prm.SetPubs(newAlphabet)

-	err := gp.frostfsClient.AlphabetUpdate(prm)
+	err := gp.frostfsClient.AlphabetUpdate(ctx, prm)
 	if err != nil {
-		gp.log.Error(logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract,
+		gp.log.Error(ctx, logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract,
 			zap.String("error", err.Error()))
 	}
 }
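Note the control flow that processAlphabetSync keeps after the change: the four update steps remain best-effort, so a failed step is logged through the context-aware logger and the remaining steps still run. A compact sketch of that flow with hypothetical step names:

package main

import (
	"context"
	"fmt"
)

// step is a hypothetical stand-in for the four update steps above
// (vote, alphabet role, notary role, mainnet contract).
type step struct {
	name string
	run  func(ctx context.Context) error
}

// runSync mirrors the best-effort control flow of processAlphabetSync:
// a step's failure is logged but does not abort the steps that follow.
func runSync(ctx context.Context, steps []step) {
	for _, s := range steps {
		if err := s.run(ctx); err != nil {
			fmt.Printf("can't %s: %v\n", s.name, err)
		}
	}
}

func main() {
	runSync(context.Background(), []step{
		{"vote for side chain committee", func(context.Context) error { return nil }},
		{"update inner ring list", func(context.Context) error { return fmt.Errorf("rpc timeout") }},
		{"update notary list", func(context.Context) error { return nil }},
		{"update alphabet in frostfs contract", func(context.Context) error { return nil }},
	})
}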
@@ -1,6 +1,7 @@
 package governance

 import (
+	"context"
 	"errors"
 	"fmt"

@@ -25,7 +26,7 @@ const ProcessorPoolSize = 1
 type (
 	// AlphabetState is a callback interface for innerring global state.
 	AlphabetState interface {
-		IsAlphabet() bool
+		IsAlphabet(context.Context) bool
 	}
 )

@@ -38,7 +39,7 @@ type VoteValidatorPrm struct {

 // Voter is a callback interface for alphabet contract voting.
 type Voter interface {
-	VoteForSidechainValidator(VoteValidatorPrm) error
+	VoteForSidechainValidator(context.Context, VoteValidatorPrm) error
 }

 type (
@@ -55,7 +56,7 @@ type (
 	}

 	FrostFSClient interface {
-		AlphabetUpdate(p frostfscontract.AlphabetUpdatePrm) error
+		AlphabetUpdate(ctx context.Context, p frostfscontract.AlphabetUpdatePrm) error
 	}

 	NetmapClient interface {
@@ -69,8 +70,8 @@ type (

 	MorphClient interface {
 		Committee() (res keys.PublicKeys, err error)
-		UpdateNeoFSAlphabetList(prm client.UpdateAlphabetListPrm) error
-		UpdateNotaryList(prm client.UpdateNotaryListPrm) error
+		UpdateNeoFSAlphabetList(ctx context.Context, prm client.UpdateAlphabetListPrm) error
+		UpdateNotaryList(ctx context.Context, prm client.UpdateNotaryListPrm) error
 	}

 	// Processor of events related to governance in the network.
@@ -1,6 +1,7 @@
 package netmap

 import (
+	"context"
 	"encoding/hex"

 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -11,93 +12,93 @@ import (
 	"go.uber.org/zap"
 )

-func (np *Processor) HandleNewEpochTick(ev event.Event) {
+func (np *Processor) HandleNewEpochTick(ctx context.Context, ev event.Event) {
 	_ = ev.(timerEvent.NewEpochTick)
-	np.log.Info(logs.NetmapTick, zap.String("type", "epoch"))
+	np.log.Info(ctx, logs.NetmapTick, zap.String("type", "epoch"))

 	// send an event to the worker pool

-	err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch_tick", np.processNewEpochTick)
+	err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch_tick", func() bool { return np.processNewEpochTick(ctx) })
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
+		np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
 			zap.Int("capacity", np.pool.Cap()))
 	}
 }

-func (np *Processor) handleNewEpoch(ev event.Event) {
+func (np *Processor) handleNewEpoch(ctx context.Context, ev event.Event) {
 	epochEvent := ev.(netmapEvent.NewEpoch)
-	np.log.Info(logs.Notification,
+	np.log.Info(ctx, logs.Notification,
 		zap.String("type", "new epoch"),
 		zap.Uint64("value", epochEvent.EpochNumber()))

 	// send an event to the worker pool

 	err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch", func() bool {
-		return np.processNewEpoch(epochEvent)
+		return np.processNewEpoch(ctx, epochEvent)
 	})
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
+		np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
 			zap.Int("capacity", np.pool.Cap()))
 	}
 }

-func (np *Processor) handleAddPeer(ev event.Event) {
+func (np *Processor) handleAddPeer(ctx context.Context, ev event.Event) {
 	newPeer := ev.(netmapEvent.AddPeer)

-	np.log.Info(logs.Notification,
+	np.log.Info(ctx, logs.Notification,
 		zap.String("type", "add peer"),
 	)

 	// send an event to the worker pool

 	err := processors.SubmitEvent(np.pool, np.metrics, "netmap_add_peer", func() bool {
-		return np.processAddPeer(newPeer)
+		return np.processAddPeer(ctx, newPeer)
 	})
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
+		np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
 			zap.Int("capacity", np.pool.Cap()))
 	}
 }

-func (np *Processor) handleUpdateState(ev event.Event) {
+func (np *Processor) handleUpdateState(ctx context.Context, ev event.Event) {
 	updPeer := ev.(netmapEvent.UpdatePeer)
-	np.log.Info(logs.Notification,
+	np.log.Info(ctx, logs.Notification,
 		zap.String("type", "update peer state"),
 		zap.String("key", hex.EncodeToString(updPeer.PublicKey().Bytes())))

 	// send event to the worker pool

 	err := processors.SubmitEvent(np.pool, np.metrics, "netmap_update_peer", func() bool {
-		return np.processUpdatePeer(updPeer)
+		return np.processUpdatePeer(ctx, updPeer)
 	})
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
+		np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
 			zap.Int("capacity", np.pool.Cap()))
 	}
 }

-func (np *Processor) handleCleanupTick(ev event.Event) {
+func (np *Processor) handleCleanupTick(ctx context.Context, ev event.Event) {
 	if !np.netmapSnapshot.enabled {
-		np.log.Debug(logs.NetmapNetmapCleanUpRoutineIsDisabled518)
+		np.log.Debug(ctx, logs.NetmapNetmapCleanUpRoutineIsDisabled518)

 		return
 	}

 	cleanup := ev.(netmapCleanupTick)

-	np.log.Info(logs.NetmapTick, zap.String("type", "netmap cleaner"))
+	np.log.Info(ctx, logs.NetmapTick, zap.String("type", "netmap cleaner"))

 	// send event to the worker pool
 	err := processors.SubmitEvent(np.pool, np.metrics, "netmap_cleanup_tick", func() bool {
-		return np.processNetmapCleanupTick(cleanup)
+		return np.processNetmapCleanupTick(ctx, cleanup)
 	})
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
+		np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
 			zap.Int("capacity", np.pool.Cap()))
 	}
 }
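All five netmap handlers follow one shape: assert the concrete event type, log it, then submit a ctx-capturing closure. A reduced sketch of the handler signature after this change, with stand-in event types (Event and NewEpoch here only mirror, and are not, the morph event types):

package main

import (
	"context"
	"fmt"
)

// Event mirrors the morph event.Event marker interface: handlers receive
// a generic event and assert the concrete type they were registered for.
type Event interface{}

type NewEpochTick struct{}
type NewEpoch struct{ Num uint64 }

// Handler matches the post-change shape: context first, then the event.
type Handler func(ctx context.Context, ev Event)

func handleNewEpoch(ctx context.Context, ev Event) {
	// The assertion panics on a mismatched event type, which is why each
	// handler is wired only to its own notification in the listener setup.
	epochEvent := ev.(NewEpoch)
	fmt.Println("new epoch:", epochEvent.Num)
}

func main() {
	var h Handler = handleNewEpoch
	h(context.Background(), NewEpoch{Num: 101})
}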
@@ -1,6 +1,7 @@
 package netmap

 import (
+	"context"
 	"fmt"
 	"testing"
 	"time"
@@ -38,7 +39,7 @@ func TestNewEpochTick(t *testing.T) {
 	require.NoError(t, err, "failed to create processor")

 	ev := timerEvent.NewEpochTick{}
-	proc.HandleNewEpochTick(ev)
+	proc.HandleNewEpochTick(context.Background(), ev)

 	for proc.pool.Running() > 0 {
 		time.Sleep(10 * time.Millisecond)
@@ -90,7 +91,7 @@ func TestNewEpoch(t *testing.T) {
 		Num:  101,
 		Hash: util.Uint256{101},
 	}
-	proc.handleNewEpoch(ev)
+	proc.handleNewEpoch(context.Background(), ev)

 	for proc.pool.Running() > 0 {
 		time.Sleep(10 * time.Millisecond)
@@ -130,7 +131,7 @@ func TestAddPeer(t *testing.T) {
 			MainTransaction: &transaction.Transaction{},
 		},
 	}
-	proc.handleAddPeer(ev)
+	proc.handleAddPeer(context.Background(), ev)

 	for proc.pool.Running() > 0 {
 		time.Sleep(10 * time.Millisecond)
@@ -145,7 +146,7 @@ func TestAddPeer(t *testing.T) {
 			MainTransaction: &transaction.Transaction{},
 		},
 	}
-	proc.handleAddPeer(ev)
+	proc.handleAddPeer(context.Background(), ev)

 	for proc.pool.Running() > 0 {
 		time.Sleep(10 * time.Millisecond)
@@ -188,7 +189,7 @@ func TestUpdateState(t *testing.T) {
 			MainTransaction: &transaction.Transaction{},
 		},
 	}
-	proc.handleUpdateState(ev)
+	proc.handleUpdateState(context.Background(), ev)

 	for proc.pool.Running() > 0 {
 		time.Sleep(10 * time.Millisecond)
@@ -232,7 +233,7 @@ func TestCleanupTick(t *testing.T) {
 		txHash: util.Uint256{123},
 	}

-	proc.handleCleanupTick(ev)
+	proc.handleCleanupTick(context.Background(), ev)

 	for proc.pool.Running() > 0 {
 		time.Sleep(10 * time.Millisecond)
@@ -340,7 +341,7 @@ type testAlphabetState struct {
 	isAlphabet bool
 }

-func (s *testAlphabetState) IsAlphabet() bool {
+func (s *testAlphabetState) IsAlphabet(context.Context) bool {
 	return s.isAlphabet
 }

@@ -364,7 +365,7 @@ type testNetmapClient struct {
 	invokedTxs []*transaction.Transaction
 }

-func (c *testNetmapClient) MorphNotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error {
+func (c *testNetmapClient) MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error {
 	c.notaryInvokes = append(c.notaryInvokes, notaryInvoke{
 		contract: contract,
 		fee:      fee,
@@ -395,7 +396,7 @@ func (c *testNetmapClient) NetMap() (*netmap.NetMap, error) {
 	return c.netmap, nil
 }

-func (c *testNetmapClient) NewEpoch(epoch uint64) error {
+func (c *testNetmapClient) NewEpoch(_ context.Context, epoch uint64) error {
 	c.newEpochs = append(c.newEpochs, epoch)
 	return nil
 }
@@ -413,6 +414,6 @@ type testEventHandler struct {
 	handledEvents []event.Event
 }

-func (h *testEventHandler) Handle(e event.Event) {
+func (h *testEventHandler) Handle(_ context.Context, e event.Event) {
 	h.handledEvents = append(h.handledEvents, e)
 }
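The tests synchronize with the worker pool by polling Running() rather than by channels, which is why every test above ends with the same sleep loop. A runnable sketch of that wait, assuming only the ants API:

package main

import (
	"fmt"
	"time"

	"github.com/panjf2000/ants/v2"
)

func main() {
	pool, err := ants.NewPool(4)
	if err != nil {
		panic(err)
	}
	defer pool.Release()

	_ = pool.Submit(func() { time.Sleep(20 * time.Millisecond) })

	// Same busy-wait the tests use: poll until no goroutine is running.
	for pool.Running() > 0 {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Println("all submitted tasks finished")
}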
@@ -1,15 +1,17 @@
 package netmap

 import (
+	"context"
+
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	v2netmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
 	"go.uber.org/zap"
 )

-func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool {
-	if !np.alphabetState.IsAlphabet() {
-		np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick)
+func (np *Processor) processNetmapCleanupTick(ctx context.Context, ev netmapCleanupTick) bool {
+	if !np.alphabetState.IsAlphabet(ctx) {
+		np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick)

 		return true
 	}
@@ -17,13 +19,13 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool {
 	err := np.netmapSnapshot.forEachRemoveCandidate(ev.epoch, func(s string) error {
 		key, err := keys.NewPublicKeyFromString(s)
 		if err != nil {
-			np.log.Warn(logs.NetmapCantDecodePublicKeyOfNetmapNode,
+			np.log.Warn(ctx, logs.NetmapCantDecodePublicKeyOfNetmapNode,
 				zap.String("key", s))

 			return nil
 		}

-		np.log.Info(logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", s))
+		np.log.Info(ctx, logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", s))

 		// In notary environments we call UpdateStateIR method instead of UpdateState.
 		// It differs from UpdateState only by name, so we can do this in the same form.
@@ -31,6 +33,7 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool {
 		const methodUpdateStateNotary = "updateStateIR"

 		err = np.netmapClient.MorphNotaryInvoke(
+			ctx,
 			np.netmapClient.ContractAddress(),
 			0,
 			uint32(ev.epoch),
@@ -39,13 +42,13 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool {
 			int64(v2netmap.Offline), key.Bytes(),
 		)
 		if err != nil {
-			np.log.Error(logs.NetmapCantInvokeNetmapUpdateState, zap.Error(err))
+			np.log.Error(ctx, logs.NetmapCantInvokeNetmapUpdateState, zap.Error(err))
 		}

 		return nil
 	})
 	if err != nil {
-		np.log.Warn(logs.NetmapCantIterateOnNetmapCleanerCache,
+		np.log.Warn(ctx, logs.NetmapCantIterateOnNetmapCleanerCache,
 			zap.String("error", err.Error()))
 		return false
 	}
@@ -1,6 +1,8 @@
 package netmap

 import (
+	"context"
+
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance"
 	netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
@@ -9,12 +11,12 @@ import (

 // Process new epoch notification by setting global epoch value and resetting
 // local epoch timer.
-func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) bool {
+func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoch) bool {
 	epoch := ev.EpochNumber()

 	epochDuration, err := np.netmapClient.EpochDuration()
 	if err != nil {
-		np.log.Warn(logs.NetmapCantGetEpochDuration,
+		np.log.Warn(ctx, logs.NetmapCantGetEpochDuration,
 			zap.String("error", err.Error()))
 	} else {
 		np.epochState.SetEpochDuration(epochDuration)
@@ -24,46 +26,46 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) bool {

 	h, err := np.netmapClient.MorphTxHeight(ev.TxHash())
 	if err != nil {
-		np.log.Warn(logs.NetmapCantGetTransactionHeight,
+		np.log.Warn(ctx, logs.NetmapCantGetTransactionHeight,
 			zap.String("hash", ev.TxHash().StringLE()),
 			zap.String("error", err.Error()))
 	}

 	if err := np.epochTimer.ResetEpochTimer(h); err != nil {
-		np.log.Warn(logs.NetmapCantResetEpochTimer,
+		np.log.Warn(ctx, logs.NetmapCantResetEpochTimer,
 			zap.String("error", err.Error()))
 	}

 	// get new netmap snapshot
 	networkMap, err := np.netmapClient.NetMap()
 	if err != nil {
-		np.log.Warn(logs.NetmapCantGetNetmapSnapshotToPerformCleanup,
+		np.log.Warn(ctx, logs.NetmapCantGetNetmapSnapshotToPerformCleanup,
 			zap.String("error", err.Error()))

 		return false
 	}

 	np.netmapSnapshot.update(*networkMap, epoch)
-	np.handleCleanupTick(netmapCleanupTick{epoch: epoch, txHash: ev.TxHash()})
-	np.handleAlphabetSync(governance.NewSyncEvent(ev.TxHash()))
-	np.handleNotaryDeposit(ev)
+	np.handleCleanupTick(ctx, netmapCleanupTick{epoch: epoch, txHash: ev.TxHash()})
+	np.handleAlphabetSync(ctx, governance.NewSyncEvent(ev.TxHash()))
+	np.handleNotaryDeposit(ctx, ev)

 	return true
 }

 // Process new epoch tick by invoking new epoch method in network map contract.
-func (np *Processor) processNewEpochTick() bool {
-	if !np.alphabetState.IsAlphabet() {
-		np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewEpochTick)
+func (np *Processor) processNewEpochTick(ctx context.Context) bool {
+	if !np.alphabetState.IsAlphabet(ctx) {
+		np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewEpochTick)
 		return true
 	}

 	nextEpoch := np.epochState.EpochCounter() + 1
-	np.log.Debug(logs.NetmapNextEpoch, zap.Uint64("value", nextEpoch))
+	np.log.Debug(ctx, logs.NetmapNextEpoch, zap.Uint64("value", nextEpoch))

-	err := np.netmapClient.NewEpoch(nextEpoch)
+	err := np.netmapClient.NewEpoch(ctx, nextEpoch)
 	if err != nil {
-		np.log.Error(logs.NetmapCantInvokeNetmapNewEpoch, zap.Error(err))
+		np.log.Error(ctx, logs.NetmapCantInvokeNetmapNewEpoch, zap.Error(err))
 		return false
 	}

@@ -1,6 +1,7 @@
 package netmap

 import (
+	"context"
 	"encoding/hex"

 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -12,9 +13,9 @@ import (

 // Process add peer notification by sanity check of new node
 // local epoch timer.
-func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
-	if !np.alphabetState.IsAlphabet() {
-		np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewPeerNotification)
+func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer) bool {
+	if !np.alphabetState.IsAlphabet(ctx) {
+		np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewPeerNotification)
 		return true
 	}

@@ -22,7 +23,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
 	tx := ev.NotaryRequest().MainTransaction
 	ok, err := np.netmapClient.MorphIsValidScript(tx.Script, tx.Signers)
 	if err != nil || !ok {
-		np.log.Warn(logs.NetmapNonhaltNotaryTransaction,
+		np.log.Warn(ctx, logs.NetmapNonhaltNotaryTransaction,
 			zap.String("method", "netmap.AddPeer"),
 			zap.String("hash", tx.Hash().StringLE()),
 			zap.Error(err))
@@ -33,14 +34,14 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
 	var nodeInfo netmap.NodeInfo
 	if err := nodeInfo.Unmarshal(ev.Node()); err != nil {
 		// it will be nice to have tx id at event structure to log it
-		np.log.Warn(logs.NetmapCantParseNetworkMapCandidate)
+		np.log.Warn(ctx, logs.NetmapCantParseNetworkMapCandidate)
 		return false
 	}

 	// validate and update node info
 	err = np.nodeValidator.VerifyAndUpdate(&nodeInfo)
 	if err != nil {
-		np.log.Warn(logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate,
+		np.log.Warn(ctx, logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate,
 			zap.String("error", err.Error()),
 		)

@@ -63,7 +64,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
 	// That is why we need to perform `addPeerIR` only in case when node is online,
 	// because in scope of this method, contract set state `ONLINE` for the node.
 	if updated && nodeInfo.Status().IsOnline() {
-		np.log.Info(logs.NetmapApprovingNetworkMapCandidate,
+		np.log.Info(ctx, logs.NetmapApprovingNetworkMapCandidate,
 			zap.String("key", keyString))

 		prm := netmapclient.AddPeerPrm{}
@@ -76,6 +77,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {

 		// create new notary request with the original nonce
 		err = np.netmapClient.MorphNotaryInvoke(
+			ctx,
 			np.netmapClient.ContractAddress(),
 			0,
 			ev.NotaryRequest().MainTransaction.Nonce,
@@ -84,7 +86,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
 			nodeInfoBinary,
 		)
 		if err != nil {
-			np.log.Error(logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err))
+			np.log.Error(ctx, logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err))
 			return false
 		}
 	}
@@ -93,9 +95,9 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
 }

 // Process update peer notification by sending approval tx to the smart contract.
-func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool {
-	if !np.alphabetState.IsAlphabet() {
-		np.log.Info(logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification)
+func (np *Processor) processUpdatePeer(ctx context.Context, ev netmapEvent.UpdatePeer) bool {
+	if !np.alphabetState.IsAlphabet(ctx) {
+		np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification)
 		return true
 	}

@@ -108,7 +110,7 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool {
 	if ev.Maintenance() {
 		err = np.nodeStateSettings.MaintenanceModeAllowed()
 		if err != nil {
-			np.log.Info(logs.NetmapPreventSwitchingNodeToMaintenanceState,
+			np.log.Info(ctx, logs.NetmapPreventSwitchingNodeToMaintenanceState,
 				zap.Error(err),
 			)

@@ -117,7 +119,7 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool {
 	}

 	if err = np.netmapClient.MorphNotarySignAndInvokeTX(ev.NotaryRequest().MainTransaction); err != nil {
-		np.log.Error(logs.NetmapCantInvokeNetmapUpdatePeer, zap.Error(err))
+		np.log.Error(ctx, logs.NetmapCantInvokeNetmapUpdatePeer, zap.Error(err))
 		return false
 	}

@@ -1,10 +1,10 @@
 package netmap

 import (
+	"context"
 	"errors"
 	"fmt"

-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
@@ -16,7 +16,6 @@ import (
 	"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
 	"github.com/nspcc-dev/neo-go/pkg/util"
 	"github.com/panjf2000/ants/v2"
-	"go.uber.org/zap"
 )

 type (
@@ -35,7 +34,7 @@ type (

 	// AlphabetState is a callback interface for inner ring global state.
 	AlphabetState interface {
-		IsAlphabet() bool
+		IsAlphabet(context.Context) bool
 	}

 	// NodeValidator wraps basic method of checking the correctness
@@ -54,12 +53,12 @@ type (
 	}

 	Client interface {
-		MorphNotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error
+		MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error
 		ContractAddress() util.Uint160
 		EpochDuration() (uint64, error)
 		MorphTxHeight(h util.Uint256) (res uint32, err error)
 		NetMap() (*netmap.NetMap, error)
-		NewEpoch(epoch uint64) error
+		NewEpoch(ctx context.Context, epoch uint64) error
 		MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error)
 		MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error
 	}
@@ -132,8 +131,6 @@ func New(p *Params) (*Processor, error) {
 		return nil, errors.New("ir/netmap: node state settings is not set")
 	}

-	p.Log.Debug(logs.NetmapNetmapWorkerPool, zap.Int("size", p.PoolSize))
-
 	pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
 	if err != nil {
 		return nil, fmt.Errorf("ir/netmap: can't create worker pool: %w", err)
@@ -1,6 +1,8 @@
package netmap

import (
"context"

netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
@@ -18,13 +20,13 @@ type netmapClientWrapper struct {
netmapClient *netmapclient.Client
}

func (w *netmapClientWrapper) UpdatePeerState(p netmapclient.UpdatePeerPrm) error {
func (w *netmapClientWrapper) UpdatePeerState(ctx context.Context, p netmapclient.UpdatePeerPrm) error {
_, err := w.netmapClient.UpdatePeerState(p)
_, err := w.netmapClient.UpdatePeerState(ctx, p)
return err
}

func (w *netmapClientWrapper) MorphNotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error {
func (w *netmapClientWrapper) MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error {
_, err := w.netmapClient.Morph().NotaryInvoke(contract, fee, nonce, vub, method, args...)
_, err := w.netmapClient.Morph().NotaryInvoke(ctx, contract, fee, nonce, vub, method, args...)
return err
}

@@ -44,16 +46,16 @@ func (w *netmapClientWrapper) NetMap() (*netmap.NetMap, error) {
return w.netmapClient.NetMap()
}

func (w *netmapClientWrapper) NewEpoch(epoch uint64) error {
func (w *netmapClientWrapper) NewEpoch(ctx context.Context, epoch uint64) error {
return w.netmapClient.NewEpoch(epoch)
return w.netmapClient.NewEpoch(ctx, epoch)
}

func (w *netmapClientWrapper) MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error) {
return w.netmapClient.Morph().IsValidScript(script, signers)
}

func (w *netmapClientWrapper) AddPeer(p netmapclient.AddPeerPrm) error {
func (w *netmapClientWrapper) AddPeer(ctx context.Context, p netmapclient.AddPeerPrm) error {
return w.netmapClient.AddPeer(p)
return w.netmapClient.AddPeer(ctx, p)
}

func (w *netmapClientWrapper) MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error {
@@ -1,6 +1,7 @@
package innerring

import (
"context"
"fmt"
"sort"

@@ -47,21 +48,21 @@ func (s *Server) SetEpochDuration(val uint64) {
}

// IsActive is a getter for a global active flag state.
func (s *Server) IsActive() bool {
func (s *Server) IsActive(ctx context.Context) bool {
return s.InnerRingIndex() >= 0
return s.InnerRingIndex(ctx) >= 0
}

// IsAlphabet is a getter for a global alphabet flag state.
func (s *Server) IsAlphabet() bool {
func (s *Server) IsAlphabet(ctx context.Context) bool {
return s.AlphabetIndex() >= 0
return s.AlphabetIndex(ctx) >= 0
}

// InnerRingIndex is a getter for a global index of node in inner ring list. Negative
// index means that node is not in the inner ring list.
func (s *Server) InnerRingIndex() int {
func (s *Server) InnerRingIndex(ctx context.Context) int {
index, err := s.statusIndex.InnerRingIndex()
if err != nil {
s.log.Error(logs.InnerringCantGetInnerRingIndex, zap.String("error", err.Error()))
s.log.Error(ctx, logs.InnerringCantGetInnerRingIndex, zap.String("error", err.Error()))
return -1
}

@@ -70,10 +71,10 @@ func (s *Server) InnerRingIndex() int {

// InnerRingSize is a getter for a global size of inner ring list. This value
// paired with inner ring index.
func (s *Server) InnerRingSize() int {
func (s *Server) InnerRingSize(ctx context.Context) int {
size, err := s.statusIndex.InnerRingSize()
if err != nil {
s.log.Error(logs.InnerringCantGetInnerRingSize, zap.String("error", err.Error()))
s.log.Error(ctx, logs.InnerringCantGetInnerRingSize, zap.String("error", err.Error()))
return 0
}

@@ -82,28 +83,28 @@ func (s *Server) InnerRingSize() int {

// AlphabetIndex is a getter for a global index of node in alphabet list.
// Negative index means that node is not in the alphabet list.
func (s *Server) AlphabetIndex() int {
func (s *Server) AlphabetIndex(ctx context.Context) int {
index, err := s.statusIndex.AlphabetIndex()
if err != nil {
s.log.Error(logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error()))
s.log.Error(ctx, logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error()))
return -1
}

return int(index)
}

func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) error {
func (s *Server) voteForSidechainValidator(ctx context.Context, prm governance.VoteValidatorPrm) error {
validators := prm.Validators

index := s.InnerRingIndex()
index := s.InnerRingIndex(ctx)
if s.contracts.alphabet.indexOutOfRange(index) {
s.log.Info(logs.InnerringIgnoreValidatorVoteNodeNotInAlphabetRange)
s.log.Info(ctx, logs.InnerringIgnoreValidatorVoteNodeNotInAlphabetRange)

return nil
}

if len(validators) == 0 {
s.log.Info(logs.InnerringIgnoreValidatorVoteEmptyValidatorsList)
s.log.Info(ctx, logs.InnerringIgnoreValidatorVoteEmptyValidatorsList)

return nil
}
@@ -126,9 +127,9 @@ func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) erro
}

s.contracts.alphabet.iterate(func(letter GlagoliticLetter, contract util.Uint160) {
_, err := s.morphClient.NotaryInvoke(contract, s.feeConfig.SideChainFee(), nonce, vubP, voteMethod, epoch, validators)
_, err := s.morphClient.NotaryInvoke(ctx, contract, s.feeConfig.SideChainFee(), nonce, vubP, voteMethod, epoch, validators)
if err != nil {
s.log.Warn(logs.InnerringCantInvokeVoteMethodInAlphabetContract,
s.log.Warn(ctx, logs.InnerringCantInvokeVoteMethodInAlphabetContract,
zap.Int8("alphabet_index", int8(letter)),
zap.Uint64("epoch", epoch),
zap.String("error", err.Error()))
@@ -140,9 +141,9 @@ func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) erro

// VoteForSidechainValidator calls vote method on alphabet contracts with
// the provided list of keys.
func (s *Server) VoteForSidechainValidator(prm governance.VoteValidatorPrm) error {
func (s *Server) VoteForSidechainValidator(ctx context.Context, prm governance.VoteValidatorPrm) error {
sort.Sort(prm.Validators)
return s.voteForSidechainValidator(prm)
return s.voteForSidechainValidator(ctx, prm)
}

// ResetEpochTimer resets the block timer that produces events to update epoch
@@ -153,17 +154,17 @@ func (s *Server) ResetEpochTimer(h uint32) error {
return s.epochTimer.Reset()
}

func (s *Server) setHealthStatus(hs control.HealthStatus) {
func (s *Server) setHealthStatus(ctx context.Context, hs control.HealthStatus) {
s.healthStatus.Store(int32(hs))
s.notifySystemd(hs)
s.notifySystemd(ctx, hs)
if s.irMetrics != nil {
s.irMetrics.SetHealth(int32(hs))
}
}

func (s *Server) CompareAndSwapHealthStatus(oldSt, newSt control.HealthStatus) (swapped bool) {
func (s *Server) CompareAndSwapHealthStatus(ctx context.Context, oldSt, newSt control.HealthStatus) (swapped bool) {
if swapped = s.healthStatus.CompareAndSwap(int32(oldSt), int32(newSt)); swapped {
s.notifySystemd(newSt)
s.notifySystemd(ctx, newSt)
if s.irMetrics != nil {
s.irMetrics.SetHealth(int32(newSt))
}
@@ -186,7 +187,7 @@ func initPersistentStateStorage(cfg *viper.Viper) (*state.PersistentStorage, err
return persistStorage, nil
}

func (s *Server) notifySystemd(st control.HealthStatus) {
func (s *Server) notifySystemd(ctx context.Context, st control.HealthStatus) {
if !s.sdNotify {
return
}
@@ -202,6 +203,6 @@ func (s *Server) notifySystemd(st control.HealthStatus) {
err = sdnotify.Status(fmt.Sprintf("%v", st))
}
if err != nil {
s.log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
s.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err))
}
}
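CompareAndSwapHealthStatus above guards reconfiguration against races: the status flips only if it still holds the expected old value, and the side effects (systemd notification, metrics) run only on a successful swap. A self-contained sketch of the same pattern with simplified types (the local HealthStatus enum and the print stand in for control.HealthStatus and the real side effects):

```go
package main

import (
	"context"
	"fmt"
	"sync/atomic"
)

type HealthStatus int32

const (
	HealthReady HealthStatus = iota
	HealthReconfiguring
)

type Server struct {
	healthStatus atomic.Int32
}

// CompareAndSwapHealthStatus flips the status only when the current value
// matches oldSt, so concurrent SIGHUP handlers cannot start two reconfigurations.
func (s *Server) CompareAndSwapHealthStatus(ctx context.Context, oldSt, newSt HealthStatus) bool {
	if !s.healthStatus.CompareAndSwap(int32(oldSt), int32(newSt)) {
		return false
	}
	// side effects happen only on a successful swap
	fmt.Println("status changed to", newSt)
	return true
}

func main() {
	s := &Server{}
	ctx := context.Background()
	fmt.Println(s.CompareAndSwapHealthStatus(ctx, HealthReady, HealthReconfiguring)) // true
	fmt.Println(s.CompareAndSwapHealthStatus(ctx, HealthReady, HealthReconfiguring)) // false: already reconfiguring
}
```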
@@ -1,6 +1,7 @@
package innerring

import (
"context"
"testing"
"time"

@@ -42,12 +43,12 @@ func TestServerState(t *testing.T) {
require.Equal(t, epochDuration, srv.EpochDuration(), "invalid epoch duration")

var healthStatus control.HealthStatus = control.HealthStatus_READY
srv.setHealthStatus(healthStatus)
srv.setHealthStatus(context.Background(), healthStatus)
require.Equal(t, healthStatus, srv.HealthStatus(), "invalid health status")

require.True(t, srv.IsActive(), "invalid IsActive result")
require.True(t, srv.IsActive(context.Background()), "invalid IsActive result")
require.True(t, srv.IsAlphabet(), "invalid IsAlphabet result")
require.True(t, srv.IsAlphabet(context.Background()), "invalid IsAlphabet result")
require.Equal(t, 0, srv.InnerRingIndex(), "invalid IR index")
require.Equal(t, 0, srv.InnerRingIndex(context.Background()), "invalid IR index")
require.Equal(t, 1, srv.InnerRingSize(), "invalid IR index")
require.Equal(t, 1, srv.InnerRingSize(context.Background()), "invalid IR index")
require.Equal(t, 0, srv.AlphabetIndex(), "invalid alphabet index")
require.Equal(t, 0, srv.AlphabetIndex(context.Background()), "invalid alphabet index")
}
@@ -58,7 +58,7 @@ func defaultCfg(c *cfg) {
},
fullSizeLimit: 1 << 30, // 1GB
objSizeLimit: 1 << 20, // 1MB
log: &logger.Logger{Logger: zap.L()},
log: logger.NewLoggerWrapper(zap.L()),
metrics: &NoopMetrics{},
}
}

@@ -110,7 +110,7 @@ func WithFullSizeLimit(lim uint64) Option {
// WithLogger returns an option to specify Blobovnicza's logger.
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
c.log = &logger.Logger{Logger: l.With(zap.String("component", "Blobovnicza"))}
c.log = l.With(zap.String("component", "Blobovnicza"))
}
}
@@ -69,10 +69,10 @@ func TestBlobovnicza(t *testing.T) {
defer os.Remove(p)

// open Blobovnicza
require.NoError(t, blz.Open())
require.NoError(t, blz.Open(context.Background()))

// initialize Blobovnicza
require.NoError(t, blz.Init())
require.NoError(t, blz.Init(context.Background()))

// try to read non-existent address
testGet(t, blz, oidtest.Address(), nil, client.IsErrObjectNotFound)
@@ -102,5 +102,5 @@ func TestBlobovnicza(t *testing.T) {
return err == nil
}, nil)

require.NoError(t, blz.Close())
require.NoError(t, blz.Close(context.Background()))
}
@@ -1,6 +1,7 @@
package blobovnicza

import (
"context"
"errors"
"fmt"
"path/filepath"

@@ -15,7 +16,7 @@ import (
//
// If the database file does not exist, it will be created automatically.
// If blobovnicza is already open, does nothing.
func (b *Blobovnicza) Open() error {
func (b *Blobovnicza) Open(ctx context.Context) error {
b.controlMtx.Lock()
defer b.controlMtx.Unlock()

@@ -23,7 +24,7 @@ func (b *Blobovnicza) Open() error {
return nil
}

b.log.Debug(logs.BlobovniczaCreatingDirectoryForBoltDB,
b.log.Debug(ctx, logs.BlobovniczaCreatingDirectoryForBoltDB,
zap.String("path", b.path),
zap.Bool("ro", b.boltOptions.ReadOnly),
)
@@ -37,7 +38,7 @@ func (b *Blobovnicza) Open() error {
}
}

b.log.Debug(logs.BlobovniczaOpeningBoltDB,
b.log.Debug(ctx, logs.BlobovniczaOpeningBoltDB,
zap.String("path", b.path),
zap.Stringer("permissions", b.perm),
)
@@ -55,7 +56,7 @@ func (b *Blobovnicza) Open() error {
//
// If Blobovnicza is already initialized, no action is taken.
// Blobovnicza must be open, otherwise an error will return.
func (b *Blobovnicza) Init() error {
func (b *Blobovnicza) Init(ctx context.Context) error {
b.controlMtx.Lock()
defer b.controlMtx.Unlock()

@@ -63,7 +64,7 @@ func (b *Blobovnicza) Init() error {
return errors.New("blobovnicza is not open")
}

b.log.Debug(logs.BlobovniczaInitializing,
b.log.Debug(ctx, logs.BlobovniczaInitializing,
zap.Uint64("object size limit", b.objSizeLimit),
zap.Uint64("storage size limit", b.fullSizeLimit),
)
@@ -71,7 +72,7 @@ func (b *Blobovnicza) Init() error {
size := b.dataSize.Load()
items := b.itemsCount.Load()
if size != 0 || items != 0 {
b.log.Debug(logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size), zap.Uint64("items", items))
b.log.Debug(ctx, logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size), zap.Uint64("items", items))
return nil
}

@@ -81,7 +82,7 @@ func (b *Blobovnicza) Init() error {
// create size range bucket

rangeStr := stringifyBounds(lower, upper)
b.log.Debug(logs.BlobovniczaCreatingBucketForSizeRange,
b.log.Debug(ctx, logs.BlobovniczaCreatingBucketForSizeRange,
zap.String("range", rangeStr))

_, err := tx.CreateBucketIfNotExists(key)
@@ -98,14 +99,14 @@ func (b *Blobovnicza) Init() error {
}
}

return b.initializeCounters()
return b.initializeCounters(ctx)
}

func (b *Blobovnicza) ObjectsCount() uint64 {
return b.itemsCount.Load()
}

func (b *Blobovnicza) initializeCounters() error {
func (b *Blobovnicza) initializeCounters(ctx context.Context) error {
var size uint64
var items uint64
var sizeExists bool
@@ -131,17 +132,17 @@ func (b *Blobovnicza) initializeCounters() error {
return fmt.Errorf("can't determine DB size: %w", err)
}
if (!sizeExists || !itemsCountExists) && !b.boltOptions.ReadOnly {
b.log.Debug(logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items))
b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items))
if err := b.boltDB.Update(func(tx *bbolt.Tx) error {
if err := saveDataSize(tx, size); err != nil {
return err
}
return saveItemsCount(tx, items)
}); err != nil {
b.log.Debug(logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items))
b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items))
return fmt.Errorf("can't save blobovnicza's size and items count: %w", err)
}
b.log.Debug(logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items))
b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items))
}

b.dataSize.Store(size)
@@ -154,7 +155,7 @@ func (b *Blobovnicza) initializeCounters() error {
// Close releases all internal database resources.
//
// If blobovnicza is already closed, does nothing.
func (b *Blobovnicza) Close() error {
func (b *Blobovnicza) Close(ctx context.Context) error {
b.controlMtx.Lock()
defer b.controlMtx.Unlock()

@@ -162,7 +163,7 @@ func (b *Blobovnicza) Close() error {
return nil
}

b.log.Debug(logs.BlobovniczaClosingBoltDB,
b.log.Debug(ctx, logs.BlobovniczaClosingBoltDB,
zap.String("path", b.path),
)
@@ -91,7 +91,7 @@ func (b *Blobovnicza) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, err
}

if err == nil && found {
b.log.Debug(logs.BlobovniczaObjectWasRemovedFromBucket,
b.log.Debug(ctx, logs.BlobovniczaObjectWasRemovedFromBucket,
zap.String("binary size", stringifyByteSize(dataSize)),
zap.String("range", stringifyBounds(sizeLowerBound, sizeUpperBound)),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
@@ -14,11 +14,11 @@ func TestBlobovnicza_Get(t *testing.T) {
filename := filepath.Join(t.TempDir(), "blob")

var blz *Blobovnicza
defer func() { require.NoError(t, blz.Close()) }()
defer func() { require.NoError(t, blz.Close(context.Background())) }()

fnInit := func(szLimit uint64) {
if blz != nil {
require.NoError(t, blz.Close())
require.NoError(t, blz.Close(context.Background()))
}

blz = New(
@@ -26,8 +26,8 @@ func TestBlobovnicza_Get(t *testing.T) {
WithObjectSizeLimit(szLimit),
)

require.NoError(t, blz.Open())
require.NoError(t, blz.Open(context.Background()))
require.NoError(t, blz.Init())
require.NoError(t, blz.Init(context.Background()))
}

// initial distribution: [0:32K] (32K:64K]
@@ -15,8 +15,8 @@ import (
func TestBlobovniczaIterate(t *testing.T) {
filename := filepath.Join(t.TempDir(), "blob")
b := New(WithPath(filename))
require.NoError(t, b.Open())
require.NoError(t, b.Open(context.Background()))
require.NoError(t, b.Init())
require.NoError(t, b.Init(context.Background()))

data := [][]byte{{0, 1, 2, 3}, {5, 6, 7, 8}}
addr := oidtest.Address()
@@ -1,6 +1,7 @@
package blobovniczatree

import (
"context"
"path/filepath"
"sync"

@@ -17,8 +18,8 @@ func (db *activeDB) Blobovnicza() *blobovnicza.Blobovnicza {
return db.blz
}

func (db *activeDB) Close() {
func (db *activeDB) Close(ctx context.Context) {
db.shDB.Close()
db.shDB.Close(ctx)
}

func (db *activeDB) SystemPath() string {
@@ -53,8 +54,8 @@ func newActiveDBManager(dbManager *dbManager, rootPath string) *activeDBManager

// GetOpenedActiveDBForLevel returns active DB for level.
// DB must be closed after use.
func (m *activeDBManager) GetOpenedActiveDBForLevel(lvlPath string) (*activeDB, error) {
func (m *activeDBManager) GetOpenedActiveDBForLevel(ctx context.Context, lvlPath string) (*activeDB, error) {
activeDB, err := m.getCurrentActiveIfOk(lvlPath)
activeDB, err := m.getCurrentActiveIfOk(ctx, lvlPath)
if err != nil {
return nil, err
}
@@ -62,7 +63,7 @@ func (m *activeDBManager) GetOpenedActiveDBForLevel(lvlPath string) (*activeDB,
return activeDB, nil
}

return m.updateAndGetActive(lvlPath)
return m.updateAndGetActive(ctx, lvlPath)
}

func (m *activeDBManager) Open() {
@@ -72,18 +73,18 @@ func (m *activeDBManager) Open() {
m.closed = false
}

func (m *activeDBManager) Close() {
func (m *activeDBManager) Close(ctx context.Context) {
m.levelToActiveDBGuard.Lock()
defer m.levelToActiveDBGuard.Unlock()

for _, db := range m.levelToActiveDB {
db.Close()
db.Close(ctx)
}
m.levelToActiveDB = make(map[string]*sharedDB)
m.closed = true
}

func (m *activeDBManager) getCurrentActiveIfOk(lvlPath string) (*activeDB, error) {
func (m *activeDBManager) getCurrentActiveIfOk(ctx context.Context, lvlPath string) (*activeDB, error) {
m.levelToActiveDBGuard.RLock()
defer m.levelToActiveDBGuard.RUnlock()

@@ -96,13 +97,13 @@ func (m *activeDBManager) getCurrentActiveIfOk(lvlPath string) (*activeDB, error
return nil, nil
}

blz, err := db.Open() // open db for usage, will be closed on activeDB.Close()
blz, err := db.Open(ctx) // open db for usage, will be closed on activeDB.Close()
if err != nil {
return nil, err
}

if blz.IsFull() {
db.Close()
db.Close(ctx)
return nil, nil
}

@@ -112,11 +113,11 @@ func (m *activeDBManager) getCurrentActiveIfOk(lvlPath string) (*activeDB, error
}, nil
}

func (m *activeDBManager) updateAndGetActive(lvlPath string) (*activeDB, error) {
func (m *activeDBManager) updateAndGetActive(ctx context.Context, lvlPath string) (*activeDB, error) {
m.levelLock.Lock(lvlPath)
defer m.levelLock.Unlock(lvlPath)

current, err := m.getCurrentActiveIfOk(lvlPath)
current, err := m.getCurrentActiveIfOk(ctx, lvlPath)
if err != nil {
return nil, err
}
@@ -124,7 +125,7 @@ func (m *activeDBManager) updateAndGetActive(lvlPath string) (*activeDB, error)
return current, nil
}

nextShDB, err := m.getNextSharedDB(lvlPath)
nextShDB, err := m.getNextSharedDB(ctx, lvlPath)
if err != nil {
return nil, err
}
@@ -133,7 +134,7 @@ func (m *activeDBManager) updateAndGetActive(lvlPath string) (*activeDB, error)
return nil, nil
}

blz, err := nextShDB.Open() // open db for client, client must call Close() after usage
blz, err := nextShDB.Open(ctx) // open db for client, client must call Close() after usage
if err != nil {
return nil, err
}
@@ -143,7 +144,7 @@ func (m *activeDBManager) updateAndGetActive(lvlPath string) (*activeDB, error)
}, nil
}

func (m *activeDBManager) getNextSharedDB(lvlPath string) (*sharedDB, error) {
func (m *activeDBManager) getNextSharedDB(ctx context.Context, lvlPath string) (*sharedDB, error) {
var nextActiveDBIdx uint64
hasActive, currentIdx := m.hasActiveDB(lvlPath)
if hasActive {
@@ -160,17 +161,17 @@ func (m *activeDBManager) getNextSharedDB(lvlPath string) (*sharedDB, error) {

path := filepath.Join(lvlPath, u64ToHexStringExt(nextActiveDBIdx))
next := m.dbManager.GetByPath(path)
_, err := next.Open() // open db to hold active DB open, will be closed if db is full, after m.replace or by activeDBManager.Close()
_, err := next.Open(ctx) // open db to hold active DB open, will be closed if db is full, after m.replace or by activeDBManager.Close()
if err != nil {
return nil, err
}

previous, updated := m.replace(lvlPath, next)
if !updated && next != nil {
next.Close() // manager is closed, so don't hold active DB open
next.Close(ctx) // manager is closed, so don't hold active DB open
}
if updated && previous != nil {
previous.Close()
previous.Close(ctx)
}
return next, nil
}
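GetOpenedActiveDBForLevel documents that the returned DB must be closed after use; with this change the caller also has to pass the same context to both calls. A hypothetical caller, sketched against the signatures above (putToActive and the write step are illustrative, not code from this diff; it assumes the package's "errors" import):

```go
func putToActive(ctx context.Context, m *activeDBManager, lvlPath string) error {
	db, err := m.GetOpenedActiveDBForLevel(ctx, lvlPath)
	if err != nil {
		return err
	}
	if db == nil {
		return errors.New("no active blobovnicza for level")
	}
	// release the reference taken by Open(ctx) inside the manager
	defer db.Close(ctx)

	// ... use db.Blobovnicza() for the actual write ...
	return nil
}
```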
@@ -167,7 +167,7 @@ func (b *Blobovniczas) Compressor() *compression.Config {
}

// SetReportErrorFunc implements common.Storage.
func (b *Blobovniczas) SetReportErrorFunc(f func(string, error)) {
func (b *Blobovniczas) SetReportErrorFunc(f func(context.Context, string, error)) {
b.reportError = f
}
@@ -32,7 +32,7 @@ func newDBCache(parentCtx context.Context, size int,
ch := cache.NewCache[string, *sharedDB]().
WithTTL(ttl).WithLRU().WithMaxKeys(size).
WithOnEvicted(func(_ string, db *sharedDB) {
db.Close()
db.Close(parentCtx)
})
ctx, cancel := context.WithCancel(parentCtx)
res := &dbCache{
@@ -81,12 +81,12 @@ func (c *dbCache) Close() {
c.closed = true
}

func (c *dbCache) GetOrCreate(path string) *sharedDB {
func (c *dbCache) GetOrCreate(ctx context.Context, path string) *sharedDB {
value := c.getExisted(path)
if value != nil {
return value
}
return c.create(path)
return c.create(ctx, path)
}

func (c *dbCache) EvictAndMarkNonCached(path string) {
@@ -122,7 +122,7 @@ func (c *dbCache) getExisted(path string) *sharedDB {
return nil
}

func (c *dbCache) create(path string) *sharedDB {
func (c *dbCache) create(ctx context.Context, path string) *sharedDB {
c.pathLock.Lock(path)
defer c.pathLock.Unlock(path)

@@ -133,12 +133,12 @@ func (c *dbCache) create(path string) *sharedDB {

value = c.dbManager.GetByPath(path)

_, err := value.Open() // open db to hold reference, closed by evictedDB.Close() or if cache closed
_, err := value.Open(ctx) // open db to hold reference, closed by evictedDB.Close() or if cache closed
if err != nil {
return value
}
if added := c.put(path, value); !added {
value.Close()
value.Close(ctx)
}
return value
}
@@ -27,7 +27,7 @@ func TestBlobovniczaTree_Concurrency(t *testing.T) {
require.NoError(t, st.Open(mode.ComponentReadWrite))
require.NoError(t, st.Init())
defer func() {
require.NoError(t, st.Close())
require.NoError(t, st.Close(context.Background()))
}()

objGen := &testutil.SeqObjGenerator{ObjSize: 1}
@@ -24,10 +24,10 @@ func (b *Blobovniczas) Open(mode mode.ComponentMode) error {
//
// Should be called exactly once.
func (b *Blobovniczas) Init() error {
b.log.Debug(logs.BlobovniczatreeInitializingBlobovniczas)
b.log.Debug(context.Background(), logs.BlobovniczatreeInitializingBlobovniczas)

if b.readOnly {
b.log.Debug(logs.BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization)
b.log.Debug(context.Background(), logs.BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization)
return nil
}

@@ -46,11 +46,11 @@ func (b *Blobovniczas) initializeDBs(ctx context.Context) error {
eg.Go(func() error {
p = strings.TrimSuffix(p, rebuildSuffix)
shBlz := b.getBlobovniczaWithoutCaching(p)
blz, err := shBlz.Open()
blz, err := shBlz.Open(egCtx)
if err != nil {
return err
}
defer shBlz.Close()
defer shBlz.Close(egCtx)

moveInfo, err := blz.ListMoveInfo(egCtx)
if err != nil {
@@ -60,7 +60,7 @@ func (b *Blobovniczas) initializeDBs(ctx context.Context) error {
b.deleteProtectedObjects.Add(move.Address)
}

b.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p))
b.log.Debug(egCtx, logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p))
return nil
})
return false, nil
@@ -80,9 +80,9 @@ func (b *Blobovniczas) openManagers() {
}

// Close implements common.Storage.
func (b *Blobovniczas) Close() error {
func (b *Blobovniczas) Close(ctx context.Context) error {
b.dbCache.Close() // order important
b.activeDBManager.Close()
b.activeDBManager.Close(ctx)
b.commondbManager.Close()

return nil
@@ -91,8 +91,8 @@ func (b *Blobovniczas) Close() error {
// returns blobovnicza with path p
//
// If blobovnicza is already cached, instance from cache is returned w/o changes.
func (b *Blobovniczas) getBlobovnicza(p string) *sharedDB {
func (b *Blobovniczas) getBlobovnicza(ctx context.Context, p string) *sharedDB {
return b.dbCache.GetOrCreate(p)
return b.dbCache.GetOrCreate(ctx, p)
}

func (b *Blobovniczas) getBlobovniczaWithoutCaching(p string) *sharedDB {
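initializeDBs runs one goroutine per database through an errgroup whose context (egCtx) now also flows into Open/Close and the log calls, so the first failure cancels the remaining initializations. A reduced, runnable sketch of that pattern (the paths and the init body are placeholders):

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// initAll mirrors the initializeDBs pattern above: every database is opened
// and initialized in its own goroutine, all sharing egCtx, and the first
// error cancels the rest via the group's context.
func initAll(ctx context.Context, paths []string) error {
	eg, egCtx := errgroup.WithContext(ctx)
	for _, p := range paths {
		p := p // capture loop variable (pre-Go 1.22 semantics)
		eg.Go(func() error {
			// real code would open/init one database with egCtx
			// and close it with the same context before returning
			fmt.Println("initialized", p, "ctx alive:", egCtx.Err() == nil)
			return nil
		})
	}
	return eg.Wait()
}

func main() {
	_ = initAll(context.Background(), []string{"a", "b"})
}
```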
@@ -51,7 +51,7 @@ func TestObjectsAvailableAfterDepthAndWidthEdit(t *testing.T) {
require.NoError(t, err)
require.EqualValues(t, obj35, gRes.Object)

require.NoError(t, blz.Close())
require.NoError(t, blz.Close(context.Background()))

// change depth and width
blz = NewBlobovniczaTree(
@@ -89,7 +89,7 @@ func TestObjectsAvailableAfterDepthAndWidthEdit(t *testing.T) {
})
require.NoError(t, err)

require.NoError(t, blz.Close())
require.NoError(t, blz.Close(context.Background()))

// change depth and width back
blz = NewBlobovniczaTree(
@@ -127,5 +127,5 @@ func TestObjectsAvailableAfterDepthAndWidthEdit(t *testing.T) {
require.NoError(t, err)
require.EqualValues(t, obj52, gRes.Object)

require.NoError(t, blz.Close())
require.NoError(t, blz.Close(context.Background()))
}
@@ -16,17 +16,17 @@ func (b *Blobovniczas) ObjectsCount(ctx context.Context) (uint64, error) {
b.metrics.ObjectsCount(time.Since(startedAt), success)
}()

_, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.ObjectsCount")
ctx, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.ObjectsCount")
defer span.End()

var result uint64
err := b.iterateExistingDBPaths(ctx, func(p string) (bool, error) {
shDB := b.getBlobovniczaWithoutCaching(p)
blz, err := shDB.Open()
blz, err := shDB.Open(ctx)
if err != nil {
return true, err
}
defer shDB.Close()
defer shDB.Close(ctx)

result += blz.ObjectsCount()
return false, nil
@@ -61,12 +61,12 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co

if prm.StorageID != nil {
id := NewIDFromBytes(prm.StorageID)
shBlz := b.getBlobovnicza(id.Path())
shBlz := b.getBlobovnicza(ctx, id.Path())
blz, err := shBlz.Open()
blz, err := shBlz.Open(ctx)
if err != nil {
return res, err
}
defer shBlz.Close()
defer shBlz.Close(ctx)

if res, err = b.deleteObject(ctx, blz, bPrm); err == nil {
success = true
@@ -80,7 +80,7 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
res, err = b.deleteObjectFromLevel(ctx, bPrm, p)
if err != nil {
if !client.IsErrObjectNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromLevel,
b.log.Debug(ctx, logs.BlobovniczatreeCouldNotRemoveObjectFromLevel,
zap.String("level", p),
zap.String("error", err.Error()),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
@@ -109,12 +109,12 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
//
// returns no error if object was removed from some blobovnicza of the same level.
func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicza.DeletePrm, blzPath string) (common.DeleteRes, error) {
shBlz := b.getBlobovnicza(blzPath)
shBlz := b.getBlobovnicza(ctx, blzPath)
blz, err := shBlz.Open()
blz, err := shBlz.Open(ctx)
if err != nil {
return common.DeleteRes{}, err
}
defer shBlz.Close()
defer shBlz.Close(ctx)

return b.deleteObject(ctx, blz, prm)
}
@@ -37,12 +37,12 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common

if prm.StorageID != nil {
id := NewIDFromBytes(prm.StorageID)
shBlz := b.getBlobovnicza(id.Path())
shBlz := b.getBlobovnicza(ctx, id.Path())
blz, err := shBlz.Open()
blz, err := shBlz.Open(ctx)
if err != nil {
return common.ExistsRes{}, err
}
defer shBlz.Close()
defer shBlz.Close(ctx)

exists, err := blz.Exists(ctx, prm.Address)
return common.ExistsRes{Exists: exists}, err
@@ -55,7 +55,7 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common
_, err := b.getObjectFromLevel(ctx, gPrm, p)
if err != nil {
if !client.IsErrObjectNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
zap.String("level", p),
zap.String("error", err.Error()),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
@@ -27,7 +27,7 @@ func TestExistsInvalidStorageID(t *testing.T) {
WithBlobovniczaSize(1<<20))
require.NoError(t, b.Open(mode.ComponentReadWrite))
require.NoError(t, b.Init())
defer func() { require.NoError(t, b.Close()) }()
defer func() { require.NoError(t, b.Close(context.Background())) }()

obj := blobstortest.NewObject(1024)
addr := object.AddressOf(obj)
@@ -48,12 +48,12 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G

if prm.StorageID != nil {
id := NewIDFromBytes(prm.StorageID)
shBlz := b.getBlobovnicza(id.Path())
shBlz := b.getBlobovnicza(ctx, id.Path())
blz, err := shBlz.Open()
blz, err := shBlz.Open(ctx)
if err != nil {
return res, err
}
defer shBlz.Close()
defer shBlz.Close(ctx)

res, err = b.getObject(ctx, blz, bPrm)
if err == nil {
@@ -67,7 +67,7 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
res, err = b.getObjectFromLevel(ctx, bPrm, p)
if err != nil {
if !client.IsErrObjectNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
zap.String("level", p),
zap.String("error", err.Error()),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
@@ -95,12 +95,12 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
// returns error if object could not be read from any blobovnicza of the same level.
func (b *Blobovniczas) getObjectFromLevel(ctx context.Context, prm blobovnicza.GetPrm, blzPath string) (common.GetRes, error) {
// open blobovnicza (cached inside)
shBlz := b.getBlobovnicza(blzPath)
shBlz := b.getBlobovnicza(ctx, blzPath)
blz, err := shBlz.Open()
blz, err := shBlz.Open(ctx)
if err != nil {
return common.GetRes{}, err
}
defer shBlz.Close()
defer shBlz.Close(ctx)

return b.getObject(ctx, blz, prm)
}
@@ -47,12 +47,12 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re

if prm.StorageID != nil {
id := NewIDFromBytes(prm.StorageID)
shBlz := b.getBlobovnicza(id.Path())
shBlz := b.getBlobovnicza(ctx, id.Path())
blz, err := shBlz.Open()
blz, err := shBlz.Open(ctx)
if err != nil {
return common.GetRangeRes{}, err
}
defer shBlz.Close()
defer shBlz.Close(ctx)

res, err := b.getObjectRange(ctx, blz, prm)
if err == nil {
@@ -69,7 +69,7 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
if err != nil {
outOfBounds := isErrOutOfRange(err)
if !outOfBounds && !client.IsErrObjectNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
zap.String("level", p),
zap.String("error", err.Error()),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
@@ -103,12 +103,12 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
// returns error if object could not be read from any blobovnicza of the same level.
func (b *Blobovniczas) getRangeFromLevel(ctx context.Context, prm common.GetRangePrm, blzPath string) (common.GetRangeRes, error) {
// open blobovnicza (cached inside)
shBlz := b.getBlobovnicza(blzPath)
shBlz := b.getBlobovnicza(ctx, blzPath)
blz, err := shBlz.Open()
blz, err := shBlz.Open(ctx)
if err != nil {
return common.GetRangeRes{}, err
}
defer shBlz.Close()
defer shBlz.Close(ctx)

return b.getObjectRange(ctx, blz, prm)
}
@@ -42,7 +42,7 @@ func (b *Blobovniczas) Iterate(ctx context.Context, prm common.IteratePrm) (comm
data, err := b.compression.Decompress(elem.ObjectData())
if err != nil {
if prm.IgnoreErrors {
b.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
zap.Stringer("address", elem.Address()),
zap.String("err", err.Error()),
zap.String("storage_id", p),
@@ -72,11 +72,11 @@ func (b *Blobovniczas) Iterate(ctx context.Context, prm common.IteratePrm) (comm
// iterator over all Blobovniczas in unsorted order. Break on f's error return.
func (b *Blobovniczas) iterateBlobovniczas(ctx context.Context, ignoreErrors bool, f func(string, *blobovnicza.Blobovnicza) error) error {
return b.iterateExistingDBPaths(ctx, func(p string) (bool, error) {
shBlz := b.getBlobovnicza(p)
shBlz := b.getBlobovnicza(ctx, p)
blz, err := shBlz.Open()
blz, err := shBlz.Open(ctx)
if err != nil {
if ignoreErrors {
b.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
zap.String("err", err.Error()),
zap.String("storage_id", p),
zap.String("root_path", b.rootPath))
@@ -84,7 +84,7 @@ func (b *Blobovniczas) iterateBlobovniczas(ctx context.Context, ignoreErrors boo
}
return false, fmt.Errorf("could not open blobovnicza %s: %w", p, err)
}
defer shBlz.Close()
defer shBlz.Close(ctx)

err = f(p, blz)
@@ -1,6 +1,7 @@
 package blobovniczatree

 import (
+	"context"
 	"errors"
 	"fmt"
 	"os"

@@ -48,7 +49,7 @@ func newSharedDB(options []blobovnicza.Option, path string, readOnly bool,
 	}
 }

-func (b *sharedDB) Open() (*blobovnicza.Blobovnicza, error) {
+func (b *sharedDB) Open(ctx context.Context) (*blobovnicza.Blobovnicza, error) {
 	if b.closedFlag.Load() {
 		return nil, errClosed
 	}

@@ -67,10 +68,10 @@ func (b *sharedDB) Open() (*blobovnicza.Blobovnicza, error) {
 		blobovnicza.WithMetrics(b.metrics),
 	)...)

-	if err := blz.Open(); err != nil {
+	if err := blz.Open(ctx); err != nil {
 		return nil, fmt.Errorf("could not open blobovnicza %s: %w", b.path, err)
 	}
-	if err := blz.Init(); err != nil {
+	if err := blz.Init(ctx); err != nil {
 		return nil, fmt.Errorf("could not init blobovnicza %s: %w", b.path, err)
 	}

@@ -81,20 +82,20 @@ func (b *sharedDB) Open() (*blobovnicza.Blobovnicza, error) {
 	return blz, nil
 }

-func (b *sharedDB) Close() {
+func (b *sharedDB) Close(ctx context.Context) {
 	b.cond.L.Lock()
 	defer b.cond.L.Unlock()

 	if b.refCount == 0 {
-		b.log.Error(logs.AttemtToCloseAlreadyClosedBlobovnicza, zap.String("id", b.path))
+		b.log.Error(ctx, logs.AttemtToCloseAlreadyClosedBlobovnicza, zap.String("id", b.path))
 		b.cond.Broadcast()
 		return
 	}

 	if b.refCount == 1 {
 		b.refCount = 0
-		if err := b.blcza.Close(); err != nil {
-			b.log.Error(logs.BlobovniczatreeCouldNotCloseBlobovnicza,
+		if err := b.blcza.Close(ctx); err != nil {
+			b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza,
 				zap.String("id", b.path),
 				zap.String("error", err.Error()),
 			)

@@ -110,7 +111,7 @@ func (b *sharedDB) Close() {
 	}
 }

-func (b *sharedDB) CloseAndRemoveFile() error {
+func (b *sharedDB) CloseAndRemoveFile(ctx context.Context) error {
 	b.cond.L.Lock()
 	if b.refCount > 1 {
 		b.cond.Wait()

@@ -121,8 +122,8 @@ func (b *sharedDB) CloseAndRemoveFile() error {
 		return errClosingClosedBlobovnicza
 	}

-	if err := b.blcza.Close(); err != nil {
-		b.log.Error(logs.BlobovniczatreeCouldNotCloseBlobovnicza,
+	if err := b.blcza.Close(ctx); err != nil {
+		b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza,
 			zap.String("id", b.path),
 			zap.String("error", err.Error()),
 		)
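Note: with these hunks every sharedDB call site follows the same context-threaded open/use/close pattern. A minimal sketch of the expected usage, based only on the signatures shown in this diff (error handling trimmed):

func useBlobovnicza(ctx context.Context, b *Blobovniczas, path string) error {
	shBlz := b.getBlobovnicza(ctx, path) // shared, ref-counted handle
	blz, err := shBlz.Open(ctx)          // first Open opens and inits the underlying DB
	if err != nil {
		return err
	}
	defer shBlz.Close(ctx) // decrements the ref count; the DB closes when it reaches zero

	_ = blz // read or write through blz while the handle is held
	return nil
}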
@@ -1,6 +1,7 @@
 package blobovniczatree

 import (
+	"context"
 	"io/fs"
 	"time"

@@ -20,7 +21,7 @@ type cfg struct {
 	blzShallowWidth uint64
 	compression *compression.Config
 	blzOpts []blobovnicza.Option
-	reportError func(string, error) // reportError is the function called when encountering disk errors.
+	reportError func(context.Context, string, error) // reportError is the function called when encountering disk errors.
 	metrics Metrics
 	waitBeforeDropDB time.Duration
 	blzInitWorkerCount int

@@ -47,14 +48,14 @@ const (

 func initConfig(c *cfg) {
 	*c = cfg{
-		log: &logger.Logger{Logger: zap.L()},
+		log: logger.NewLoggerWrapper(zap.L()),
 		perm: defaultPerm,
 		openedCacheSize: defaultOpenedCacheSize,
 		openedCacheTTL: defaultOpenedCacheTTL,
 		openedCacheExpInterval: defaultOpenedCacheInterval,
 		blzShallowDepth: defaultBlzShallowDepth,
 		blzShallowWidth: defaultBlzShallowWidth,
-		reportError: func(string, error) {},
+		reportError: func(context.Context, string, error) {},
 		metrics: &noopMetrics{},
 		waitBeforeDropDB: defaultWaitBeforeDropDB,
 		blzInitWorkerCount: defaultBlzInitWorkerCount,
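Note: the reportError hook now receives the caller's context first, matching the no-op default above. A self-contained sketch of a context-aware callback (the logging sink is illustrative; real code would use the component's logger and could pull trace data from ctx):

package main

import (
	"context"
	"errors"
	"log"
)

// reportError matches the new field type func(context.Context, string, error).
func reportError(ctx context.Context, msg string, err error) {
	log.Printf("disk error: %s: %v", msg, err)
}

func main() {
	reportError(context.Background(), "could not open blobovnicza", errors.New("i/o error"))
}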
@@ -77,12 +77,12 @@ type putIterator struct {
 }

 func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error) {
-	active, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(lvlPath)
+	active, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(ctx, lvlPath)
 	if err != nil {
 		if !isLogical(err) {
-			i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
+			i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
 		} else {
-			i.B.log.Debug(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza,
+			i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza,
 				zap.String("error", err.Error()),
 				zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 		}

@@ -91,20 +91,20 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error)
 	}

 	if active == nil {
-		i.B.log.Debug(logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath),
+		i.B.log.Debug(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 		return false, nil
 	}
-	defer active.Close()
+	defer active.Close(ctx)

 	i.AllFull = false

 	_, err = active.Blobovnicza().Put(ctx, i.PutPrm)
 	if err != nil {
 		if !isLogical(err) {
-			i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
+			i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
 		} else {
-			i.B.log.Debug(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza,
+			i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza,
 				zap.String("path", active.SystemPath()),
 				zap.String("error", err.Error()),
 				zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
@@ -49,25 +49,25 @@ func (b *Blobovniczas) Rebuild(ctx context.Context, prm common.RebuildPrm) (comm

 	var res common.RebuildRes

-	b.log.Debug(logs.BlobovniczaTreeCompletingPreviousRebuild)
+	b.log.Debug(ctx, logs.BlobovniczaTreeCompletingPreviousRebuild)
 	completedPreviosMoves, err := b.completeIncompletedMove(ctx, prm.MetaStorage)
 	res.ObjectsMoved += completedPreviosMoves
 	if err != nil {
-		b.log.Warn(logs.BlobovniczaTreeCompletedPreviousRebuildFailed, zap.Error(err))
+		b.log.Warn(ctx, logs.BlobovniczaTreeCompletedPreviousRebuildFailed, zap.Error(err))
 		success = false
 		return res, err
 	}
-	b.log.Debug(logs.BlobovniczaTreeCompletedPreviousRebuildSuccess)
+	b.log.Debug(ctx, logs.BlobovniczaTreeCompletedPreviousRebuildSuccess)

-	b.log.Debug(logs.BlobovniczaTreeCollectingDBToRebuild)
+	b.log.Debug(ctx, logs.BlobovniczaTreeCollectingDBToRebuild)
 	dbsToMigrate, err := b.getDBsToRebuild(ctx, prm.FillPercent)
 	if err != nil {
-		b.log.Warn(logs.BlobovniczaTreeCollectingDBToRebuildFailed, zap.Error(err))
+		b.log.Warn(ctx, logs.BlobovniczaTreeCollectingDBToRebuildFailed, zap.Error(err))
 		success = false
 		return res, err
 	}

-	b.log.Info(logs.BlobovniczaTreeCollectingDBToRebuildSuccess, zap.Int("blobovniczas_to_rebuild", len(dbsToMigrate)))
+	b.log.Info(ctx, logs.BlobovniczaTreeCollectingDBToRebuildSuccess, zap.Int("blobovniczas_to_rebuild", len(dbsToMigrate)))
 	res, err = b.migrateDBs(ctx, dbsToMigrate, prm, res)
 	if err != nil {
 		success = false

@@ -78,14 +78,14 @@ func (b *Blobovniczas) Rebuild(ctx context.Context, prm common.RebuildPrm) (comm
 func (b *Blobovniczas) migrateDBs(ctx context.Context, dbs []string, prm common.RebuildPrm, res common.RebuildRes) (common.RebuildRes, error) {
 	var completedDBCount uint32
 	for _, db := range dbs {
-		b.log.Debug(logs.BlobovniczaTreeRebuildingBlobovnicza, zap.String("path", db))
+		b.log.Debug(ctx, logs.BlobovniczaTreeRebuildingBlobovnicza, zap.String("path", db))
 		movedObjects, err := b.rebuildDB(ctx, db, prm.MetaStorage, prm.WorkerLimiter)
 		res.ObjectsMoved += movedObjects
 		if err != nil {
-			b.log.Warn(logs.BlobovniczaTreeRebuildingBlobovniczaFailed, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects), zap.Error(err))
+			b.log.Warn(ctx, logs.BlobovniczaTreeRebuildingBlobovniczaFailed, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects), zap.Error(err))
 			return res, err
 		}
-		b.log.Debug(logs.BlobovniczaTreeRebuildingBlobovniczaSuccess, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects))
+		b.log.Debug(ctx, logs.BlobovniczaTreeRebuildingBlobovniczaSuccess, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects))
 		res.FilesRemoved++
 		completedDBCount++
 		b.metrics.SetRebuildPercent((100 * completedDBCount) / uint32(len(dbs)))

@@ -165,7 +165,7 @@ func (b *Blobovniczas) selectDBsDoNotMatchFillPercent(ctx context.Context, targe
 			continue
 		}
 		path := filepath.Join(lvlPath, e.Name())
-		resettlementRequired, err := b.rebuildBySize(path, target)
+		resettlementRequired, err := b.rebuildBySize(ctx, path, target)
 		if err != nil {
 			return false, err
 		}

@@ -180,13 +180,13 @@ func (b *Blobovniczas) selectDBsDoNotMatchFillPercent(ctx context.Context, targe
 	return result, nil
 }

-func (b *Blobovniczas) rebuildBySize(path string, targetFillPercent int) (bool, error) {
-	shDB := b.getBlobovnicza(path)
-	blz, err := shDB.Open()
+func (b *Blobovniczas) rebuildBySize(ctx context.Context, path string, targetFillPercent int) (bool, error) {
+	shDB := b.getBlobovnicza(ctx, path)
+	blz, err := shDB.Open(ctx)
 	if err != nil {
 		return false, err
 	}
-	defer shDB.Close()
+	defer shDB.Close(ctx)
 	fp := blz.FillPercent()
 	// accepted fill percent defines as
 	// |----|+++++++++++++++++|+++++++++++++++++|---------------

@@ -196,8 +196,8 @@ func (b *Blobovniczas) rebuildBySize(path string, targetFillPercent int) (bool,
 }

 func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.MetaStorage, limiter common.ConcurrentWorkersLimiter) (uint64, error) {
-	shDB := b.getBlobovnicza(path)
-	blz, err := shDB.Open()
+	shDB := b.getBlobovnicza(ctx, path)
+	blz, err := shDB.Open(ctx)
 	if err != nil {
 		return 0, err
 	}

@@ -206,9 +206,9 @@ func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.M
 		if shDBClosed {
 			return
 		}
-		shDB.Close()
+		shDB.Close(ctx)
 	}()
-	dropTempFile, err := b.addRebuildTempFile(path)
+	dropTempFile, err := b.addRebuildTempFile(ctx, path)
 	if err != nil {
 		return 0, err
 	}

@@ -224,7 +224,7 @@ func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.M
 	return migratedObjects, err
 }

-func (b *Blobovniczas) addRebuildTempFile(path string) (func(), error) {
+func (b *Blobovniczas) addRebuildTempFile(ctx context.Context, path string) (func(), error) {
 	sysPath := filepath.Join(b.rootPath, path)
 	sysPath = sysPath + rebuildSuffix
 	_, err := os.OpenFile(sysPath, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, b.perm)
@@ -233,7 +233,7 @@ func (b *Blobovniczas) addRebuildTempFile(path string) (func(), error) {
 	}
 	return func() {
 		if err := os.Remove(sysPath); err != nil {
-			b.log.Warn(logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
+			b.log.Warn(ctx, logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
 		}
 	}, nil
 }

@@ -330,7 +330,7 @@ func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDb *sharedDB)
 	b.dbFilesGuard.Lock()
 	defer b.dbFilesGuard.Unlock()

-	if err := shDb.CloseAndRemoveFile(); err != nil {
+	if err := shDb.CloseAndRemoveFile(ctx); err != nil {
 		return false, err
 	}
 	b.commondbManager.CleanResources(path)
@@ -365,12 +365,12 @@ func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore co
 	err := b.iterateIncompletedRebuildDBPaths(ctx, func(s string) (bool, error) {
 		rebuildTmpFilePath := s
 		s = strings.TrimSuffix(s, rebuildSuffix)
-		shDB := b.getBlobovnicza(s)
-		blz, err := shDB.Open()
+		shDB := b.getBlobovnicza(ctx, s)
+		blz, err := shDB.Open(ctx)
 		if err != nil {
 			return true, err
 		}
-		defer shDB.Close()
+		defer shDB.Close(ctx)

 		incompletedMoves, err := blz.ListMoveInfo(ctx)
 		if err != nil {

@@ -389,7 +389,7 @@ func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore co
 	})
 	for _, tmp := range rebuildTempFilesToRemove {
 		if err := os.Remove(filepath.Join(b.rootPath, tmp)); err != nil {
-			b.log.Warn(logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
+			b.log.Warn(ctx, logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
 		}
 	}
 	return count, err

@@ -398,12 +398,12 @@ func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore co
 func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blobovnicza, sourcePath string,
 	move blobovnicza.MoveInfo, metaStore common.MetaStorage,
 ) error {
-	targetDB := b.getBlobovnicza(NewIDFromBytes(move.TargetStorageID).Path())
-	target, err := targetDB.Open()
+	targetDB := b.getBlobovnicza(ctx, NewIDFromBytes(move.TargetStorageID).Path())
+	target, err := targetDB.Open(ctx)
 	if err != nil {
 		return err
 	}
-	defer targetDB.Close()
+	defer targetDB.Close(ctx)

 	existsInSource := true
 	var gPrm blobovnicza.GetPrm

@@ -413,14 +413,14 @@ func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blob
 		if client.IsErrObjectNotFound(err) {
 			existsInSource = false
 		} else {
-			b.log.Warn(logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err))
+			b.log.Warn(ctx, logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err))
 			return err
 		}
 	}

 	if !existsInSource { // object was deleted by Rebuild, need to delete move info
 		if err = source.DropMoveInfo(ctx, move.Address); err != nil {
-			b.log.Warn(logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err))
+			b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err))
 			return err
 		}
 		b.deleteProtectedObjects.Delete(move.Address)

@@ -429,7 +429,7 @@ func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blob

 	existsInTarget, err := target.Exists(ctx, move.Address)
 	if err != nil {
-		b.log.Warn(logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err))
+		b.log.Warn(ctx, logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err))
 		return err
 	}

@@ -439,25 +439,25 @@ func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blob
 		putPrm.SetMarshaledObject(gRes.Object())
 		_, err = target.Put(ctx, putPrm)
 		if err != nil {
-			b.log.Warn(logs.BlobovniczatreeCouldNotPutObjectToTargetDB, zap.String("path", targetDB.SystemPath()), zap.Error(err))
+			b.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutObjectToTargetDB, zap.String("path", targetDB.SystemPath()), zap.Error(err))
 			return err
 		}
 	}

 	if err = metaStore.UpdateStorageID(ctx, move.Address, move.TargetStorageID); err != nil {
-		b.log.Warn(logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", move.Address))
+		b.log.Warn(ctx, logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", move.Address))
 		return err
 	}

 	var deletePrm blobovnicza.DeletePrm
 	deletePrm.SetAddress(move.Address)
 	if _, err = source.Delete(ctx, deletePrm); err != nil {
-		b.log.Warn(logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", sourcePath), zap.Error(err))
+		b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", sourcePath), zap.Error(err))
 		return err
 	}

 	if err = source.DropMoveInfo(ctx, move.Address); err != nil {
-		b.log.Warn(logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err))
+		b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err))
 		return err
 	}

@@ -477,21 +477,21 @@ type moveIterator struct {
 }

 func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool, error) {
-	target, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(lvlPath)
+	target, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(ctx, lvlPath)
 	if err != nil {
 		if !isLogical(err) {
-			i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
+			i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
 		} else {
-			i.B.log.Warn(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, zap.Error(err))
+			i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, zap.Error(err))
 		}
 		return false, nil
 	}

 	if target == nil {
-		i.B.log.Warn(logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath))
+		i.B.log.Warn(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath))
 		return false, nil
 	}
-	defer target.Close()
+	defer target.Close(ctx)

 	i.AllFull = false

@@ -503,9 +503,9 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool,
 		TargetStorageID: targetStorageID.Bytes(),
 	}); err != nil {
 		if !isLogical(err) {
-			i.B.reportError(logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, err)
+			i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, err)
 		} else {
-			i.B.log.Warn(logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, zap.String("path", i.SourceSysPath), zap.Error(err))
+			i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, zap.String("path", i.SourceSysPath), zap.Error(err))
 		}
 		return true, nil
 	}

@@ -519,15 +519,15 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool,
 	_, err = target.Blobovnicza().Put(ctx, putPrm)
 	if err != nil {
 		if !isLogical(err) {
-			i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
+			i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
 		} else {
-			i.B.log.Warn(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", target.SystemPath()), zap.Error(err))
+			i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", target.SystemPath()), zap.Error(err))
 		}
 		return true, nil
 	}

 	if err = i.MetaStore.UpdateStorageID(ctx, i.Address, targetStorageID.Bytes()); err != nil {
-		i.B.log.Warn(logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", i.Address))
+		i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", i.Address))
 		return true, nil
 	}

@@ -535,18 +535,18 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool,
 	deletePrm.SetAddress(i.Address)
 	if _, err = i.Source.Delete(ctx, deletePrm); err != nil {
 		if !isLogical(err) {
-			i.B.reportError(logs.BlobovniczatreeCouldNotDeleteFromSource, err)
+			i.B.reportError(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, err)
 		} else {
-			i.B.log.Warn(logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", i.SourceSysPath), zap.Error(err))
+			i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", i.SourceSysPath), zap.Error(err))
 		}
 		return true, nil
 	}

 	if err = i.Source.DropMoveInfo(ctx, i.Address); err != nil {
 		if !isLogical(err) {
-			i.B.reportError(logs.BlobovniczatreeCouldNotDropMoveInfo, err)
+			i.B.reportError(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, err)
 		} else {
-			i.B.log.Warn(logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", i.SourceSysPath), zap.Error(err))
+			i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", i.SourceSysPath), zap.Error(err))
 		}
 		return true, nil
 	}

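Note: the hunks above only thread ctx through the rebuild move protocol; the protocol itself is unchanged. As a toy model of why its order of operations makes a move crash-safe (pure Go, no FrostFS types; every step is idempotent, so completeIncompletedMove can replay from any point after a restart):

package main

import "fmt"

func main() {
	source := map[string]string{"obj": "data"}
	target := map[string]string{}
	moveInfo := map[string]bool{}
	storageID := map[string]string{"obj": "source"}

	// 1. Record the intent to move; replayed on restart if we crash later.
	moveInfo["obj"] = true
	// 2. Copy to the target (rewriting the same data on replay is harmless).
	target["obj"] = source["obj"]
	// 3. Point metadata at the target before touching the source copy.
	storageID["obj"] = "target"
	// 4. Only now remove the source copy.
	delete(source, "obj")
	// 5. Clear the move record; the move is complete.
	delete(moveInfo, "obj")

	fmt.Println(target["obj"], storageID["obj"])
}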
@@ -35,8 +35,8 @@ func testRebuildFailoverOnlyMoveInfoSaved(t *testing.T) {
 	dir := t.TempDir()

 	blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
-	require.NoError(t, blz.Open())
-	require.NoError(t, blz.Init())
+	require.NoError(t, blz.Open(context.Background()))
+	require.NoError(t, blz.Init(context.Background()))

 	obj := blobstortest.NewObject(1024)
 	data, err := obj.Marshal()

@@ -53,7 +53,7 @@ func testRebuildFailoverOnlyMoveInfoSaved(t *testing.T) {
 		TargetStorageID: []byte("0/0/0"),
 	}))

-	require.NoError(t, blz.Close())
+	require.NoError(t, blz.Close(context.Background()))
 	_, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm)
 	require.NoError(t, err)

@@ -65,8 +65,8 @@ func testRebuildFailoverObjectSavedToTarget(t *testing.T) {
 	dir := t.TempDir()

 	blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
-	require.NoError(t, blz.Open())
-	require.NoError(t, blz.Init())
+	require.NoError(t, blz.Open(context.Background()))
+	require.NoError(t, blz.Init(context.Background()))

 	obj := blobstortest.NewObject(1024)
 	data, err := obj.Marshal()

@@ -83,19 +83,19 @@ func testRebuildFailoverObjectSavedToTarget(t *testing.T) {
 		TargetStorageID: []byte("0/0/0"),
 	}))

-	require.NoError(t, blz.Close())
+	require.NoError(t, blz.Close(context.Background()))

 	_, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm)
 	require.NoError(t, err)

 	blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db")))
-	require.NoError(t, blz.Open())
-	require.NoError(t, blz.Init())
+	require.NoError(t, blz.Open(context.Background()))
+	require.NoError(t, blz.Init(context.Background()))

 	_, err = blz.Put(context.Background(), pPrm)
 	require.NoError(t, err)

-	require.NoError(t, blz.Close())
+	require.NoError(t, blz.Close(context.Background()))

 	testRebuildFailoverValidate(t, dir, obj, true)
 }

@@ -105,8 +105,8 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) {
 	dir := t.TempDir()

 	blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
-	require.NoError(t, blz.Open())
-	require.NoError(t, blz.Init())
+	require.NoError(t, blz.Open(context.Background()))
+	require.NoError(t, blz.Init(context.Background()))

 	obj := blobstortest.NewObject(1024)
 	data, err := obj.Marshal()

@@ -117,14 +117,14 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) {
 		TargetStorageID: []byte("0/0/0"),
 	}))

-	require.NoError(t, blz.Close())
+	require.NoError(t, blz.Close(context.Background()))

 	_, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm)
 	require.NoError(t, err)

 	blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db")))
-	require.NoError(t, blz.Open())
-	require.NoError(t, blz.Init())
+	require.NoError(t, blz.Open(context.Background()))
+	require.NoError(t, blz.Init(context.Background()))

 	var pPrm blobovnicza.PutPrm
 	pPrm.SetAddress(object.AddressOf(obj))

@@ -132,7 +132,7 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) {
 	_, err = blz.Put(context.Background(), pPrm)
 	require.NoError(t, err)

-	require.NoError(t, blz.Close())
+	require.NoError(t, blz.Close(context.Background()))

 	testRebuildFailoverValidate(t, dir, obj, false)
 }

@@ -170,11 +170,11 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object
 	require.Equal(t, uint64(1), rRes.ObjectsMoved)
 	require.Equal(t, uint64(0), rRes.FilesRemoved)

-	require.NoError(t, b.Close())
+	require.NoError(t, b.Close(context.Background()))

 	blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
-	require.NoError(t, blz.Open())
-	require.NoError(t, blz.Init())
+	require.NoError(t, blz.Open(context.Background()))
+	require.NoError(t, blz.Init(context.Background()))

 	moveInfo, err := blz.ListMoveInfo(context.Background())
 	require.NoError(t, err)

@@ -185,11 +185,11 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object
 	_, err = blz.Get(context.Background(), gPrm)
 	require.True(t, client.IsErrObjectNotFound(err))

-	require.NoError(t, blz.Close())
+	require.NoError(t, blz.Close(context.Background()))

 	blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db")))
-	require.NoError(t, blz.Open())
-	require.NoError(t, blz.Init())
+	require.NoError(t, blz.Open(context.Background()))
+	require.NoError(t, blz.Init(context.Background()))

 	moveInfo, err = blz.ListMoveInfo(context.Background())
 	require.NoError(t, err)

@@ -203,7 +203,7 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object
 		require.True(t, bytes.Equal([]byte("0/0/0"), metaStub.storageIDs[object.AddressOf(obj)]))
 	}

-	require.NoError(t, blz.Close())
+	require.NoError(t, blz.Close(context.Background()))

 	_, err = os.Stat(filepath.Join(dir, "0", "0", "1.db.rebuild"))
 	require.True(t, os.IsNotExist(err))
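Note: the test updates above are mechanical, since every blobovnicza lifecycle call now takes a context and tests pass context.Background(). A self-contained sketch of the resulting canonical test lifecycle (the blobovnicza import path is assumed from this repository's layout):

package blobovniczatree

import (
	"context"
	"path/filepath"
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
	"github.com/stretchr/testify/require"
)

func TestBlobovniczaLifecycle(t *testing.T) {
	blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(t.TempDir(), "0.db")))
	require.NoError(t, blz.Open(context.Background()))  // Open now takes ctx
	require.NoError(t, blz.Init(context.Background()))  // Init now takes ctx
	require.NoError(t, blz.Close(context.Background())) // Close now takes ctx
}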
@@ -93,7 +93,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 			require.NoError(t, err)
 		}

-		require.NoError(t, b.Close())
+		require.NoError(t, b.Close(context.Background()))
 	})

 	t.Run("no rebuild single db", func(t *testing.T) {

@@ -145,7 +145,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 			require.NoError(t, err)
 		}

-		require.NoError(t, b.Close())
+		require.NoError(t, b.Close(context.Background()))
 	})

 	t.Run("rebuild by fill percent", func(t *testing.T) {

@@ -214,7 +214,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 			require.NoError(t, err)
 		}

-		require.NoError(t, b.Close())
+		require.NoError(t, b.Close(context.Background()))
 	})

 	t.Run("rebuild by overflow", func(t *testing.T) {

@@ -251,7 +251,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 			storageIDs: storageIDs,
 			guard: &sync.Mutex{},
 		}
-		require.NoError(t, b.Close())
+		require.NoError(t, b.Close(context.Background()))
 		b = NewBlobovniczaTree(
 			context.Background(),
 			WithLogger(test.NewLogger(t)),

@@ -284,7 +284,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 			require.NoError(t, err)
 		}

-		require.NoError(t, b.Close())
+		require.NoError(t, b.Close(context.Background()))
 	})
 }

@@ -318,7 +318,7 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
 	storageIDs := make(map[oid.Address][]byte)
 	storageIDs[prm.Address] = res.StorageID

-	require.NoError(t, b.Close())
+	require.NoError(t, b.Close(context.Background()))

 	b = NewBlobovniczaTree(
 		context.Background(),

@@ -355,7 +355,7 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
 		require.NoError(t, err)
 	}

-	require.NoError(t, b.Close())
+	require.NoError(t, b.Close(context.Background()))
 }

 func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, targetDepth, targetWidth uint64, shouldMigrate bool) {

@@ -399,7 +399,7 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta
 	}

 	require.NoError(t, eg.Wait())
-	require.NoError(t, b.Close())
+	require.NoError(t, b.Close(context.Background()))

 	b = NewBlobovniczaTree(
 		context.Background(),

@@ -444,7 +444,7 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta
 		require.NoError(t, err)
 	}

-	require.NoError(t, b.Close())
+	require.NoError(t, b.Close(context.Background()))
 }

 type storageIDUpdateStub struct {

@@ -1,6 +1,7 @@
 package blobstor

 import (
+	"context"
 	"sync"

 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"

@@ -47,7 +48,7 @@ type cfg struct {
 }

 func initConfig(c *cfg) {
-	c.log = &logger.Logger{Logger: zap.L()}
+	c.log = logger.NewLoggerWrapper(zap.L())
 	c.metrics = &noopMetrics{}
 }

@@ -90,7 +91,7 @@ func WithStorages(st []SubStorage) Option {
 // WithLogger returns option to specify BlobStor's logger.
 func WithLogger(l *logger.Logger) Option {
 	return func(c *cfg) {
-		c.log = &logger.Logger{Logger: l.With(zap.String("component", "BlobStor"))}
+		c.log = l.With(zap.String("component", "BlobStor"))
 	}
 }
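Note: the logger changes in this diff replace every &logger.Logger{Logger: ...} literal with the NewLoggerWrapper constructor, and With now returns a *logger.Logger directly, so child loggers need no re-wrapping. A small sketch of both forms (the logger import path is assumed from this repository's layout):

package main

import (
	"go.uber.org/zap"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
)

func main() {
	// Wrap the global zap logger, as initConfig now does.
	log := logger.NewLoggerWrapper(zap.L())
	// Derive a component-scoped child, as WithLogger now does.
	component := log.With(zap.String("component", "BlobStor"))
	_ = component
}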
@@ -139,7 +140,7 @@ func WithUncompressableContentTypes(values []string) Option {

 // SetReportErrorFunc allows to provide a function to be called on disk errors.
 // This function MUST be called before Open.
-func (b *BlobStor) SetReportErrorFunc(f func(string, error)) {
+func (b *BlobStor) SetReportErrorFunc(f func(context.Context, string, error)) {
 	for i := range b.storage {
 		b.storage[i].Storage.SetReportErrorFunc(f)
 	}
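Note: callers of SetReportErrorFunc must now supply a context-aware callback, and the "before Open" constraint still holds. A sketch of a call site under that constraint (fragment within package blobstor; the log.Printf sink is illustrative only):

// installReporter wires a context-aware error reporter into a BlobStor
// before Open, as the doc comment above requires.
func installReporter(ctx context.Context, bs *BlobStor) error {
	bs.SetReportErrorFunc(func(ctx context.Context, msg string, err error) {
		// ctx is the operation context of the failing call; a real sink
		// could attach trace data from it before recording the error.
		log.Printf("disk error: %s: %v", msg, err)
	})
	return bs.Open(ctx, mode.ReadWrite) // callback must be set before Open
}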
@@ -54,7 +54,7 @@ func TestCompression(t *testing.T) {
 			WithCompressObjects(compress),
 			WithStorages(defaultStorages(dir, smallSizeLimit)))
 		require.NoError(t, bs.Open(context.Background(), mode.ReadWrite))
-		require.NoError(t, bs.Init())
+		require.NoError(t, bs.Init(context.Background()))
 		return bs
 	}

@@ -91,20 +91,20 @@ func TestCompression(t *testing.T) {
 	blobStor := newBlobStor(t, false)
 	testPut(t, blobStor, 0)
 	testGet(t, blobStor, 0)
-	require.NoError(t, blobStor.Close())
+	require.NoError(t, blobStor.Close(context.Background()))

 	blobStor = newBlobStor(t, true)
 	testGet(t, blobStor, 0) // get uncompressed object with compress enabled
 	testPut(t, blobStor, 1)
 	testGet(t, blobStor, 1)
-	require.NoError(t, blobStor.Close())
+	require.NoError(t, blobStor.Close(context.Background()))

 	blobStor = newBlobStor(t, false)
 	testGet(t, blobStor, 0) // get old uncompressed object
 	testGet(t, blobStor, 1) // get compressed object with compression disabled
 	testPut(t, blobStor, 2)
 	testGet(t, blobStor, 2)
-	require.NoError(t, blobStor.Close())
+	require.NoError(t, blobStor.Close(context.Background()))
 }

 func TestBlobstor_needsCompression(t *testing.T) {

@@ -130,7 +130,7 @@ func TestBlobstor_needsCompression(t *testing.T) {
 		},
 	}))
 	require.NoError(t, bs.Open(context.Background(), mode.ReadWrite))
-	require.NoError(t, bs.Init())
+	require.NoError(t, bs.Init(context.Background()))
 	return bs
 }

@@ -192,7 +192,7 @@ func TestConcurrentPut(t *testing.T) {
 	blobStor := New(
 		WithStorages(defaultStorages(dir, smallSizeLimit)))
 	require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite))
-	require.NoError(t, blobStor.Init())
+	require.NoError(t, blobStor.Init(context.Background()))

 	testGet := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) {
 		res, err := b.Get(context.Background(), common.GetPrm{Address: object.AddressOf(obj)})

@@ -272,7 +272,7 @@ func TestConcurrentDelete(t *testing.T) {
 	blobStor := New(
 		WithStorages(defaultStorages(dir, smallSizeLimit)))
 	require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite))
-	require.NoError(t, blobStor.Init())
+	require.NoError(t, blobStor.Init(context.Background()))

 	testPut := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) {
 		var prm common.PutPrm
@@ -12,7 +12,7 @@ import (
 type Storage interface {
 	Open(mode mode.ComponentMode) error
 	Init() error
-	Close() error
+	Close(context.Context) error

 	Type() string
 	Path() string

@@ -23,7 +23,7 @@ type Storage interface {

 	// SetReportErrorFunc allows to provide a function to be called on disk errors.
 	// This function MUST be called before Open.
-	SetReportErrorFunc(f func(string, error))
+	SetReportErrorFunc(f func(context.Context, string, error))
 	SetParentID(parentID string)

 	Get(context.Context, GetPrm) (GetRes, error)
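Note: every substorage must now satisfy the widened common.Storage interface. A skeleton showing just the two changed methods (the rest of the interface elided; the no-op SetReportErrorFunc mirrors FSTree's implementation later in this diff):

type noopStorage struct{}

// Close now receives the caller's context; implementations with nothing
// to cancel may ignore it, as FSTree does.
func (noopStorage) Close(_ context.Context) error { return nil }

// SetReportErrorFunc now accepts a context-aware callback.
func (noopStorage) SetReportErrorFunc(_ func(context.Context, string, error)) {}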
@@ -12,7 +12,7 @@ import (

 // Open opens BlobStor.
 func (b *BlobStor) Open(ctx context.Context, mode mode.Mode) error {
-	b.log.Debug(logs.BlobstorOpening)
+	b.log.Debug(ctx, logs.BlobstorOpening)

 	b.modeMtx.Lock()
 	defer b.modeMtx.Unlock()

@@ -50,8 +50,8 @@ var ErrInitBlobovniczas = errors.New("failure on blobovnicza initialization stag
 // If BlobStor is already initialized, no action is taken.
 //
 // Returns wrapped ErrInitBlobovniczas on blobovnicza tree's initializaiton failure.
-func (b *BlobStor) Init() error {
-	b.log.Debug(logs.BlobstorInitializing)
+func (b *BlobStor) Init(ctx context.Context) error {
+	b.log.Debug(ctx, logs.BlobstorInitializing)

 	if err := b.compression.Init(); err != nil {
 		return err

@@ -67,14 +67,14 @@ func (b *BlobStor) Init() error {
 }

 // Close releases all internal resources of BlobStor.
-func (b *BlobStor) Close() error {
-	b.log.Debug(logs.BlobstorClosing)
+func (b *BlobStor) Close(ctx context.Context) error {
+	b.log.Debug(ctx, logs.BlobstorClosing)

 	var firstErr error
 	for i := range b.storage {
-		err := b.storage[i].Storage.Close()
+		err := b.storage[i].Storage.Close(ctx)
 		if err != nil {
-			b.log.Info(logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error()))
+			b.log.Info(ctx, logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error()))
 			if firstErr == nil {
 				firstErr = err
 			}
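Note: Open already took a context; Init and Close now do too, so one context can flow through the whole BlobStor lifecycle. A sketch of a caller under the signatures in this diff (fragment; error from the deferred Close is discarded here for brevity):

func runBlobStor(ctx context.Context, b *BlobStor) error {
	if err := b.Open(ctx, mode.ReadWrite); err != nil {
		return err
	}
	if err := b.Init(ctx); err != nil { // may return wrapped ErrInitBlobovniczas
		return err
	}
	defer b.Close(ctx) // closes every substorage, logging errors without stopping
	return nil
}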
@@ -39,7 +39,7 @@ func (b *BlobStor) Delete(ctx context.Context, prm common.DeletePrm) (common.Del
 		if err == nil || !client.IsErrObjectNotFound(err) {
 			if err == nil {
 				success = true
-				logOp(b.log, deleteOp, prm.Address, b.storage[i].Storage.Type(), prm.StorageID)
+				logOp(ctx, b.log, deleteOp, prm.Address, b.storage[i].Storage.Type(), prm.StorageID)
 			}
 			return res, err
 		}

@@ -58,7 +58,7 @@ func (b *BlobStor) Delete(ctx context.Context, prm common.DeletePrm) (common.Del
 	res, err := st.Delete(ctx, prm)
 	if err == nil {
 		success = true
-		logOp(b.log, deleteOp, prm.Address, st.Type(), prm.StorageID)
+		logOp(ctx, b.log, deleteOp, prm.Address, st.Type(), prm.StorageID)
 	}

 	return res, err

@@ -73,7 +73,7 @@ func (b *BlobStor) Exists(ctx context.Context, prm common.ExistsPrm) (common.Exi
 	}

 	for _, err := range errors[:len(errors)-1] {
-		b.log.Warn(logs.BlobstorErrorOccurredDuringObjectExistenceChecking,
+		b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringObjectExistenceChecking,
 			zap.Stringer("address", prm.Address),
 			zap.String("error", err.Error()),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))

@@ -22,7 +22,7 @@ func TestExists(t *testing.T) {
 	b := New(WithStorages(storages))

 	require.NoError(t, b.Open(context.Background(), mode.ReadWrite))
-	require.NoError(t, b.Init())
+	require.NoError(t, b.Init(context.Background()))

 	objects := []*objectSDK.Object{
 		testObject(smallSizeLimit / 2),
@@ -1,6 +1,8 @@
 package fstree

 import (
+	"context"
+
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
 )

@@ -28,7 +30,7 @@ func (t *FSTree) Init() error {
 }

 // Close implements common.Storage.
-func (t *FSTree) Close() error {
+func (t *FSTree) Close(_ context.Context) error {
 	t.metrics.Close()
 	return nil
 }

@@ -87,7 +87,7 @@ func New(opts ...Option) *FSTree {
 		DirNameLen: DirNameLen,
 		metrics: &noopMetrics{},
 		fileCounter: &noopCounter{},
-		log: &logger.Logger{Logger: zap.L()},
+		log: logger.NewLoggerWrapper(zap.L()),
 	}
 	for i := range opts {
 		opts[i](f)

@@ -152,7 +152,7 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr
 	des, err := os.ReadDir(dirPath)
 	if err != nil {
 		if prm.IgnoreErrors {
-			t.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
+			t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
 				zap.String("err", err.Error()),
 				zap.String("directory_path", dirPath))
 			return nil

@@ -200,7 +200,7 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr
 		}
 		if err != nil {
 			if prm.IgnoreErrors {
-				t.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
+				t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
 					zap.Stringer("address", addr),
 					zap.String("err", err.Error()),
 					zap.String("path", path))

@@ -606,7 +606,7 @@ func (t *FSTree) Compressor() *compression.Config {
 }

 // SetReportErrorFunc implements common.Storage.
-func (t *FSTree) SetReportErrorFunc(_ func(string, error)) {
+func (t *FSTree) SetReportErrorFunc(_ func(context.Context, string, error)) {
 	// Do nothing, FSTree can encounter only one error which is returned.
 }

@@ -52,7 +52,7 @@ func TestObjectCounter(t *testing.T) {
 	require.Equal(t, uint64(0), size)

 	defer func() {
-		require.NoError(t, fst.Close())
+		require.NoError(t, fst.Close(context.Background()))
 	}()

 	addr := oidtest.Address()

@@ -53,6 +53,6 @@ func WithFileCounter(c FileCounter) Option {

 func WithLogger(l *logger.Logger) Option {
 	return func(f *FSTree) {
-		f.log = &logger.Logger{Logger: l.With(zap.String("component", "FSTree"))}
+		f.log = l.With(zap.String("component", "FSTree"))
 	}
 }
@@ -19,7 +19,7 @@ func TestControl(t *testing.T, cons Constructor, minSize, maxSize uint64) {
 	require.NoError(t, s.Init())

 	objects := prepare(t, 10, s, minSize, maxSize)
-	require.NoError(t, s.Close())
+	require.NoError(t, s.Close(context.Background()))

 	require.NoError(t, s.Open(mode.ComponentReadOnly))
 	for i := range objects {

@@ -15,7 +15,7 @@ func TestDelete(t *testing.T, cons Constructor, minSize, maxSize uint64) {
 	s := cons(t)
 	require.NoError(t, s.Open(mode.ComponentReadWrite))
 	require.NoError(t, s.Init())
-	defer func() { require.NoError(t, s.Close()) }()
+	defer func() { require.NoError(t, s.Close(context.Background())) }()

 	objects := prepare(t, 4, s, minSize, maxSize)
