[#1437] node: Fix contextcheck linter

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
Author: Dmitrii Stepanov
Date: 2024-10-21 16:27:28 +03:00
parent c75f845f4b
commit c2f4390f4d
Signed by: dstepanov-yadro (GPG key ID: 237AF1A763293BC0)
209 changed files with 1068 additions and 1036 deletions
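
The contextcheck linter flags functions that construct a fresh context (typically via context.Background()) instead of accepting and forwarding the caller's context.Context. That is the pattern applied throughout the hunks below: signatures gain a ctx parameter, and every log call and downstream call receives it. A minimal, self-contained sketch of the idea, assuming the standard library's log/slog in place of the project's zap-based logger wrapper and a hypothetical component type:

package main

import (
	"context"
	"log/slog"
)

type component struct{ name string }

// Before the fix, a method like this would log with a fresh context:
//
//	func (c *component) reload() {
//		slog.InfoContext(context.Background(), "reload "+c.name)
//	}

// After the fix, the caller's context is accepted and propagated down the
// call chain, so cancellation and request-scoped values are preserved.
func (c *component) reload(ctx context.Context) {
	slog.InfoContext(ctx, "reload "+c.name)
	c.shutdown(ctx)
}

func (c *component) shutdown(ctx context.Context) {
	slog.InfoContext(ctx, "shutdown "+c.name)
}

func main() {
	// The root context is created once, at the entry point, and threaded through.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	(&component{name: "pprof"}).reload(ctx)
}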


@ -93,8 +93,8 @@ func watchForSignal(ctx context.Context, cancel func()) {
if err != nil {
log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err))
}
pprofCmp.reload()
metricsCmp.reload()
pprofCmp.reload(ctx)
metricsCmp.reload(ctx)
log.Info(ctx, logs.FrostFSIRReloadExtraWallets)
err = innerRing.SetExtraWallets(cfg)
if err != nil {


@ -25,8 +25,8 @@ const (
shutdownTimeoutKeyPostfix = ".shutdown_timeout"
)
func (c *httpComponent) init() {
log.Info(context.Background(), "init "+c.name)
func (c *httpComponent) init(ctx context.Context) {
log.Info(ctx, "init "+c.name)
c.enabled = cfg.GetBool(c.name + enabledKeyPostfix)
c.address = cfg.GetString(c.name + addressKeyPostfix)
c.shutdownDur = cfg.GetDuration(c.name + shutdownTimeoutKeyPostfix)
@ -40,14 +40,14 @@ func (c *httpComponent) init() {
httputil.WithShutdownTimeout(c.shutdownDur),
)
} else {
log.Info(context.Background(), c.name+" is disabled, skip")
log.Info(ctx, c.name+" is disabled, skip")
c.srv = nil
}
}
func (c *httpComponent) start() {
func (c *httpComponent) start(ctx context.Context) {
if c.srv != nil {
log.Info(context.Background(), "start "+c.name)
log.Info(ctx, "start "+c.name)
wg.Add(1)
go func() {
defer wg.Done()
@ -56,10 +56,10 @@ func (c *httpComponent) start() {
}
}
func (c *httpComponent) shutdown() error {
func (c *httpComponent) shutdown(ctx context.Context) error {
if c.srv != nil {
log.Info(context.Background(), "shutdown "+c.name)
return c.srv.Shutdown()
log.Info(ctx, "shutdown "+c.name)
return c.srv.Shutdown(ctx)
}
return nil
}
@ -71,17 +71,17 @@ func (c *httpComponent) needReload() bool {
return enabled != c.enabled || enabled && (address != c.address || dur != c.shutdownDur)
}
func (c *httpComponent) reload() {
log.Info(context.Background(), "reload "+c.name)
func (c *httpComponent) reload(ctx context.Context) {
log.Info(ctx, "reload "+c.name)
if c.needReload() {
log.Info(context.Background(), c.name+" config updated")
if err := c.shutdown(); err != nil {
log.Debug(context.Background(), logs.FrostFSIRCouldNotShutdownHTTPServer,
log.Info(ctx, c.name+" config updated")
if err := c.shutdown(ctx); err != nil {
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
zap.String("error", err.Error()),
)
} else {
c.init()
c.start()
c.init(ctx)
c.start(ctx)
}
}
}


@ -87,17 +87,17 @@ func main() {
ctx, cancel := context.WithCancel(context.Background())
pprofCmp = newPprofComponent()
pprofCmp.init()
pprofCmp.init(ctx)
metricsCmp = newMetricsComponent()
metricsCmp.init()
metricsCmp.init(ctx)
audit.Store(cfg.GetBool("audit.enabled"))
innerRing, err = innerring.New(ctx, log, cfg, intErr, metrics, cmode, audit)
exitErr(err)
pprofCmp.start()
metricsCmp.start()
pprofCmp.start(ctx)
metricsCmp.start(ctx)
// start inner ring
err = innerRing.Start(ctx, intErr)
@ -117,12 +117,12 @@ func main() {
func shutdown(ctx context.Context) {
innerRing.Stop(ctx)
if err := metricsCmp.shutdown(); err != nil {
if err := metricsCmp.shutdown(ctx); err != nil {
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
zap.String("error", err.Error()),
)
}
if err := pprofCmp.shutdown(); err != nil {
if err := pprofCmp.shutdown(ctx); err != nil {
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
zap.String("error", err.Error()),
)


@ -29,8 +29,8 @@ func newPprofComponent() *pprofComponent {
}
}
func (c *pprofComponent) init() {
c.httpComponent.init()
func (c *pprofComponent) init(ctx context.Context) {
c.httpComponent.init(ctx)
if c.enabled {
c.blockRate = cfg.GetInt(pprofBlockRateKey)
@ -52,17 +52,17 @@ func (c *pprofComponent) needReload() bool {
c.enabled && (c.blockRate != blockRate || c.mutexRate != mutexRate)
}
func (c *pprofComponent) reload() {
log.Info(context.Background(), "reload "+c.name)
func (c *pprofComponent) reload(ctx context.Context) {
log.Info(ctx, "reload "+c.name)
if c.needReload() {
log.Info(context.Background(), c.name+" config updated")
if err := c.shutdown(); err != nil {
log.Debug(context.Background(), logs.FrostFSIRCouldNotShutdownHTTPServer,
log.Info(ctx, c.name+" config updated")
if err := c.shutdown(ctx); err != nil {
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
zap.String("error", err.Error()))
return
}
c.init()
c.start()
c.init(ctx)
c.start(ctx)
}
}


@ -28,7 +28,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) {
common.ExitOnErr(cmd, common.Errf("invalid address argument: %w", err))
blz := openBlobovnicza(cmd)
defer blz.Close()
defer blz.Close(cmd.Context())
var prm blobovnicza.GetPrm
prm.SetAddress(addr)


@ -32,7 +32,7 @@ func listFunc(cmd *cobra.Command, _ []string) {
}
blz := openBlobovnicza(cmd)
defer blz.Close()
defer blz.Close(cmd.Context())
err := blobovnicza.IterateAddresses(context.Background(), blz, wAddr)
common.ExitOnErr(cmd, common.Errf("blobovnicza iterator failure: %w", err))


@ -31,7 +31,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) {
common.ExitOnErr(cmd, common.Errf("invalid address argument: %w", err))
db := openMeta(cmd)
defer db.Close()
defer db.Close(cmd.Context())
storageID := meta.StorageIDPrm{}
storageID.SetAddress(addr)


@ -19,7 +19,7 @@ func init() {
func listGarbageFunc(cmd *cobra.Command, _ []string) {
db := openMeta(cmd)
defer db.Close()
defer db.Close(cmd.Context())
var garbPrm meta.GarbageIterationPrm
garbPrm.SetHandler(


@ -19,7 +19,7 @@ func init() {
func listGraveyardFunc(cmd *cobra.Command, _ []string) {
db := openMeta(cmd)
defer db.Close()
defer db.Close(cmd.Context())
var gravePrm meta.GraveyardIterationPrm
gravePrm.SetHandler(


@ -397,16 +397,16 @@ type internals struct {
}
// starts node's maintenance.
func (c *cfg) startMaintenance() {
func (c *cfg) startMaintenance(ctx context.Context) {
c.isMaintenance.Store(true)
c.cfgNetmap.state.setControlNetmapStatus(control.NetmapStatus_MAINTENANCE)
c.log.Info(context.Background(), logs.FrostFSNodeStartedLocalNodesMaintenance)
c.log.Info(ctx, logs.FrostFSNodeStartedLocalNodesMaintenance)
}
// stops node's maintenance.
func (c *internals) stopMaintenance() {
func (c *internals) stopMaintenance(ctx context.Context) {
if c.isMaintenance.CompareAndSwap(true, false) {
c.log.Info(context.Background(), logs.FrostFSNodeStoppedLocalNodesMaintenance)
c.log.Info(ctx, logs.FrostFSNodeStoppedLocalNodesMaintenance)
}
}
@ -1129,10 +1129,10 @@ func initLocalStorage(ctx context.Context, c *cfg) {
})
}
func initAccessPolicyEngine(_ context.Context, c *cfg) {
func initAccessPolicyEngine(ctx context.Context, c *cfg) {
var localOverrideDB chainbase.LocalOverrideDatabase
if nodeconfig.PersistentPolicyRules(c.appCfg).Path() == "" {
c.log.Warn(context.Background(), logs.FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed)
c.log.Warn(ctx, logs.FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed)
localOverrideDB = chainbase.NewInmemoryLocalOverrideDatabase()
} else {
localOverrideDB = chainbase.NewBoltLocalOverrideDatabase(
@ -1157,7 +1157,7 @@ func initAccessPolicyEngine(_ context.Context, c *cfg) {
c.onShutdown(func() {
if err := ape.LocalOverrideDatabaseCore().Close(); err != nil {
c.log.Warn(context.Background(), logs.FrostFSNodeAccessPolicyEngineClosingFailure,
c.log.Warn(ctx, logs.FrostFSNodeAccessPolicyEngineClosingFailure,
zap.Error(err),
)
}
@ -1206,10 +1206,10 @@ func (c *cfg) setContractNodeInfo(ni *netmap.NodeInfo) {
c.cfgNetmap.state.setNodeInfo(ni)
}
func (c *cfg) updateContractNodeInfo(epoch uint64) {
func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) {
ni, err := c.netmapLocalNodeState(epoch)
if err != nil {
c.log.Error(context.Background(), logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
zap.Uint64("epoch", epoch),
zap.String("error", err.Error()))
return
@ -1221,19 +1221,19 @@ func (c *cfg) updateContractNodeInfo(epoch uint64) {
// bootstrapWithState calls "addPeer" method of the Sidechain Netmap contract
// with the binary-encoded information from the current node's configuration.
// The state is set using the provided setter which MUST NOT be nil.
func (c *cfg) bootstrapWithState(stateSetter func(*netmap.NodeInfo)) error {
func (c *cfg) bootstrapWithState(ctx context.Context, stateSetter func(*netmap.NodeInfo)) error {
ni := c.cfgNodeInfo.localInfo
stateSetter(&ni)
prm := nmClient.AddPeerPrm{}
prm.SetNodeInfo(ni)
return c.cfgNetmap.wrapper.AddPeer(prm)
return c.cfgNetmap.wrapper.AddPeer(ctx, prm)
}
// bootstrapOnline calls cfg.bootstrapWithState with "online" state.
func bootstrapOnline(c *cfg) error {
return c.bootstrapWithState(func(ni *netmap.NodeInfo) {
func bootstrapOnline(ctx context.Context, c *cfg) error {
return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) {
ni.SetStatus(netmap.Online)
})
}
@ -1241,21 +1241,21 @@ func bootstrapOnline(c *cfg) error {
// bootstrap calls bootstrapWithState with:
// - "maintenance" state if maintenance is in progress on the current node
// - "online", otherwise
func (c *cfg) bootstrap() error {
func (c *cfg) bootstrap(ctx context.Context) error {
// switch to online except when under maintenance
st := c.cfgNetmap.state.controlNetmapStatus()
if st == control.NetmapStatus_MAINTENANCE {
c.log.Info(context.Background(), logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
return c.bootstrapWithState(func(ni *netmap.NodeInfo) {
c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) {
ni.SetStatus(netmap.Maintenance)
})
}
c.log.Info(context.Background(), logs.FrostFSNodeBootstrappingWithOnlineState,
c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithOnlineState,
zap.Stringer("previous", st),
)
return bootstrapOnline(c)
return bootstrapOnline(ctx, c)
}
// needBootstrap checks if local node should be registered in network on bootup.
@ -1282,7 +1282,7 @@ func (c *cfg) signalWatcher(ctx context.Context) {
case <-ch:
c.log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
c.shutdown()
c.shutdown(ctx)
c.log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
return
@ -1290,7 +1290,7 @@ func (c *cfg) signalWatcher(ctx context.Context) {
c.log.Warn(ctx, logs.FrostFSNodeInternalApplicationError,
zap.String("message", err.Error()))
c.shutdown()
c.shutdown(ctx)
c.log.Info(ctx, logs.FrostFSNodeInternalErrorProcessingIsComplete)
return
@ -1302,7 +1302,7 @@ func (c *cfg) signalWatcher(ctx context.Context) {
case <-ch:
c.log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
c.shutdown()
c.shutdown(ctx)
c.log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
return
@ -1310,7 +1310,7 @@ func (c *cfg) signalWatcher(ctx context.Context) {
c.log.Warn(ctx, logs.FrostFSNodeInternalApplicationError,
zap.String("message", err.Error()))
c.shutdown()
c.shutdown(ctx)
c.log.Info(ctx, logs.FrostFSNodeInternalErrorProcessingIsComplete)
return
@ -1322,11 +1322,11 @@ func (c *cfg) signalWatcher(ctx context.Context) {
func (c *cfg) reloadConfig(ctx context.Context) {
c.log.Info(ctx, logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
if !c.compareAndSwapHealthStatus(control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
if !c.compareAndSwapHealthStatus(ctx, control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
c.log.Info(ctx, logs.FrostFSNodeSIGHUPSkip)
return
}
defer c.compareAndSwapHealthStatus(control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
defer c.compareAndSwapHealthStatus(ctx, control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
err := c.reloadAppConfig()
if err != nil {
@ -1388,7 +1388,7 @@ func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
components = append(components, dCmp{"logger", logPrm.Reload})
components = append(components, dCmp{"runtime", func() error {
setRuntimeParameters(c)
setRuntimeParameters(ctx, c)
return nil
}})
components = append(components, dCmp{"audit", func() error {
@ -1474,14 +1474,14 @@ func (c *cfg) createContainerInfoProvider(ctx context.Context) container.InfoPro
})
}
func (c *cfg) shutdown() {
old := c.swapHealthStatus(control.HealthStatus_SHUTTING_DOWN)
func (c *cfg) shutdown(ctx context.Context) {
old := c.swapHealthStatus(ctx, control.HealthStatus_SHUTTING_DOWN)
if old == control.HealthStatus_SHUTTING_DOWN {
c.log.Info(context.Background(), logs.FrostFSNodeShutdownSkip)
c.log.Info(ctx, logs.FrostFSNodeShutdownSkip)
return
}
if old == control.HealthStatus_STARTING {
c.log.Warn(context.Background(), logs.FrostFSNodeShutdownWhenNotReady)
c.log.Warn(ctx, logs.FrostFSNodeShutdownWhenNotReady)
}
c.ctxCancel()
@ -1491,6 +1491,6 @@ func (c *cfg) shutdown() {
}
if err := sdnotify.ClearStatus(); err != nil {
c.log.Error(context.Background(), logs.FailedToReportStatusToSystemd, zap.Error(err))
c.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err))
}
}


@ -237,10 +237,10 @@ type morphContainerWriter struct {
neoClient *cntClient.Client
}
func (m morphContainerWriter) Put(cnr containerCore.Container) (*cid.ID, error) {
return cntClient.Put(m.neoClient, cnr)
func (m morphContainerWriter) Put(ctx context.Context, cnr containerCore.Container) (*cid.ID, error) {
return cntClient.Put(ctx, m.neoClient, cnr)
}
func (m morphContainerWriter) Delete(witness containerCore.RemovalWitness) error {
return cntClient.Delete(m.neoClient, witness)
func (m morphContainerWriter) Delete(ctx context.Context, witness containerCore.RemovalWitness) error {
return cntClient.Delete(ctx, m.neoClient, witness)
}


@ -16,7 +16,7 @@ import (
const serviceNameControl = "control"
func initControlService(c *cfg) {
func initControlService(ctx context.Context, c *cfg) {
endpoint := controlconfig.GRPC(c.appCfg).Endpoint()
if endpoint == controlconfig.GRPCEndpointDefault {
return
@ -46,14 +46,14 @@ func initControlService(c *cfg) {
lis, err := net.Listen("tcp", endpoint)
if err != nil {
c.log.Error(context.Background(), logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err))
c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err))
return
}
c.cfgControlService.server = grpc.NewServer()
c.onShutdown(func() {
stopGRPC("FrostFS Control API", c.cfgControlService.server, c.log)
stopGRPC(ctx, "FrostFS Control API", c.cfgControlService.server, c.log)
})
control.RegisterControlServiceServer(c.cfgControlService.server, ctlSvc)
@ -72,23 +72,23 @@ func (c *cfg) NetmapStatus() control.NetmapStatus {
return c.cfgNetmap.state.controlNetmapStatus()
}
func (c *cfg) setHealthStatus(st control.HealthStatus) {
c.notifySystemd(st)
func (c *cfg) setHealthStatus(ctx context.Context, st control.HealthStatus) {
c.notifySystemd(ctx, st)
c.healthStatus.Store(int32(st))
c.metricsCollector.State().SetHealth(int32(st))
}
func (c *cfg) compareAndSwapHealthStatus(oldSt, newSt control.HealthStatus) (swapped bool) {
func (c *cfg) compareAndSwapHealthStatus(ctx context.Context, oldSt, newSt control.HealthStatus) (swapped bool) {
if swapped = c.healthStatus.CompareAndSwap(int32(oldSt), int32(newSt)); swapped {
c.notifySystemd(newSt)
c.notifySystemd(ctx, newSt)
c.metricsCollector.State().SetHealth(int32(newSt))
}
return
}
func (c *cfg) swapHealthStatus(st control.HealthStatus) (old control.HealthStatus) {
func (c *cfg) swapHealthStatus(ctx context.Context, st control.HealthStatus) (old control.HealthStatus) {
old = control.HealthStatus(c.healthStatus.Swap(int32(st)))
c.notifySystemd(st)
c.notifySystemd(ctx, st)
c.metricsCollector.State().SetHealth(int32(st))
return
}
@ -97,7 +97,7 @@ func (c *cfg) HealthStatus() control.HealthStatus {
return control.HealthStatus(c.healthStatus.Load())
}
func (c *cfg) notifySystemd(st control.HealthStatus) {
func (c *cfg) notifySystemd(ctx context.Context, st control.HealthStatus) {
if !c.sdNotify {
return
}
@ -113,6 +113,6 @@ func (c *cfg) notifySystemd(st control.HealthStatus) {
err = sdnotify.Status(fmt.Sprintf("%v", st))
}
if err != nil {
c.log.Error(context.Background(), logs.FailedToReportStatusToSystemd, zap.Error(err))
c.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err))
}
}


@ -19,11 +19,11 @@ import (
const maxRecvMsgSize = 256 << 20
func initGRPC(c *cfg) {
func initGRPC(ctx context.Context, c *cfg) {
var endpointsToReconnect []string
var successCount int
grpcconfig.IterateEndpoints(c.appCfg, func(sc *grpcconfig.Config) {
serverOpts, ok := getGrpcServerOpts(c, sc)
serverOpts, ok := getGrpcServerOpts(ctx, c, sc)
if !ok {
return
}
@ -31,7 +31,7 @@ func initGRPC(c *cfg) {
lis, err := net.Listen("tcp", sc.Endpoint())
if err != nil {
c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(sc.Endpoint())
c.log.Error(context.Background(), logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
endpointsToReconnect = append(endpointsToReconnect, sc.Endpoint())
return
}
@ -40,7 +40,7 @@ func initGRPC(c *cfg) {
srv := grpc.NewServer(serverOpts...)
c.onShutdown(func() {
stopGRPC("FrostFS Public API", srv, c.log)
stopGRPC(ctx, "FrostFS Public API", srv, c.log)
})
c.cfgGRPC.append(sc.Endpoint(), lis, srv)
@ -53,11 +53,11 @@ func initGRPC(c *cfg) {
c.cfgGRPC.reconnectTimeout = grpcconfig.ReconnectTimeout(c.appCfg)
for _, endpoint := range endpointsToReconnect {
scheduleReconnect(endpoint, c)
scheduleReconnect(ctx, endpoint, c)
}
}
func scheduleReconnect(endpoint string, c *cfg) {
func scheduleReconnect(ctx context.Context, endpoint string, c *cfg) {
c.wg.Add(1)
go func() {
defer c.wg.Done()
@ -66,7 +66,7 @@ func scheduleReconnect(endpoint string, c *cfg) {
for {
select {
case <-t.C:
if tryReconnect(endpoint, c) {
if tryReconnect(ctx, endpoint, c) {
return
}
case <-c.done:
@ -76,20 +76,20 @@ func scheduleReconnect(endpoint string, c *cfg) {
}()
}
func tryReconnect(endpoint string, c *cfg) bool {
c.log.Info(context.Background(), logs.FrostFSNodeGRPCReconnecting, zap.String("endpoint", endpoint))
func tryReconnect(ctx context.Context, endpoint string, c *cfg) bool {
c.log.Info(ctx, logs.FrostFSNodeGRPCReconnecting, zap.String("endpoint", endpoint))
serverOpts, found := getGRPCEndpointOpts(endpoint, c)
serverOpts, found := getGRPCEndpointOpts(ctx, endpoint, c)
if !found {
c.log.Warn(context.Background(), logs.FrostFSNodeGRPCServerConfigNotFound, zap.String("endpoint", endpoint))
c.log.Warn(ctx, logs.FrostFSNodeGRPCServerConfigNotFound, zap.String("endpoint", endpoint))
return true
}
lis, err := net.Listen("tcp", endpoint)
if err != nil {
c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(endpoint)
c.log.Error(context.Background(), logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
c.log.Warn(context.Background(), logs.FrostFSNodeGRPCReconnectFailed, zap.Duration("next_try_in", c.cfgGRPC.reconnectTimeout))
c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
c.log.Warn(ctx, logs.FrostFSNodeGRPCReconnectFailed, zap.Duration("next_try_in", c.cfgGRPC.reconnectTimeout))
return false
}
c.metricsCollector.GrpcServerMetrics().MarkHealthy(endpoint)
@ -97,16 +97,16 @@ func tryReconnect(endpoint string, c *cfg) bool {
srv := grpc.NewServer(serverOpts...)
c.onShutdown(func() {
stopGRPC("FrostFS Public API", srv, c.log)
stopGRPC(ctx, "FrostFS Public API", srv, c.log)
})
c.cfgGRPC.appendAndHandle(endpoint, lis, srv)
c.log.Info(context.Background(), logs.FrostFSNodeGRPCReconnectedSuccessfully, zap.String("endpoint", endpoint))
c.log.Info(ctx, logs.FrostFSNodeGRPCReconnectedSuccessfully, zap.String("endpoint", endpoint))
return true
}
func getGRPCEndpointOpts(endpoint string, c *cfg) (result []grpc.ServerOption, found bool) {
func getGRPCEndpointOpts(ctx context.Context, endpoint string, c *cfg) (result []grpc.ServerOption, found bool) {
unlock := c.LockAppConfigShared()
defer unlock()
grpcconfig.IterateEndpoints(c.appCfg, func(sc *grpcconfig.Config) {
@ -117,7 +117,7 @@ func getGRPCEndpointOpts(endpoint string, c *cfg) (result []grpc.ServerOption, f
return
}
var ok bool
result, ok = getGrpcServerOpts(c, sc)
result, ok = getGrpcServerOpts(ctx, c, sc)
if !ok {
return
}
@ -126,7 +126,7 @@ func getGRPCEndpointOpts(endpoint string, c *cfg) (result []grpc.ServerOption, f
return
}
func getGrpcServerOpts(c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool) {
func getGrpcServerOpts(ctx context.Context, c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool) {
serverOpts := []grpc.ServerOption{
grpc.MaxRecvMsgSize(maxRecvMsgSize),
grpc.ChainUnaryInterceptor(
@ -144,7 +144,7 @@ func getGrpcServerOpts(c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool
if tlsCfg != nil {
cert, err := tls.LoadX509KeyPair(tlsCfg.CertificateFile(), tlsCfg.KeyFile())
if err != nil {
c.log.Error(context.Background(), logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err))
c.log.Error(ctx, logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err))
return nil, false
}
@ -175,38 +175,38 @@ func getGrpcServerOpts(c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool
return serverOpts, true
}
func serveGRPC(c *cfg) {
func serveGRPC(ctx context.Context, c *cfg) {
c.cfgGRPC.performAndSave(func(e string, l net.Listener, s *grpc.Server) {
c.wg.Add(1)
go func() {
defer func() {
c.log.Info(context.Background(), logs.FrostFSNodeStopListeningGRPCEndpoint,
c.log.Info(ctx, logs.FrostFSNodeStopListeningGRPCEndpoint,
zap.Stringer("endpoint", l.Addr()),
)
c.wg.Done()
}()
c.log.Info(context.Background(), logs.FrostFSNodeStartListeningEndpoint,
c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint,
zap.String("service", "gRPC"),
zap.Stringer("endpoint", l.Addr()),
)
if err := s.Serve(l); err != nil {
c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(e)
c.log.Error(context.Background(), logs.FrostFSNodeGRPCServerError, zap.Error(err))
c.log.Error(ctx, logs.FrostFSNodeGRPCServerError, zap.Error(err))
c.cfgGRPC.dropConnection(e)
scheduleReconnect(e, c)
scheduleReconnect(ctx, e, c)
}
}()
})
}
func stopGRPC(name string, s *grpc.Server, l *logger.Logger) {
func stopGRPC(ctx context.Context, name string, s *grpc.Server, l *logger.Logger) {
l = l.With(zap.String("name", name))
l.Info(context.Background(), logs.FrostFSNodeStoppingGRPCServer)
l.Info(ctx, logs.FrostFSNodeStoppingGRPCServer)
// GracefulStop() may freeze forever, see #1270
done := make(chan struct{})
@ -218,9 +218,9 @@ func stopGRPC(name string, s *grpc.Server, l *logger.Logger) {
select {
case <-done:
case <-time.After(1 * time.Minute):
l.Info(context.Background(), logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop)
l.Info(ctx, logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop)
s.Stop()
}
l.Info(context.Background(), logs.FrostFSNodeGRPCServerStoppedSuccessfully)
l.Info(ctx, logs.FrostFSNodeGRPCServerStoppedSuccessfully)
}


@ -20,9 +20,9 @@ type httpComponent struct {
preReload func(c *cfg)
}
func (cmp *httpComponent) init(c *cfg) {
func (cmp *httpComponent) init(ctx context.Context, c *cfg) {
if !cmp.enabled {
c.log.Info(context.Background(), cmp.name+" is disabled")
c.log.Info(ctx, cmp.name+" is disabled")
return
}
// Init server with parameters
@ -39,14 +39,14 @@ func (cmp *httpComponent) init(c *cfg) {
go func() {
defer c.wg.Done()
c.log.Info(context.Background(), logs.FrostFSNodeStartListeningEndpoint,
c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint,
zap.String("service", cmp.name),
zap.String("endpoint", cmp.address))
fatalOnErr(srv.Serve())
}()
c.closers = append(c.closers, closer{
cmp.name,
func() { stopAndLog(c, cmp.name, srv.Shutdown) },
func() { stopAndLog(ctx, c, cmp.name, srv.Shutdown) },
})
}
@ -62,7 +62,7 @@ func (cmp *httpComponent) reload(ctx context.Context) error {
// Cleanup
delCloser(cmp.cfg, cmp.name)
// Init server with new parameters
cmp.init(cmp.cfg)
cmp.init(ctx, cmp.cfg)
// Start worker
if cmp.enabled {
startWorker(ctx, cmp.cfg, *getWorker(cmp.cfg, cmp.name))


@ -61,21 +61,21 @@ func main() {
var ctx context.Context
ctx, c.ctxCancel = context.WithCancel(context.Background())
c.setHealthStatus(control.HealthStatus_STARTING)
c.setHealthStatus(ctx, control.HealthStatus_STARTING)
initApp(ctx, c)
bootUp(ctx, c)
c.compareAndSwapHealthStatus(control.HealthStatus_STARTING, control.HealthStatus_READY)
c.compareAndSwapHealthStatus(ctx, control.HealthStatus_STARTING, control.HealthStatus_READY)
wait(c)
}
func initAndLog(c *cfg, name string, initializer func(*cfg)) {
c.log.Info(context.Background(), fmt.Sprintf("initializing %s service...", name))
func initAndLog(ctx context.Context, c *cfg, name string, initializer func(*cfg)) {
c.log.Info(ctx, fmt.Sprintf("initializing %s service...", name))
initializer(c)
c.log.Info(context.Background(), name+" service has been successfully initialized")
c.log.Info(ctx, name+" service has been successfully initialized")
}
func initApp(ctx context.Context, c *cfg) {
@ -85,38 +85,38 @@ func initApp(ctx context.Context, c *cfg) {
c.wg.Done()
}()
setRuntimeParameters(c)
setRuntimeParameters(ctx, c)
metrics, _ := metricsComponent(c)
initAndLog(c, "profiler", initProfilerService)
initAndLog(c, metrics.name, metrics.init)
initAndLog(ctx, c, "profiler", func(c *cfg) { initProfilerService(ctx, c) })
initAndLog(ctx, c, metrics.name, func(c *cfg) { metrics.init(ctx, c) })
initAndLog(c, "tracing", func(c *cfg) { initTracing(ctx, c) })
initAndLog(ctx, c, "tracing", func(c *cfg) { initTracing(ctx, c) })
initLocalStorage(ctx, c)
initAndLog(c, "storage engine", func(c *cfg) {
initAndLog(ctx, c, "storage engine", func(c *cfg) {
fatalOnErr(c.cfgObject.cfgLocalStorage.localStorage.Open(ctx))
fatalOnErr(c.cfgObject.cfgLocalStorage.localStorage.Init(ctx))
})
initAndLog(c, "gRPC", initGRPC)
initAndLog(c, "netmap", func(c *cfg) { initNetmapService(ctx, c) })
initAndLog(ctx, c, "gRPC", func(c *cfg) { initGRPC(ctx, c) })
initAndLog(ctx, c, "netmap", func(c *cfg) { initNetmapService(ctx, c) })
initAccessPolicyEngine(ctx, c)
initAndLog(c, "access policy engine", func(c *cfg) {
initAndLog(ctx, c, "access policy engine", func(c *cfg) {
fatalOnErr(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalOverrideDatabaseCore().Open(ctx))
fatalOnErr(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalOverrideDatabaseCore().Init())
})
initAndLog(c, "accounting", func(c *cfg) { initAccountingService(ctx, c) })
initAndLog(c, "container", func(c *cfg) { initContainerService(ctx, c) })
initAndLog(c, "session", initSessionService)
initAndLog(c, "object", initObjectService)
initAndLog(c, "tree", initTreeService)
initAndLog(c, "apemanager", initAPEManagerService)
initAndLog(c, "control", initControlService)
initAndLog(ctx, c, "accounting", func(c *cfg) { initAccountingService(ctx, c) })
initAndLog(ctx, c, "container", func(c *cfg) { initContainerService(ctx, c) })
initAndLog(ctx, c, "session", initSessionService)
initAndLog(ctx, c, "object", initObjectService)
initAndLog(ctx, c, "tree", initTreeService)
initAndLog(ctx, c, "apemanager", initAPEManagerService)
initAndLog(ctx, c, "control", func(c *cfg) { initControlService(ctx, c) })
initAndLog(c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) })
initAndLog(ctx, c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) })
}
func runAndLog(ctx context.Context, c *cfg, name string, logSuccess bool, starter func(context.Context, *cfg)) {
@ -128,24 +128,24 @@ func runAndLog(ctx context.Context, c *cfg, name string, logSuccess bool, starte
}
}
func stopAndLog(c *cfg, name string, stopper func() error) {
c.log.Debug(context.Background(), fmt.Sprintf("shutting down %s service", name))
func stopAndLog(ctx context.Context, c *cfg, name string, stopper func(context.Context) error) {
c.log.Debug(ctx, fmt.Sprintf("shutting down %s service", name))
err := stopper()
err := stopper(ctx)
if err != nil {
c.log.Debug(context.Background(), fmt.Sprintf("could not shutdown %s server", name),
c.log.Debug(ctx, fmt.Sprintf("could not shutdown %s server", name),
zap.String("error", err.Error()),
)
}
c.log.Debug(context.Background(), name+" service has been stopped")
c.log.Debug(ctx, name+" service has been stopped")
}
func bootUp(ctx context.Context, c *cfg) {
runAndLog(ctx, c, "gRPC", false, func(_ context.Context, c *cfg) { serveGRPC(c) })
runAndLog(ctx, c, "gRPC", false, func(_ context.Context, c *cfg) { serveGRPC(ctx, c) })
runAndLog(ctx, c, "notary", true, makeAndWaitNotaryDeposit)
bootstrapNode(c)
bootstrapNode(ctx, c)
startWorkers(ctx, c)
}


@ -129,7 +129,7 @@ func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) {
return
}
tx, vub, err := makeNotaryDeposit(c)
tx, vub, err := makeNotaryDeposit(ctx, c)
fatalOnErr(err)
if tx.Equals(util.Uint256{}) {
@ -144,7 +144,7 @@ func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) {
fatalOnErr(err)
}
func makeNotaryDeposit(c *cfg) (util.Uint256, uint32, error) {
func makeNotaryDeposit(ctx context.Context, c *cfg) (util.Uint256, uint32, error) {
const (
// gasMultiplier defines how many times more the notary
// balance must be compared to the GAS balance of the node:
@ -161,7 +161,7 @@ func makeNotaryDeposit(c *cfg) (util.Uint256, uint32, error) {
return util.Uint256{}, 0, fmt.Errorf("could not calculate notary deposit: %w", err)
}
return c.cfgMorph.client.DepositEndlessNotary(depositAmount)
return c.cfgMorph.client.DepositEndlessNotary(ctx, depositAmount)
}
var (
@ -256,7 +256,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
registerNotificationHandlers(c.cfgNetmap.scriptHash, lis, c.cfgNetmap.parsers, c.cfgNetmap.subscribers)
registerNotificationHandlers(c.cfgContainer.scriptHash, lis, c.cfgContainer.parsers, c.cfgContainer.subscribers)
registerBlockHandler(lis, func(block *block.Block) {
registerBlockHandler(lis, func(ctx context.Context, block *block.Block) {
c.log.Debug(ctx, logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index))
err = c.persistate.SetUInt32(persistateSideChainLastBlockKey, block.Index)


@ -145,7 +145,7 @@ func initNetmapService(ctx context.Context, c *cfg) {
c.initMorphComponents(ctx)
initNetmapState(c)
initNetmapState(ctx, c)
server := netmapTransportGRPC.New(
netmapService.NewSignService(
@ -182,20 +182,20 @@ func addNewEpochNotificationHandlers(c *cfg) {
addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, ev event.Event) {
e := ev.(netmapEvent.NewEpoch).EpochNumber()
c.updateContractNodeInfo(e)
c.updateContractNodeInfo(ctx, e)
if !c.needBootstrap() || c.cfgNetmap.reBoostrapTurnedOff.Load() { // fixes #470
return
}
if err := c.bootstrap(); err != nil {
if err := c.bootstrap(ctx); err != nil {
c.log.Warn(ctx, logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err))
}
})
if c.cfgMorph.notaryEnabled {
addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, _ event.Event) {
_, _, err := makeNotaryDeposit(c)
_, _, err := makeNotaryDeposit(ctx, c)
if err != nil {
c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit,
zap.String("error", err.Error()),
@ -207,13 +207,13 @@ func addNewEpochNotificationHandlers(c *cfg) {
// bootstrapNode adds current node to the Network map.
// Must be called after initNetmapService.
func bootstrapNode(c *cfg) {
func bootstrapNode(ctx context.Context, c *cfg) {
if c.needBootstrap() {
if c.IsMaintenance() {
c.log.Info(context.Background(), logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap)
c.log.Info(ctx, logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap)
return
}
err := c.bootstrap()
err := c.bootstrap(ctx)
fatalOnErrDetails("bootstrap error", err)
}
}
@ -240,17 +240,17 @@ func setNetmapNotificationParser(c *cfg, sTyp string, p event.NotificationParser
// initNetmapState inits current Network map state.
// Must be called after Morph components initialization.
func initNetmapState(c *cfg) {
func initNetmapState(ctx context.Context, c *cfg) {
epoch, err := c.cfgNetmap.wrapper.Epoch()
fatalOnErrDetails("could not initialize current epoch number", err)
var ni *netmapSDK.NodeInfo
ni, err = c.netmapInitLocalNodeState(epoch)
ni, err = c.netmapInitLocalNodeState(ctx, epoch)
fatalOnErrDetails("could not init network state", err)
stateWord := nodeState(ni)
c.log.Info(context.Background(), logs.FrostFSNodeInitialNetworkState,
c.log.Info(ctx, logs.FrostFSNodeInitialNetworkState,
zap.Uint64("epoch", epoch),
zap.String("state", stateWord),
)
@ -279,7 +279,7 @@ func nodeState(ni *netmapSDK.NodeInfo) string {
return "undefined"
}
func (c *cfg) netmapInitLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error) {
func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) {
nmNodes, err := c.cfgNetmap.wrapper.GetCandidates()
if err != nil {
return nil, err
@ -307,7 +307,7 @@ func (c *cfg) netmapInitLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error
if nmState != candidateState {
// This happens when the node was switched to maintenance without epoch tick.
// We expect it to continue staying in maintenance.
c.log.Info(context.Background(), logs.CandidateStatusPriority,
c.log.Info(ctx, logs.CandidateStatusPriority,
zap.String("netmap", nmState),
zap.String("candidate", candidateState))
}
@ -353,16 +353,16 @@ func addNewEpochAsyncNotificationHandler(c *cfg, h event.Handler) {
var errRelayBootstrap = errors.New("setting netmap status is forbidden in relay mode")
func (c *cfg) SetNetmapStatus(st control.NetmapStatus) error {
func (c *cfg) SetNetmapStatus(ctx context.Context, st control.NetmapStatus) error {
switch st {
default:
return fmt.Errorf("unsupported status %v", st)
case control.NetmapStatus_MAINTENANCE:
return c.setMaintenanceStatus(false)
return c.setMaintenanceStatus(ctx, false)
case control.NetmapStatus_ONLINE, control.NetmapStatus_OFFLINE:
}
c.stopMaintenance()
c.stopMaintenance(ctx)
if !c.needBootstrap() {
return errRelayBootstrap
@ -370,12 +370,12 @@ func (c *cfg) SetNetmapStatus(st control.NetmapStatus) error {
if st == control.NetmapStatus_ONLINE {
c.cfgNetmap.reBoostrapTurnedOff.Store(false)
return bootstrapOnline(c)
return bootstrapOnline(ctx, c)
}
c.cfgNetmap.reBoostrapTurnedOff.Store(true)
return c.updateNetMapState(func(*nmClient.UpdatePeerPrm) {})
return c.updateNetMapState(ctx, func(*nmClient.UpdatePeerPrm) {})
}
func (c *cfg) GetNetmapStatus() (control.NetmapStatus, uint64, error) {
@ -387,11 +387,11 @@ func (c *cfg) GetNetmapStatus() (control.NetmapStatus, uint64, error) {
return st, epoch, nil
}
func (c *cfg) ForceMaintenance() error {
return c.setMaintenanceStatus(true)
func (c *cfg) ForceMaintenance(ctx context.Context) error {
return c.setMaintenanceStatus(ctx, true)
}
func (c *cfg) setMaintenanceStatus(force bool) error {
func (c *cfg) setMaintenanceStatus(ctx context.Context, force bool) error {
netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration()
if err != nil {
err = fmt.Errorf("read network settings to check maintenance allowance: %w", err)
@ -400,10 +400,10 @@ func (c *cfg) setMaintenanceStatus(force bool) error {
}
if err == nil || force {
c.startMaintenance()
c.startMaintenance(ctx)
if err == nil {
err = c.updateNetMapState((*nmClient.UpdatePeerPrm).SetMaintenance)
err = c.updateNetMapState(ctx, (*nmClient.UpdatePeerPrm).SetMaintenance)
}
if err != nil {
@ -416,12 +416,12 @@ func (c *cfg) setMaintenanceStatus(force bool) error {
// calls UpdatePeerState operation of Netmap contract's client for the local node.
// State setter is used to specify node state to switch to.
func (c *cfg) updateNetMapState(stateSetter func(*nmClient.UpdatePeerPrm)) error {
func (c *cfg) updateNetMapState(ctx context.Context, stateSetter func(*nmClient.UpdatePeerPrm)) error {
var prm nmClient.UpdatePeerPrm
prm.SetKey(c.key.PublicKey().Bytes())
stateSetter(&prm)
_, err := c.cfgNetmap.wrapper.UpdatePeerState(prm)
_, err := c.cfgNetmap.wrapper.UpdatePeerState(ctx, prm)
return err
}


@ -66,11 +66,11 @@ func (c *cfg) MaxObjectSize() uint64 {
return sz
}
func (s *objectSvc) Put() (objectService.PutObjectStream, error) {
func (s *objectSvc) Put(_ context.Context) (objectService.PutObjectStream, error) {
return s.put.Put()
}
func (s *objectSvc) Patch() (objectService.PatchObjectStream, error) {
func (s *objectSvc) Patch(_ context.Context) (objectService.PatchObjectStream, error) {
return s.patch.Patch()
}


@ -1,17 +1,18 @@
package main
import (
"context"
"runtime"
profilerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/profiler"
httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http"
)
func initProfilerService(c *cfg) {
func initProfilerService(ctx context.Context, c *cfg) {
tuneProfilers(c)
pprof, _ := pprofComponent(c)
pprof.init(c)
pprof.init(ctx, c)
}
func pprofComponent(c *cfg) (*httpComponent, bool) {


@ -10,17 +10,17 @@ import (
"go.uber.org/zap"
)
func setRuntimeParameters(c *cfg) {
func setRuntimeParameters(ctx context.Context, c *cfg) {
if len(os.Getenv("GOMEMLIMIT")) != 0 {
// default limit < yaml limit < app env limit < GOMEMLIMIT
c.log.Warn(context.Background(), logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT)
c.log.Warn(ctx, logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT)
return
}
memLimitBytes := runtime.GCMemoryLimitBytes(c.appCfg)
previous := debug.SetMemoryLimit(memLimitBytes)
if memLimitBytes != previous {
c.log.Info(context.Background(), logs.RuntimeSoftMemoryLimitUpdated,
c.log.Info(ctx, logs.RuntimeSoftMemoryLimitUpdated,
zap.Int64("new_value", memLimitBytes),
zap.Int64("old_value", previous))
}


@ -19,15 +19,15 @@ type Target interface {
String() string
}
func LogRequest(log *logger.Logger, operation string, req Request, target Target, status bool) {
func LogRequest(ctx context.Context, log *logger.Logger, operation string, req Request, target Target, status bool) {
var key []byte
if req != nil {
key = req.GetVerificationHeader().GetBodySignature().GetKey()
}
LogRequestWithKey(log, operation, key, target, status)
LogRequestWithKey(ctx, log, operation, key, target, status)
}
func LogRequestWithKey(log *logger.Logger, operation string, key []byte, target Target, status bool) {
func LogRequestWithKey(ctx context.Context, log *logger.Logger, operation string, key []byte, target Target, status bool) {
object, subject := NotDefined, NotDefined
publicKey := crypto.UnmarshalPublicKey(key)
@ -39,7 +39,7 @@ func LogRequestWithKey(log *logger.Logger, operation string, key []byte, target
object = target.String()
}
log.Info(context.Background(), logs.AuditEventLogRecord,
log.Info(ctx, logs.AuditEventLogRecord,
zap.String("operation", operation),
zap.String("object", object),
zap.String("subject", subject),


@ -117,7 +117,7 @@ func (v *FormatValidator) Validate(ctx context.Context, obj *objectSDK.Object, u
}
if !unprepared {
if err := v.validateSignatureKey(obj); err != nil {
if err := v.validateSignatureKey(ctx, obj); err != nil {
return fmt.Errorf("(%T) could not validate signature key: %w", v, err)
}
@ -134,7 +134,7 @@ func (v *FormatValidator) Validate(ctx context.Context, obj *objectSDK.Object, u
return nil
}
func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error {
func (v *FormatValidator) validateSignatureKey(ctx context.Context, obj *objectSDK.Object) error {
sig := obj.Signature()
if sig == nil {
return errMissingSignature
@ -156,7 +156,7 @@ func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error {
ownerID := obj.OwnerID()
if token == nil && obj.ECHeader() != nil {
role, err := v.isIROrContainerNode(obj, binKey)
role, err := v.isIROrContainerNode(ctx, obj, binKey)
if err != nil {
return err
}
@ -172,7 +172,7 @@ func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error {
}
if v.verifyTokenIssuer {
role, err := v.isIROrContainerNode(obj, binKey)
role, err := v.isIROrContainerNode(ctx, obj, binKey)
if err != nil {
return err
}
@ -190,7 +190,7 @@ func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error {
return nil
}
func (v *FormatValidator) isIROrContainerNode(obj *objectSDK.Object, signerKey []byte) (acl.Role, error) {
func (v *FormatValidator) isIROrContainerNode(ctx context.Context, obj *objectSDK.Object, signerKey []byte) (acl.Role, error) {
cnrID, containerIDSet := obj.ContainerID()
if !containerIDSet {
return acl.RoleOthers, errNilCID
@ -204,7 +204,7 @@ func (v *FormatValidator) isIROrContainerNode(obj *objectSDK.Object, signerKey [
return acl.RoleOthers, fmt.Errorf("failed to get container (id=%s): %w", cnrID.EncodeToString(), err)
}
res, err := v.senderClassifier.IsInnerRingOrContainerNode(signerKey, cnrID, cnr.Value)
res, err := v.senderClassifier.IsInnerRingOrContainerNode(ctx, signerKey, cnrID, cnr.Value)
if err != nil {
return acl.RoleOthers, err
}


@ -41,6 +41,7 @@ type ClassifyResult struct {
}
func (c SenderClassifier) Classify(
ctx context.Context,
ownerID *user.ID,
ownerKey *keys.PublicKey,
idCnr cid.ID,
@ -58,14 +59,14 @@ func (c SenderClassifier) Classify(
}, nil
}
return c.IsInnerRingOrContainerNode(ownerKeyInBytes, idCnr, cnr)
return c.IsInnerRingOrContainerNode(ctx, ownerKeyInBytes, idCnr, cnr)
}
func (c SenderClassifier) IsInnerRingOrContainerNode(ownerKeyInBytes []byte, idCnr cid.ID, cnr container.Container) (*ClassifyResult, error) {
func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerKeyInBytes []byte, idCnr cid.ID, cnr container.Container) (*ClassifyResult, error) {
isInnerRingNode, err := c.isInnerRingKey(ownerKeyInBytes)
if err != nil {
// do not throw error, try best case matching
c.log.Debug(context.Background(), logs.V2CantCheckIfRequestFromInnerRing,
c.log.Debug(ctx, logs.V2CantCheckIfRequestFromInnerRing,
zap.String("error", err.Error()))
} else if isInnerRingNode {
return &ClassifyResult{
@ -82,7 +83,7 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ownerKeyInBytes []byte, idC
// error might happen if request has `RoleOther` key and placement
// is not possible for previous epoch, so
// do not throw error, try best case matching
c.log.Debug(context.Background(), logs.V2CantCheckIfRequestFromContainerNode,
c.log.Debug(ctx, logs.V2CantCheckIfRequestFromContainerNode,
zap.String("error", err.Error()))
} else if isContainerNode {
return &ClassifyResult{


@ -29,7 +29,7 @@ type (
emitDuration uint32 // in blocks
}
depositor func() (util.Uint256, error)
depositor func(context.Context) (util.Uint256, error)
awaiter func(context.Context, util.Uint256) error
)
@ -66,11 +66,11 @@ func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer {
)
}
func newEmissionTimer(args *emitTimerArgs) *timer.BlockTimer {
func newEmissionTimer(ctx context.Context, args *emitTimerArgs) *timer.BlockTimer {
return timer.NewBlockTimer(
timer.StaticBlockMeter(args.emitDuration),
func() {
args.ap.HandleGasEmission(timerEvent.NewAlphabetEmitTick{})
args.ap.HandleGasEmission(ctx, timerEvent.NewAlphabetEmitTick{})
},
)
}


@ -35,7 +35,7 @@ import (
"google.golang.org/grpc"
)
func (s *Server) initNetmapProcessor(cfg *viper.Viper,
func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper,
alphaSync event.Handler,
) error {
locodeValidator, err := s.newLocodeValidator(cfg)
@ -48,10 +48,13 @@ func (s *Server) initNetmapProcessor(cfg *viper.Viper,
var netMapCandidateStateValidator statevalidation.NetMapCandidateValidator
netMapCandidateStateValidator.SetNetworkSettings(netSettings)
poolSize := cfg.GetInt("workers.netmap")
s.log.Debug(ctx, logs.NetmapNetmapWorkerPool, zap.Int("size", poolSize))
s.netmapProcessor, err = netmap.New(&netmap.Params{
Log: s.log,
Metrics: s.irMetrics,
PoolSize: cfg.GetInt("workers.netmap"),
PoolSize: poolSize,
NetmapClient: netmap.NewNetmapClient(s.netmapClient),
EpochTimer: s,
EpochState: s,
@ -205,7 +208,7 @@ func (s *Server) initTimers(ctx context.Context, cfg *viper.Viper) {
s.addBlockTimer(s.epochTimer)
// initialize emission timer
emissionTimer := newEmissionTimer(&emitTimerArgs{
emissionTimer := newEmissionTimer(ctx, &emitTimerArgs{
ap: s.alphabetProcessor,
emitDuration: cfg.GetUint32("timers.emit"),
})
@ -213,18 +216,20 @@ func (s *Server) initTimers(ctx context.Context, cfg *viper.Viper) {
s.addBlockTimer(emissionTimer)
}
func (s *Server) initAlphabetProcessor(cfg *viper.Viper) error {
func (s *Server) initAlphabetProcessor(ctx context.Context, cfg *viper.Viper) error {
parsedWallets, err := parseWalletAddressesFromStrings(cfg.GetStringSlice("emit.extra_wallets"))
if err != nil {
return err
}
poolSize := cfg.GetInt("workers.alphabet")
s.log.Debug(ctx, logs.AlphabetAlphabetWorkerPool, zap.Int("size", poolSize))
// create alphabet processor
s.alphabetProcessor, err = alphabet.New(&alphabet.Params{
ParsedWallets: parsedWallets,
Log: s.log,
Metrics: s.irMetrics,
PoolSize: cfg.GetInt("workers.alphabet"),
PoolSize: poolSize,
AlphabetContracts: s.contracts.alphabet,
NetmapClient: s.netmapClient,
MorphClient: s.morphClient,
@ -239,12 +244,14 @@ func (s *Server) initAlphabetProcessor(cfg *viper.Viper) error {
return err
}
func (s *Server) initContainerProcessor(cfg *viper.Viper, cnrClient *container.Client, frostfsIDClient *frostfsid.Client) error {
func (s *Server) initContainerProcessor(ctx context.Context, cfg *viper.Viper, cnrClient *container.Client, frostfsIDClient *frostfsid.Client) error {
poolSize := cfg.GetInt("workers.container")
s.log.Debug(ctx, logs.ContainerContainerWorkerPool, zap.Int("size", poolSize))
// container processor
containerProcessor, err := cont.New(&cont.Params{
Log: s.log,
Metrics: s.irMetrics,
PoolSize: cfg.GetInt("workers.container"),
PoolSize: poolSize,
AlphabetState: s,
ContainerClient: cnrClient,
MorphClient: cnrClient.Morph(),
@ -258,12 +265,14 @@ func (s *Server) initContainerProcessor(cfg *viper.Viper, cnrClient *container.C
return bindMorphProcessor(containerProcessor, s)
}
func (s *Server) initBalanceProcessor(cfg *viper.Viper, frostfsCli *frostfsClient.Client) error {
func (s *Server) initBalanceProcessor(ctx context.Context, cfg *viper.Viper, frostfsCli *frostfsClient.Client) error {
poolSize := cfg.GetInt("workers.balance")
s.log.Debug(ctx, logs.BalanceBalanceWorkerPool, zap.Int("size", poolSize))
// create balance processor
balanceProcessor, err := balance.New(&balance.Params{
Log: s.log,
Metrics: s.irMetrics,
PoolSize: cfg.GetInt("workers.balance"),
PoolSize: poolSize,
FrostFSClient: frostfsCli,
BalanceSC: s.contracts.balance,
AlphabetState: s,
@ -276,15 +285,17 @@ func (s *Server) initBalanceProcessor(cfg *viper.Viper, frostfsCli *frostfsClien
return bindMorphProcessor(balanceProcessor, s)
}
func (s *Server) initFrostFSMainnetProcessor(cfg *viper.Viper) error {
func (s *Server) initFrostFSMainnetProcessor(ctx context.Context, cfg *viper.Viper) error {
if s.withoutMainNet {
return nil
}
poolSize := cfg.GetInt("workers.frostfs")
s.log.Debug(ctx, logs.FrostFSFrostfsWorkerPool, zap.Int("size", poolSize))
frostfsProcessor, err := frostfs.New(&frostfs.Params{
Log: s.log,
Metrics: s.irMetrics,
PoolSize: cfg.GetInt("workers.frostfs"),
PoolSize: poolSize,
FrostFSContract: s.contracts.frostfs,
BalanceClient: s.balanceClient,
NetmapClient: s.netmapClient,
@ -304,10 +315,10 @@ func (s *Server) initFrostFSMainnetProcessor(cfg *viper.Viper) error {
return bindMainnetProcessor(frostfsProcessor, s)
}
func (s *Server) initGRPCServer(cfg *viper.Viper, log *logger.Logger, audit *atomic.Bool) error {
func (s *Server) initGRPCServer(ctx context.Context, cfg *viper.Viper, log *logger.Logger, audit *atomic.Bool) error {
controlSvcEndpoint := cfg.GetString("control.grpc.endpoint")
if controlSvcEndpoint == "" {
s.log.Info(context.Background(), logs.InnerringNoControlServerEndpointSpecified)
s.log.Info(ctx, logs.InnerringNoControlServerEndpointSpecified)
return nil
}
@ -403,7 +414,7 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) {
return result, nil
}
func (s *Server) initProcessors(cfg *viper.Viper, morphClients *serverMorphClients) error {
func (s *Server) initProcessors(ctx context.Context, cfg *viper.Viper, morphClients *serverMorphClients) error {
irf := s.createIRFetcher()
s.statusIndex = newInnerRingIndexer(
@ -418,27 +429,27 @@ func (s *Server) initProcessors(cfg *viper.Viper, morphClients *serverMorphClien
return err
}
err = s.initNetmapProcessor(cfg, alphaSync)
err = s.initNetmapProcessor(ctx, cfg, alphaSync)
if err != nil {
return err
}
err = s.initContainerProcessor(cfg, morphClients.CnrClient, morphClients.FrostFSIDClient)
err = s.initContainerProcessor(ctx, cfg, morphClients.CnrClient, morphClients.FrostFSIDClient)
if err != nil {
return err
}
err = s.initBalanceProcessor(cfg, morphClients.FrostFSClient)
err = s.initBalanceProcessor(ctx, cfg, morphClients.FrostFSClient)
if err != nil {
return err
}
err = s.initFrostFSMainnetProcessor(cfg)
err = s.initFrostFSMainnetProcessor(ctx, cfg)
if err != nil {
return err
}
err = s.initAlphabetProcessor(cfg)
err = s.initAlphabetProcessor(ctx, cfg)
return err
}


@ -157,7 +157,7 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
return err
}
if s.IsAlphabet() {
if s.IsAlphabet(ctx) {
err = s.initMainNotary(ctx)
if err != nil {
return err
@ -217,14 +217,14 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
}
func (s *Server) registerMorphNewBlockEventHandler() {
s.morphListener.RegisterBlockHandler(func(b *block.Block) {
s.log.Debug(context.Background(), logs.InnerringNewBlock,
s.morphListener.RegisterBlockHandler(func(ctx context.Context, b *block.Block) {
s.log.Debug(ctx, logs.InnerringNewBlock,
zap.Uint32("index", b.Index),
)
err := s.persistate.SetUInt32(persistateSideChainLastBlockKey, b.Index)
if err != nil {
s.log.Warn(context.Background(), logs.InnerringCantUpdatePersistentState,
s.log.Warn(ctx, logs.InnerringCantUpdatePersistentState,
zap.String("chain", "side"),
zap.Uint32("block_index", b.Index))
}
@ -235,10 +235,10 @@ func (s *Server) registerMorphNewBlockEventHandler() {
func (s *Server) registerMainnetNewBlockEventHandler() {
if !s.withoutMainNet {
s.mainnetListener.RegisterBlockHandler(func(b *block.Block) {
s.mainnetListener.RegisterBlockHandler(func(ctx context.Context, b *block.Block) {
err := s.persistate.SetUInt32(persistateMainChainLastBlockKey, b.Index)
if err != nil {
s.log.Warn(context.Background(), logs.InnerringCantUpdatePersistentState,
s.log.Warn(ctx, logs.InnerringCantUpdatePersistentState,
zap.String("chain", "main"),
zap.Uint32("block_index", b.Index))
}
@ -400,14 +400,14 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan
return nil, err
}
err = server.initProcessors(cfg, morphClients)
err = server.initProcessors(ctx, cfg, morphClients)
if err != nil {
return nil, err
}
server.initTimers(ctx, cfg)
err = server.initGRPCServer(cfg, log, audit)
err = server.initGRPCServer(ctx, cfg, log, audit)
if err != nil {
return nil, err
}
@ -604,7 +604,7 @@ func (s *Server) initConfigFromBlockchain(ctx context.Context) error {
s.log.Debug(ctx, logs.InnerringReadConfigFromBlockchain,
zap.Bool("active", s.IsActive(ctx)),
zap.Bool("alphabet", s.IsAlphabet()),
zap.Bool("alphabet", s.IsAlphabet(ctx)),
zap.Uint64("epoch", epoch),
zap.Uint32("precision", balancePrecision),
zap.Uint32("init_epoch_tick_delta", s.initialEpochTickDelta),
@ -636,7 +636,7 @@ func (s *Server) nextEpochBlockDelta() (uint32, error) {
// only if inner ring node is alphabet node.
func (s *Server) onlyAlphabetEventHandler(f event.Handler) event.Handler {
return func(ctx context.Context, ev event.Event) {
if s.IsAlphabet() {
if s.IsAlphabet(ctx) {
f(ctx, ev)
}
}


@ -28,37 +28,38 @@ const (
gasDivisor = 2
)
func (s *Server) depositMainNotary() (tx util.Uint256, err error) {
func (s *Server) depositMainNotary(ctx context.Context) (tx util.Uint256, err error) {
depositAmount, err := client.CalculateNotaryDepositAmount(s.mainnetClient, gasMultiplier, gasDivisor)
if err != nil {
return util.Uint256{}, fmt.Errorf("could not calculate main notary deposit amount: %w", err)
}
return s.mainnetClient.DepositNotary(
ctx,
depositAmount,
uint32(s.epochDuration.Load())+notaryExtraBlocks,
)
}
func (s *Server) depositSideNotary() (util.Uint256, error) {
func (s *Server) depositSideNotary(ctx context.Context) (util.Uint256, error) {
depositAmount, err := client.CalculateNotaryDepositAmount(s.morphClient, gasMultiplier, gasDivisor)
if err != nil {
return util.Uint256{}, fmt.Errorf("could not calculate side notary deposit amount: %w", err)
}
tx, _, err := s.morphClient.DepositEndlessNotary(depositAmount)
tx, _, err := s.morphClient.DepositEndlessNotary(ctx, depositAmount)
return tx, err
}
func (s *Server) notaryHandler(ctx context.Context, _ event.Event) {
if !s.mainNotaryConfig.disabled {
_, err := s.depositMainNotary()
_, err := s.depositMainNotary(ctx)
if err != nil {
s.log.Error(ctx, logs.InnerringCantMakeNotaryDepositInMainChain, zap.Error(err))
}
}
if _, err := s.depositSideNotary(); err != nil {
if _, err := s.depositSideNotary(ctx); err != nil {
s.log.Error(ctx, logs.InnerringCantMakeNotaryDepositInSideChain, zap.Error(err))
}
}
@ -72,7 +73,7 @@ func (s *Server) awaitSideNotaryDeposit(ctx context.Context, tx util.Uint256) er
}
func (s *Server) initNotary(ctx context.Context, deposit depositor, await awaiter, msg string) error {
tx, err := deposit()
tx, err := deposit(ctx)
if err != nil {
return err
}


@ -10,16 +10,16 @@ import (
"go.uber.org/zap"
)
func (ap *Processor) HandleGasEmission(ev event.Event) {
func (ap *Processor) HandleGasEmission(ctx context.Context, ev event.Event) {
_ = ev.(timers.NewAlphabetEmitTick)
ap.log.Info(context.Background(), logs.AlphabetTick, zap.String("type", "alphabet gas emit"))
ap.log.Info(ctx, logs.AlphabetTick, zap.String("type", "alphabet gas emit"))
// send event to the worker pool
err := processors.SubmitEvent(ap.pool, ap.metrics, "alphabet_emit_gas", ap.processEmit)
err := processors.SubmitEvent(ap.pool, ap.metrics, "alphabet_emit_gas", func() bool { return ap.processEmit(ctx) })
if err != nil {
// there system can be moved into controlled degradation stage
ap.log.Warn(context.Background(), logs.AlphabetAlphabetProcessorWorkerPoolDrained,
ap.log.Warn(ctx, logs.AlphabetAlphabetProcessorWorkerPoolDrained,
zap.Int("capacity", ap.pool.Cap()))
}
}


@ -1,6 +1,7 @@
package alphabet_test
import (
"context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
@ -60,7 +61,7 @@ func TestProcessorEmitsGasToNetmapAndAlphabet(t *testing.T) {
processor, err := alphabet.New(params)
require.NoError(t, err, "failed to create processor instance")
processor.HandleGasEmission(timers.NewAlphabetEmitTick{})
processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{})
processor.WaitPoolRunning()
@ -137,7 +138,7 @@ func TestProcessorEmitsGasToNetmapIfNoParsedWallets(t *testing.T) {
processor, err := alphabet.New(params)
require.NoError(t, err, "failed to create processor instance")
processor.HandleGasEmission(timers.NewAlphabetEmitTick{})
processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{})
processor.WaitPoolRunning()
@ -198,7 +199,7 @@ func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) {
processor, err := alphabet.New(params)
require.NoError(t, err, "failed to create processor instance")
processor.HandleGasEmission(timers.NewAlphabetEmitTick{})
processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{})
processor.WaitPoolRunning()
@ -219,7 +220,7 @@ type testIndexer struct {
index int
}
func (i *testIndexer) AlphabetIndex() int {
func (i *testIndexer) AlphabetIndex(context.Context) int {
return i.index
}
@ -246,7 +247,7 @@ type testMorphClient struct {
batchTransferedGas []batchTransferGas
}
func (c *testMorphClient) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error) {
func (c *testMorphClient) Invoke(_ context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error) {
c.invokedMethods = append(c.invokedMethods,
invokedMethod{
contract: contract,


@ -14,39 +14,39 @@ import (
const emitMethod = "emit"
func (ap *Processor) processEmit() bool {
index := ap.irList.AlphabetIndex()
func (ap *Processor) processEmit(ctx context.Context) bool {
index := ap.irList.AlphabetIndex(ctx)
if index < 0 {
ap.log.Info(context.Background(), logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent)
ap.log.Info(ctx, logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent)
return true
}
contract, ok := ap.alphabetContracts.GetByIndex(index)
if !ok {
ap.log.Debug(context.Background(), logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent,
ap.log.Debug(ctx, logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent,
zap.Int("index", index))
return false
}
// there is no signature collecting, so we don't need extra fee
_, err := ap.morphClient.Invoke(contract, 0, emitMethod)
_, err := ap.morphClient.Invoke(ctx, contract, 0, emitMethod)
if err != nil {
ap.log.Warn(context.Background(), logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error()))
ap.log.Warn(ctx, logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error()))
return false
}
if ap.storageEmission == 0 {
ap.log.Info(context.Background(), logs.AlphabetStorageNodeEmissionIsOff)
ap.log.Info(ctx, logs.AlphabetStorageNodeEmissionIsOff)
return true
}
networkMap, err := ap.netmapClient.NetMap()
if err != nil {
ap.log.Warn(context.Background(), logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes,
ap.log.Warn(ctx, logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes,
zap.String("error", err.Error()))
return false
@ -59,7 +59,7 @@ func (ap *Processor) processEmit() bool {
ap.pwLock.RUnlock()
extraLen := len(pw)
ap.log.Debug(context.Background(), logs.AlphabetGasEmission,
ap.log.Debug(ctx, logs.AlphabetGasEmission,
zap.Int("network_map", nmLen),
zap.Int("extra_wallets", extraLen))
@ -69,20 +69,20 @@ func (ap *Processor) processEmit() bool {
gasPerNode := fixedn.Fixed8(ap.storageEmission / uint64(nmLen+extraLen))
ap.transferGasToNetmapNodes(nmNodes, gasPerNode)
ap.transferGasToNetmapNodes(ctx, nmNodes, gasPerNode)
ap.transferGasToExtraNodes(pw, gasPerNode)
ap.transferGasToExtraNodes(ctx, pw, gasPerNode)
return true
}
func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerNode fixedn.Fixed8) {
func (ap *Processor) transferGasToNetmapNodes(ctx context.Context, nmNodes []netmap.NodeInfo, gasPerNode fixedn.Fixed8) {
for i := range nmNodes {
keyBytes := nmNodes[i].PublicKey()
key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256())
if err != nil {
ap.log.Warn(context.Background(), logs.AlphabetCantParseNodePublicKey,
ap.log.Warn(ctx, logs.AlphabetCantParseNodePublicKey,
zap.String("error", err.Error()))
continue
@ -90,7 +90,7 @@ func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerN
err = ap.morphClient.TransferGas(key.GetScriptHash(), gasPerNode)
if err != nil {
ap.log.Warn(context.Background(), logs.AlphabetCantTransferGas,
ap.log.Warn(ctx, logs.AlphabetCantTransferGas,
zap.String("receiver", key.Address()),
zap.Int64("amount", int64(gasPerNode)),
zap.String("error", err.Error()),
@ -99,7 +99,7 @@ func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerN
}
}
func (ap *Processor) transferGasToExtraNodes(pw []util.Uint160, gasPerNode fixedn.Fixed8) {
func (ap *Processor) transferGasToExtraNodes(ctx context.Context, pw []util.Uint160, gasPerNode fixedn.Fixed8) {
if len(pw) > 0 {
err := ap.morphClient.BatchTransferGas(pw, gasPerNode)
if err != nil {
@ -107,7 +107,7 @@ func (ap *Processor) transferGasToExtraNodes(pw []util.Uint160, gasPerNode fixed
for i, addr := range pw {
receiversLog[i] = addr.StringLE()
}
ap.log.Warn(context.Background(), logs.AlphabetCantTransferGasToWallet,
ap.log.Warn(ctx, logs.AlphabetCantTransferGasToWallet,
zap.Strings("receivers", receiversLog),
zap.Int64("amount", int64(gasPerNode)),
zap.String("error", err.Error()),
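
Throughout this file the logger now receives the caller's context as its first argument, which lets request-scoped data travel with every record. A toy sketch of such a context-aware logger, using only the standard library and a hypothetical trace-ID key (the real logger wraps zap):

```go
package main

import (
	"context"
	"fmt"
)

type traceKey struct{}

// ctxLogger mimics the shape of a context-aware logger: the context is the
// first argument of every call, so request-scoped data (here a trace ID) can
// be attached to each record. Field names are illustrative only.
type ctxLogger struct{}

func (ctxLogger) Info(ctx context.Context, msg string, fields ...any) {
	if id, ok := ctx.Value(traceKey{}).(string); ok {
		fields = append(fields, "trace_id", id)
	}
	fmt.Println("INFO", msg, fields)
}

func main() {
	log := ctxLogger{}
	ctx := context.WithValue(context.Background(), traceKey{}, "abc123")
	log.Info(ctx, "alphabet gas emission", "network_map", 4, "extra_wallets", 2)
}
```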

View file

@ -7,7 +7,6 @@ import (
"sync"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
@ -15,13 +14,12 @@ import (
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
"go.uber.org/zap"
)
type (
// Indexer is a callback interface for inner ring global state.
Indexer interface {
AlphabetIndex() int
AlphabetIndex(context.Context) int
}
// Contracts is an interface of the storage
@ -41,7 +39,7 @@ type (
}
morphClient interface {
Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error)
Invoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error)
TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error
BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8) error
}
@ -86,8 +84,6 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/alphabet: global state is not set")
}
p.Log.Debug(context.Background(), logs.AlphabetAlphabetWorkerPool, zap.Int("size", p.PoolSize))
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
return nil, fmt.Errorf("ir/frostfs: can't create worker pool: %w", err)
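
The callback interfaces above gain context.Context as their first parameter. One convenient consequence, visible in the test changes earlier, is that implementations which do not need the context can leave the parameter unnamed. A hedged sketch with hypothetical types:

```go
package main

import (
	"context"
	"fmt"
)

// Indexer mirrors the reshaped callback interface from the hunk above.
type Indexer interface {
	AlphabetIndex(context.Context) int
}

// testIndexer is a hypothetical stub: when the implementation does not need
// the context, the parameter can stay unnamed and still satisfy the interface.
type testIndexer struct{ index int }

func (i *testIndexer) AlphabetIndex(context.Context) int { return i.index }

func main() {
	var idx Indexer = &testIndexer{index: 2}
	fmt.Println("alphabet index:", idx.AlphabetIndex(context.Background()))
}
```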

View file

@ -20,7 +20,7 @@ func (bp *Processor) handleLock(ctx context.Context, ev event.Event) {
// send an event to the worker pool
err := processors.SubmitEvent(bp.pool, bp.metrics, "lock", func() bool {
return bp.processLock(&lock)
return bp.processLock(ctx, &lock)
})
if err != nil {
// the system can be moved into a controlled degradation stage

View file

@ -70,7 +70,7 @@ type testAlphabetState struct {
isAlphabet bool
}
func (s *testAlphabetState) IsAlphabet() bool {
func (s *testAlphabetState) IsAlphabet(context.Context) bool {
return s.isAlphabet
}
@ -84,7 +84,7 @@ type testFrostFSContractClient struct {
chequeCalls int
}
func (c *testFrostFSContractClient) Cheque(p frostfscontract.ChequePrm) error {
func (c *testFrostFSContractClient) Cheque(_ context.Context, p frostfscontract.ChequePrm) error {
c.chequeCalls++
return nil
}

View file

@ -11,9 +11,9 @@ import (
// Process lock event by invoking Cheque method in main net to send assets
// back to the withdraw issuer.
func (bp *Processor) processLock(lock *balanceEvent.Lock) bool {
if !bp.alphabetState.IsAlphabet() {
bp.log.Info(context.Background(), logs.BalanceNonAlphabetModeIgnoreBalanceLock)
func (bp *Processor) processLock(ctx context.Context, lock *balanceEvent.Lock) bool {
if !bp.alphabetState.IsAlphabet(ctx) {
bp.log.Info(ctx, logs.BalanceNonAlphabetModeIgnoreBalanceLock)
return true
}
@ -25,9 +25,9 @@ func (bp *Processor) processLock(lock *balanceEvent.Lock) bool {
prm.SetLock(lock.LockAccount())
prm.SetHash(lock.TxHash())
err := bp.frostfsClient.Cheque(prm)
err := bp.frostfsClient.Cheque(ctx, prm)
if err != nil {
bp.log.Error(context.Background(), logs.BalanceCantSendLockAssetTx, zap.Error(err))
bp.log.Error(ctx, logs.BalanceCantSendLockAssetTx, zap.Error(err))
return false
}

View file

@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
@ -13,13 +12,12 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
"go.uber.org/zap"
)
type (
// AlphabetState is a callback interface for inner ring global state.
AlphabetState interface {
IsAlphabet() bool
IsAlphabet(context.Context) bool
}
// PrecisionConverter converts balance amount values.
@ -28,7 +26,7 @@ type (
}
FrostFSClient interface {
Cheque(p frostfscontract.ChequePrm) error
Cheque(ctx context.Context, p frostfscontract.ChequePrm) error
}
// Processor of events produced by balance contract in the morphchain.
@ -69,8 +67,6 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/balance: balance precision converter is not set")
}
p.Log.Debug(context.Background(), logs.BalanceBalanceWorkerPool, zap.Int("size", p.PoolSize))
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
return nil, fmt.Errorf("ir/balance: can't create worker pool: %w", err)

View file

@ -23,7 +23,7 @@ func (cp *Processor) handlePut(ctx context.Context, ev event.Event) {
// send an event to the worker pool
err := processors.SubmitEvent(cp.pool, cp.metrics, "container_put", func() bool {
return cp.processContainerPut(put)
return cp.processContainerPut(ctx, put)
})
if err != nil {
// the system can be moved into a controlled degradation stage
@ -41,7 +41,7 @@ func (cp *Processor) handleDelete(ctx context.Context, ev event.Event) {
// send an event to the worker pool
err := processors.SubmitEvent(cp.pool, cp.metrics, "container_delete", func() bool {
return cp.processContainerDelete(del)
return cp.processContainerDelete(ctx, del)
})
if err != nil {
// the system can be moved into a controlled degradation stage

View file

@ -161,7 +161,7 @@ type testAlphabetState struct {
isAlphabet bool
}
func (s *testAlphabetState) IsAlphabet() bool {
func (s *testAlphabetState) IsAlphabet(context.Context) bool {
return s.isAlphabet
}

View file

@ -37,27 +37,27 @@ var errContainerAndOwnerNamespaceDontMatch = errors.New("container and owner nam
// Process a new container from the user by checking the container sanity
// and sending approve tx back to the morph.
func (cp *Processor) processContainerPut(put putEvent) bool {
if !cp.alphabetState.IsAlphabet() {
cp.log.Info(context.Background(), logs.ContainerNonAlphabetModeIgnoreContainerPut)
func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool {
if !cp.alphabetState.IsAlphabet(ctx) {
cp.log.Info(ctx, logs.ContainerNonAlphabetModeIgnoreContainerPut)
return true
}
ctx := &putContainerContext{
pctx := &putContainerContext{
e: put,
}
err := cp.checkPutContainer(ctx)
err := cp.checkPutContainer(pctx)
if err != nil {
cp.log.Error(context.Background(), logs.ContainerPutContainerCheckFailed,
cp.log.Error(ctx, logs.ContainerPutContainerCheckFailed,
zap.String("error", err.Error()),
)
return false
}
if err := cp.morphClient.NotarySignAndInvokeTX(ctx.e.NotaryRequest().MainTransaction); err != nil {
cp.log.Error(context.Background(), logs.ContainerCouldNotApprovePutContainer,
if err := cp.morphClient.NotarySignAndInvokeTX(pctx.e.NotaryRequest().MainTransaction); err != nil {
cp.log.Error(ctx, logs.ContainerCouldNotApprovePutContainer,
zap.String("error", err.Error()),
)
return false
@ -104,15 +104,15 @@ func (cp *Processor) checkPutContainer(ctx *putContainerContext) error {
// Process delete container operation from the user by checking container sanity
// and sending approve tx back to morph.
func (cp *Processor) processContainerDelete(e containerEvent.Delete) bool {
if !cp.alphabetState.IsAlphabet() {
cp.log.Info(context.Background(), logs.ContainerNonAlphabetModeIgnoreContainerDelete)
func (cp *Processor) processContainerDelete(ctx context.Context, e containerEvent.Delete) bool {
if !cp.alphabetState.IsAlphabet(ctx) {
cp.log.Info(ctx, logs.ContainerNonAlphabetModeIgnoreContainerDelete)
return true
}
err := cp.checkDeleteContainer(e)
if err != nil {
cp.log.Error(context.Background(), logs.ContainerDeleteContainerCheckFailed,
cp.log.Error(ctx, logs.ContainerDeleteContainerCheckFailed,
zap.String("error", err.Error()),
)
@ -120,7 +120,7 @@ func (cp *Processor) processContainerDelete(e containerEvent.Delete) bool {
}
if err := cp.morphClient.NotarySignAndInvokeTX(e.NotaryRequest().MainTransaction); err != nil {
cp.log.Error(context.Background(), logs.ContainerCouldNotApproveDeleteContainer,
cp.log.Error(ctx, logs.ContainerCouldNotApproveDeleteContainer,
zap.String("error", err.Error()),
)
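
A side effect of adding the ctx parameter is the rename of the local *putContainerContext variable from ctx to pctx, so the name ctx stays reserved for context.Context and is never shadowed. A small illustrative sketch of that convention (types and names are simplified):

```go
package main

import (
	"context"
	"fmt"
)

// putContainerContext is a processing-state holder, unrelated to context.Context.
type putContainerContext struct{ name string }

// processContainerPut keeps ctx for the context.Context parameter and uses
// pctx for the local state struct, so the two never collide.
func processContainerPut(ctx context.Context, name string) bool {
	pctx := &putContainerContext{name: name}
	if ctx.Err() != nil {
		return false
	}
	fmt.Println("approving container:", pctx.name)
	return true
}

func main() {
	fmt.Println(processContainerPut(context.Background(), "demo"))
}
```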

View file

@ -6,7 +6,6 @@ import (
"fmt"
frostfsidclient "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
@ -16,13 +15,12 @@ import (
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
"go.uber.org/zap"
)
type (
// AlphabetState is a callback interface for inner ring global state.
AlphabetState interface {
IsAlphabet() bool
IsAlphabet(context.Context) bool
}
ContClient interface {
@ -98,8 +96,6 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/container: FrostFSID client is not set")
}
p.Log.Debug(context.Background(), logs.ContainerContainerWorkerPool, zap.Int("size", p.PoolSize))
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
return nil, fmt.Errorf("ir/container: can't create worker pool: %w", err)

View file

@ -24,7 +24,7 @@ func (np *Processor) handleDeposit(ctx context.Context, ev event.Event) {
// send event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_deposit", func() bool {
return np.processDeposit(deposit)
return np.processDeposit(ctx, deposit)
})
if err != nil {
// the system can be moved into a controlled degradation stage
@ -44,7 +44,7 @@ func (np *Processor) handleWithdraw(ctx context.Context, ev event.Event) {
// send event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_withdraw", func() bool {
return np.processWithdraw(withdraw)
return np.processWithdraw(ctx, withdraw)
})
if err != nil {
// the system can be moved into a controlled degradation stage
@ -62,7 +62,7 @@ func (np *Processor) handleCheque(ctx context.Context, ev event.Event) {
// send event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_cheque", func() bool {
return np.processCheque(cheque)
return np.processCheque(ctx, cheque)
})
if err != nil {
// the system can be moved into a controlled degradation stage
@ -81,7 +81,7 @@ func (np *Processor) handleConfig(ctx context.Context, ev event.Event) {
// send event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_config", func() bool {
return np.processConfig(cfg)
return np.processConfig(ctx, cfg)
})
if err != nil {
// the system can be moved into a controlled degradation stage

View file

@ -226,7 +226,7 @@ type testAlphabetState struct {
isAlphabet bool
}
func (s *testAlphabetState) IsAlphabet() bool {
func (s *testAlphabetState) IsAlphabet(context.Context) bool {
return s.isAlphabet
}
@ -242,17 +242,17 @@ type testBalaceClient struct {
burn []balance.BurnPrm
}
func (c *testBalaceClient) Mint(p balance.MintPrm) error {
func (c *testBalaceClient) Mint(_ context.Context, p balance.MintPrm) error {
c.mint = append(c.mint, p)
return nil
}
func (c *testBalaceClient) Lock(p balance.LockPrm) error {
func (c *testBalaceClient) Lock(_ context.Context, p balance.LockPrm) error {
c.lock = append(c.lock, p)
return nil
}
func (c *testBalaceClient) Burn(p balance.BurnPrm) error {
func (c *testBalaceClient) Burn(_ context.Context, p balance.BurnPrm) error {
c.burn = append(c.burn, p)
return nil
}
@ -261,7 +261,7 @@ type testNetmapClient struct {
config []nmClient.SetConfigPrm
}
func (c *testNetmapClient) SetConfig(p nmClient.SetConfigPrm) error {
func (c *testNetmapClient) SetConfig(_ context.Context, p nmClient.SetConfigPrm) error {
c.config = append(c.config, p)
return nil
}

View file

@ -17,9 +17,9 @@ const (
// Process deposit event by invoking a balance contract and sending native
// gas in the sidechain.
func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
if !np.alphabetState.IsAlphabet() {
np.log.Info(context.Background(), logs.FrostFSNonAlphabetModeIgnoreDeposit)
func (np *Processor) processDeposit(ctx context.Context, deposit frostfsEvent.Deposit) bool {
if !np.alphabetState.IsAlphabet(ctx) {
np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreDeposit)
return true
}
@ -30,9 +30,9 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
prm.SetID(deposit.ID())
// send transferX to a balance contract
err := np.balanceClient.Mint(prm)
err := np.balanceClient.Mint(ctx, prm)
if err != nil {
np.log.Error(context.Background(), logs.FrostFSCantTransferAssetsToBalanceContract, zap.Error(err))
np.log.Error(ctx, logs.FrostFSCantTransferAssetsToBalanceContract, zap.Error(err))
}
curEpoch := np.epochState.EpochCounter()
@ -46,7 +46,7 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
val, ok := np.mintEmitCache.Get(receiver.String())
if ok && val+np.mintEmitThreshold >= curEpoch {
np.log.Warn(context.Background(), logs.FrostFSDoubleMintEmissionDeclined,
np.log.Warn(ctx, logs.FrostFSDoubleMintEmissionDeclined,
zap.Stringer("receiver", receiver),
zap.Uint64("last_emission", val),
zap.Uint64("current_epoch", curEpoch))
@ -58,12 +58,12 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
// before gas transfer check if the balance is greater than the threshold
balance, err := np.morphClient.GasBalance()
if err != nil {
np.log.Error(context.Background(), logs.FrostFSCantGetGasBalanceOfTheNode, zap.Error(err))
np.log.Error(ctx, logs.FrostFSCantGetGasBalanceOfTheNode, zap.Error(err))
return false
}
if balance < np.gasBalanceThreshold {
np.log.Warn(context.Background(), logs.FrostFSGasBalanceThresholdHasBeenReached,
np.log.Warn(ctx, logs.FrostFSGasBalanceThresholdHasBeenReached,
zap.Int64("balance", balance),
zap.Int64("threshold", np.gasBalanceThreshold))
@ -72,7 +72,7 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
err = np.morphClient.TransferGas(receiver, np.mintEmitValue)
if err != nil {
np.log.Error(context.Background(), logs.FrostFSCantTransferNativeGasToReceiver,
np.log.Error(ctx, logs.FrostFSCantTransferNativeGasToReceiver,
zap.String("error", err.Error()))
return false
@ -84,16 +84,16 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
}
// Process withdraw event by locking assets in the balance account.
func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool {
if !np.alphabetState.IsAlphabet() {
np.log.Info(context.Background(), logs.FrostFSNonAlphabetModeIgnoreWithdraw)
func (np *Processor) processWithdraw(ctx context.Context, withdraw frostfsEvent.Withdraw) bool {
if !np.alphabetState.IsAlphabet(ctx) {
np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreWithdraw)
return true
}
// create lock account
lock, err := util.Uint160DecodeBytesBE(withdraw.ID()[:util.Uint160Size])
if err != nil {
np.log.Error(context.Background(), logs.FrostFSCantCreateLockAccount, zap.Error(err))
np.log.Error(ctx, logs.FrostFSCantCreateLockAccount, zap.Error(err))
return false
}
@ -107,9 +107,9 @@ func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool {
prm.SetAmount(np.converter.ToBalancePrecision(withdraw.Amount()))
prm.SetDueEpoch(int64(curEpoch + lockAccountLifetime))
err = np.balanceClient.Lock(prm)
err = np.balanceClient.Lock(ctx, prm)
if err != nil {
np.log.Error(context.Background(), logs.FrostFSCantLockAssetsForWithdraw, zap.Error(err))
np.log.Error(ctx, logs.FrostFSCantLockAssetsForWithdraw, zap.Error(err))
return false
}
@ -118,9 +118,9 @@ func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool {
// Process cheque event by transferring assets from the lock account back to
// the reserve account.
func (np *Processor) processCheque(cheque frostfsEvent.Cheque) bool {
if !np.alphabetState.IsAlphabet() {
np.log.Info(context.Background(), logs.FrostFSNonAlphabetModeIgnoreCheque)
func (np *Processor) processCheque(ctx context.Context, cheque frostfsEvent.Cheque) bool {
if !np.alphabetState.IsAlphabet(ctx) {
np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreCheque)
return true
}
@ -130,9 +130,9 @@ func (np *Processor) processCheque(cheque frostfsEvent.Cheque) bool {
prm.SetAmount(np.converter.ToBalancePrecision(cheque.Amount()))
prm.SetID(cheque.ID())
err := np.balanceClient.Burn(prm)
err := np.balanceClient.Burn(ctx, prm)
if err != nil {
np.log.Error(context.Background(), logs.FrostFSCantTransferAssetsToFedContract, zap.Error(err))
np.log.Error(ctx, logs.FrostFSCantTransferAssetsToFedContract, zap.Error(err))
return false
}

View file

@ -11,9 +11,9 @@ import (
// Process config event by setting configuration value from the mainchain in
// the sidechain.
func (np *Processor) processConfig(config frostfsEvent.Config) bool {
if !np.alphabetState.IsAlphabet() {
np.log.Info(context.Background(), logs.FrostFSNonAlphabetModeIgnoreConfig)
func (np *Processor) processConfig(ctx context.Context, config frostfsEvent.Config) bool {
if !np.alphabetState.IsAlphabet(ctx) {
np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreConfig)
return true
}
@ -24,9 +24,9 @@ func (np *Processor) processConfig(config frostfsEvent.Config) bool {
prm.SetValue(config.Value())
prm.SetHash(config.TxHash())
err := np.netmapClient.SetConfig(prm)
err := np.netmapClient.SetConfig(ctx, prm)
if err != nil {
np.log.Error(context.Background(), logs.FrostFSCantRelaySetConfigEvent, zap.Error(err))
np.log.Error(ctx, logs.FrostFSCantRelaySetConfigEvent, zap.Error(err))
return false
}

View file

@ -6,7 +6,6 @@ import (
"fmt"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
@ -17,7 +16,6 @@ import (
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
"go.uber.org/zap"
)
type (
@ -28,7 +26,7 @@ type (
// AlphabetState is a callback interface for inner ring global state.
AlphabetState interface {
IsAlphabet() bool
IsAlphabet(context.Context) bool
}
// PrecisionConverter converts balance amount values.
@ -37,13 +35,13 @@ type (
}
BalanceClient interface {
Mint(p balance.MintPrm) error
Lock(p balance.LockPrm) error
Burn(p balance.BurnPrm) error
Mint(ctx context.Context, p balance.MintPrm) error
Lock(ctx context.Context, p balance.LockPrm) error
Burn(ctx context.Context, p balance.BurnPrm) error
}
NetmapClient interface {
SetConfig(p nmClient.SetConfigPrm) error
SetConfig(ctx context.Context, p nmClient.SetConfigPrm) error
}
MorphClient interface {
@ -111,8 +109,6 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/frostfs: balance precision converter is not set")
}
p.Log.Debug(context.Background(), logs.FrostFSFrostfsWorkerPool, zap.Int("size", p.PoolSize))
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
return nil, fmt.Errorf("ir/frostfs: can't create worker pool: %w", err)

View file

@ -219,7 +219,7 @@ type testAlphabetState struct {
isAlphabet bool
}
func (s *testAlphabetState) IsAlphabet() bool {
func (s *testAlphabetState) IsAlphabet(context.Context) bool {
return s.isAlphabet
}
@ -251,12 +251,12 @@ func (c *testMorphClient) Committee() (res keys.PublicKeys, err error) {
return c.commiteeKeys, nil
}
func (c *testMorphClient) UpdateNeoFSAlphabetList(prm client.UpdateAlphabetListPrm) error {
func (c *testMorphClient) UpdateNeoFSAlphabetList(_ context.Context, prm client.UpdateAlphabetListPrm) error {
c.alphabetUpdates = append(c.alphabetUpdates, prm)
return nil
}
func (c *testMorphClient) UpdateNotaryList(prm client.UpdateNotaryListPrm) error {
func (c *testMorphClient) UpdateNotaryList(_ context.Context, prm client.UpdateNotaryListPrm) error {
c.notaryUpdates = append(c.notaryUpdates, prm)
return nil
}
@ -278,7 +278,7 @@ type testFrostFSClient struct {
updates []frostfscontract.AlphabetUpdatePrm
}
func (c *testFrostFSClient) AlphabetUpdate(p frostfscontract.AlphabetUpdatePrm) error {
func (c *testFrostFSClient) AlphabetUpdate(_ context.Context, p frostfscontract.AlphabetUpdatePrm) error {
c.updates = append(c.updates, p)
return nil
}

View file

@ -20,7 +20,7 @@ const (
)
func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint256) bool {
if !gp.alphabetState.IsAlphabet() {
if !gp.alphabetState.IsAlphabet(ctx) {
gp.log.Info(ctx, logs.GovernanceNonAlphabetModeIgnoreAlphabetSync)
return true
}
@ -69,13 +69,13 @@ func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint25
}
// 2. Update NeoFSAlphabet role in the sidechain.
gp.updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAlphabet, txHash)
gp.updateNeoFSAlphabetRoleInSidechain(ctx, sidechainAlphabet, newAlphabet, txHash)
// 3. Update notary role in the sidechain.
gp.updateNotaryRoleInSidechain(newAlphabet, txHash)
gp.updateNotaryRoleInSidechain(ctx, newAlphabet, txHash)
// 4. Update FrostFS contract in the mainnet.
gp.updateFrostFSContractInMainnet(newAlphabet)
gp.updateFrostFSContractInMainnet(ctx, newAlphabet)
gp.log.Info(ctx, logs.GovernanceFinishedAlphabetListUpdate)
@ -94,24 +94,24 @@ func prettyKeys(keys keys.PublicKeys) string {
return strings.TrimRight(sb.String(), delimiter)
}
func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) {
func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) {
innerRing, err := gp.irFetcher.InnerRingKeys()
if err != nil {
gp.log.Error(context.Background(), logs.GovernanceCantFetchInnerRingListFromSideChain,
gp.log.Error(ctx, logs.GovernanceCantFetchInnerRingListFromSideChain,
zap.String("error", err.Error()))
return
}
newInnerRing, err := updateInnerRing(innerRing, sidechainAlphabet, newAlphabet)
if err != nil {
gp.log.Error(context.Background(), logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys,
gp.log.Error(ctx, logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys,
zap.String("error", err.Error()))
return
}
sort.Sort(newInnerRing)
gp.log.Info(context.Background(), logs.GovernanceUpdateOfTheInnerRingList,
gp.log.Info(ctx, logs.GovernanceUpdateOfTheInnerRingList,
zap.String("before", prettyKeys(innerRing)),
zap.String("after", prettyKeys(newInnerRing)),
)
@ -120,26 +120,26 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAl
updPrm.SetList(newInnerRing)
updPrm.SetHash(txHash)
if err = gp.morphClient.UpdateNeoFSAlphabetList(updPrm); err != nil {
gp.log.Error(context.Background(), logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys,
if err = gp.morphClient.UpdateNeoFSAlphabetList(ctx, updPrm); err != nil {
gp.log.Error(ctx, logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys,
zap.String("error", err.Error()))
}
}
func (gp *Processor) updateNotaryRoleInSidechain(newAlphabet keys.PublicKeys, txHash util.Uint256) {
func (gp *Processor) updateNotaryRoleInSidechain(ctx context.Context, newAlphabet keys.PublicKeys, txHash util.Uint256) {
updPrm := client.UpdateNotaryListPrm{}
updPrm.SetList(newAlphabet)
updPrm.SetHash(txHash)
err := gp.morphClient.UpdateNotaryList(updPrm)
err := gp.morphClient.UpdateNotaryList(ctx, updPrm)
if err != nil {
gp.log.Error(context.Background(), logs.GovernanceCantUpdateListOfNotaryNodesInSideChain,
gp.log.Error(ctx, logs.GovernanceCantUpdateListOfNotaryNodesInSideChain,
zap.String("error", err.Error()))
}
}
func (gp *Processor) updateFrostFSContractInMainnet(newAlphabet keys.PublicKeys) {
func (gp *Processor) updateFrostFSContractInMainnet(ctx context.Context, newAlphabet keys.PublicKeys) {
epoch := gp.epochState.EpochCounter()
buf := make([]byte, 8)
@ -152,9 +152,9 @@ func (gp *Processor) updateFrostFSContractInMainnet(newAlphabet keys.PublicKeys)
prm.SetID(id)
prm.SetPubs(newAlphabet)
err := gp.frostfsClient.AlphabetUpdate(prm)
err := gp.frostfsClient.AlphabetUpdate(ctx, prm)
if err != nil {
gp.log.Error(context.Background(), logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract,
gp.log.Error(ctx, logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract,
zap.String("error", err.Error()))
}
}
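
Here processAlphabetSync hands its ctx down through every private helper, which in turn forwards it to the client call. A compact sketch of that propagation chain, with a stand-in client rather than the real morph client:

```go
package main

import (
	"context"
	"fmt"
)

// client is a stand-in for the morph/frostfs clients that now take ctx first.
type client struct{}

func (client) UpdateList(ctx context.Context, list []string) error {
	return ctx.Err() // a real client would issue a transaction here
}

type processor struct{ c client }

// updateRole accepts the caller's ctx and forwards it to the client call.
func (p *processor) updateRole(ctx context.Context, list []string) {
	if err := p.c.UpdateList(ctx, list); err != nil {
		fmt.Println("can't update list:", err)
	}
}

func (p *processor) processSync(ctx context.Context, list []string) bool {
	p.updateRole(ctx, list) // the same ctx flows through the whole chain
	return true
}

func main() {
	fmt.Println((&processor{}).processSync(context.Background(), []string{"key1", "key2"}))
}
```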

View file

@ -26,7 +26,7 @@ const ProcessorPoolSize = 1
type (
// AlphabetState is a callback interface for innerring global state.
AlphabetState interface {
IsAlphabet() bool
IsAlphabet(context.Context) bool
}
)
@ -56,7 +56,7 @@ type (
}
FrostFSClient interface {
AlphabetUpdate(p frostfscontract.AlphabetUpdatePrm) error
AlphabetUpdate(ctx context.Context, p frostfscontract.AlphabetUpdatePrm) error
}
NetmapClient interface {
@ -70,8 +70,8 @@ type (
MorphClient interface {
Committee() (res keys.PublicKeys, err error)
UpdateNeoFSAlphabetList(prm client.UpdateAlphabetListPrm) error
UpdateNotaryList(prm client.UpdateNotaryListPrm) error
UpdateNeoFSAlphabetList(ctx context.Context, prm client.UpdateAlphabetListPrm) error
UpdateNotaryList(ctx context.Context, prm client.UpdateNotaryListPrm) error
}
// Processor of events related to governance in the network.

View file

@ -14,14 +14,14 @@ import (
func (np *Processor) HandleNewEpochTick(ctx context.Context, ev event.Event) {
_ = ev.(timerEvent.NewEpochTick)
np.log.Info(context.Background(), logs.NetmapTick, zap.String("type", "epoch"))
np.log.Info(ctx, logs.NetmapTick, zap.String("type", "epoch"))
// send an event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch_tick", func() bool { return np.processNewEpochTick(ctx) })
if err != nil {
// the system can be moved into a controlled degradation stage
np.log.Warn(context.Background(), logs.NetmapNetmapWorkerPoolDrained,
np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
@ -54,7 +54,7 @@ func (np *Processor) handleAddPeer(ctx context.Context, ev event.Event) {
// send an event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "netmap_add_peer", func() bool {
return np.processAddPeer(newPeer)
return np.processAddPeer(ctx, newPeer)
})
if err != nil {
// the system can be moved into a controlled degradation stage
@ -72,7 +72,7 @@ func (np *Processor) handleUpdateState(ctx context.Context, ev event.Event) {
// send event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "netmap_update_peer", func() bool {
return np.processUpdatePeer(updPeer)
return np.processUpdatePeer(ctx, updPeer)
})
if err != nil {
// the system can be moved into a controlled degradation stage
@ -94,7 +94,7 @@ func (np *Processor) handleCleanupTick(ctx context.Context, ev event.Event) {
// send event to the worker pool
err := processors.SubmitEvent(np.pool, np.metrics, "netmap_cleanup_tick", func() bool {
return np.processNetmapCleanupTick(cleanup)
return np.processNetmapCleanupTick(ctx, cleanup)
})
if err != nil {
// the system can be moved into a controlled degradation stage

View file

@ -341,7 +341,7 @@ type testAlphabetState struct {
isAlphabet bool
}
func (s *testAlphabetState) IsAlphabet() bool {
func (s *testAlphabetState) IsAlphabet(context.Context) bool {
return s.isAlphabet
}
@ -365,7 +365,7 @@ type testNetmapClient struct {
invokedTxs []*transaction.Transaction
}
func (c *testNetmapClient) MorphNotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error {
func (c *testNetmapClient) MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error {
c.notaryInvokes = append(c.notaryInvokes, notaryInvoke{
contract: contract,
fee: fee,
@ -396,7 +396,7 @@ func (c *testNetmapClient) NetMap() (*netmap.NetMap, error) {
return c.netmap, nil
}
func (c *testNetmapClient) NewEpoch(epoch uint64) error {
func (c *testNetmapClient) NewEpoch(_ context.Context, epoch uint64) error {
c.newEpochs = append(c.newEpochs, epoch)
return nil
}

View file

@ -9,9 +9,9 @@ import (
"go.uber.org/zap"
)
func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool {
if !np.alphabetState.IsAlphabet() {
np.log.Info(context.Background(), logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick)
func (np *Processor) processNetmapCleanupTick(ctx context.Context, ev netmapCleanupTick) bool {
if !np.alphabetState.IsAlphabet(ctx) {
np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick)
return true
}
@ -19,13 +19,13 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool {
err := np.netmapSnapshot.forEachRemoveCandidate(ev.epoch, func(s string) error {
key, err := keys.NewPublicKeyFromString(s)
if err != nil {
np.log.Warn(context.Background(), logs.NetmapCantDecodePublicKeyOfNetmapNode,
np.log.Warn(ctx, logs.NetmapCantDecodePublicKeyOfNetmapNode,
zap.String("key", s))
return nil
}
np.log.Info(context.Background(), logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", s))
np.log.Info(ctx, logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", s))
// In notary environments we call UpdateStateIR method instead of UpdateState.
// It differs from UpdateState only by name, so we can do this in the same form.
@ -33,6 +33,7 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool {
const methodUpdateStateNotary = "updateStateIR"
err = np.netmapClient.MorphNotaryInvoke(
ctx,
np.netmapClient.ContractAddress(),
0,
uint32(ev.epoch),
@ -41,13 +42,13 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool {
int64(v2netmap.Offline), key.Bytes(),
)
if err != nil {
np.log.Error(context.Background(), logs.NetmapCantInvokeNetmapUpdateState, zap.Error(err))
np.log.Error(ctx, logs.NetmapCantInvokeNetmapUpdateState, zap.Error(err))
}
return nil
})
if err != nil {
np.log.Warn(context.Background(), logs.NetmapCantIterateOnNetmapCleanerCache,
np.log.Warn(ctx, logs.NetmapCantIterateOnNetmapCleanerCache,
zap.String("error", err.Error()))
return false
}

View file

@ -55,7 +55,7 @@ func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoc
// Process new epoch tick by invoking new epoch method in network map contract.
func (np *Processor) processNewEpochTick(ctx context.Context) bool {
if !np.alphabetState.IsAlphabet() {
if !np.alphabetState.IsAlphabet(ctx) {
np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewEpochTick)
return true
}
@ -63,7 +63,7 @@ func (np *Processor) processNewEpochTick(ctx context.Context) bool {
nextEpoch := np.epochState.EpochCounter() + 1
np.log.Debug(ctx, logs.NetmapNextEpoch, zap.Uint64("value", nextEpoch))
err := np.netmapClient.NewEpoch(nextEpoch)
err := np.netmapClient.NewEpoch(ctx, nextEpoch)
if err != nil {
np.log.Error(ctx, logs.NetmapCantInvokeNetmapNewEpoch, zap.Error(err))
return false

View file

@ -13,9 +13,9 @@ import (
// Process add peer notification by sanity check of new node
// local epoch timer.
func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
if !np.alphabetState.IsAlphabet() {
np.log.Info(context.Background(), logs.NetmapNonAlphabetModeIgnoreNewPeerNotification)
func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer) bool {
if !np.alphabetState.IsAlphabet(ctx) {
np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewPeerNotification)
return true
}
@ -23,7 +23,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
tx := ev.NotaryRequest().MainTransaction
ok, err := np.netmapClient.MorphIsValidScript(tx.Script, tx.Signers)
if err != nil || !ok {
np.log.Warn(context.Background(), logs.NetmapNonhaltNotaryTransaction,
np.log.Warn(ctx, logs.NetmapNonhaltNotaryTransaction,
zap.String("method", "netmap.AddPeer"),
zap.String("hash", tx.Hash().StringLE()),
zap.Error(err))
@ -34,14 +34,14 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
var nodeInfo netmap.NodeInfo
if err := nodeInfo.Unmarshal(ev.Node()); err != nil {
// it would be nice to have the tx id in the event structure to log it
np.log.Warn(context.Background(), logs.NetmapCantParseNetworkMapCandidate)
np.log.Warn(ctx, logs.NetmapCantParseNetworkMapCandidate)
return false
}
// validate and update node info
err = np.nodeValidator.VerifyAndUpdate(&nodeInfo)
if err != nil {
np.log.Warn(context.Background(), logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate,
np.log.Warn(ctx, logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate,
zap.String("error", err.Error()),
)
@ -64,7 +64,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
// That is why we need to perform `addPeerIR` only when the node is online,
// because within the scope of this method the contract sets the `ONLINE` state for the node.
if updated && nodeInfo.Status().IsOnline() {
np.log.Info(context.Background(), logs.NetmapApprovingNetworkMapCandidate,
np.log.Info(ctx, logs.NetmapApprovingNetworkMapCandidate,
zap.String("key", keyString))
prm := netmapclient.AddPeerPrm{}
@ -77,6 +77,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
// create new notary request with the original nonce
err = np.netmapClient.MorphNotaryInvoke(
ctx,
np.netmapClient.ContractAddress(),
0,
ev.NotaryRequest().MainTransaction.Nonce,
@ -85,7 +86,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
nodeInfoBinary,
)
if err != nil {
np.log.Error(context.Background(), logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err))
np.log.Error(ctx, logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err))
return false
}
}
@ -94,9 +95,9 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
}
// Process update peer notification by sending approval tx to the smart contract.
func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool {
if !np.alphabetState.IsAlphabet() {
np.log.Info(context.Background(), logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification)
func (np *Processor) processUpdatePeer(ctx context.Context, ev netmapEvent.UpdatePeer) bool {
if !np.alphabetState.IsAlphabet(ctx) {
np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification)
return true
}
@ -109,7 +110,7 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool {
if ev.Maintenance() {
err = np.nodeStateSettings.MaintenanceModeAllowed()
if err != nil {
np.log.Info(context.Background(), logs.NetmapPreventSwitchingNodeToMaintenanceState,
np.log.Info(ctx, logs.NetmapPreventSwitchingNodeToMaintenanceState,
zap.Error(err),
)
@ -118,7 +119,7 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool {
}
if err = np.netmapClient.MorphNotarySignAndInvokeTX(ev.NotaryRequest().MainTransaction); err != nil {
np.log.Error(context.Background(), logs.NetmapCantInvokeNetmapUpdatePeer, zap.Error(err))
np.log.Error(ctx, logs.NetmapCantInvokeNetmapUpdatePeer, zap.Error(err))
return false
}

View file

@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
@ -17,7 +16,6 @@ import (
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
"go.uber.org/zap"
)
type (
@ -36,7 +34,7 @@ type (
// AlphabetState is a callback interface for inner ring global state.
AlphabetState interface {
IsAlphabet() bool
IsAlphabet(context.Context) bool
}
// NodeValidator wraps basic method of checking the correctness
@ -55,12 +53,12 @@ type (
}
Client interface {
MorphNotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error
MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error
ContractAddress() util.Uint160
EpochDuration() (uint64, error)
MorphTxHeight(h util.Uint256) (res uint32, err error)
NetMap() (*netmap.NetMap, error)
NewEpoch(epoch uint64) error
NewEpoch(ctx context.Context, epoch uint64) error
MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error)
MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error
}
@ -133,8 +131,6 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/netmap: node state settings is not set")
}
p.Log.Debug(context.Background(), logs.NetmapNetmapWorkerPool, zap.Int("size", p.PoolSize))
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
return nil, fmt.Errorf("ir/netmap: can't create worker pool: %w", err)

View file

@ -1,6 +1,8 @@
package netmap
import (
"context"
netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
@ -18,13 +20,13 @@ type netmapClientWrapper struct {
netmapClient *netmapclient.Client
}
func (w *netmapClientWrapper) UpdatePeerState(p netmapclient.UpdatePeerPrm) error {
_, err := w.netmapClient.UpdatePeerState(p)
func (w *netmapClientWrapper) UpdatePeerState(ctx context.Context, p netmapclient.UpdatePeerPrm) error {
_, err := w.netmapClient.UpdatePeerState(ctx, p)
return err
}
func (w *netmapClientWrapper) MorphNotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error {
_, err := w.netmapClient.Morph().NotaryInvoke(contract, fee, nonce, vub, method, args...)
func (w *netmapClientWrapper) MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error {
_, err := w.netmapClient.Morph().NotaryInvoke(ctx, contract, fee, nonce, vub, method, args...)
return err
}
@ -44,16 +46,16 @@ func (w *netmapClientWrapper) NetMap() (*netmap.NetMap, error) {
return w.netmapClient.NetMap()
}
func (w *netmapClientWrapper) NewEpoch(epoch uint64) error {
return w.netmapClient.NewEpoch(epoch)
func (w *netmapClientWrapper) NewEpoch(ctx context.Context, epoch uint64) error {
return w.netmapClient.NewEpoch(ctx, epoch)
}
func (w *netmapClientWrapper) MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error) {
return w.netmapClient.Morph().IsValidScript(script, signers)
}
func (w *netmapClientWrapper) AddPeer(p netmapclient.AddPeerPrm) error {
return w.netmapClient.AddPeer(p)
func (w *netmapClientWrapper) AddPeer(ctx context.Context, p netmapclient.AddPeerPrm) error {
return w.netmapClient.AddPeer(ctx, p)
}
func (w *netmapClientWrapper) MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error {
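
The wrapper methods above stay one line each: they only add ctx to the forwarded call. A minimal sketch of that adapter pattern with a hypothetical low-level client:

```go
package main

import (
	"context"
	"fmt"
)

// lowLevelClient plays the role of the underlying netmap client.
type lowLevelClient struct{}

func (lowLevelClient) NewEpoch(ctx context.Context, epoch uint64) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	fmt.Println("new epoch:", epoch)
	return nil
}

// clientWrapper keeps the adapter thin: each method just forwards ctx.
type clientWrapper struct{ c lowLevelClient }

func (w *clientWrapper) NewEpoch(ctx context.Context, epoch uint64) error {
	return w.c.NewEpoch(ctx, epoch)
}

func main() {
	_ = (&clientWrapper{}).NewEpoch(context.Background(), 42)
}
```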

View file

@ -53,8 +53,8 @@ func (s *Server) IsActive(ctx context.Context) bool {
}
// IsAlphabet is a getter for a global alphabet flag state.
func (s *Server) IsAlphabet() bool {
return s.AlphabetIndex() >= 0
func (s *Server) IsAlphabet(ctx context.Context) bool {
return s.AlphabetIndex(ctx) >= 0
}
// InnerRingIndex is a getter for a global index of node in inner ring list. Negative
@ -83,10 +83,10 @@ func (s *Server) InnerRingSize(ctx context.Context) int {
// AlphabetIndex is a getter for a global index of node in alphabet list.
// Negative index means that node is not in the alphabet list.
func (s *Server) AlphabetIndex() int {
func (s *Server) AlphabetIndex(ctx context.Context) int {
index, err := s.statusIndex.AlphabetIndex()
if err != nil {
s.log.Error(context.Background(), logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error()))
s.log.Error(ctx, logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error()))
return -1
}
@ -127,7 +127,7 @@ func (s *Server) voteForSidechainValidator(ctx context.Context, prm governance.V
}
s.contracts.alphabet.iterate(func(letter GlagoliticLetter, contract util.Uint160) {
_, err := s.morphClient.NotaryInvoke(contract, s.feeConfig.SideChainFee(), nonce, vubP, voteMethod, epoch, validators)
_, err := s.morphClient.NotaryInvoke(ctx, contract, s.feeConfig.SideChainFee(), nonce, vubP, voteMethod, epoch, validators)
if err != nil {
s.log.Warn(ctx, logs.InnerringCantInvokeVoteMethodInAlphabetContract,
zap.Int8("alphabet_index", int8(letter)),

View file

@ -47,8 +47,8 @@ func TestServerState(t *testing.T) {
require.Equal(t, healthStatus, srv.HealthStatus(), "invalid health status")
require.True(t, srv.IsActive(context.Background()), "invalid IsActive result")
require.True(t, srv.IsAlphabet(), "invalid IsAlphabet result")
require.True(t, srv.IsAlphabet(context.Background()), "invalid IsAlphabet result")
require.Equal(t, 0, srv.InnerRingIndex(context.Background()), "invalid IR index")
require.Equal(t, 1, srv.InnerRingSize(context.Background()), "invalid IR index")
require.Equal(t, 0, srv.AlphabetIndex(), "invalid alphabet index")
require.Equal(t, 0, srv.AlphabetIndex(context.Background()), "invalid alphabet index")
}
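
On the test side the new parameter is usually satisfied with context.Background(). A hedged sketch of that calling convention, using a hypothetical fake server rather than the real one:

```go
package state_test

import (
	"context"
	"testing"
)

// fakeServer is a hypothetical stand-in for the server under test.
type fakeServer struct{ index int }

func (s *fakeServer) AlphabetIndex(context.Context) int  { return s.index }
func (s *fakeServer) IsAlphabet(ctx context.Context) bool { return s.AlphabetIndex(ctx) >= 0 }

func TestIsAlphabet(t *testing.T) {
	srv := &fakeServer{index: 0}
	if !srv.IsAlphabet(context.Background()) {
		t.Fatal("expected alphabet node")
	}
}
```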

View file

@ -72,7 +72,7 @@ func TestBlobovnicza(t *testing.T) {
require.NoError(t, blz.Open(context.Background()))
// initialize Blobovnicza
require.NoError(t, blz.Init())
require.NoError(t, blz.Init(context.Background()))
// try to read non-existent address
testGet(t, blz, oidtest.Address(), nil, client.IsErrObjectNotFound)
@ -102,5 +102,5 @@ func TestBlobovnicza(t *testing.T) {
return err == nil
}, nil)
require.NoError(t, blz.Close())
require.NoError(t, blz.Close(context.Background()))
}

View file

@ -56,7 +56,7 @@ func (b *Blobovnicza) Open(ctx context.Context) error {
//
// If Blobovnicza is already initialized, no action is taken.
// Blobovnicza must be open, otherwise an error is returned.
func (b *Blobovnicza) Init() error {
func (b *Blobovnicza) Init(ctx context.Context) error {
b.controlMtx.Lock()
defer b.controlMtx.Unlock()
@ -64,7 +64,7 @@ func (b *Blobovnicza) Init() error {
return errors.New("blobovnicza is not open")
}
b.log.Debug(context.Background(), logs.BlobovniczaInitializing,
b.log.Debug(ctx, logs.BlobovniczaInitializing,
zap.Uint64("object size limit", b.objSizeLimit),
zap.Uint64("storage size limit", b.fullSizeLimit),
)
@ -72,7 +72,7 @@ func (b *Blobovnicza) Init() error {
size := b.dataSize.Load()
items := b.itemsCount.Load()
if size != 0 || items != 0 {
b.log.Debug(context.Background(), logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size), zap.Uint64("items", items))
b.log.Debug(ctx, logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size), zap.Uint64("items", items))
return nil
}
@ -82,7 +82,7 @@ func (b *Blobovnicza) Init() error {
// create size range bucket
rangeStr := stringifyBounds(lower, upper)
b.log.Debug(context.Background(), logs.BlobovniczaCreatingBucketForSizeRange,
b.log.Debug(ctx, logs.BlobovniczaCreatingBucketForSizeRange,
zap.String("range", rangeStr))
_, err := tx.CreateBucketIfNotExists(key)
@ -99,14 +99,14 @@ func (b *Blobovnicza) Init() error {
}
}
return b.initializeCounters()
return b.initializeCounters(ctx)
}
func (b *Blobovnicza) ObjectsCount() uint64 {
return b.itemsCount.Load()
}
func (b *Blobovnicza) initializeCounters() error {
func (b *Blobovnicza) initializeCounters(ctx context.Context) error {
var size uint64
var items uint64
var sizeExists bool
@ -132,17 +132,17 @@ func (b *Blobovnicza) initializeCounters() error {
return fmt.Errorf("can't determine DB size: %w", err)
}
if (!sizeExists || !itemsCountExists) && !b.boltOptions.ReadOnly {
b.log.Debug(context.Background(), logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items))
b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items))
if err := b.boltDB.Update(func(tx *bbolt.Tx) error {
if err := saveDataSize(tx, size); err != nil {
return err
}
return saveItemsCount(tx, items)
}); err != nil {
b.log.Debug(context.Background(), logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items))
b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items))
return fmt.Errorf("can't save blobovnicza's size and items count: %w", err)
}
b.log.Debug(context.Background(), logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items))
b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items))
}
b.dataSize.Store(size)
@ -155,7 +155,7 @@ func (b *Blobovnicza) initializeCounters() error {
// Close releases all internal database resources.
//
// If blobovnicza is already closed, does nothing.
func (b *Blobovnicza) Close() error {
func (b *Blobovnicza) Close(ctx context.Context) error {
b.controlMtx.Lock()
defer b.controlMtx.Unlock()
@ -163,7 +163,7 @@ func (b *Blobovnicza) Close() error {
return nil
}
b.log.Debug(context.Background(), logs.BlobovniczaClosingBoltDB,
b.log.Debug(ctx, logs.BlobovniczaClosingBoltDB,
zap.String("path", b.path),
)
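
With this change the whole Blobovnicza lifecycle is context-aware: Open, Init and Close all take ctx, so even shutdown logging can carry the caller's context. An illustrative usage sketch with a toy store type (not the real Blobovnicza API):

```go
package main

import (
	"context"
	"fmt"
)

// store is a toy stand-in for a component whose lifecycle methods accept ctx.
type store struct{ open bool }

func (s *store) Open(ctx context.Context) error { s.open = true; return ctx.Err() }

func (s *store) Init(ctx context.Context) error {
	if !s.open {
		return fmt.Errorf("store is not open")
	}
	return ctx.Err()
}

func (s *store) Close(ctx context.Context) error { s.open = false; return ctx.Err() }

func main() {
	ctx := context.Background()
	s := &store{}
	if err := s.Open(ctx); err != nil {
		panic(err)
	}
	defer s.Close(ctx) // the same ctx reaches shutdown logging
	if err := s.Init(ctx); err != nil {
		panic(err)
	}
	fmt.Println("store ready")
}
```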

View file

@ -14,11 +14,11 @@ func TestBlobovnicza_Get(t *testing.T) {
filename := filepath.Join(t.TempDir(), "blob")
var blz *Blobovnicza
defer func() { require.NoError(t, blz.Close()) }()
defer func() { require.NoError(t, blz.Close(context.Background())) }()
fnInit := func(szLimit uint64) {
if blz != nil {
require.NoError(t, blz.Close())
require.NoError(t, blz.Close(context.Background()))
}
blz = New(
@ -27,7 +27,7 @@ func TestBlobovnicza_Get(t *testing.T) {
)
require.NoError(t, blz.Open(context.Background()))
require.NoError(t, blz.Init())
require.NoError(t, blz.Init(context.Background()))
}
// initial distribution: [0:32K] (32K:64K]

View file

@ -16,7 +16,7 @@ func TestBlobovniczaIterate(t *testing.T) {
filename := filepath.Join(t.TempDir(), "blob")
b := New(WithPath(filename))
require.NoError(t, b.Open(context.Background()))
require.NoError(t, b.Init())
require.NoError(t, b.Init(context.Background()))
data := [][]byte{{0, 1, 2, 3}, {5, 6, 7, 8}}
addr := oidtest.Address()

View file

@ -18,8 +18,8 @@ func (db *activeDB) Blobovnicza() *blobovnicza.Blobovnicza {
return db.blz
}
func (db *activeDB) Close() {
db.shDB.Close()
func (db *activeDB) Close(ctx context.Context) {
db.shDB.Close(ctx)
}
func (db *activeDB) SystemPath() string {
@ -73,12 +73,12 @@ func (m *activeDBManager) Open() {
m.closed = false
}
func (m *activeDBManager) Close() {
func (m *activeDBManager) Close(ctx context.Context) {
m.levelToActiveDBGuard.Lock()
defer m.levelToActiveDBGuard.Unlock()
for _, db := range m.levelToActiveDB {
db.Close()
db.Close(ctx)
}
m.levelToActiveDB = make(map[string]*sharedDB)
m.closed = true
@ -103,7 +103,7 @@ func (m *activeDBManager) getCurrentActiveIfOk(ctx context.Context, lvlPath stri
}
if blz.IsFull() {
db.Close()
db.Close(ctx)
return nil, nil
}
@ -168,10 +168,10 @@ func (m *activeDBManager) getNextSharedDB(ctx context.Context, lvlPath string) (
previous, updated := m.replace(lvlPath, next)
if !updated && next != nil {
next.Close() // manager is closed, so don't hold active DB open
next.Close(ctx) // manager is closed, so don't hold active DB open
}
if updated && previous != nil {
previous.Close()
previous.Close(ctx)
}
return next, nil
}

View file

@ -167,7 +167,7 @@ func (b *Blobovniczas) Compressor() *compression.Config {
}
// SetReportErrorFunc implements common.Storage.
func (b *Blobovniczas) SetReportErrorFunc(f func(string, error)) {
func (b *Blobovniczas) SetReportErrorFunc(f func(context.Context, string, error)) {
b.reportError = f
}

View file

@ -32,7 +32,7 @@ func newDBCache(parentCtx context.Context, size int,
ch := cache.NewCache[string, *sharedDB]().
WithTTL(ttl).WithLRU().WithMaxKeys(size).
WithOnEvicted(func(_ string, db *sharedDB) {
db.Close()
db.Close(parentCtx)
})
ctx, cancel := context.WithCancel(parentCtx)
res := &dbCache{
@ -138,7 +138,7 @@ func (c *dbCache) create(ctx context.Context, path string) *sharedDB {
return value
}
if added := c.put(path, value); !added {
value.Close()
value.Close(ctx)
}
return value
}
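
The eviction hook runs outside any request, so it closes the evicted database with the cache's parent context captured at construction time. A simplified sketch of that design choice (the real cache is an LRU with TTL; this one only models the callback wiring):

```go
package main

import (
	"context"
	"fmt"
)

type db struct{ path string }

func (d *db) Close(ctx context.Context) {
	fmt.Println("closed", d.path, "ctx alive:", ctx.Err() == nil)
}

// newDBCache wires the eviction hook with the parent context captured at
// construction time, since eviction is not tied to any single request.
func newDBCache(parentCtx context.Context) (put func(*db), evictAll func()) {
	var entries []*db
	put = func(d *db) { entries = append(entries, d) }
	evictAll = func() {
		for _, d := range entries {
			d.Close(parentCtx) // no request ctx exists here
		}
		entries = nil
	}
	return put, evictAll
}

func main() {
	put, evictAll := newDBCache(context.Background())
	put(&db{path: "/tmp/blz/0/1.db"})
	evictAll()
}
```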

View file

@ -27,7 +27,7 @@ func TestBlobovniczaTree_Concurrency(t *testing.T) {
require.NoError(t, st.Open(mode.ComponentReadWrite))
require.NoError(t, st.Init())
defer func() {
require.NoError(t, st.Close())
require.NoError(t, st.Close(context.Background()))
}()
objGen := &testutil.SeqObjGenerator{ObjSize: 1}

View file

@ -50,7 +50,7 @@ func (b *Blobovniczas) initializeDBs(ctx context.Context) error {
if err != nil {
return err
}
defer shBlz.Close()
defer shBlz.Close(egCtx)
moveInfo, err := blz.ListMoveInfo(egCtx)
if err != nil {
@ -80,9 +80,9 @@ func (b *Blobovniczas) openManagers() {
}
// Close implements common.Storage.
func (b *Blobovniczas) Close() error {
func (b *Blobovniczas) Close(ctx context.Context) error {
b.dbCache.Close() // order important
b.activeDBManager.Close()
b.activeDBManager.Close(ctx)
b.commondbManager.Close()
return nil

View file

@ -51,7 +51,7 @@ func TestObjectsAvailableAfterDepthAndWidthEdit(t *testing.T) {
require.NoError(t, err)
require.EqualValues(t, obj35, gRes.Object)
require.NoError(t, blz.Close())
require.NoError(t, blz.Close(context.Background()))
// change depth and width
blz = NewBlobovniczaTree(
@ -89,7 +89,7 @@ func TestObjectsAvailableAfterDepthAndWidthEdit(t *testing.T) {
})
require.NoError(t, err)
require.NoError(t, blz.Close())
require.NoError(t, blz.Close(context.Background()))
// change depth and width back
blz = NewBlobovniczaTree(
@ -127,5 +127,5 @@ func TestObjectsAvailableAfterDepthAndWidthEdit(t *testing.T) {
require.NoError(t, err)
require.EqualValues(t, obj52, gRes.Object)
require.NoError(t, blz.Close())
require.NoError(t, blz.Close(context.Background()))
}

View file

@ -26,7 +26,7 @@ func (b *Blobovniczas) ObjectsCount(ctx context.Context) (uint64, error) {
if err != nil {
return true, err
}
defer shDB.Close()
defer shDB.Close(ctx)
result += blz.ObjectsCount()
return false, nil

View file

@ -66,7 +66,7 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
if err != nil {
return res, err
}
defer shBlz.Close()
defer shBlz.Close(ctx)
if res, err = b.deleteObject(ctx, blz, bPrm); err == nil {
success = true
@ -114,7 +114,7 @@ func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicz
if err != nil {
return common.DeleteRes{}, err
}
defer shBlz.Close()
defer shBlz.Close(ctx)
return b.deleteObject(ctx, blz, prm)
}

View file

@ -42,7 +42,7 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common
if err != nil {
return common.ExistsRes{}, err
}
defer shBlz.Close()
defer shBlz.Close(ctx)
exists, err := blz.Exists(ctx, prm.Address)
return common.ExistsRes{Exists: exists}, err

View file

@ -27,7 +27,7 @@ func TestExistsInvalidStorageID(t *testing.T) {
WithBlobovniczaSize(1<<20))
require.NoError(t, b.Open(mode.ComponentReadWrite))
require.NoError(t, b.Init())
defer func() { require.NoError(t, b.Close()) }()
defer func() { require.NoError(t, b.Close(context.Background())) }()
obj := blobstortest.NewObject(1024)
addr := object.AddressOf(obj)

View file

@ -53,7 +53,7 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
if err != nil {
return res, err
}
defer shBlz.Close()
defer shBlz.Close(ctx)
res, err = b.getObject(ctx, blz, bPrm)
if err == nil {
@ -100,7 +100,7 @@ func (b *Blobovniczas) getObjectFromLevel(ctx context.Context, prm blobovnicza.G
if err != nil {
return common.GetRes{}, err
}
defer shBlz.Close()
defer shBlz.Close(ctx)
return b.getObject(ctx, blz, prm)
}

View file

@ -52,7 +52,7 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
if err != nil {
return common.GetRangeRes{}, err
}
defer shBlz.Close()
defer shBlz.Close(ctx)
res, err := b.getObjectRange(ctx, blz, prm)
if err == nil {
@ -108,7 +108,7 @@ func (b *Blobovniczas) getRangeFromLevel(ctx context.Context, prm common.GetRang
if err != nil {
return common.GetRangeRes{}, err
}
defer shBlz.Close()
defer shBlz.Close(ctx)
return b.getObjectRange(ctx, blz, prm)
}

View file

@ -84,7 +84,7 @@ func (b *Blobovniczas) iterateBlobovniczas(ctx context.Context, ignoreErrors boo
}
return false, fmt.Errorf("could not open blobovnicza %s: %w", p, err)
}
defer shBlz.Close()
defer shBlz.Close(ctx)
err = f(p, blz)

View file

@ -71,7 +71,7 @@ func (b *sharedDB) Open(ctx context.Context) (*blobovnicza.Blobovnicza, error) {
if err := blz.Open(ctx); err != nil {
return nil, fmt.Errorf("could not open blobovnicza %s: %w", b.path, err)
}
if err := blz.Init(); err != nil {
if err := blz.Init(ctx); err != nil {
return nil, fmt.Errorf("could not init blobovnicza %s: %w", b.path, err)
}
@ -82,20 +82,20 @@ func (b *sharedDB) Open(ctx context.Context) (*blobovnicza.Blobovnicza, error) {
return blz, nil
}
func (b *sharedDB) Close() {
func (b *sharedDB) Close(ctx context.Context) {
b.cond.L.Lock()
defer b.cond.L.Unlock()
if b.refCount == 0 {
b.log.Error(context.Background(), logs.AttemtToCloseAlreadyClosedBlobovnicza, zap.String("id", b.path))
b.log.Error(ctx, logs.AttemtToCloseAlreadyClosedBlobovnicza, zap.String("id", b.path))
b.cond.Broadcast()
return
}
if b.refCount == 1 {
b.refCount = 0
if err := b.blcza.Close(); err != nil {
b.log.Error(context.Background(), logs.BlobovniczatreeCouldNotCloseBlobovnicza,
if err := b.blcza.Close(ctx); err != nil {
b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza,
zap.String("id", b.path),
zap.String("error", err.Error()),
)
@ -111,7 +111,7 @@ func (b *sharedDB) Close() {
}
}
func (b *sharedDB) CloseAndRemoveFile() error {
func (b *sharedDB) CloseAndRemoveFile(ctx context.Context) error {
b.cond.L.Lock()
if b.refCount > 1 {
b.cond.Wait()
@ -122,8 +122,8 @@ func (b *sharedDB) CloseAndRemoveFile() error {
return errClosingClosedBlobovnicza
}
if err := b.blcza.Close(); err != nil {
b.log.Error(context.Background(), logs.BlobovniczatreeCouldNotCloseBlobovnicza,
if err := b.blcza.Close(ctx); err != nil {
b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza,
zap.String("id", b.path),
zap.String("error", err.Error()),
)

View file
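
The hunks above show the core of the change: call sites such as `Exists`, `Get`, and `GetRange` now close the shared blobovnicza handle with `defer shBlz.Close(ctx)`, and `sharedDB.Close`/`CloseAndRemoveFile` forward that context to the logger instead of falling back to `context.Background()`. A minimal self-contained sketch of the same idea, with a simplified reference-counted type and `log/slog` standing in for the project's logger (both stand-ins are assumptions, not frostfs-node code):

```go
package main

import (
	"context"
	"log/slog"
	"sync"
)

// refCountedDB is a simplified stand-in for sharedDB: a reference-counted
// handle whose Close now receives the caller's context for logging.
type refCountedDB struct {
	mu       sync.Mutex
	refCount int
	path     string
}

func (d *refCountedDB) Open(_ context.Context) error {
	d.mu.Lock()
	defer d.mu.Unlock()
	d.refCount++
	return nil
}

// Close logs with the caller's context, so request- or trace-scoped fields
// attached to ctx by the logging backend survive into cleanup-time records.
func (d *refCountedDB) Close(ctx context.Context) {
	d.mu.Lock()
	defer d.mu.Unlock()
	if d.refCount == 0 {
		slog.ErrorContext(ctx, "attempt to close already closed database", "id", d.path)
		return
	}
	d.refCount--
}

func main() {
	ctx := context.Background()
	db := &refCountedDB{path: "0/0/1.db"}
	if err := db.Open(ctx); err != nil {
		return
	}
	defer db.Close(ctx) // the call-site pattern used throughout this diff
}
```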

@ -1,6 +1,7 @@
package blobovniczatree
import (
"context"
"io/fs"
"time"
@ -20,7 +21,7 @@ type cfg struct {
blzShallowWidth uint64
compression *compression.Config
blzOpts []blobovnicza.Option
reportError func(string, error) // reportError is the function called when encountering disk errors.
reportError func(context.Context, string, error) // reportError is the function called when encountering disk errors.
metrics Metrics
waitBeforeDropDB time.Duration
blzInitWorkerCount int
@ -54,7 +55,7 @@ func initConfig(c *cfg) {
openedCacheExpInterval: defaultOpenedCacheInterval,
blzShallowDepth: defaultBlzShallowDepth,
blzShallowWidth: defaultBlzShallowWidth,
reportError: func(string, error) {},
reportError: func(context.Context, string, error) {},
metrics: &noopMetrics{},
waitBeforeDropDB: defaultWaitBeforeDropDB,
blzInitWorkerCount: defaultBlzInitWorkerCount,

View file
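
The configuration hunk above widens the disk-error callback: `reportError` now takes the operation's context as its first argument, and the default stays a no-op. A hedged sketch of the new callback shape (the named type and the `slog` wiring are illustrative; in this repository the callback is ultimately installed through `BlobStor.SetReportErrorFunc`, which appears later in this diff):

```go
package main

import (
	"context"
	"errors"
	"log/slog"
)

// reportErrorFunc mirrors the updated callback type from the diff: the
// operation's context comes first, then the message and the error.
type reportErrorFunc func(ctx context.Context, msg string, err error)

type cfg struct {
	reportError reportErrorFunc
}

func initConfig(c *cfg) {
	// The default remains a no-op, exactly as in the hunk above, only with
	// the extra context parameter.
	c.reportError = func(context.Context, string, error) {}
}

func main() {
	c := &cfg{}
	initConfig(c)

	// A caller can now report disk errors with request-scoped context.
	c.reportError = func(ctx context.Context, msg string, err error) {
		slog.ErrorContext(ctx, msg, "error", err)
	}
	c.reportError(context.Background(), "could not open blobovnicza", errors.New("disk failure"))
}
```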

@ -80,7 +80,7 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error)
active, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(ctx, lvlPath)
if err != nil {
if !isLogical(err) {
i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
} else {
i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza,
zap.String("error", err.Error()),
@ -95,14 +95,14 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error)
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
return false, nil
}
defer active.Close()
defer active.Close(ctx)
i.AllFull = false
_, err = active.Blobovnicza().Put(ctx, i.PutPrm)
if err != nil {
if !isLogical(err) {
i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
} else {
i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza,
zap.String("path", active.SystemPath()),

View file

@ -186,7 +186,7 @@ func (b *Blobovniczas) rebuildBySize(ctx context.Context, path string, targetFil
if err != nil {
return false, err
}
defer shDB.Close()
defer shDB.Close(ctx)
fp := blz.FillPercent()
// accepted fill percent defines as
// |----|+++++++++++++++++|+++++++++++++++++|---------------
@ -206,9 +206,9 @@ func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.M
if shDBClosed {
return
}
shDB.Close()
shDB.Close(ctx)
}()
dropTempFile, err := b.addRebuildTempFile(path)
dropTempFile, err := b.addRebuildTempFile(ctx, path)
if err != nil {
return 0, err
}
@ -224,7 +224,7 @@ func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.M
return migratedObjects, err
}
func (b *Blobovniczas) addRebuildTempFile(path string) (func(), error) {
func (b *Blobovniczas) addRebuildTempFile(ctx context.Context, path string) (func(), error) {
sysPath := filepath.Join(b.rootPath, path)
sysPath = sysPath + rebuildSuffix
_, err := os.OpenFile(sysPath, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, b.perm)
@ -233,7 +233,7 @@ func (b *Blobovniczas) addRebuildTempFile(path string) (func(), error) {
}
return func() {
if err := os.Remove(sysPath); err != nil {
b.log.Warn(context.Background(), logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
b.log.Warn(ctx, logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
}
}, nil
}
@ -330,7 +330,7 @@ func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDb *sharedDB)
b.dbFilesGuard.Lock()
defer b.dbFilesGuard.Unlock()
if err := shDb.CloseAndRemoveFile(); err != nil {
if err := shDb.CloseAndRemoveFile(ctx); err != nil {
return false, err
}
b.commondbManager.CleanResources(path)
@ -370,7 +370,7 @@ func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore co
if err != nil {
return true, err
}
defer shDB.Close()
defer shDB.Close(ctx)
incompletedMoves, err := blz.ListMoveInfo(ctx)
if err != nil {
@ -403,7 +403,7 @@ func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blob
if err != nil {
return err
}
defer targetDB.Close()
defer targetDB.Close(ctx)
existsInSource := true
var gPrm blobovnicza.GetPrm
@ -480,7 +480,7 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool,
target, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(ctx, lvlPath)
if err != nil {
if !isLogical(err) {
i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
} else {
i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, zap.Error(err))
}
@ -491,7 +491,7 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool,
i.B.log.Warn(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath))
return false, nil
}
defer target.Close()
defer target.Close(ctx)
i.AllFull = false
@ -503,7 +503,7 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool,
TargetStorageID: targetStorageID.Bytes(),
}); err != nil {
if !isLogical(err) {
i.B.reportError(logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, err)
i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, err)
} else {
i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, zap.String("path", i.SourceSysPath), zap.Error(err))
}
@ -519,7 +519,7 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool,
_, err = target.Blobovnicza().Put(ctx, putPrm)
if err != nil {
if !isLogical(err) {
i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
} else {
i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", target.SystemPath()), zap.Error(err))
}
@ -535,7 +535,7 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool,
deletePrm.SetAddress(i.Address)
if _, err = i.Source.Delete(ctx, deletePrm); err != nil {
if !isLogical(err) {
i.B.reportError(logs.BlobovniczatreeCouldNotDeleteFromSource, err)
i.B.reportError(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, err)
} else {
i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", i.SourceSysPath), zap.Error(err))
}
@ -544,7 +544,7 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool,
if err = i.Source.DropMoveInfo(ctx, i.Address); err != nil {
if !isLogical(err) {
i.B.reportError(logs.BlobovniczatreeCouldNotDropMoveInfo, err)
i.B.reportError(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, err)
} else {
i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", i.SourceSysPath), zap.Error(err))
}

View file
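
In the rebuild path above, `addRebuildTempFile` now receives the context and the returned cleanup closure captures it, so a failed removal of the temp file is logged with the caller's context rather than `context.Background()`. A small standalone sketch of that closure pattern (the file name, permissions, and the `slog` logger are illustrative assumptions, not the project's code):

```go
package main

import (
	"context"
	"log/slog"
	"os"
	"path/filepath"
)

// addTempFile mirrors the reworked addRebuildTempFile: the context is passed
// in and captured by the returned cleanup closure, so a later removal failure
// is logged with the caller's context.
func addTempFile(ctx context.Context, dir, name string) (func(), error) {
	sysPath := filepath.Join(dir, name+".rebuild")
	f, err := os.OpenFile(sysPath, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, 0o640)
	if err != nil {
		return nil, err
	}
	_ = f.Close()
	return func() {
		if err := os.Remove(sysPath); err != nil {
			slog.WarnContext(ctx, "failed to remove rebuild temp file", "error", err)
		}
	}, nil
}

func main() {
	ctx := context.Background()
	dir, err := os.MkdirTemp("", "rebuild-example")
	if err != nil {
		return
	}
	defer os.RemoveAll(dir)

	dropTempFile, err := addTempFile(ctx, dir, "0_0_1.db")
	if err != nil {
		return
	}
	defer dropTempFile()
}
```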

@ -36,7 +36,7 @@ func testRebuildFailoverOnlyMoveInfoSaved(t *testing.T) {
blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
require.NoError(t, blz.Open(context.Background()))
require.NoError(t, blz.Init())
require.NoError(t, blz.Init(context.Background()))
obj := blobstortest.NewObject(1024)
data, err := obj.Marshal()
@ -53,7 +53,7 @@ func testRebuildFailoverOnlyMoveInfoSaved(t *testing.T) {
TargetStorageID: []byte("0/0/0"),
}))
require.NoError(t, blz.Close())
require.NoError(t, blz.Close(context.Background()))
_, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm)
require.NoError(t, err)
@ -66,7 +66,7 @@ func testRebuildFailoverObjectSavedToTarget(t *testing.T) {
blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
require.NoError(t, blz.Open(context.Background()))
require.NoError(t, blz.Init())
require.NoError(t, blz.Init(context.Background()))
obj := blobstortest.NewObject(1024)
data, err := obj.Marshal()
@ -83,19 +83,19 @@ func testRebuildFailoverObjectSavedToTarget(t *testing.T) {
TargetStorageID: []byte("0/0/0"),
}))
require.NoError(t, blz.Close())
require.NoError(t, blz.Close(context.Background()))
_, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm)
require.NoError(t, err)
blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db")))
require.NoError(t, blz.Open(context.Background()))
require.NoError(t, blz.Init())
require.NoError(t, blz.Init(context.Background()))
_, err = blz.Put(context.Background(), pPrm)
require.NoError(t, err)
require.NoError(t, blz.Close())
require.NoError(t, blz.Close(context.Background()))
testRebuildFailoverValidate(t, dir, obj, true)
}
@ -106,7 +106,7 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) {
blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
require.NoError(t, blz.Open(context.Background()))
require.NoError(t, blz.Init())
require.NoError(t, blz.Init(context.Background()))
obj := blobstortest.NewObject(1024)
data, err := obj.Marshal()
@ -117,14 +117,14 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) {
TargetStorageID: []byte("0/0/0"),
}))
require.NoError(t, blz.Close())
require.NoError(t, blz.Close(context.Background()))
_, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm)
require.NoError(t, err)
blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db")))
require.NoError(t, blz.Open(context.Background()))
require.NoError(t, blz.Init())
require.NoError(t, blz.Init(context.Background()))
var pPrm blobovnicza.PutPrm
pPrm.SetAddress(object.AddressOf(obj))
@ -132,7 +132,7 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) {
_, err = blz.Put(context.Background(), pPrm)
require.NoError(t, err)
require.NoError(t, blz.Close())
require.NoError(t, blz.Close(context.Background()))
testRebuildFailoverValidate(t, dir, obj, false)
}
@ -170,11 +170,11 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object
require.Equal(t, uint64(1), rRes.ObjectsMoved)
require.Equal(t, uint64(0), rRes.FilesRemoved)
require.NoError(t, b.Close())
require.NoError(t, b.Close(context.Background()))
blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
require.NoError(t, blz.Open(context.Background()))
require.NoError(t, blz.Init())
require.NoError(t, blz.Init(context.Background()))
moveInfo, err := blz.ListMoveInfo(context.Background())
require.NoError(t, err)
@ -185,11 +185,11 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object
_, err = blz.Get(context.Background(), gPrm)
require.True(t, client.IsErrObjectNotFound(err))
require.NoError(t, blz.Close())
require.NoError(t, blz.Close(context.Background()))
blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db")))
require.NoError(t, blz.Open(context.Background()))
require.NoError(t, blz.Init())
require.NoError(t, blz.Init(context.Background()))
moveInfo, err = blz.ListMoveInfo(context.Background())
require.NoError(t, err)
@ -203,7 +203,7 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object
require.True(t, bytes.Equal([]byte("0/0/0"), metaStub.storageIDs[object.AddressOf(obj)]))
}
require.NoError(t, blz.Close())
require.NoError(t, blz.Close(context.Background()))
_, err = os.Stat(filepath.Join(dir, "0", "0", "1.db.rebuild"))
require.True(t, os.IsNotExist(err))

View file

@ -93,7 +93,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
require.NoError(t, err)
}
require.NoError(t, b.Close())
require.NoError(t, b.Close(context.Background()))
})
t.Run("no rebuild single db", func(t *testing.T) {
@ -145,7 +145,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
require.NoError(t, err)
}
require.NoError(t, b.Close())
require.NoError(t, b.Close(context.Background()))
})
t.Run("rebuild by fill percent", func(t *testing.T) {
@ -214,7 +214,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
require.NoError(t, err)
}
require.NoError(t, b.Close())
require.NoError(t, b.Close(context.Background()))
})
t.Run("rebuild by overflow", func(t *testing.T) {
@ -251,7 +251,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
storageIDs: storageIDs,
guard: &sync.Mutex{},
}
require.NoError(t, b.Close())
require.NoError(t, b.Close(context.Background()))
b = NewBlobovniczaTree(
context.Background(),
WithLogger(test.NewLogger(t)),
@ -284,7 +284,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
require.NoError(t, err)
}
require.NoError(t, b.Close())
require.NoError(t, b.Close(context.Background()))
})
}
@ -318,7 +318,7 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
storageIDs := make(map[oid.Address][]byte)
storageIDs[prm.Address] = res.StorageID
require.NoError(t, b.Close())
require.NoError(t, b.Close(context.Background()))
b = NewBlobovniczaTree(
context.Background(),
@ -355,7 +355,7 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
require.NoError(t, err)
}
require.NoError(t, b.Close())
require.NoError(t, b.Close(context.Background()))
}
func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, targetDepth, targetWidth uint64, shouldMigrate bool) {
@ -399,7 +399,7 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta
}
require.NoError(t, eg.Wait())
require.NoError(t, b.Close())
require.NoError(t, b.Close(context.Background()))
b = NewBlobovniczaTree(
context.Background(),
@ -444,7 +444,7 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta
require.NoError(t, err)
}
require.NoError(t, b.Close())
require.NoError(t, b.Close(context.Background()))
}
type storageIDUpdateStub struct {

View file

@ -1,6 +1,7 @@
package blobstor
import (
"context"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@ -139,7 +140,7 @@ func WithUncompressableContentTypes(values []string) Option {
// SetReportErrorFunc allows to provide a function to be called on disk errors.
// This function MUST be called before Open.
func (b *BlobStor) SetReportErrorFunc(f func(string, error)) {
func (b *BlobStor) SetReportErrorFunc(f func(context.Context, string, error)) {
for i := range b.storage {
b.storage[i].Storage.SetReportErrorFunc(f)
}

View file

@ -54,7 +54,7 @@ func TestCompression(t *testing.T) {
WithCompressObjects(compress),
WithStorages(defaultStorages(dir, smallSizeLimit)))
require.NoError(t, bs.Open(context.Background(), mode.ReadWrite))
require.NoError(t, bs.Init())
require.NoError(t, bs.Init(context.Background()))
return bs
}
@ -91,20 +91,20 @@ func TestCompression(t *testing.T) {
blobStor := newBlobStor(t, false)
testPut(t, blobStor, 0)
testGet(t, blobStor, 0)
require.NoError(t, blobStor.Close())
require.NoError(t, blobStor.Close(context.Background()))
blobStor = newBlobStor(t, true)
testGet(t, blobStor, 0) // get uncompressed object with compress enabled
testPut(t, blobStor, 1)
testGet(t, blobStor, 1)
require.NoError(t, blobStor.Close())
require.NoError(t, blobStor.Close(context.Background()))
blobStor = newBlobStor(t, false)
testGet(t, blobStor, 0) // get old uncompressed object
testGet(t, blobStor, 1) // get compressed object with compression disabled
testPut(t, blobStor, 2)
testGet(t, blobStor, 2)
require.NoError(t, blobStor.Close())
require.NoError(t, blobStor.Close(context.Background()))
}
func TestBlobstor_needsCompression(t *testing.T) {
@ -130,7 +130,7 @@ func TestBlobstor_needsCompression(t *testing.T) {
},
}))
require.NoError(t, bs.Open(context.Background(), mode.ReadWrite))
require.NoError(t, bs.Init())
require.NoError(t, bs.Init(context.Background()))
return bs
}
@ -192,7 +192,7 @@ func TestConcurrentPut(t *testing.T) {
blobStor := New(
WithStorages(defaultStorages(dir, smallSizeLimit)))
require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite))
require.NoError(t, blobStor.Init())
require.NoError(t, blobStor.Init(context.Background()))
testGet := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) {
res, err := b.Get(context.Background(), common.GetPrm{Address: object.AddressOf(obj)})
@ -272,7 +272,7 @@ func TestConcurrentDelete(t *testing.T) {
blobStor := New(
WithStorages(defaultStorages(dir, smallSizeLimit)))
require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite))
require.NoError(t, blobStor.Init())
require.NoError(t, blobStor.Init(context.Background()))
testPut := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) {
var prm common.PutPrm

View file

@ -12,7 +12,7 @@ import (
type Storage interface {
Open(mode mode.ComponentMode) error
Init() error
Close() error
Close(context.Context) error
Type() string
Path() string
@ -23,7 +23,7 @@ type Storage interface {
// SetReportErrorFunc allows to provide a function to be called on disk errors.
// This function MUST be called before Open.
SetReportErrorFunc(f func(string, error))
SetReportErrorFunc(f func(context.Context, string, error))
SetParentID(parentID string)
Get(context.Context, GetPrm) (GetRes, error)

View file
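
The `common.Storage` interface hunk above is what forces most of this diff: `Close` now takes a `context.Context`, and the error-reporting callback gains one as well. A minimal conforming stub, assuming only the subset of methods shown in this hunk (the real interface also declares `Get`, `GetRange`, and other methods visible in the surrounding context lines):

```go
package main

import "context"

// storage is a local copy of the subset of common.Storage touched by this
// hunk; the real interface declares more methods.
type storage interface {
	Init() error
	Close(context.Context) error
	SetReportErrorFunc(f func(context.Context, string, error))
}

// nopStorage shows the minimal shape an implementation needs after the change.
type nopStorage struct{}

func (nopStorage) Init() error { return nil }

// Implementations with no use for the context simply ignore it, the same way
// FSTree does further below with Close(_ context.Context).
func (nopStorage) Close(_ context.Context) error { return nil }

func (nopStorage) SetReportErrorFunc(_ func(context.Context, string, error)) {}

func main() {
	var s storage = nopStorage{}
	defer func() { _ = s.Close(context.Background()) }()
	_ = s.Init()
}
```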

@ -50,8 +50,8 @@ var ErrInitBlobovniczas = errors.New("failure on blobovnicza initialization stag
// If BlobStor is already initialized, no action is taken.
//
// Returns wrapped ErrInitBlobovniczas on blobovnicza tree's initialization failure.
func (b *BlobStor) Init() error {
b.log.Debug(context.Background(), logs.BlobstorInitializing)
func (b *BlobStor) Init(ctx context.Context) error {
b.log.Debug(ctx, logs.BlobstorInitializing)
if err := b.compression.Init(); err != nil {
return err
@ -67,14 +67,14 @@ func (b *BlobStor) Init() error {
}
// Close releases all internal resources of BlobStor.
func (b *BlobStor) Close() error {
b.log.Debug(context.Background(), logs.BlobstorClosing)
func (b *BlobStor) Close(ctx context.Context) error {
b.log.Debug(ctx, logs.BlobstorClosing)
var firstErr error
for i := range b.storage {
err := b.storage[i].Storage.Close()
err := b.storage[i].Storage.Close(ctx)
if err != nil {
b.log.Info(context.Background(), logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error()))
b.log.Info(ctx, logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error()))
if firstErr == nil {
firstErr = err
}

View file
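
`BlobStor.Init` and `BlobStor.Close` now accept the caller's context and pass it both to the component logger and to each substorage's `Close`. The close loop keeps its "first error wins, but keep closing the rest" behavior; a self-contained sketch of that error handling, with stand-in types and `log/slog` in place of the project's logger (all assumptions, not the actual BlobStor code):

```go
package main

import (
	"context"
	"errors"
	"log/slog"
)

type subStorage interface {
	Close(context.Context) error
}

type closeOK struct{}

func (closeOK) Close(context.Context) error { return nil }

type closeFail struct{}

func (closeFail) Close(context.Context) error { return errors.New("device busy") }

// closeAll mirrors the shape of BlobStor.Close after the change: every
// substorage is closed with the caller's context, failures are logged with
// that context, and the first error is what the caller gets back.
func closeAll(ctx context.Context, storages []subStorage) error {
	var firstErr error
	for _, s := range storages {
		if err := s.Close(ctx); err != nil {
			slog.InfoContext(ctx, "could not close storage", "error", err)
			if firstErr == nil {
				firstErr = err
			}
		}
	}
	return firstErr
}

func main() {
	err := closeAll(context.Background(), []subStorage{closeOK{}, closeFail{}, closeOK{}})
	slog.Info("close finished", "error", err)
}
```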

@ -22,7 +22,7 @@ func TestExists(t *testing.T) {
b := New(WithStorages(storages))
require.NoError(t, b.Open(context.Background(), mode.ReadWrite))
require.NoError(t, b.Init())
require.NoError(t, b.Init(context.Background()))
objects := []*objectSDK.Object{
testObject(smallSizeLimit / 2),

View file

@ -1,6 +1,8 @@
package fstree
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
)
@ -28,7 +30,7 @@ func (t *FSTree) Init() error {
}
// Close implements common.Storage.
func (t *FSTree) Close() error {
func (t *FSTree) Close(_ context.Context) error {
t.metrics.Close()
return nil
}

View file

@ -606,7 +606,7 @@ func (t *FSTree) Compressor() *compression.Config {
}
// SetReportErrorFunc implements common.Storage.
func (t *FSTree) SetReportErrorFunc(_ func(string, error)) {
func (t *FSTree) SetReportErrorFunc(_ func(context.Context, string, error)) {
// Do nothing, FSTree can encounter only one error which is returned.
}

View file

@ -52,7 +52,7 @@ func TestObjectCounter(t *testing.T) {
require.Equal(t, uint64(0), size)
defer func() {
require.NoError(t, fst.Close())
require.NoError(t, fst.Close(context.Background()))
}()
addr := oidtest.Address()

View file

@ -19,7 +19,7 @@ func TestControl(t *testing.T, cons Constructor, minSize, maxSize uint64) {
require.NoError(t, s.Init())
objects := prepare(t, 10, s, minSize, maxSize)
require.NoError(t, s.Close())
require.NoError(t, s.Close(context.Background()))
require.NoError(t, s.Open(mode.ComponentReadOnly))
for i := range objects {

View file

@ -15,7 +15,7 @@ func TestDelete(t *testing.T, cons Constructor, minSize, maxSize uint64) {
s := cons(t)
require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
defer func() { require.NoError(t, s.Close()) }()
defer func() { require.NoError(t, s.Close(context.Background())) }()
objects := prepare(t, 4, s, minSize, maxSize)

View file

@ -14,7 +14,7 @@ func TestExists(t *testing.T, cons Constructor, minSize, maxSize uint64) {
s := cons(t)
require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
defer func() { require.NoError(t, s.Close()) }()
defer func() { require.NoError(t, s.Close(context.Background())) }()
objects := prepare(t, 1, s, minSize, maxSize)

View file

@ -15,7 +15,7 @@ func TestGet(t *testing.T, cons Constructor, minSize, maxSize uint64) {
s := cons(t)
require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
defer func() { require.NoError(t, s.Close()) }()
defer func() { require.NoError(t, s.Close(context.Background())) }()
objects := prepare(t, 2, s, minSize, maxSize)

View file

@ -17,7 +17,7 @@ func TestGetRange(t *testing.T, cons Constructor, minSize, maxSize uint64) {
s := cons(t)
require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
defer func() { require.NoError(t, s.Close()) }()
defer func() { require.NoError(t, s.Close(context.Background())) }()
objects := prepare(t, 1, s, minSize, maxSize)

View file

@ -14,7 +14,7 @@ func TestIterate(t *testing.T, cons Constructor, minSize, maxSize uint64) {
s := cons(t)
require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
defer func() { require.NoError(t, s.Close()) }()
defer func() { require.NoError(t, s.Close(context.Background())) }()
objects := prepare(t, 10, s, minSize, maxSize)

View file

@ -33,9 +33,9 @@ func TestIterateObjects(t *testing.T) {
require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite))
// initialize Blobstor
require.NoError(t, blobStor.Init())
require.NoError(t, blobStor.Init(context.Background()))
defer blobStor.Close()
defer blobStor.Close(context.Background())
const objNum = 5
@ -118,7 +118,7 @@ func TestIterate_IgnoreErrors(t *testing.T) {
})}
bs := New(bsOpts...)
require.NoError(t, bs.Open(ctx, mode.ReadWrite))
require.NoError(t, bs.Init())
require.NoError(t, bs.Init(ctx))
nopHandler := func(e common.IterationElement) error {
return nil

View file

@ -1,6 +1,8 @@
package memstore
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
)
@ -10,11 +12,11 @@ func (s *memstoreImpl) Open(mod mode.ComponentMode) error {
return nil
}
func (s *memstoreImpl) Init() error { return nil }
func (s *memstoreImpl) Close() error { return nil }
func (s *memstoreImpl) Type() string { return Type }
func (s *memstoreImpl) Path() string { return s.rootPath }
func (s *memstoreImpl) SetCompressor(cc *compression.Config) { s.compression = cc }
func (s *memstoreImpl) Compressor() *compression.Config { return s.compression }
func (s *memstoreImpl) SetReportErrorFunc(func(string, error)) {}
func (s *memstoreImpl) SetParentID(string) {}
func (s *memstoreImpl) Init() error { return nil }
func (s *memstoreImpl) Close(context.Context) error { return nil }
func (s *memstoreImpl) Type() string { return Type }
func (s *memstoreImpl) Path() string { return s.rootPath }
func (s *memstoreImpl) SetCompressor(cc *compression.Config) { s.compression = cc }
func (s *memstoreImpl) Compressor() *compression.Config { return s.compression }
func (s *memstoreImpl) SetReportErrorFunc(func(context.Context, string, error)) {}
func (s *memstoreImpl) SetParentID(string) {}

View file

@ -16,7 +16,7 @@ func TestSimpleLifecycle(t *testing.T) {
s := New(
WithRootPath("memstore"),
)
defer func() { require.NoError(t, s.Close()) }()
defer func() { require.NoError(t, s.Close(context.Background())) }()
require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())

View file

@ -20,10 +20,10 @@ func (b *BlobStor) SetMode(ctx context.Context, m mode.Mode) error {
return nil
}
err := b.Close()
err := b.Close(ctx)
if err == nil {
if err = b.openBlobStor(ctx, m); err == nil {
err = b.Init()
err = b.Init(ctx)
}
}
if err != nil {

View file

@ -106,7 +106,7 @@ func BenchmarkSubstorageReadPerf(b *testing.B) {
b.Run(fmt.Sprintf("%s-%s", stEntry.desc, tt.desc), func(b *testing.B) {
objGen := tt.objGen()
st := stEntry.open(b)
defer func() { require.NoError(b, st.Close()) }()
defer func() { require.NoError(b, st.Close(context.Background())) }()
// Fill database
var errG errgroup.Group
@ -161,7 +161,7 @@ func BenchmarkSubstorageWritePerf(b *testing.B) {
b.Run(fmt.Sprintf("%s-%s", stEntry.desc, genEntry.desc), func(b *testing.B) {
gen := genEntry.create()
st := stEntry.open(b)
defer func() { require.NoError(b, st.Close()) }()
defer func() { require.NoError(b, st.Close(context.Background())) }()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
@ -200,7 +200,7 @@ func BenchmarkSubstorageIteratePerf(b *testing.B) {
b.Run(fmt.Sprintf("%s-%s", stEntry.desc, tt.desc), func(b *testing.B) {
objGen := tt.objGen()
st := stEntry.open(b)
defer func() { require.NoError(b, st.Close()) }()
defer func() { require.NoError(b, st.Close(context.Background())) }()
// Fill database
for range tt.size {

View file

@ -1,6 +1,8 @@
package teststore
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
@ -17,7 +19,7 @@ type cfg struct {
Path func() string
SetCompressor func(cc *compression.Config)
Compressor func() *compression.Config
SetReportErrorFunc func(f func(string, error))
SetReportErrorFunc func(f func(context.Context, string, error))
Get func(common.GetPrm) (common.GetRes, error)
GetRange func(common.GetRangePrm) (common.GetRangeRes, error)
@ -51,7 +53,7 @@ func WithCompressor(f func() *compression.Config) Option {
return func(c *cfg) { c.overrides.Compressor = f }
}
func WithReportErrorFunc(f func(func(string, error))) Option {
func WithReportErrorFunc(f func(func(context.Context, string, error))) Option {
return func(c *cfg) { c.overrides.SetReportErrorFunc = f }
}

View file

@ -77,14 +77,14 @@ func (s *TestStore) Init() error {
}
}
func (s *TestStore) Close() error {
func (s *TestStore) Close(ctx context.Context) error {
s.mu.RLock()
defer s.mu.RUnlock()
switch {
case s.overrides.Close != nil:
return s.overrides.Close()
case s.st != nil:
return s.st.Close()
return s.st.Close(ctx)
default:
panic("unexpected storage call: Close()")
}
@ -142,7 +142,7 @@ func (s *TestStore) Compressor() *compression.Config {
}
}
func (s *TestStore) SetReportErrorFunc(f func(string, error)) {
func (s *TestStore) SetReportErrorFunc(f func(context.Context, string, error)) {
s.mu.RLock()
defer s.mu.RUnlock()
switch {

Some files were not shown because too many files have changed in this diff.