From f729d1a8c87742dde73c524be5dd82af2962e8df Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 21 Oct 2024 16:27:28 +0300 Subject: [PATCH] [#1437] node: Fix contextcheck linter Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-ir/config.go | 4 +- cmd/frostfs-ir/httpcomponent.go | 30 +++++----- cmd/frostfs-ir/main.go | 12 ++-- cmd/frostfs-ir/pprof.go | 18 +++--- .../internal/blobovnicza/inspect.go | 2 +- cmd/frostfs-lens/internal/blobovnicza/list.go | 2 +- cmd/frostfs-lens/internal/meta/inspect.go | 2 +- .../internal/meta/list-garbage.go | 2 +- .../internal/meta/list-graveyard.go | 2 +- cmd/frostfs-node/config.go | 60 +++++++++---------- cmd/frostfs-node/container.go | 8 +-- cmd/frostfs-node/control.go | 22 +++---- cmd/frostfs-node/grpc.go | 56 ++++++++--------- cmd/frostfs-node/httpcomponent.go | 10 ++-- cmd/frostfs-node/main.go | 56 ++++++++--------- cmd/frostfs-node/morph.go | 8 +-- cmd/frostfs-node/netmap.go | 48 +++++++-------- cmd/frostfs-node/object.go | 4 +- cmd/frostfs-node/pprof.go | 5 +- cmd/frostfs-node/runtime.go | 6 +- internal/audit/request.go | 8 +-- pkg/core/object/fmt.go | 12 ++-- pkg/core/object/sender_classifier.go | 9 +-- pkg/innerring/blocktimer.go | 20 +++---- pkg/innerring/blocktimer_test.go | 7 ++- pkg/innerring/initialization.go | 51 +++++++++------- pkg/innerring/innerring.go | 20 +++---- pkg/innerring/notary.go | 13 ++-- pkg/innerring/processors/alphabet/handlers.go | 8 +-- .../processors/alphabet/handlers_test.go | 11 ++-- .../processors/alphabet/process_emit.go | 32 +++++----- .../processors/alphabet/processor.go | 8 +-- pkg/innerring/processors/balance/handlers.go | 2 +- .../processors/balance/handlers_test.go | 4 +- .../processors/balance/process_assets.go | 10 ++-- pkg/innerring/processors/balance/processor.go | 8 +-- .../processors/container/handlers.go | 4 +- .../processors/container/handlers_test.go | 2 +- .../processors/container/process_container.go | 26 ++++---- .../processors/container/processor.go | 6 +- pkg/innerring/processors/frostfs/handlers.go | 8 +-- .../processors/frostfs/handlers_test.go | 10 ++-- .../processors/frostfs/process_assets.go | 40 ++++++------- .../processors/frostfs/process_config.go | 10 ++-- pkg/innerring/processors/frostfs/processor.go | 14 ++--- .../processors/governance/handlers_test.go | 8 +-- .../processors/governance/process_update.go | 32 +++++----- .../processors/governance/processor.go | 8 +-- pkg/innerring/processors/netmap/handlers.go | 10 ++-- .../processors/netmap/handlers_test.go | 8 +-- .../processors/netmap/process_cleanup.go | 15 ++--- .../processors/netmap/process_epoch.go | 8 +-- .../processors/netmap/process_peers.go | 27 +++++---- pkg/innerring/processors/netmap/processor.go | 12 ++-- pkg/innerring/processors/netmap/wrappers.go | 18 +++--- pkg/innerring/state.go | 10 ++-- pkg/innerring/state_test.go | 4 +- .../blobovnicza/blobovnicza_test.go | 4 +- .../blobovnicza/control.go | 22 +++---- .../blobovnicza/get_test.go | 6 +- .../blobovnicza/iterate_test.go | 2 +- .../blobstor/blobovniczatree/active.go | 14 ++--- .../blobstor/blobovniczatree/blobovnicza.go | 2 +- .../blobstor/blobovniczatree/cache.go | 4 +- .../blobovniczatree/concurrency_test.go | 2 +- .../blobstor/blobovniczatree/control.go | 6 +- .../blobstor/blobovniczatree/control_test.go | 6 +- .../blobstor/blobovniczatree/count.go | 2 +- .../blobstor/blobovniczatree/delete.go | 4 +- .../blobstor/blobovniczatree/exists.go | 2 +- .../blobstor/blobovniczatree/exists_test.go | 2 +- .../blobstor/blobovniczatree/get.go | 4 +- 
.../blobstor/blobovniczatree/get_range.go | 4 +- .../blobstor/blobovniczatree/iterate.go | 2 +- .../blobstor/blobovniczatree/manager.go | 16 ++--- .../blobstor/blobovniczatree/option.go | 5 +- .../blobstor/blobovniczatree/put.go | 6 +- .../blobstor/blobovniczatree/rebuild.go | 28 ++++----- .../blobovniczatree/rebuild_failover_test.go | 30 +++++----- .../blobstor/blobovniczatree/rebuild_test.go | 18 +++--- pkg/local_object_storage/blobstor/blobstor.go | 3 +- .../blobstor/blobstor_test.go | 14 ++--- .../blobstor/common/storage.go | 4 +- pkg/local_object_storage/blobstor/control.go | 12 ++-- .../blobstor/exists_test.go | 2 +- .../blobstor/fstree/control.go | 4 +- .../blobstor/fstree/fstree.go | 2 +- .../blobstor/fstree/fstree_test.go | 2 +- .../blobstor/internal/blobstortest/control.go | 2 +- .../blobstor/internal/blobstortest/delete.go | 2 +- .../blobstor/internal/blobstortest/exists.go | 2 +- .../blobstor/internal/blobstortest/get.go | 2 +- .../internal/blobstortest/get_range.go | 2 +- .../blobstor/internal/blobstortest/iterate.go | 2 +- .../blobstor/iterate_test.go | 6 +- .../blobstor/memstore/control.go | 18 +++--- .../blobstor/memstore/memstore_test.go | 2 +- pkg/local_object_storage/blobstor/mode.go | 4 +- .../blobstor/perf_test.go | 6 +- .../blobstor/teststore/option.go | 6 +- .../blobstor/teststore/teststore.go | 6 +- pkg/local_object_storage/engine/container.go | 14 ++--- pkg/local_object_storage/engine/delete.go | 6 +- pkg/local_object_storage/engine/engine.go | 19 +++--- pkg/local_object_storage/engine/exists.go | 2 +- pkg/local_object_storage/engine/get.go | 2 +- pkg/local_object_storage/engine/head.go | 2 +- pkg/local_object_storage/engine/inhume.go | 10 ++-- pkg/local_object_storage/engine/lock.go | 8 +-- pkg/local_object_storage/engine/put.go | 2 +- pkg/local_object_storage/engine/range.go | 2 +- pkg/local_object_storage/engine/select.go | 4 +- pkg/local_object_storage/engine/shards.go | 6 +- pkg/local_object_storage/engine/tree.go | 24 ++++---- .../internal/storagetest/storage.go | 30 +++++----- .../metabase/containers_test.go | 6 +- pkg/local_object_storage/metabase/control.go | 12 ++-- .../metabase/control_test.go | 2 +- .../metabase/counter_test.go | 18 +++--- pkg/local_object_storage/metabase/db_test.go | 2 +- .../metabase/delete_ec_test.go | 8 +-- .../metabase/delete_meta_test.go | 4 +- .../metabase/delete_test.go | 12 ++-- .../metabase/exists_test.go | 3 +- .../metabase/expired_test.go | 2 +- pkg/local_object_storage/metabase/get_test.go | 6 +- .../metabase/graveyard_test.go | 12 ++-- .../metabase/inhume_ec_test.go | 4 +- .../metabase/inhume_test.go | 6 +- .../metabase/iterators_test.go | 4 +- .../metabase/list_test.go | 8 +-- .../metabase/lock_test.go | 6 +- pkg/local_object_storage/metabase/mode.go | 4 +- .../metabase/mode_test.go | 8 +-- pkg/local_object_storage/metabase/put_test.go | 6 +- .../metabase/reset_test.go | 4 +- .../metabase/select_test.go | 34 +++++------ .../metabase/storage_id_test.go | 4 +- .../metabase/upgrade_test.go | 12 ++-- .../metabase/version_test.go | 32 +++++----- .../pilorama/bench_test.go | 4 +- pkg/local_object_storage/pilorama/boltdb.go | 10 ++-- pkg/local_object_storage/pilorama/forest.go | 4 +- .../pilorama/forest_test.go | 54 ++++++++--------- .../pilorama/interface.go | 4 +- .../pilorama/mode_test.go | 8 +-- pkg/local_object_storage/shard/control.go | 16 ++--- pkg/local_object_storage/shard/gc.go | 16 ++--- .../shard/gc_internal_test.go | 4 +- pkg/local_object_storage/shard/lock_test.go | 4 +- pkg/local_object_storage/shard/shard.go | 
10 ++-- pkg/local_object_storage/shard/shard_test.go | 4 +- .../writecache/benchmark/writecache_test.go | 8 +-- pkg/local_object_storage/writecache/cache.go | 10 ++-- pkg/local_object_storage/writecache/flush.go | 14 ++--- .../writecache/flush_test.go | 16 ++--- pkg/local_object_storage/writecache/mode.go | 4 +- .../writecache/mode_test.go | 8 +-- .../writecache/options.go | 6 +- .../writecache/writecache.go | 4 +- pkg/morph/client/balance/burn.go | 6 +- pkg/morph/client/balance/lock.go | 6 +- pkg/morph/client/balance/mint.go | 6 +- pkg/morph/client/balance/transfer.go | 5 +- pkg/morph/client/client.go | 4 +- pkg/morph/client/container/delete.go | 9 +-- pkg/morph/client/container/estimations.go | 9 +-- pkg/morph/client/container/put.go | 9 +-- pkg/morph/client/frostfs/cheque.go | 10 ++-- pkg/morph/client/netmap/config.go | 5 +- pkg/morph/client/netmap/innerring.go | 5 +- pkg/morph/client/netmap/new_epoch.go | 9 +-- pkg/morph/client/netmap/peer.go | 9 +-- pkg/morph/client/netmap/update_state.go | 5 +- pkg/morph/client/notary.go | 40 +++++++------ pkg/morph/client/static.go | 8 ++- pkg/morph/event/handlers.go | 2 +- pkg/morph/event/listener.go | 8 +-- pkg/morph/event/listener_test.go | 4 +- pkg/network/transport/object/grpc/service.go | 4 +- pkg/services/apemanager/audit.go | 6 +- pkg/services/container/audit.go | 8 +-- pkg/services/container/morph/executor.go | 12 ++-- pkg/services/container/morph/executor_test.go | 4 +- pkg/services/control/ir/server/audit.go | 8 +-- pkg/services/control/ir/server/calls.go | 18 +++--- pkg/services/control/server/server.go | 5 +- .../control/server/set_netmap_status.go | 6 +- pkg/services/object/acl/v2/service.go | 34 +++++------ pkg/services/object/ape/service.go | 8 +-- pkg/services/object/audit.go | 34 +++++------ pkg/services/object/common.go | 8 +-- pkg/services/object/common/writer/common.go | 4 +- pkg/services/object/common/writer/ec.go | 2 +- pkg/services/object/get/assembleec.go | 2 +- pkg/services/object/get/container.go | 4 +- pkg/services/object/get/remote.go | 2 +- pkg/services/object/get/request.go | 12 ++-- pkg/services/object/metrics.go | 12 ++-- pkg/services/object/response.go | 8 +-- pkg/services/object/search/search.go | 10 ++-- pkg/services/object/server.go | 4 +- pkg/services/object/sign.go | 8 +-- pkg/services/object/transport_splitter.go | 8 +-- pkg/services/object/util/log.go | 8 +-- .../object_manager/tombstone/checker.go | 6 +- pkg/services/policer/check.go | 8 +-- pkg/services/tree/getsubtree_test.go | 2 +- pkg/services/tree/sync.go | 6 +- pkg/util/http/calls.go | 4 +- scripts/populate-metabase/main.go | 6 +- 211 files changed, 1088 insertions(+), 1054 deletions(-) diff --git a/cmd/frostfs-ir/config.go b/cmd/frostfs-ir/config.go index 7415e8e70..09af08525 100644 --- a/cmd/frostfs-ir/config.go +++ b/cmd/frostfs-ir/config.go @@ -93,8 +93,8 @@ func watchForSignal(ctx context.Context, cancel func()) { if err != nil { log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err)) } - pprofCmp.reload() - metricsCmp.reload() + pprofCmp.reload(ctx) + metricsCmp.reload(ctx) log.Info(ctx, logs.FrostFSIRReloadExtraWallets) err = innerRing.SetExtraWallets(cfg) if err != nil { diff --git a/cmd/frostfs-ir/httpcomponent.go b/cmd/frostfs-ir/httpcomponent.go index 685ef61ad..a8eef6010 100644 --- a/cmd/frostfs-ir/httpcomponent.go +++ b/cmd/frostfs-ir/httpcomponent.go @@ -25,8 +25,8 @@ const ( shutdownTimeoutKeyPostfix = ".shutdown_timeout" ) -func (c *httpComponent) init() { - log.Info(context.Background(), "init "+c.name) +func (c 
*httpComponent) init(ctx context.Context) { + log.Info(ctx, "init "+c.name) c.enabled = cfg.GetBool(c.name + enabledKeyPostfix) c.address = cfg.GetString(c.name + addressKeyPostfix) c.shutdownDur = cfg.GetDuration(c.name + shutdownTimeoutKeyPostfix) @@ -40,14 +40,14 @@ func (c *httpComponent) init() { httputil.WithShutdownTimeout(c.shutdownDur), ) } else { - log.Info(context.Background(), c.name+" is disabled, skip") + log.Info(ctx, c.name+" is disabled, skip") c.srv = nil } } -func (c *httpComponent) start() { +func (c *httpComponent) start(ctx context.Context) { if c.srv != nil { - log.Info(context.Background(), "start "+c.name) + log.Info(ctx, "start "+c.name) wg.Add(1) go func() { defer wg.Done() @@ -56,10 +56,10 @@ func (c *httpComponent) start() { } } -func (c *httpComponent) shutdown() error { +func (c *httpComponent) shutdown(ctx context.Context) error { if c.srv != nil { - log.Info(context.Background(), "shutdown "+c.name) - return c.srv.Shutdown() + log.Info(ctx, "shutdown "+c.name) + return c.srv.Shutdown(ctx) } return nil } @@ -71,17 +71,17 @@ func (c *httpComponent) needReload() bool { return enabled != c.enabled || enabled && (address != c.address || dur != c.shutdownDur) } -func (c *httpComponent) reload() { - log.Info(context.Background(), "reload "+c.name) +func (c *httpComponent) reload(ctx context.Context) { + log.Info(ctx, "reload "+c.name) if c.needReload() { - log.Info(context.Background(), c.name+" config updated") - if err := c.shutdown(); err != nil { - log.Debug(context.Background(), logs.FrostFSIRCouldNotShutdownHTTPServer, + log.Info(ctx, c.name+" config updated") + if err := c.shutdown(ctx); err != nil { + log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer, zap.String("error", err.Error()), ) } else { - c.init() - c.start() + c.init(ctx) + c.start(ctx) } } } diff --git a/cmd/frostfs-ir/main.go b/cmd/frostfs-ir/main.go index bcb2c5dd8..e86c04b9e 100644 --- a/cmd/frostfs-ir/main.go +++ b/cmd/frostfs-ir/main.go @@ -87,17 +87,17 @@ func main() { ctx, cancel := context.WithCancel(context.Background()) pprofCmp = newPprofComponent() - pprofCmp.init() + pprofCmp.init(ctx) metricsCmp = newMetricsComponent() - metricsCmp.init() + metricsCmp.init(ctx) audit.Store(cfg.GetBool("audit.enabled")) innerRing, err = innerring.New(ctx, log, cfg, intErr, metrics, cmode, audit) exitErr(err) - pprofCmp.start() - metricsCmp.start() + pprofCmp.start(ctx) + metricsCmp.start(ctx) // start inner ring err = innerRing.Start(ctx, intErr) @@ -117,12 +117,12 @@ func main() { func shutdown(ctx context.Context) { innerRing.Stop(ctx) - if err := metricsCmp.shutdown(); err != nil { + if err := metricsCmp.shutdown(ctx); err != nil { log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer, zap.String("error", err.Error()), ) } - if err := pprofCmp.shutdown(); err != nil { + if err := pprofCmp.shutdown(ctx); err != nil { log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer, zap.String("error", err.Error()), ) diff --git a/cmd/frostfs-ir/pprof.go b/cmd/frostfs-ir/pprof.go index e95fd117f..8e81d8b85 100644 --- a/cmd/frostfs-ir/pprof.go +++ b/cmd/frostfs-ir/pprof.go @@ -29,8 +29,8 @@ func newPprofComponent() *pprofComponent { } } -func (c *pprofComponent) init() { - c.httpComponent.init() +func (c *pprofComponent) init(ctx context.Context) { + c.httpComponent.init(ctx) if c.enabled { c.blockRate = cfg.GetInt(pprofBlockRateKey) @@ -52,17 +52,17 @@ func (c *pprofComponent) needReload() bool { c.enabled && (c.blockRate != blockRate || c.mutexRate != mutexRate) } -func (c *pprofComponent) reload() 
{ - log.Info(context.Background(), "reload "+c.name) +func (c *pprofComponent) reload(ctx context.Context) { + log.Info(ctx, "reload "+c.name) if c.needReload() { - log.Info(context.Background(), c.name+" config updated") - if err := c.shutdown(); err != nil { - log.Debug(context.Background(), logs.FrostFSIRCouldNotShutdownHTTPServer, + log.Info(ctx, c.name+" config updated") + if err := c.shutdown(ctx); err != nil { + log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer, zap.String("error", err.Error())) return } - c.init() - c.start() + c.init(ctx) + c.start(ctx) } } diff --git a/cmd/frostfs-lens/internal/blobovnicza/inspect.go b/cmd/frostfs-lens/internal/blobovnicza/inspect.go index b1a6e3fd2..e7e2c0769 100644 --- a/cmd/frostfs-lens/internal/blobovnicza/inspect.go +++ b/cmd/frostfs-lens/internal/blobovnicza/inspect.go @@ -28,7 +28,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) { common.ExitOnErr(cmd, common.Errf("invalid address argument: %w", err)) blz := openBlobovnicza(cmd) - defer blz.Close() + defer blz.Close(cmd.Context()) var prm blobovnicza.GetPrm prm.SetAddress(addr) diff --git a/cmd/frostfs-lens/internal/blobovnicza/list.go b/cmd/frostfs-lens/internal/blobovnicza/list.go index d327dbc41..d41a15bcf 100644 --- a/cmd/frostfs-lens/internal/blobovnicza/list.go +++ b/cmd/frostfs-lens/internal/blobovnicza/list.go @@ -32,7 +32,7 @@ func listFunc(cmd *cobra.Command, _ []string) { } blz := openBlobovnicza(cmd) - defer blz.Close() + defer blz.Close(cmd.Context()) err := blobovnicza.IterateAddresses(context.Background(), blz, wAddr) common.ExitOnErr(cmd, common.Errf("blobovnicza iterator failure: %w", err)) diff --git a/cmd/frostfs-lens/internal/meta/inspect.go b/cmd/frostfs-lens/internal/meta/inspect.go index 9eb60f966..f436343c7 100644 --- a/cmd/frostfs-lens/internal/meta/inspect.go +++ b/cmd/frostfs-lens/internal/meta/inspect.go @@ -31,7 +31,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) { common.ExitOnErr(cmd, common.Errf("invalid address argument: %w", err)) db := openMeta(cmd) - defer db.Close() + defer db.Close(cmd.Context()) storageID := meta.StorageIDPrm{} storageID.SetAddress(addr) diff --git a/cmd/frostfs-lens/internal/meta/list-garbage.go b/cmd/frostfs-lens/internal/meta/list-garbage.go index 61b10ca1f..6b27a232f 100644 --- a/cmd/frostfs-lens/internal/meta/list-garbage.go +++ b/cmd/frostfs-lens/internal/meta/list-garbage.go @@ -19,7 +19,7 @@ func init() { func listGarbageFunc(cmd *cobra.Command, _ []string) { db := openMeta(cmd) - defer db.Close() + defer db.Close(cmd.Context()) var garbPrm meta.GarbageIterationPrm garbPrm.SetHandler( diff --git a/cmd/frostfs-lens/internal/meta/list-graveyard.go b/cmd/frostfs-lens/internal/meta/list-graveyard.go index 19a93691c..45642e74b 100644 --- a/cmd/frostfs-lens/internal/meta/list-graveyard.go +++ b/cmd/frostfs-lens/internal/meta/list-graveyard.go @@ -19,7 +19,7 @@ func init() { func listGraveyardFunc(cmd *cobra.Command, _ []string) { db := openMeta(cmd) - defer db.Close() + defer db.Close(cmd.Context()) var gravePrm meta.GraveyardIterationPrm gravePrm.SetHandler( diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index e332cbc03..afb9942b1 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -397,16 +397,16 @@ type internals struct { } // starts node's maintenance. 
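
The recurring change in the hunks above is mechanical but worth spelling out: every lifecycle method of the IR HTTP components (init/start/shutdown/reload) now takes the caller's context instead of minting context.Background() for each log call, and shutdown forwards it to srv.Shutdown(ctx). A minimal, self-contained sketch of that shape (toy component type, with stdlib log standing in for the frostfs logger):

package main

import (
	"context"
	"log"
)

// component mirrors the init/start/shutdown/reload lifecycle above. All
// methods accept the caller's context so logging and shutdown inherit
// cancellation and tracing data instead of using context.Background().
type component struct{ name string }

func (c *component) init(ctx context.Context)  { log.Printf("init %s", c.name) }
func (c *component) start(ctx context.Context) { log.Printf("start %s", c.name) }

func (c *component) shutdown(ctx context.Context) error {
	log.Printf("shutdown %s", c.name) // srv.Shutdown(ctx) in the real code
	return nil
}

// reload follows the same shutdown-then-init-then-start sequence as
// httpComponent.reload, reusing one context throughout.
func (c *component) reload(ctx context.Context) {
	if err := c.shutdown(ctx); err != nil {
		log.Printf("could not shutdown %s: %v", c.name, err)
		return
	}
	c.init(ctx)
	c.start(ctx)
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	c := &component{name: "pprof"}
	c.init(ctx)
	c.start(ctx)
	c.reload(ctx)
}
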
-func (c *cfg) startMaintenance() { +func (c *cfg) startMaintenance(ctx context.Context) { c.isMaintenance.Store(true) c.cfgNetmap.state.setControlNetmapStatus(control.NetmapStatus_MAINTENANCE) - c.log.Info(context.Background(), logs.FrostFSNodeStartedLocalNodesMaintenance) + c.log.Info(ctx, logs.FrostFSNodeStartedLocalNodesMaintenance) } // stops node's maintenance. -func (c *internals) stopMaintenance() { +func (c *internals) stopMaintenance(ctx context.Context) { if c.isMaintenance.CompareAndSwap(true, false) { - c.log.Info(context.Background(), logs.FrostFSNodeStoppedLocalNodesMaintenance) + c.log.Info(ctx, logs.FrostFSNodeStoppedLocalNodesMaintenance) } } @@ -1131,10 +1131,10 @@ func initLocalStorage(ctx context.Context, c *cfg) { }) } -func initAccessPolicyEngine(_ context.Context, c *cfg) { +func initAccessPolicyEngine(ctx context.Context, c *cfg) { var localOverrideDB chainbase.LocalOverrideDatabase if nodeconfig.PersistentPolicyRules(c.appCfg).Path() == "" { - c.log.Warn(context.Background(), logs.FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed) + c.log.Warn(ctx, logs.FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed) localOverrideDB = chainbase.NewInmemoryLocalOverrideDatabase() } else { localOverrideDB = chainbase.NewBoltLocalOverrideDatabase( @@ -1159,7 +1159,7 @@ func initAccessPolicyEngine(_ context.Context, c *cfg) { c.onShutdown(func() { if err := ape.LocalOverrideDatabaseCore().Close(); err != nil { - c.log.Warn(context.Background(), logs.FrostFSNodeAccessPolicyEngineClosingFailure, + c.log.Warn(ctx, logs.FrostFSNodeAccessPolicyEngineClosingFailure, zap.Error(err), ) } @@ -1208,10 +1208,10 @@ func (c *cfg) setContractNodeInfo(ni *netmap.NodeInfo) { c.cfgNetmap.state.setNodeInfo(ni) } -func (c *cfg) updateContractNodeInfo(epoch uint64) { +func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) { ni, err := c.netmapLocalNodeState(epoch) if err != nil { - c.log.Error(context.Background(), logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch, + c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch, zap.Uint64("epoch", epoch), zap.String("error", err.Error())) return @@ -1223,19 +1223,19 @@ func (c *cfg) updateContractNodeInfo(epoch uint64) { // bootstrapWithState calls "addPeer" method of the Sidechain Netmap contract // with the binary-encoded information from the current node's configuration. // The state is set using the provided setter which MUST NOT be nil. -func (c *cfg) bootstrapWithState(stateSetter func(*netmap.NodeInfo)) error { +func (c *cfg) bootstrapWithState(ctx context.Context, stateSetter func(*netmap.NodeInfo)) error { ni := c.cfgNodeInfo.localInfo stateSetter(&ni) prm := nmClient.AddPeerPrm{} prm.SetNodeInfo(ni) - return c.cfgNetmap.wrapper.AddPeer(prm) + return c.cfgNetmap.wrapper.AddPeer(ctx, prm) } // bootstrapOnline calls cfg.bootstrapWithState with "online" state. 
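
For context on what the linter enforces: contextcheck flags call sites that construct a fresh context while the enclosing function already receives one, which is exactly why free functions such as bootstrapOnline now take ctx as their first argument. A hedged before/after illustration with toy names (not from this repo):

package main

import "context"

func fetch(ctx context.Context) error { return ctx.Err() }

// Before: the caller's ctx is dropped, so cancellation and deadlines do
// not propagate; contextcheck reports a non-inherited context here.
func badHandler(ctx context.Context) error {
	return fetch(context.Background())
}

// After: the caller's ctx is threaded through, matching this patch.
func goodHandler(ctx context.Context) error {
	return fetch(ctx)
}

func main() {
	_ = badHandler(context.Background())
	_ = goodHandler(context.Background())
}
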
-func bootstrapOnline(c *cfg) error { - return c.bootstrapWithState(func(ni *netmap.NodeInfo) { +func bootstrapOnline(ctx context.Context, c *cfg) error { + return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Online) }) } @@ -1243,21 +1243,21 @@ func bootstrapOnline(c *cfg) error { // bootstrap calls bootstrapWithState with: // - "maintenance" state if maintenance is in progress on the current node // - "online", otherwise -func (c *cfg) bootstrap() error { +func (c *cfg) bootstrap(ctx context.Context) error { // switch to online except when under maintenance st := c.cfgNetmap.state.controlNetmapStatus() if st == control.NetmapStatus_MAINTENANCE { - c.log.Info(context.Background(), logs.FrostFSNodeBootstrappingWithTheMaintenanceState) - return c.bootstrapWithState(func(ni *netmap.NodeInfo) { + c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithTheMaintenanceState) + return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Maintenance) }) } - c.log.Info(context.Background(), logs.FrostFSNodeBootstrappingWithOnlineState, + c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithOnlineState, zap.Stringer("previous", st), ) - return bootstrapOnline(c) + return bootstrapOnline(ctx, c) } // needBootstrap checks if local node should be registered in network on bootup. @@ -1284,7 +1284,7 @@ func (c *cfg) signalWatcher(ctx context.Context) { case <-ch: c.log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping) - c.shutdown() + c.shutdown(ctx) c.log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete) return @@ -1292,7 +1292,7 @@ func (c *cfg) signalWatcher(ctx context.Context) { c.log.Warn(ctx, logs.FrostFSNodeInternalApplicationError, zap.String("message", err.Error())) - c.shutdown() + c.shutdown(ctx) c.log.Info(ctx, logs.FrostFSNodeInternalErrorProcessingIsComplete) return @@ -1304,7 +1304,7 @@ func (c *cfg) signalWatcher(ctx context.Context) { case <-ch: c.log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping) - c.shutdown() + c.shutdown(ctx) c.log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete) return @@ -1312,7 +1312,7 @@ func (c *cfg) signalWatcher(ctx context.Context) { c.log.Warn(ctx, logs.FrostFSNodeInternalApplicationError, zap.String("message", err.Error())) - c.shutdown() + c.shutdown(ctx) c.log.Info(ctx, logs.FrostFSNodeInternalErrorProcessingIsComplete) return @@ -1324,11 +1324,11 @@ func (c *cfg) signalWatcher(ctx context.Context) { func (c *cfg) reloadConfig(ctx context.Context) { c.log.Info(ctx, logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration) - if !c.compareAndSwapHealthStatus(control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) { + if !c.compareAndSwapHealthStatus(ctx, control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) { c.log.Info(ctx, logs.FrostFSNodeSIGHUPSkip) return } - defer c.compareAndSwapHealthStatus(control.HealthStatus_RECONFIGURING, control.HealthStatus_READY) + defer c.compareAndSwapHealthStatus(ctx, control.HealthStatus_RECONFIGURING, control.HealthStatus_READY) err := c.reloadAppConfig() if err != nil { @@ -1390,7 +1390,7 @@ func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp { components = append(components, dCmp{"logger", logPrm.Reload}) components = append(components, dCmp{"runtime", func() error { - setRuntimeParameters(c) + setRuntimeParameters(ctx, c) return nil }}) components = append(components, dCmp{"audit", func() error { @@ -1476,14 +1476,14 @@ func (c *cfg) createContainerInfoProvider(ctx 
context.Context) container.InfoPro }) } -func (c *cfg) shutdown() { - old := c.swapHealthStatus(control.HealthStatus_SHUTTING_DOWN) +func (c *cfg) shutdown(ctx context.Context) { + old := c.swapHealthStatus(ctx, control.HealthStatus_SHUTTING_DOWN) if old == control.HealthStatus_SHUTTING_DOWN { - c.log.Info(context.Background(), logs.FrostFSNodeShutdownSkip) + c.log.Info(ctx, logs.FrostFSNodeShutdownSkip) return } if old == control.HealthStatus_STARTING { - c.log.Warn(context.Background(), logs.FrostFSNodeShutdownWhenNotReady) + c.log.Warn(ctx, logs.FrostFSNodeShutdownWhenNotReady) } c.ctxCancel() @@ -1493,6 +1493,6 @@ func (c *cfg) shutdown() { } if err := sdnotify.ClearStatus(); err != nil { - c.log.Error(context.Background(), logs.FailedToReportStatusToSystemd, zap.Error(err)) + c.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err)) } } diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go index c59dab747..9dc7beff4 100644 --- a/cmd/frostfs-node/container.go +++ b/cmd/frostfs-node/container.go @@ -250,10 +250,10 @@ type morphContainerWriter struct { neoClient *cntClient.Client } -func (m morphContainerWriter) Put(cnr containerCore.Container) (*cid.ID, error) { - return cntClient.Put(m.neoClient, cnr) +func (m morphContainerWriter) Put(ctx context.Context, cnr containerCore.Container) (*cid.ID, error) { + return cntClient.Put(ctx, m.neoClient, cnr) } -func (m morphContainerWriter) Delete(witness containerCore.RemovalWitness) error { - return cntClient.Delete(m.neoClient, witness) +func (m morphContainerWriter) Delete(ctx context.Context, witness containerCore.RemovalWitness) error { + return cntClient.Delete(ctx, m.neoClient, witness) } diff --git a/cmd/frostfs-node/control.go b/cmd/frostfs-node/control.go index ffac23eec..ecd82bba5 100644 --- a/cmd/frostfs-node/control.go +++ b/cmd/frostfs-node/control.go @@ -16,7 +16,7 @@ import ( const serviceNameControl = "control" -func initControlService(c *cfg) { +func initControlService(ctx context.Context, c *cfg) { endpoint := controlconfig.GRPC(c.appCfg).Endpoint() if endpoint == controlconfig.GRPCEndpointDefault { return @@ -46,14 +46,14 @@ func initControlService(c *cfg) { lis, err := net.Listen("tcp", endpoint) if err != nil { - c.log.Error(context.Background(), logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err)) + c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err)) return } c.cfgControlService.server = grpc.NewServer() c.onShutdown(func() { - stopGRPC("FrostFS Control API", c.cfgControlService.server, c.log) + stopGRPC(ctx, "FrostFS Control API", c.cfgControlService.server, c.log) }) control.RegisterControlServiceServer(c.cfgControlService.server, ctlSvc) @@ -72,23 +72,23 @@ func (c *cfg) NetmapStatus() control.NetmapStatus { return c.cfgNetmap.state.controlNetmapStatus() } -func (c *cfg) setHealthStatus(st control.HealthStatus) { - c.notifySystemd(st) +func (c *cfg) setHealthStatus(ctx context.Context, st control.HealthStatus) { + c.notifySystemd(ctx, st) c.healthStatus.Store(int32(st)) c.metricsCollector.State().SetHealth(int32(st)) } -func (c *cfg) compareAndSwapHealthStatus(oldSt, newSt control.HealthStatus) (swapped bool) { +func (c *cfg) compareAndSwapHealthStatus(ctx context.Context, oldSt, newSt control.HealthStatus) (swapped bool) { if swapped = c.healthStatus.CompareAndSwap(int32(oldSt), int32(newSt)); swapped { - c.notifySystemd(newSt) + c.notifySystemd(ctx, newSt) c.metricsCollector.State().SetHealth(int32(newSt)) } return } -func (c *cfg) 
swapHealthStatus(st control.HealthStatus) (old control.HealthStatus) { +func (c *cfg) swapHealthStatus(ctx context.Context, st control.HealthStatus) (old control.HealthStatus) { old = control.HealthStatus(c.healthStatus.Swap(int32(st))) - c.notifySystemd(st) + c.notifySystemd(ctx, st) c.metricsCollector.State().SetHealth(int32(st)) return } @@ -97,7 +97,7 @@ func (c *cfg) HealthStatus() control.HealthStatus { return control.HealthStatus(c.healthStatus.Load()) } -func (c *cfg) notifySystemd(st control.HealthStatus) { +func (c *cfg) notifySystemd(ctx context.Context, st control.HealthStatus) { if !c.sdNotify { return } @@ -113,6 +113,6 @@ func (c *cfg) notifySystemd(st control.HealthStatus) { err = sdnotify.Status(fmt.Sprintf("%v", st)) } if err != nil { - c.log.Error(context.Background(), logs.FailedToReportStatusToSystemd, zap.Error(err)) + c.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err)) } } diff --git a/cmd/frostfs-node/grpc.go b/cmd/frostfs-node/grpc.go index 271810ee6..6105be861 100644 --- a/cmd/frostfs-node/grpc.go +++ b/cmd/frostfs-node/grpc.go @@ -19,11 +19,11 @@ import ( const maxRecvMsgSize = 256 << 20 -func initGRPC(c *cfg) { +func initGRPC(ctx context.Context, c *cfg) { var endpointsToReconnect []string var successCount int grpcconfig.IterateEndpoints(c.appCfg, func(sc *grpcconfig.Config) { - serverOpts, ok := getGrpcServerOpts(c, sc) + serverOpts, ok := getGrpcServerOpts(ctx, c, sc) if !ok { return } @@ -31,7 +31,7 @@ func initGRPC(c *cfg) { lis, err := net.Listen("tcp", sc.Endpoint()) if err != nil { c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(sc.Endpoint()) - c.log.Error(context.Background(), logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err)) + c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err)) endpointsToReconnect = append(endpointsToReconnect, sc.Endpoint()) return } @@ -40,7 +40,7 @@ func initGRPC(c *cfg) { srv := grpc.NewServer(serverOpts...) 
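
Each gRPC listener registers a shutdown hook that calls stopGRPC, and the later hunk in this file shows why stopGRPC is careful: GracefulStop() may freeze forever (see the #1270 reference), so it is raced against a one-minute timer before falling back to a hard Stop(). A condensed sketch of that guard, assuming only the google.golang.org/grpc dependency the node already uses:

package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc"
)

// stopWithTimeout mirrors stopGRPC: attempt a graceful drain, but force
// the server down if draining takes longer than a minute.
func stopWithTimeout(s *grpc.Server) {
	done := make(chan struct{})
	go func() {
		s.GracefulStop() // may never return, see #1270
		close(done)
	}()

	select {
	case <-done:
	case <-time.After(1 * time.Minute):
		fmt.Println("cannot shut down gracefully, forcing stop")
		s.Stop()
	}
}

func main() {
	stopWithTimeout(grpc.NewServer())
}
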
c.onShutdown(func() { - stopGRPC("FrostFS Public API", srv, c.log) + stopGRPC(ctx, "FrostFS Public API", srv, c.log) }) c.cfgGRPC.append(sc.Endpoint(), lis, srv) @@ -53,11 +53,11 @@ func initGRPC(c *cfg) { c.cfgGRPC.reconnectTimeout = grpcconfig.ReconnectTimeout(c.appCfg) for _, endpoint := range endpointsToReconnect { - scheduleReconnect(endpoint, c) + scheduleReconnect(ctx, endpoint, c) } } -func scheduleReconnect(endpoint string, c *cfg) { +func scheduleReconnect(ctx context.Context, endpoint string, c *cfg) { c.wg.Add(1) go func() { defer c.wg.Done() @@ -66,7 +66,7 @@ func scheduleReconnect(endpoint string, c *cfg) { for { select { case <-t.C: - if tryReconnect(endpoint, c) { + if tryReconnect(ctx, endpoint, c) { return } case <-c.done: @@ -76,20 +76,20 @@ func scheduleReconnect(endpoint string, c *cfg) { }() } -func tryReconnect(endpoint string, c *cfg) bool { - c.log.Info(context.Background(), logs.FrostFSNodeGRPCReconnecting, zap.String("endpoint", endpoint)) +func tryReconnect(ctx context.Context, endpoint string, c *cfg) bool { + c.log.Info(ctx, logs.FrostFSNodeGRPCReconnecting, zap.String("endpoint", endpoint)) - serverOpts, found := getGRPCEndpointOpts(endpoint, c) + serverOpts, found := getGRPCEndpointOpts(ctx, endpoint, c) if !found { - c.log.Warn(context.Background(), logs.FrostFSNodeGRPCServerConfigNotFound, zap.String("endpoint", endpoint)) + c.log.Warn(ctx, logs.FrostFSNodeGRPCServerConfigNotFound, zap.String("endpoint", endpoint)) return true } lis, err := net.Listen("tcp", endpoint) if err != nil { c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(endpoint) - c.log.Error(context.Background(), logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err)) - c.log.Warn(context.Background(), logs.FrostFSNodeGRPCReconnectFailed, zap.Duration("next_try_in", c.cfgGRPC.reconnectTimeout)) + c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err)) + c.log.Warn(ctx, logs.FrostFSNodeGRPCReconnectFailed, zap.Duration("next_try_in", c.cfgGRPC.reconnectTimeout)) return false } c.metricsCollector.GrpcServerMetrics().MarkHealthy(endpoint) @@ -97,16 +97,16 @@ func tryReconnect(endpoint string, c *cfg) bool { srv := grpc.NewServer(serverOpts...) 
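
scheduleReconnect and tryReconnect above form a retry loop: a ticker drives reconnect attempts for a failed endpoint until one succeeds or the node shuts down, and both now carry ctx so the attempt logs inherit it. A simplified sketch of that loop (stubbed tryReconnect, with a fixed one-second interval standing in for cfgGRPC.reconnectTimeout):

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// tryReconnect stands in for the real listener setup (net.Listen plus
// grpc.NewServer); it reports whether the endpoint came back up.
func tryReconnect(ctx context.Context, endpoint string) bool {
	fmt.Println("reconnecting", endpoint)
	return true
}

// scheduleReconnect retries on every tick and stops either on success
// or when the done channel (node shutdown) is closed.
func scheduleReconnect(ctx context.Context, wg *sync.WaitGroup, done <-chan struct{}, endpoint string) {
	wg.Add(1)
	go func() {
		defer wg.Done()
		t := time.NewTicker(time.Second)
		defer t.Stop()
		for {
			select {
			case <-t.C:
				if tryReconnect(ctx, endpoint) {
					return
				}
			case <-done:
				return
			}
		}
	}()
}

func main() {
	var wg sync.WaitGroup
	done := make(chan struct{})
	scheduleReconnect(context.Background(), &wg, done, "127.0.0.1:8080")
	wg.Wait()
	close(done)
}
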
c.onShutdown(func() { - stopGRPC("FrostFS Public API", srv, c.log) + stopGRPC(ctx, "FrostFS Public API", srv, c.log) }) c.cfgGRPC.appendAndHandle(endpoint, lis, srv) - c.log.Info(context.Background(), logs.FrostFSNodeGRPCReconnectedSuccessfully, zap.String("endpoint", endpoint)) + c.log.Info(ctx, logs.FrostFSNodeGRPCReconnectedSuccessfully, zap.String("endpoint", endpoint)) return true } -func getGRPCEndpointOpts(endpoint string, c *cfg) (result []grpc.ServerOption, found bool) { +func getGRPCEndpointOpts(ctx context.Context, endpoint string, c *cfg) (result []grpc.ServerOption, found bool) { unlock := c.LockAppConfigShared() defer unlock() grpcconfig.IterateEndpoints(c.appCfg, func(sc *grpcconfig.Config) { @@ -117,7 +117,7 @@ func getGRPCEndpointOpts(endpoint string, c *cfg) (result []grpc.ServerOption, f return } var ok bool - result, ok = getGrpcServerOpts(c, sc) + result, ok = getGrpcServerOpts(ctx, c, sc) if !ok { return } @@ -126,7 +126,7 @@ func getGRPCEndpointOpts(endpoint string, c *cfg) (result []grpc.ServerOption, f return } -func getGrpcServerOpts(c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool) { +func getGrpcServerOpts(ctx context.Context, c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool) { serverOpts := []grpc.ServerOption{ grpc.MaxRecvMsgSize(maxRecvMsgSize), grpc.ChainUnaryInterceptor( @@ -144,7 +144,7 @@ func getGrpcServerOpts(c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool if tlsCfg != nil { cert, err := tls.LoadX509KeyPair(tlsCfg.CertificateFile(), tlsCfg.KeyFile()) if err != nil { - c.log.Error(context.Background(), logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err)) + c.log.Error(ctx, logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err)) return nil, false } @@ -175,38 +175,38 @@ func getGrpcServerOpts(c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool return serverOpts, true } -func serveGRPC(c *cfg) { +func serveGRPC(ctx context.Context, c *cfg) { c.cfgGRPC.performAndSave(func(e string, l net.Listener, s *grpc.Server) { c.wg.Add(1) go func() { defer func() { - c.log.Info(context.Background(), logs.FrostFSNodeStopListeningGRPCEndpoint, + c.log.Info(ctx, logs.FrostFSNodeStopListeningGRPCEndpoint, zap.Stringer("endpoint", l.Addr()), ) c.wg.Done() }() - c.log.Info(context.Background(), logs.FrostFSNodeStartListeningEndpoint, + c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint, zap.String("service", "gRPC"), zap.Stringer("endpoint", l.Addr()), ) if err := s.Serve(l); err != nil { c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(e) - c.log.Error(context.Background(), logs.FrostFSNodeGRPCServerError, zap.Error(err)) + c.log.Error(ctx, logs.FrostFSNodeGRPCServerError, zap.Error(err)) c.cfgGRPC.dropConnection(e) - scheduleReconnect(e, c) + scheduleReconnect(ctx, e, c) } }() }) } -func stopGRPC(name string, s *grpc.Server, l *logger.Logger) { +func stopGRPC(ctx context.Context, name string, s *grpc.Server, l *logger.Logger) { l = l.With(zap.String("name", name)) - l.Info(context.Background(), logs.FrostFSNodeStoppingGRPCServer) + l.Info(ctx, logs.FrostFSNodeStoppingGRPCServer) // GracefulStop() may freeze forever, see #1270 done := make(chan struct{}) @@ -218,9 +218,9 @@ func stopGRPC(name string, s *grpc.Server, l *logger.Logger) { select { case <-done: case <-time.After(1 * time.Minute): - l.Info(context.Background(), logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop) + l.Info(ctx, logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop) s.Stop() } - l.Info(context.Background(), 
logs.FrostFSNodeGRPCServerStoppedSuccessfully) + l.Info(ctx, logs.FrostFSNodeGRPCServerStoppedSuccessfully) } diff --git a/cmd/frostfs-node/httpcomponent.go b/cmd/frostfs-node/httpcomponent.go index a699a72a1..7346206ef 100644 --- a/cmd/frostfs-node/httpcomponent.go +++ b/cmd/frostfs-node/httpcomponent.go @@ -20,9 +20,9 @@ type httpComponent struct { preReload func(c *cfg) } -func (cmp *httpComponent) init(c *cfg) { +func (cmp *httpComponent) init(ctx context.Context, c *cfg) { if !cmp.enabled { - c.log.Info(context.Background(), cmp.name+" is disabled") + c.log.Info(ctx, cmp.name+" is disabled") return } // Init server with parameters @@ -39,14 +39,14 @@ func (cmp *httpComponent) init(c *cfg) { go func() { defer c.wg.Done() - c.log.Info(context.Background(), logs.FrostFSNodeStartListeningEndpoint, + c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint, zap.String("service", cmp.name), zap.String("endpoint", cmp.address)) fatalOnErr(srv.Serve()) }() c.closers = append(c.closers, closer{ cmp.name, - func() { stopAndLog(c, cmp.name, srv.Shutdown) }, + func() { stopAndLog(ctx, c, cmp.name, srv.Shutdown) }, }) } @@ -62,7 +62,7 @@ func (cmp *httpComponent) reload(ctx context.Context) error { // Cleanup delCloser(cmp.cfg, cmp.name) // Init server with new parameters - cmp.init(cmp.cfg) + cmp.init(ctx, cmp.cfg) // Start worker if cmp.enabled { startWorker(ctx, cmp.cfg, *getWorker(cmp.cfg, cmp.name)) diff --git a/cmd/frostfs-node/main.go b/cmd/frostfs-node/main.go index cd42d5f1d..f8854ab3c 100644 --- a/cmd/frostfs-node/main.go +++ b/cmd/frostfs-node/main.go @@ -61,21 +61,21 @@ func main() { var ctx context.Context ctx, c.ctxCancel = context.WithCancel(context.Background()) - c.setHealthStatus(control.HealthStatus_STARTING) + c.setHealthStatus(ctx, control.HealthStatus_STARTING) initApp(ctx, c) bootUp(ctx, c) - c.compareAndSwapHealthStatus(control.HealthStatus_STARTING, control.HealthStatus_READY) + c.compareAndSwapHealthStatus(ctx, control.HealthStatus_STARTING, control.HealthStatus_READY) wait(c) } -func initAndLog(c *cfg, name string, initializer func(*cfg)) { - c.log.Info(context.Background(), fmt.Sprintf("initializing %s service...", name)) +func initAndLog(ctx context.Context, c *cfg, name string, initializer func(*cfg)) { + c.log.Info(ctx, fmt.Sprintf("initializing %s service...", name)) initializer(c) - c.log.Info(context.Background(), name+" service has been successfully initialized") + c.log.Info(ctx, name+" service has been successfully initialized") } func initApp(ctx context.Context, c *cfg) { @@ -85,38 +85,38 @@ func initApp(ctx context.Context, c *cfg) { c.wg.Done() }() - setRuntimeParameters(c) + setRuntimeParameters(ctx, c) metrics, _ := metricsComponent(c) - initAndLog(c, "profiler", initProfilerService) - initAndLog(c, metrics.name, metrics.init) + initAndLog(ctx, c, "profiler", func(c *cfg) { initProfilerService(ctx, c) }) + initAndLog(ctx, c, metrics.name, func(c *cfg) { metrics.init(ctx, c) }) - initAndLog(c, "tracing", func(c *cfg) { initTracing(ctx, c) }) + initAndLog(ctx, c, "tracing", func(c *cfg) { initTracing(ctx, c) }) initLocalStorage(ctx, c) - initAndLog(c, "storage engine", func(c *cfg) { + initAndLog(ctx, c, "storage engine", func(c *cfg) { fatalOnErr(c.cfgObject.cfgLocalStorage.localStorage.Open(ctx)) fatalOnErr(c.cfgObject.cfgLocalStorage.localStorage.Init(ctx)) }) - initAndLog(c, "gRPC", initGRPC) - initAndLog(c, "netmap", func(c *cfg) { initNetmapService(ctx, c) }) + initAndLog(ctx, c, "gRPC", func(c *cfg) { initGRPC(ctx, c) }) + initAndLog(ctx, c, "netmap", 
func(c *cfg) { initNetmapService(ctx, c) }) initAccessPolicyEngine(ctx, c) - initAndLog(c, "access policy engine", func(c *cfg) { + initAndLog(ctx, c, "access policy engine", func(c *cfg) { fatalOnErr(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalOverrideDatabaseCore().Open(ctx)) fatalOnErr(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalOverrideDatabaseCore().Init()) }) - initAndLog(c, "accounting", func(c *cfg) { initAccountingService(ctx, c) }) - initAndLog(c, "container", func(c *cfg) { initContainerService(ctx, c) }) - initAndLog(c, "session", initSessionService) - initAndLog(c, "object", initObjectService) - initAndLog(c, "tree", initTreeService) - initAndLog(c, "apemanager", initAPEManagerService) - initAndLog(c, "control", initControlService) + initAndLog(ctx, c, "accounting", func(c *cfg) { initAccountingService(ctx, c) }) + initAndLog(ctx, c, "container", func(c *cfg) { initContainerService(ctx, c) }) + initAndLog(ctx, c, "session", initSessionService) + initAndLog(ctx, c, "object", initObjectService) + initAndLog(ctx, c, "tree", initTreeService) + initAndLog(ctx, c, "apemanager", initAPEManagerService) + initAndLog(ctx, c, "control", func(c *cfg) { initControlService(ctx, c) }) - initAndLog(c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) }) + initAndLog(ctx, c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) }) } func runAndLog(ctx context.Context, c *cfg, name string, logSuccess bool, starter func(context.Context, *cfg)) { @@ -128,24 +128,24 @@ func runAndLog(ctx context.Context, c *cfg, name string, logSuccess bool, starte } } -func stopAndLog(c *cfg, name string, stopper func() error) { - c.log.Debug(context.Background(), fmt.Sprintf("shutting down %s service", name)) +func stopAndLog(ctx context.Context, c *cfg, name string, stopper func(context.Context) error) { + c.log.Debug(ctx, fmt.Sprintf("shutting down %s service", name)) - err := stopper() + err := stopper(ctx) if err != nil { - c.log.Debug(context.Background(), fmt.Sprintf("could not shutdown %s server", name), + c.log.Debug(ctx, fmt.Sprintf("could not shutdown %s server", name), zap.String("error", err.Error()), ) } - c.log.Debug(context.Background(), name+" service has been stopped") + c.log.Debug(ctx, name+" service has been stopped") } func bootUp(ctx context.Context, c *cfg) { - runAndLog(ctx, c, "gRPC", false, func(_ context.Context, c *cfg) { serveGRPC(c) }) + runAndLog(ctx, c, "gRPC", false, func(_ context.Context, c *cfg) { serveGRPC(ctx, c) }) runAndLog(ctx, c, "notary", true, makeAndWaitNotaryDeposit) - bootstrapNode(c) + bootstrapNode(ctx, c) startWorkers(ctx, c) } diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go index 3e010b181..ca9f4fe3e 100644 --- a/cmd/frostfs-node/morph.go +++ b/cmd/frostfs-node/morph.go @@ -129,7 +129,7 @@ func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) { return } - tx, vub, err := makeNotaryDeposit(c) + tx, vub, err := makeNotaryDeposit(ctx, c) fatalOnErr(err) if tx.Equals(util.Uint256{}) { @@ -144,7 +144,7 @@ func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) { fatalOnErr(err) } -func makeNotaryDeposit(c *cfg) (util.Uint256, uint32, error) { +func makeNotaryDeposit(ctx context.Context, c *cfg) (util.Uint256, uint32, error) { const ( // gasMultiplier defines how many times more the notary // balance must be compared to the GAS balance of the node: @@ -161,7 +161,7 @@ func makeNotaryDeposit(c *cfg) (util.Uint256, uint32, error) { return util.Uint256{}, 0, fmt.Errorf("could not 
calculate notary deposit: %w", err) } - return c.cfgMorph.client.DepositEndlessNotary(depositAmount) + return c.cfgMorph.client.DepositEndlessNotary(ctx, depositAmount) } var ( @@ -256,7 +256,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { registerNotificationHandlers(c.cfgNetmap.scriptHash, lis, c.cfgNetmap.parsers, c.cfgNetmap.subscribers) registerNotificationHandlers(c.cfgContainer.scriptHash, lis, c.cfgContainer.parsers, c.cfgContainer.subscribers) - registerBlockHandler(lis, func(block *block.Block) { + registerBlockHandler(lis, func(ctx context.Context, block *block.Block) { c.log.Debug(ctx, logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index)) err = c.persistate.SetUInt32(persistateSideChainLastBlockKey, block.Index) diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go index c8909e608..a73da4ca8 100644 --- a/cmd/frostfs-node/netmap.go +++ b/cmd/frostfs-node/netmap.go @@ -145,7 +145,7 @@ func initNetmapService(ctx context.Context, c *cfg) { c.initMorphComponents(ctx) - initNetmapState(c) + initNetmapState(ctx, c) server := netmapTransportGRPC.New( netmapService.NewSignService( @@ -179,20 +179,20 @@ func addNewEpochNotificationHandlers(c *cfg) { addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, ev event.Event) { e := ev.(netmapEvent.NewEpoch).EpochNumber() - c.updateContractNodeInfo(e) + c.updateContractNodeInfo(ctx, e) if !c.needBootstrap() || c.cfgNetmap.reBoostrapTurnedOff.Load() { // fixes #470 return } - if err := c.bootstrap(); err != nil { + if err := c.bootstrap(ctx); err != nil { c.log.Warn(ctx, logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err)) } }) if c.cfgMorph.notaryEnabled { addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, _ event.Event) { - _, _, err := makeNotaryDeposit(c) + _, _, err := makeNotaryDeposit(ctx, c) if err != nil { c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit, zap.String("error", err.Error()), @@ -204,13 +204,13 @@ func addNewEpochNotificationHandlers(c *cfg) { // bootstrapNode adds current node to the Network map. // Must be called after initNetmapService. -func bootstrapNode(c *cfg) { +func bootstrapNode(ctx context.Context, c *cfg) { if c.needBootstrap() { if c.IsMaintenance() { - c.log.Info(context.Background(), logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap) + c.log.Info(ctx, logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap) return } - err := c.bootstrap() + err := c.bootstrap(ctx) fatalOnErrDetails("bootstrap error", err) } } @@ -237,17 +237,17 @@ func setNetmapNotificationParser(c *cfg, sTyp string, p event.NotificationParser // initNetmapState inits current Network map state. // Must be called after Morph components initialization. 
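
Note that registerBlockHandler's callback changes shape here: block handlers now receive the listener's context alongside the block, so per-block debug logging is tied to the subscription's lifetime. A toy version of that wiring (hypothetical registerBlockHandler and a pared-down Block type, for illustration only):

package main

import (
	"context"
	"fmt"
)

type Block struct{ Index uint32 }

// BlockHandler now takes a context first, matching the patch.
type BlockHandler func(ctx context.Context, b *Block)

func registerBlockHandler(handlers *[]BlockHandler, h BlockHandler) {
	*handlers = append(*handlers, h)
}

func main() {
	var handlers []BlockHandler
	registerBlockHandler(&handlers, func(ctx context.Context, b *Block) {
		fmt.Println("new block", b.Index) // c.log.Debug(ctx, ...) in the real code
	})

	ctx := context.Background()
	for _, h := range handlers {
		h(ctx, &Block{Index: 42})
	}
}
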
-func initNetmapState(c *cfg) { +func initNetmapState(ctx context.Context, c *cfg) { epoch, err := c.cfgNetmap.wrapper.Epoch() fatalOnErrDetails("could not initialize current epoch number", err) var ni *netmapSDK.NodeInfo - ni, err = c.netmapInitLocalNodeState(epoch) + ni, err = c.netmapInitLocalNodeState(ctx, epoch) fatalOnErrDetails("could not init network state", err) stateWord := nodeState(ni) - c.log.Info(context.Background(), logs.FrostFSNodeInitialNetworkState, + c.log.Info(ctx, logs.FrostFSNodeInitialNetworkState, zap.Uint64("epoch", epoch), zap.String("state", stateWord), ) @@ -276,7 +276,7 @@ func nodeState(ni *netmapSDK.NodeInfo) string { return "undefined" } -func (c *cfg) netmapInitLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error) { +func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) { nmNodes, err := c.cfgNetmap.wrapper.GetCandidates() if err != nil { return nil, err @@ -304,7 +304,7 @@ func (c *cfg) netmapInitLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error if nmState != candidateState { // This happens when the node was switched to maintenance without epoch tick. // We expect it to continue staying in maintenance. - c.log.Info(context.Background(), logs.CandidateStatusPriority, + c.log.Info(ctx, logs.CandidateStatusPriority, zap.String("netmap", nmState), zap.String("candidate", candidateState)) } @@ -350,16 +350,16 @@ func addNewEpochAsyncNotificationHandler(c *cfg, h event.Handler) { var errRelayBootstrap = errors.New("setting netmap status is forbidden in relay mode") -func (c *cfg) SetNetmapStatus(st control.NetmapStatus) error { +func (c *cfg) SetNetmapStatus(ctx context.Context, st control.NetmapStatus) error { switch st { default: return fmt.Errorf("unsupported status %v", st) case control.NetmapStatus_MAINTENANCE: - return c.setMaintenanceStatus(false) + return c.setMaintenanceStatus(ctx, false) case control.NetmapStatus_ONLINE, control.NetmapStatus_OFFLINE: } - c.stopMaintenance() + c.stopMaintenance(ctx) if !c.needBootstrap() { return errRelayBootstrap @@ -367,12 +367,12 @@ func (c *cfg) SetNetmapStatus(st control.NetmapStatus) error { if st == control.NetmapStatus_ONLINE { c.cfgNetmap.reBoostrapTurnedOff.Store(false) - return bootstrapOnline(c) + return bootstrapOnline(ctx, c) } c.cfgNetmap.reBoostrapTurnedOff.Store(true) - return c.updateNetMapState(func(*nmClient.UpdatePeerPrm) {}) + return c.updateNetMapState(ctx, func(*nmClient.UpdatePeerPrm) {}) } func (c *cfg) GetNetmapStatus() (control.NetmapStatus, uint64, error) { @@ -384,11 +384,11 @@ func (c *cfg) GetNetmapStatus() (control.NetmapStatus, uint64, error) { return st, epoch, nil } -func (c *cfg) ForceMaintenance() error { - return c.setMaintenanceStatus(true) +func (c *cfg) ForceMaintenance(ctx context.Context) error { + return c.setMaintenanceStatus(ctx, true) } -func (c *cfg) setMaintenanceStatus(force bool) error { +func (c *cfg) setMaintenanceStatus(ctx context.Context, force bool) error { netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration() if err != nil { err = fmt.Errorf("read network settings to check maintenance allowance: %w", err) @@ -397,10 +397,10 @@ func (c *cfg) setMaintenanceStatus(force bool) error { } if err == nil || force { - c.startMaintenance() + c.startMaintenance(ctx) if err == nil { - err = c.updateNetMapState((*nmClient.UpdatePeerPrm).SetMaintenance) + err = c.updateNetMapState(ctx, (*nmClient.UpdatePeerPrm).SetMaintenance) } if err != nil { @@ -413,12 +413,12 @@ func (c *cfg) 
setMaintenanceStatus(force bool) error { // calls UpdatePeerState operation of Netmap contract's client for the local node. // State setter is used to specify node state to switch to. -func (c *cfg) updateNetMapState(stateSetter func(*nmClient.UpdatePeerPrm)) error { +func (c *cfg) updateNetMapState(ctx context.Context, stateSetter func(*nmClient.UpdatePeerPrm)) error { var prm nmClient.UpdatePeerPrm prm.SetKey(c.key.PublicKey().Bytes()) stateSetter(&prm) - _, err := c.cfgNetmap.wrapper.UpdatePeerState(prm) + _, err := c.cfgNetmap.wrapper.UpdatePeerState(ctx, prm) return err } diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index fd62d29bc..f6cce057c 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -71,11 +71,11 @@ func (c *cfg) MaxObjectSize() uint64 { return sz } -func (s *objectSvc) Put() (objectService.PutObjectStream, error) { +func (s *objectSvc) Put(_ context.Context) (objectService.PutObjectStream, error) { return s.put.Put() } -func (s *objectSvc) Patch() (objectService.PatchObjectStream, error) { +func (s *objectSvc) Patch(_ context.Context) (objectService.PatchObjectStream, error) { return s.patch.Patch() } diff --git a/cmd/frostfs-node/pprof.go b/cmd/frostfs-node/pprof.go index dcd320146..5b40c8a88 100644 --- a/cmd/frostfs-node/pprof.go +++ b/cmd/frostfs-node/pprof.go @@ -1,17 +1,18 @@ package main import ( + "context" "runtime" profilerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/profiler" httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http" ) -func initProfilerService(c *cfg) { +func initProfilerService(ctx context.Context, c *cfg) { tuneProfilers(c) pprof, _ := pprofComponent(c) - pprof.init(c) + pprof.init(ctx, c) } func pprofComponent(c *cfg) (*httpComponent, bool) { diff --git a/cmd/frostfs-node/runtime.go b/cmd/frostfs-node/runtime.go index 0b1000e70..f6d398574 100644 --- a/cmd/frostfs-node/runtime.go +++ b/cmd/frostfs-node/runtime.go @@ -10,17 +10,17 @@ import ( "go.uber.org/zap" ) -func setRuntimeParameters(c *cfg) { +func setRuntimeParameters(ctx context.Context, c *cfg) { if len(os.Getenv("GOMEMLIMIT")) != 0 { // default limit < yaml limit < app env limit < GOMEMLIMIT - c.log.Warn(context.Background(), logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT) + c.log.Warn(ctx, logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT) return } memLimitBytes := runtime.GCMemoryLimitBytes(c.appCfg) previous := debug.SetMemoryLimit(memLimitBytes) if memLimitBytes != previous { - c.log.Info(context.Background(), logs.RuntimeSoftMemoryLimitUpdated, + c.log.Info(ctx, logs.RuntimeSoftMemoryLimitUpdated, zap.Int64("new_value", memLimitBytes), zap.Int64("old_value", previous)) } diff --git a/internal/audit/request.go b/internal/audit/request.go index dd1eb365e..f101cf06f 100644 --- a/internal/audit/request.go +++ b/internal/audit/request.go @@ -19,15 +19,15 @@ type Target interface { String() string } -func LogRequest(log *logger.Logger, operation string, req Request, target Target, status bool) { +func LogRequest(ctx context.Context, log *logger.Logger, operation string, req Request, target Target, status bool) { var key []byte if req != nil { key = req.GetVerificationHeader().GetBodySignature().GetKey() } - LogRequestWithKey(log, operation, key, target, status) + LogRequestWithKey(ctx, log, operation, key, target, status) } -func LogRequestWithKey(log *logger.Logger, operation string, key []byte, target Target, status bool) { +func LogRequestWithKey(ctx context.Context, log *logger.Logger, operation string, 
key []byte, target Target, status bool) { object, subject := NotDefined, NotDefined publicKey := crypto.UnmarshalPublicKey(key) @@ -39,7 +39,7 @@ func LogRequestWithKey(log *logger.Logger, operation string, key []byte, target object = target.String() } - log.Info(context.Background(), logs.AuditEventLogRecord, + log.Info(ctx, logs.AuditEventLogRecord, zap.String("operation", operation), zap.String("object", object), zap.String("subject", subject), diff --git a/pkg/core/object/fmt.go b/pkg/core/object/fmt.go index 317d62cb0..368217020 100644 --- a/pkg/core/object/fmt.go +++ b/pkg/core/object/fmt.go @@ -117,7 +117,7 @@ func (v *FormatValidator) Validate(ctx context.Context, obj *objectSDK.Object, u } if !unprepared { - if err := v.validateSignatureKey(obj); err != nil { + if err := v.validateSignatureKey(ctx, obj); err != nil { return fmt.Errorf("(%T) could not validate signature key: %w", v, err) } @@ -134,7 +134,7 @@ func (v *FormatValidator) Validate(ctx context.Context, obj *objectSDK.Object, u return nil } -func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error { +func (v *FormatValidator) validateSignatureKey(ctx context.Context, obj *objectSDK.Object) error { sig := obj.Signature() if sig == nil { return errMissingSignature @@ -156,7 +156,7 @@ func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error { ownerID := obj.OwnerID() if token == nil && obj.ECHeader() != nil { - role, err := v.isIROrContainerNode(obj, binKey) + role, err := v.isIROrContainerNode(ctx, obj, binKey) if err != nil { return err } @@ -172,7 +172,7 @@ func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error { } if v.verifyTokenIssuer { - role, err := v.isIROrContainerNode(obj, binKey) + role, err := v.isIROrContainerNode(ctx, obj, binKey) if err != nil { return err } @@ -190,7 +190,7 @@ func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error { return nil } -func (v *FormatValidator) isIROrContainerNode(obj *objectSDK.Object, signerKey []byte) (acl.Role, error) { +func (v *FormatValidator) isIROrContainerNode(ctx context.Context, obj *objectSDK.Object, signerKey []byte) (acl.Role, error) { cnrID, containerIDSet := obj.ContainerID() if !containerIDSet { return acl.RoleOthers, errNilCID @@ -204,7 +204,7 @@ func (v *FormatValidator) isIROrContainerNode(obj *objectSDK.Object, signerKey [ return acl.RoleOthers, fmt.Errorf("failed to get container (id=%s): %w", cnrID.EncodeToString(), err) } - res, err := v.senderClassifier.IsInnerRingOrContainerNode(signerKey, cnrID, cnr.Value) + res, err := v.senderClassifier.IsInnerRingOrContainerNode(ctx, signerKey, cnrID, cnr.Value) if err != nil { return acl.RoleOthers, err } diff --git a/pkg/core/object/sender_classifier.go b/pkg/core/object/sender_classifier.go index ed438c0b9..3b3650134 100644 --- a/pkg/core/object/sender_classifier.go +++ b/pkg/core/object/sender_classifier.go @@ -41,6 +41,7 @@ type ClassifyResult struct { } func (c SenderClassifier) Classify( + ctx context.Context, ownerID *user.ID, ownerKey *keys.PublicKey, idCnr cid.ID, @@ -58,14 +59,14 @@ func (c SenderClassifier) Classify( }, nil } - return c.IsInnerRingOrContainerNode(ownerKeyInBytes, idCnr, cnr) + return c.IsInnerRingOrContainerNode(ctx, ownerKeyInBytes, idCnr, cnr) } -func (c SenderClassifier) IsInnerRingOrContainerNode(ownerKeyInBytes []byte, idCnr cid.ID, cnr container.Container) (*ClassifyResult, error) { +func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerKeyInBytes []byte, idCnr cid.ID, cnr 
container.Container) (*ClassifyResult, error) { isInnerRingNode, err := c.isInnerRingKey(ownerKeyInBytes) if err != nil { // do not throw error, try best case matching - c.log.Debug(context.Background(), logs.V2CantCheckIfRequestFromInnerRing, + c.log.Debug(ctx, logs.V2CantCheckIfRequestFromInnerRing, zap.String("error", err.Error())) } else if isInnerRingNode { return &ClassifyResult{ @@ -82,7 +83,7 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ownerKeyInBytes []byte, idC // error might happen if request has `RoleOther` key and placement // is not possible for previous epoch, so // do not throw error, try best case matching - c.log.Debug(context.Background(), logs.V2CantCheckIfRequestFromContainerNode, + c.log.Debug(ctx, logs.V2CantCheckIfRequestFromContainerNode, zap.String("error", err.Error())) } else if isContainerNode { return &ClassifyResult{ diff --git a/pkg/innerring/blocktimer.go b/pkg/innerring/blocktimer.go index f84a3a703..3624556c2 100644 --- a/pkg/innerring/blocktimer.go +++ b/pkg/innerring/blocktimer.go @@ -20,13 +20,13 @@ type ( } alphaState interface { - IsAlphabet() bool + IsAlphabet(context.Context) bool } newEpochHandler func() containerEstimationStopper interface { - StopEstimation(p container.StopEstimationPrm) error + StopEstimation(ctx context.Context, p container.StopEstimationPrm) error } epochTimerArgs struct { @@ -49,7 +49,7 @@ type ( emitDuration uint32 // in blocks } - depositor func() (util.Uint256, error) + depositor func(context.Context) (util.Uint256, error) awaiter func(context.Context, util.Uint256) error ) @@ -73,7 +73,7 @@ func (s *Server) tickTimers(h uint32) { } } -func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer { +func newEpochTimer(ctx context.Context, args *epochTimerArgs) *timer.BlockTimer { epochTimer := timer.NewBlockTimer( func() (uint32, error) { return uint32(args.epoch.EpochDuration()), nil @@ -91,8 +91,8 @@ func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer { args.stopEstimationDMul, args.stopEstimationDDiv, func() { - if !args.alphabetState.IsAlphabet() { - args.l.Debug(context.Background(), logs.InnerringNonalphabetModeDoNotStopContainerEstimations) + if !args.alphabetState.IsAlphabet(ctx) { + args.l.Debug(ctx, logs.InnerringNonalphabetModeDoNotStopContainerEstimations) return } @@ -104,9 +104,9 @@ func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer { prm := container.StopEstimationPrm{} prm.SetEpoch(epochN - 1) - err := args.cnrWrapper.StopEstimation(prm) + err := args.cnrWrapper.StopEstimation(ctx, prm) if err != nil { - args.l.Warn(context.Background(), logs.InnerringCantStopEpochEstimation, + args.l.Warn(ctx, logs.InnerringCantStopEpochEstimation, zap.Uint64("epoch", epochN), zap.String("error", err.Error())) } @@ -115,11 +115,11 @@ func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer { return epochTimer } -func newEmissionTimer(args *emitTimerArgs) *timer.BlockTimer { +func newEmissionTimer(ctx context.Context, args *emitTimerArgs) *timer.BlockTimer { return timer.NewBlockTimer( timer.StaticBlockMeter(args.emitDuration), func() { - args.ap.HandleGasEmission(timerEvent.NewAlphabetEmitTick{}) + args.ap.HandleGasEmission(ctx, timerEvent.NewAlphabetEmitTick{}) }, ) } diff --git a/pkg/innerring/blocktimer_test.go b/pkg/innerring/blocktimer_test.go index 242c0903b..be1e9699d 100644 --- a/pkg/innerring/blocktimer_test.go +++ b/pkg/innerring/blocktimer_test.go @@ -1,6 +1,7 @@ package innerring import ( + "context" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" @@ 
-27,7 +28,7 @@ func TestEpochTimer(t *testing.T) { stopEstimationDMul: 2, stopEstimationDDiv: 10, } - et := newEpochTimer(args) + et := newEpochTimer(context.Background(), args) err := et.Reset() require.NoError(t, err, "failed to reset timer") @@ -88,7 +89,7 @@ type testAlphabetState struct { isAlphabet bool } -func (s *testAlphabetState) IsAlphabet() bool { +func (s *testAlphabetState) IsAlphabet(context.Context) bool { return s.isAlphabet } @@ -104,7 +105,7 @@ type testContainerEstStopper struct { called int } -func (s *testContainerEstStopper) StopEstimation(_ container.StopEstimationPrm) error { +func (s *testContainerEstStopper) StopEstimation(context.Context, container.StopEstimationPrm) error { s.called++ return nil } diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go index 8b387393a..e1c90ad21 100644 --- a/pkg/innerring/initialization.go +++ b/pkg/innerring/initialization.go @@ -35,7 +35,7 @@ import ( "google.golang.org/grpc" ) -func (s *Server) initNetmapProcessor(cfg *viper.Viper, +func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper, cnrClient *container.Client, alphaSync event.Handler, ) error { @@ -49,10 +49,13 @@ func (s *Server) initNetmapProcessor(cfg *viper.Viper, var netMapCandidateStateValidator statevalidation.NetMapCandidateValidator netMapCandidateStateValidator.SetNetworkSettings(netSettings) + poolSize := cfg.GetInt("workers.netmap") + s.log.Debug(ctx, logs.NetmapNetmapWorkerPool, zap.Int("size", poolSize)) + s.netmapProcessor, err = netmap.New(&netmap.Params{ Log: s.log, Metrics: s.irMetrics, - PoolSize: cfg.GetInt("workers.netmap"), + PoolSize: poolSize, NetmapClient: netmap.NewNetmapClient(s.netmapClient), EpochTimer: s, EpochState: s, @@ -199,7 +202,7 @@ func (s *Server) createIRFetcher() irFetcher { } func (s *Server) initTimers(ctx context.Context, cfg *viper.Viper, morphClients *serverMorphClients) { - s.epochTimer = newEpochTimer(&epochTimerArgs{ + s.epochTimer = newEpochTimer(ctx, &epochTimerArgs{ l: s.log, alphabetState: s, newEpochHandlers: s.newEpochTickHandlers(ctx), @@ -212,7 +215,7 @@ func (s *Server) initTimers(ctx context.Context, cfg *viper.Viper, morphClients s.addBlockTimer(s.epochTimer) // initialize emission timer - emissionTimer := newEmissionTimer(&emitTimerArgs{ + emissionTimer := newEmissionTimer(ctx, &emitTimerArgs{ ap: s.alphabetProcessor, emitDuration: cfg.GetUint32("timers.emit"), }) @@ -220,18 +223,20 @@ func (s *Server) initTimers(ctx context.Context, cfg *viper.Viper, morphClients s.addBlockTimer(emissionTimer) } -func (s *Server) initAlphabetProcessor(cfg *viper.Viper) error { +func (s *Server) initAlphabetProcessor(ctx context.Context, cfg *viper.Viper) error { parsedWallets, err := parseWalletAddressesFromStrings(cfg.GetStringSlice("emit.extra_wallets")) if err != nil { return err } + poolSize := cfg.GetInt("workers.alphabet") + s.log.Debug(ctx, logs.AlphabetAlphabetWorkerPool, zap.Int("size", poolSize)) // create alphabet processor s.alphabetProcessor, err = alphabet.New(&alphabet.Params{ ParsedWallets: parsedWallets, Log: s.log, Metrics: s.irMetrics, - PoolSize: cfg.GetInt("workers.alphabet"), + PoolSize: poolSize, AlphabetContracts: s.contracts.alphabet, NetmapClient: s.netmapClient, MorphClient: s.morphClient, @@ -246,12 +251,14 @@ func (s *Server) initAlphabetProcessor(cfg *viper.Viper) error { return err } -func (s *Server) initContainerProcessor(cfg *viper.Viper, cnrClient *container.Client, frostfsIDClient *frostfsid.Client) error { +func (s *Server) 
initContainerProcessor(ctx context.Context, cfg *viper.Viper, cnrClient *container.Client, frostfsIDClient *frostfsid.Client) error { + poolSize := cfg.GetInt("workers.container") + s.log.Debug(ctx, logs.ContainerContainerWorkerPool, zap.Int("size", poolSize)) // container processor containerProcessor, err := cont.New(&cont.Params{ Log: s.log, Metrics: s.irMetrics, - PoolSize: cfg.GetInt("workers.container"), + PoolSize: poolSize, AlphabetState: s, ContainerClient: cnrClient, MorphClient: cnrClient.Morph(), @@ -265,12 +272,14 @@ func (s *Server) initContainerProcessor(cfg *viper.Viper, cnrClient *container.C return bindMorphProcessor(containerProcessor, s) } -func (s *Server) initBalanceProcessor(cfg *viper.Viper, frostfsCli *frostfsClient.Client) error { +func (s *Server) initBalanceProcessor(ctx context.Context, cfg *viper.Viper, frostfsCli *frostfsClient.Client) error { + poolSize := cfg.GetInt("workers.balance") + s.log.Debug(ctx, logs.BalanceBalanceWorkerPool, zap.Int("size", poolSize)) // create balance processor balanceProcessor, err := balance.New(&balance.Params{ Log: s.log, Metrics: s.irMetrics, - PoolSize: cfg.GetInt("workers.balance"), + PoolSize: poolSize, FrostFSClient: frostfsCli, BalanceSC: s.contracts.balance, AlphabetState: s, @@ -283,15 +292,17 @@ func (s *Server) initBalanceProcessor(cfg *viper.Viper, frostfsCli *frostfsClien return bindMorphProcessor(balanceProcessor, s) } -func (s *Server) initFrostFSMainnetProcessor(cfg *viper.Viper) error { +func (s *Server) initFrostFSMainnetProcessor(ctx context.Context, cfg *viper.Viper) error { if s.withoutMainNet { return nil } + poolSize := cfg.GetInt("workers.frostfs") + s.log.Debug(ctx, logs.FrostFSFrostfsWorkerPool, zap.Int("size", poolSize)) frostfsProcessor, err := frostfs.New(&frostfs.Params{ Log: s.log, Metrics: s.irMetrics, - PoolSize: cfg.GetInt("workers.frostfs"), + PoolSize: poolSize, FrostFSContract: s.contracts.frostfs, BalanceClient: s.balanceClient, NetmapClient: s.netmapClient, @@ -311,10 +322,10 @@ func (s *Server) initFrostFSMainnetProcessor(cfg *viper.Viper) error { return bindMainnetProcessor(frostfsProcessor, s) } -func (s *Server) initGRPCServer(cfg *viper.Viper, log *logger.Logger, audit *atomic.Bool) error { +func (s *Server) initGRPCServer(ctx context.Context, cfg *viper.Viper, log *logger.Logger, audit *atomic.Bool) error { controlSvcEndpoint := cfg.GetString("control.grpc.endpoint") if controlSvcEndpoint == "" { - s.log.Info(context.Background(), logs.InnerringNoControlServerEndpointSpecified) + s.log.Info(ctx, logs.InnerringNoControlServerEndpointSpecified) return nil } @@ -410,7 +421,7 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) { return result, nil } -func (s *Server) initProcessors(cfg *viper.Viper, morphClients *serverMorphClients) error { +func (s *Server) initProcessors(ctx context.Context, cfg *viper.Viper, morphClients *serverMorphClients) error { irf := s.createIRFetcher() s.statusIndex = newInnerRingIndexer( @@ -425,27 +436,27 @@ func (s *Server) initProcessors(cfg *viper.Viper, morphClients *serverMorphClien return err } - err = s.initNetmapProcessor(cfg, morphClients.CnrClient, alphaSync) + err = s.initNetmapProcessor(ctx, cfg, morphClients.CnrClient, alphaSync) if err != nil { return err } - err = s.initContainerProcessor(cfg, morphClients.CnrClient, morphClients.FrostFSIDClient) + err = s.initContainerProcessor(ctx, cfg, morphClients.CnrClient, morphClients.FrostFSIDClient) if err != nil { return err } - err = s.initBalanceProcessor(cfg, 
morphClients.FrostFSClient) + err = s.initBalanceProcessor(ctx, cfg, morphClients.FrostFSClient) if err != nil { return err } - err = s.initFrostFSMainnetProcessor(cfg) + err = s.initFrostFSMainnetProcessor(ctx, cfg) if err != nil { return err } - err = s.initAlphabetProcessor(cfg) + err = s.initAlphabetProcessor(ctx, cfg) return err } diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go index 374aea891..44df883e8 100644 --- a/pkg/innerring/innerring.go +++ b/pkg/innerring/innerring.go @@ -157,7 +157,7 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) { return err } - if s.IsAlphabet() { + if s.IsAlphabet(ctx) { err = s.initMainNotary(ctx) if err != nil { return err @@ -217,14 +217,14 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) { } func (s *Server) registerMorphNewBlockEventHandler() { - s.morphListener.RegisterBlockHandler(func(b *block.Block) { - s.log.Debug(context.Background(), logs.InnerringNewBlock, + s.morphListener.RegisterBlockHandler(func(ctx context.Context, b *block.Block) { + s.log.Debug(ctx, logs.InnerringNewBlock, zap.Uint32("index", b.Index), ) err := s.persistate.SetUInt32(persistateSideChainLastBlockKey, b.Index) if err != nil { - s.log.Warn(context.Background(), logs.InnerringCantUpdatePersistentState, + s.log.Warn(ctx, logs.InnerringCantUpdatePersistentState, zap.String("chain", "side"), zap.Uint32("block_index", b.Index)) } @@ -235,10 +235,10 @@ func (s *Server) registerMorphNewBlockEventHandler() { func (s *Server) registerMainnetNewBlockEventHandler() { if !s.withoutMainNet { - s.mainnetListener.RegisterBlockHandler(func(b *block.Block) { + s.mainnetListener.RegisterBlockHandler(func(ctx context.Context, b *block.Block) { err := s.persistate.SetUInt32(persistateMainChainLastBlockKey, b.Index) if err != nil { - s.log.Warn(context.Background(), logs.InnerringCantUpdatePersistentState, + s.log.Warn(ctx, logs.InnerringCantUpdatePersistentState, zap.String("chain", "main"), zap.Uint32("block_index", b.Index)) } @@ -400,14 +400,14 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan return nil, err } - err = server.initProcessors(cfg, morphClients) + err = server.initProcessors(ctx, cfg, morphClients) if err != nil { return nil, err } server.initTimers(ctx, cfg, morphClients) - err = server.initGRPCServer(cfg, log, audit) + err = server.initGRPCServer(ctx, cfg, log, audit) if err != nil { return nil, err } @@ -604,7 +604,7 @@ func (s *Server) initConfigFromBlockchain(ctx context.Context) error { s.log.Debug(ctx, logs.InnerringReadConfigFromBlockchain, zap.Bool("active", s.IsActive(ctx)), - zap.Bool("alphabet", s.IsAlphabet()), + zap.Bool("alphabet", s.IsAlphabet(ctx)), zap.Uint64("epoch", epoch), zap.Uint32("precision", balancePrecision), zap.Uint32("init_epoch_tick_delta", s.initialEpochTickDelta), @@ -636,7 +636,7 @@ func (s *Server) nextEpochBlockDelta() (uint32, error) { // only if inner ring node is alphabet node. 
func (s *Server) onlyAlphabetEventHandler(f event.Handler) event.Handler { return func(ctx context.Context, ev event.Event) { - if s.IsAlphabet() { + if s.IsAlphabet(ctx) { f(ctx, ev) } } diff --git a/pkg/innerring/notary.go b/pkg/innerring/notary.go index dd3afa2c2..c8a69962f 100644 --- a/pkg/innerring/notary.go +++ b/pkg/innerring/notary.go @@ -28,37 +28,38 @@ const ( gasDivisor = 2 ) -func (s *Server) depositMainNotary() (tx util.Uint256, err error) { +func (s *Server) depositMainNotary(ctx context.Context) (tx util.Uint256, err error) { depositAmount, err := client.CalculateNotaryDepositAmount(s.mainnetClient, gasMultiplier, gasDivisor) if err != nil { return util.Uint256{}, fmt.Errorf("could not calculate main notary deposit amount: %w", err) } return s.mainnetClient.DepositNotary( + ctx, depositAmount, uint32(s.epochDuration.Load())+notaryExtraBlocks, ) } -func (s *Server) depositSideNotary() (util.Uint256, error) { +func (s *Server) depositSideNotary(ctx context.Context) (util.Uint256, error) { depositAmount, err := client.CalculateNotaryDepositAmount(s.morphClient, gasMultiplier, gasDivisor) if err != nil { return util.Uint256{}, fmt.Errorf("could not calculate side notary deposit amount: %w", err) } - tx, _, err := s.morphClient.DepositEndlessNotary(depositAmount) + tx, _, err := s.morphClient.DepositEndlessNotary(ctx, depositAmount) return tx, err } func (s *Server) notaryHandler(ctx context.Context, _ event.Event) { if !s.mainNotaryConfig.disabled { - _, err := s.depositMainNotary() + _, err := s.depositMainNotary(ctx) if err != nil { s.log.Error(ctx, logs.InnerringCantMakeNotaryDepositInMainChain, zap.Error(err)) } } - if _, err := s.depositSideNotary(); err != nil { + if _, err := s.depositSideNotary(ctx); err != nil { s.log.Error(ctx, logs.InnerringCantMakeNotaryDepositInSideChain, zap.Error(err)) } } @@ -72,7 +73,7 @@ func (s *Server) awaitSideNotaryDeposit(ctx context.Context, tx util.Uint256) er } func (s *Server) initNotary(ctx context.Context, deposit depositor, await awaiter, msg string) error { - tx, err := deposit() + tx, err := deposit(ctx) if err != nil { return err } diff --git a/pkg/innerring/processors/alphabet/handlers.go b/pkg/innerring/processors/alphabet/handlers.go index 0cc2a5f39..d6b653282 100644 --- a/pkg/innerring/processors/alphabet/handlers.go +++ b/pkg/innerring/processors/alphabet/handlers.go @@ -10,16 +10,16 @@ import ( "go.uber.org/zap" ) -func (ap *Processor) HandleGasEmission(ev event.Event) { +func (ap *Processor) HandleGasEmission(ctx context.Context, ev event.Event) { _ = ev.(timers.NewAlphabetEmitTick) - ap.log.Info(context.Background(), logs.AlphabetTick, zap.String("type", "alphabet gas emit")) + ap.log.Info(ctx, logs.AlphabetTick, zap.String("type", "alphabet gas emit")) // send event to the worker pool - err := processors.SubmitEvent(ap.pool, ap.metrics, "alphabet_emit_gas", ap.processEmit) + err := processors.SubmitEvent(ap.pool, ap.metrics, "alphabet_emit_gas", func() bool { return ap.processEmit(ctx) }) if err != nil { // there system can be moved into controlled degradation stage - ap.log.Warn(context.Background(), logs.AlphabetAlphabetProcessorWorkerPoolDrained, + ap.log.Warn(ctx, logs.AlphabetAlphabetProcessorWorkerPoolDrained, zap.Int("capacity", ap.pool.Cap())) } } diff --git a/pkg/innerring/processors/alphabet/handlers_test.go b/pkg/innerring/processors/alphabet/handlers_test.go index c7a004b54..036b8055c 100644 --- a/pkg/innerring/processors/alphabet/handlers_test.go +++ b/pkg/innerring/processors/alphabet/handlers_test.go @@ 
-1,6 +1,7 @@ package alphabet_test import ( + "context" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" @@ -60,7 +61,7 @@ func TestProcessorEmitsGasToNetmapAndAlphabet(t *testing.T) { processor, err := alphabet.New(params) require.NoError(t, err, "failed to create processor instance") - processor.HandleGasEmission(timers.NewAlphabetEmitTick{}) + processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{}) processor.WaitPoolRunning() @@ -137,7 +138,7 @@ func TestProcessorEmitsGasToNetmapIfNoParsedWallets(t *testing.T) { processor, err := alphabet.New(params) require.NoError(t, err, "failed to create processor instance") - processor.HandleGasEmission(timers.NewAlphabetEmitTick{}) + processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{}) processor.WaitPoolRunning() @@ -198,7 +199,7 @@ func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) { processor, err := alphabet.New(params) require.NoError(t, err, "failed to create processor instance") - processor.HandleGasEmission(timers.NewAlphabetEmitTick{}) + processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{}) processor.WaitPoolRunning() @@ -219,7 +220,7 @@ type testIndexer struct { index int } -func (i *testIndexer) AlphabetIndex() int { +func (i *testIndexer) AlphabetIndex(context.Context) int { return i.index } @@ -246,7 +247,7 @@ type testMorphClient struct { batchTransferedGas []batchTransferGas } -func (c *testMorphClient) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error) { +func (c *testMorphClient) Invoke(_ context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error) { c.invokedMethods = append(c.invokedMethods, invokedMethod{ contract: contract, diff --git a/pkg/innerring/processors/alphabet/process_emit.go b/pkg/innerring/processors/alphabet/process_emit.go index 142409631..229261250 100644 --- a/pkg/innerring/processors/alphabet/process_emit.go +++ b/pkg/innerring/processors/alphabet/process_emit.go @@ -14,39 +14,39 @@ import ( const emitMethod = "emit" -func (ap *Processor) processEmit() bool { - index := ap.irList.AlphabetIndex() +func (ap *Processor) processEmit(ctx context.Context) bool { + index := ap.irList.AlphabetIndex(ctx) if index < 0 { - ap.log.Info(context.Background(), logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent) + ap.log.Info(ctx, logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent) return true } contract, ok := ap.alphabetContracts.GetByIndex(index) if !ok { - ap.log.Debug(context.Background(), logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent, + ap.log.Debug(ctx, logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent, zap.Int("index", index)) return false } // there is no signature collecting, so we don't need extra fee - _, err := ap.morphClient.Invoke(contract, 0, emitMethod) + _, err := ap.morphClient.Invoke(ctx, contract, 0, emitMethod) if err != nil { - ap.log.Warn(context.Background(), logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error())) + ap.log.Warn(ctx, logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error())) return false } if ap.storageEmission == 0 { - ap.log.Info(context.Background(), logs.AlphabetStorageNodeEmissionIsOff) + ap.log.Info(ctx, logs.AlphabetStorageNodeEmissionIsOff) return true } networkMap, err := ap.netmapClient.NetMap() if err != nil { - ap.log.Warn(context.Background(), logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes, + 
ap.log.Warn(ctx, logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes, zap.String("error", err.Error())) return false @@ -59,7 +59,7 @@ func (ap *Processor) processEmit() bool { ap.pwLock.RUnlock() extraLen := len(pw) - ap.log.Debug(context.Background(), logs.AlphabetGasEmission, + ap.log.Debug(ctx, logs.AlphabetGasEmission, zap.Int("network_map", nmLen), zap.Int("extra_wallets", extraLen)) @@ -69,20 +69,20 @@ func (ap *Processor) processEmit() bool { gasPerNode := fixedn.Fixed8(ap.storageEmission / uint64(nmLen+extraLen)) - ap.transferGasToNetmapNodes(nmNodes, gasPerNode) + ap.transferGasToNetmapNodes(ctx, nmNodes, gasPerNode) - ap.transferGasToExtraNodes(pw, gasPerNode) + ap.transferGasToExtraNodes(ctx, pw, gasPerNode) return true } -func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerNode fixedn.Fixed8) { +func (ap *Processor) transferGasToNetmapNodes(ctx context.Context, nmNodes []netmap.NodeInfo, gasPerNode fixedn.Fixed8) { for i := range nmNodes { keyBytes := nmNodes[i].PublicKey() key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256()) if err != nil { - ap.log.Warn(context.Background(), logs.AlphabetCantParseNodePublicKey, + ap.log.Warn(ctx, logs.AlphabetCantParseNodePublicKey, zap.String("error", err.Error())) continue @@ -90,7 +90,7 @@ func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerN err = ap.morphClient.TransferGas(key.GetScriptHash(), gasPerNode) if err != nil { - ap.log.Warn(context.Background(), logs.AlphabetCantTransferGas, + ap.log.Warn(ctx, logs.AlphabetCantTransferGas, zap.String("receiver", key.Address()), zap.Int64("amount", int64(gasPerNode)), zap.String("error", err.Error()), @@ -99,7 +99,7 @@ func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerN } } -func (ap *Processor) transferGasToExtraNodes(pw []util.Uint160, gasPerNode fixedn.Fixed8) { +func (ap *Processor) transferGasToExtraNodes(ctx context.Context, pw []util.Uint160, gasPerNode fixedn.Fixed8) { if len(pw) > 0 { err := ap.morphClient.BatchTransferGas(pw, gasPerNode) if err != nil { @@ -107,7 +107,7 @@ func (ap *Processor) transferGasToExtraNodes(pw []util.Uint160, gasPerNode fixed for i, addr := range pw { receiversLog[i] = addr.StringLE() } - ap.log.Warn(context.Background(), logs.AlphabetCantTransferGasToWallet, + ap.log.Warn(ctx, logs.AlphabetCantTransferGasToWallet, zap.Strings("receivers", receiversLog), zap.Int64("amount", int64(gasPerNode)), zap.String("error", err.Error()), diff --git a/pkg/innerring/processors/alphabet/processor.go b/pkg/innerring/processors/alphabet/processor.go index 8dbef1e20..bf74834ed 100644 --- a/pkg/innerring/processors/alphabet/processor.go +++ b/pkg/innerring/processors/alphabet/processor.go @@ -7,7 +7,6 @@ import ( "sync" "time" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" @@ -15,13 +14,12 @@ import ( "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/panjf2000/ants/v2" - "go.uber.org/zap" ) type ( // Indexer is a callback interface for inner ring global state. 
Indexer interface { - AlphabetIndex() int + AlphabetIndex(context.Context) int } // Contracts is an interface of the storage @@ -41,7 +39,7 @@ type ( } morphClient interface { - Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error) + Invoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8) error } @@ -86,8 +84,6 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/alphabet: global state is not set") } - p.Log.Debug(context.Background(), logs.AlphabetAlphabetWorkerPool, zap.Int("size", p.PoolSize)) - pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { return nil, fmt.Errorf("ir/frostfs: can't create worker pool: %w", err) diff --git a/pkg/innerring/processors/balance/handlers.go b/pkg/innerring/processors/balance/handlers.go index 5a89e6f7c..b5d05a02e 100644 --- a/pkg/innerring/processors/balance/handlers.go +++ b/pkg/innerring/processors/balance/handlers.go @@ -20,7 +20,7 @@ func (bp *Processor) handleLock(ctx context.Context, ev event.Event) { // send an event to the worker pool err := processors.SubmitEvent(bp.pool, bp.metrics, "lock", func() bool { - return bp.processLock(&lock) + return bp.processLock(ctx, &lock) }) if err != nil { // there system can be moved into controlled degradation stage diff --git a/pkg/innerring/processors/balance/handlers_test.go b/pkg/innerring/processors/balance/handlers_test.go index 3ef4959cc..0fd23d8ab 100644 --- a/pkg/innerring/processors/balance/handlers_test.go +++ b/pkg/innerring/processors/balance/handlers_test.go @@ -70,7 +70,7 @@ type testAlphabetState struct { isAlphabet bool } -func (s *testAlphabetState) IsAlphabet() bool { +func (s *testAlphabetState) IsAlphabet(context.Context) bool { return s.isAlphabet } @@ -84,7 +84,7 @@ type testFrostFSContractClient struct { chequeCalls int } -func (c *testFrostFSContractClient) Cheque(p frostfscontract.ChequePrm) error { +func (c *testFrostFSContractClient) Cheque(_ context.Context, p frostfscontract.ChequePrm) error { c.chequeCalls++ return nil } diff --git a/pkg/innerring/processors/balance/process_assets.go b/pkg/innerring/processors/balance/process_assets.go index ac6a1e493..60475908c 100644 --- a/pkg/innerring/processors/balance/process_assets.go +++ b/pkg/innerring/processors/balance/process_assets.go @@ -11,9 +11,9 @@ import ( // Process lock event by invoking Cheque method in main net to send assets // back to the withdraw issuer. 
-func (bp *Processor) processLock(lock *balanceEvent.Lock) bool { - if !bp.alphabetState.IsAlphabet() { - bp.log.Info(context.Background(), logs.BalanceNonAlphabetModeIgnoreBalanceLock) +func (bp *Processor) processLock(ctx context.Context, lock *balanceEvent.Lock) bool { + if !bp.alphabetState.IsAlphabet(ctx) { + bp.log.Info(ctx, logs.BalanceNonAlphabetModeIgnoreBalanceLock) return true } @@ -25,9 +25,9 @@ func (bp *Processor) processLock(lock *balanceEvent.Lock) bool { prm.SetLock(lock.LockAccount()) prm.SetHash(lock.TxHash()) - err := bp.frostfsClient.Cheque(prm) + err := bp.frostfsClient.Cheque(ctx, prm) if err != nil { - bp.log.Error(context.Background(), logs.BalanceCantSendLockAssetTx, zap.Error(err)) + bp.log.Error(ctx, logs.BalanceCantSendLockAssetTx, zap.Error(err)) return false } diff --git a/pkg/innerring/processors/balance/processor.go b/pkg/innerring/processors/balance/processor.go index c4078461e..e2f649600 100644 --- a/pkg/innerring/processors/balance/processor.go +++ b/pkg/innerring/processors/balance/processor.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics" frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" @@ -13,13 +12,12 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/panjf2000/ants/v2" - "go.uber.org/zap" ) type ( // AlphabetState is a callback interface for inner ring global state. AlphabetState interface { - IsAlphabet() bool + IsAlphabet(context.Context) bool } // PrecisionConverter converts balance amount values. @@ -28,7 +26,7 @@ type ( } FrostFSClient interface { - Cheque(p frostfscontract.ChequePrm) error + Cheque(ctx context.Context, p frostfscontract.ChequePrm) error } // Processor of events produced by balance contract in the morphchain. 
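The hunk below repeats a move made for every processor in this change: New receives no caller context, so the pool-size debug log that used to run inside the constructor with context.Background() migrates up into the ctx-aware init*Processor functions shown earlier in initialization.go. A minimal sketch of the resulting shape, with invented Params/initBalanceProcessor/debugf names standing in for the real ones:

	package main

	import (
		"context"
		"log"
	)

	type Params struct{ PoolSize int }

	type Processor struct{ pool int }

	// New stays context-free: it only validates parameters and builds the pool.
	func New(p *Params) (*Processor, error) {
		return &Processor{pool: p.PoolSize}, nil
	}

	// debugf stands in for logger.Logger.Debug(ctx, msg, fields...); the real
	// logger consumes ctx, this stub only keeps the signature honest.
	func debugf(_ context.Context, format string, args ...any) {
		log.Printf(format, args...)
	}

	// The init helper owns a ctx, so the pool-size debug log lives here now.
	func initBalanceProcessor(ctx context.Context, p *Params) (*Processor, error) {
		debugf(ctx, "balance worker pool, size=%d", p.PoolSize)
		return New(p)
	}

	func main() {
		_, _ = initBalanceProcessor(context.Background(), &Params{PoolSize: 10})
	}

The same move appears in the alphabet, container, frostfs and netmap processors, which is why their logs.*WorkerPool debug lines now surface in initialization.go rather than in each processor.go constructor.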
@@ -69,8 +67,6 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/balance: balance precision converter is not set") } - p.Log.Debug(context.Background(), logs.BalanceBalanceWorkerPool, zap.Int("size", p.PoolSize)) - pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { return nil, fmt.Errorf("ir/balance: can't create worker pool: %w", err) diff --git a/pkg/innerring/processors/container/handlers.go b/pkg/innerring/processors/container/handlers.go index 45cac513a..bb038a3cb 100644 --- a/pkg/innerring/processors/container/handlers.go +++ b/pkg/innerring/processors/container/handlers.go @@ -23,7 +23,7 @@ func (cp *Processor) handlePut(ctx context.Context, ev event.Event) { // send an event to the worker pool err := processors.SubmitEvent(cp.pool, cp.metrics, "container_put", func() bool { - return cp.processContainerPut(put) + return cp.processContainerPut(ctx, put) }) if err != nil { // there system can be moved into controlled degradation stage @@ -41,7 +41,7 @@ func (cp *Processor) handleDelete(ctx context.Context, ev event.Event) { // send an event to the worker pool err := processors.SubmitEvent(cp.pool, cp.metrics, "container_delete", func() bool { - return cp.processContainerDelete(del) + return cp.processContainerDelete(ctx, del) }) if err != nil { // there system can be moved into controlled degradation stage diff --git a/pkg/innerring/processors/container/handlers_test.go b/pkg/innerring/processors/container/handlers_test.go index a2fe50fa8..f28e5372a 100644 --- a/pkg/innerring/processors/container/handlers_test.go +++ b/pkg/innerring/processors/container/handlers_test.go @@ -161,7 +161,7 @@ type testAlphabetState struct { isAlphabet bool } -func (s *testAlphabetState) IsAlphabet() bool { +func (s *testAlphabetState) IsAlphabet(context.Context) bool { return s.isAlphabet } diff --git a/pkg/innerring/processors/container/process_container.go b/pkg/innerring/processors/container/process_container.go index 2b9c5995c..16c450166 100644 --- a/pkg/innerring/processors/container/process_container.go +++ b/pkg/innerring/processors/container/process_container.go @@ -37,27 +37,27 @@ var errContainerAndOwnerNamespaceDontMatch = errors.New("container and owner nam // Process a new container from the user by checking the container sanity // and sending approve tx back to the morph. 
-func (cp *Processor) processContainerPut(put putEvent) bool { - if !cp.alphabetState.IsAlphabet() { - cp.log.Info(context.Background(), logs.ContainerNonAlphabetModeIgnoreContainerPut) +func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool { + if !cp.alphabetState.IsAlphabet(ctx) { + cp.log.Info(ctx, logs.ContainerNonAlphabetModeIgnoreContainerPut) return true } - ctx := &putContainerContext{ + pctx := &putContainerContext{ e: put, } - err := cp.checkPutContainer(ctx) + err := cp.checkPutContainer(pctx) if err != nil { - cp.log.Error(context.Background(), logs.ContainerPutContainerCheckFailed, + cp.log.Error(ctx, logs.ContainerPutContainerCheckFailed, zap.String("error", err.Error()), ) return false } - if err := cp.morphClient.NotarySignAndInvokeTX(ctx.e.NotaryRequest().MainTransaction); err != nil { - cp.log.Error(context.Background(), logs.ContainerCouldNotApprovePutContainer, + if err := cp.morphClient.NotarySignAndInvokeTX(pctx.e.NotaryRequest().MainTransaction); err != nil { + cp.log.Error(ctx, logs.ContainerCouldNotApprovePutContainer, zap.String("error", err.Error()), ) return false @@ -104,15 +104,15 @@ func (cp *Processor) checkPutContainer(ctx *putContainerContext) error { // Process delete container operation from the user by checking container sanity // and sending approve tx back to morph. -func (cp *Processor) processContainerDelete(e containerEvent.Delete) bool { - if !cp.alphabetState.IsAlphabet() { - cp.log.Info(context.Background(), logs.ContainerNonAlphabetModeIgnoreContainerDelete) +func (cp *Processor) processContainerDelete(ctx context.Context, e containerEvent.Delete) bool { + if !cp.alphabetState.IsAlphabet(ctx) { + cp.log.Info(ctx, logs.ContainerNonAlphabetModeIgnoreContainerDelete) return true } err := cp.checkDeleteContainer(e) if err != nil { - cp.log.Error(context.Background(), logs.ContainerDeleteContainerCheckFailed, + cp.log.Error(ctx, logs.ContainerDeleteContainerCheckFailed, zap.String("error", err.Error()), ) @@ -120,7 +120,7 @@ func (cp *Processor) processContainerDelete(e containerEvent.Delete) bool { } if err := cp.morphClient.NotarySignAndInvokeTX(e.NotaryRequest().MainTransaction); err != nil { - cp.log.Error(context.Background(), logs.ContainerCouldNotApproveDeleteContainer, + cp.log.Error(ctx, logs.ContainerCouldNotApproveDeleteContainer, zap.String("error", err.Error()), ) diff --git a/pkg/innerring/processors/container/processor.go b/pkg/innerring/processors/container/processor.go index 7a50ca773..58b90457c 100644 --- a/pkg/innerring/processors/container/processor.go +++ b/pkg/innerring/processors/container/processor.go @@ -6,7 +6,6 @@ import ( "fmt" frostfsidclient "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" @@ -16,13 +15,12 @@ import ( "github.com/nspcc-dev/neo-go/pkg/core/transaction" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/panjf2000/ants/v2" - "go.uber.org/zap" ) type ( // AlphabetState is a callback interface for inner ring global state. 
AlphabetState interface { - IsAlphabet() bool + IsAlphabet(context.Context) bool } ContClient interface { @@ -98,8 +96,6 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/container: FrostFSID client is not set") } - p.Log.Debug(context.Background(), logs.ContainerContainerWorkerPool, zap.Int("size", p.PoolSize)) - pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { return nil, fmt.Errorf("ir/container: can't create worker pool: %w", err) diff --git a/pkg/innerring/processors/frostfs/handlers.go b/pkg/innerring/processors/frostfs/handlers.go index d11ad0f5c..936de2e77 100644 --- a/pkg/innerring/processors/frostfs/handlers.go +++ b/pkg/innerring/processors/frostfs/handlers.go @@ -24,7 +24,7 @@ func (np *Processor) handleDeposit(ctx context.Context, ev event.Event) { // send event to the worker pool err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_deposit", func() bool { - return np.processDeposit(deposit) + return np.processDeposit(ctx, deposit) }) if err != nil { // there system can be moved into controlled degradation stage @@ -44,7 +44,7 @@ func (np *Processor) handleWithdraw(ctx context.Context, ev event.Event) { // send event to the worker pool err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_withdraw", func() bool { - return np.processWithdraw(withdraw) + return np.processWithdraw(ctx, withdraw) }) if err != nil { // there system can be moved into controlled degradation stage @@ -62,7 +62,7 @@ func (np *Processor) handleCheque(ctx context.Context, ev event.Event) { // send event to the worker pool err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_cheque", func() bool { - return np.processCheque(cheque) + return np.processCheque(ctx, cheque) }) if err != nil { // there system can be moved into controlled degradation stage @@ -81,7 +81,7 @@ func (np *Processor) handleConfig(ctx context.Context, ev event.Event) { // send event to the worker pool err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_config", func() bool { - return np.processConfig(cfg) + return np.processConfig(ctx, cfg) }) if err != nil { // there system can be moved into controlled degradation stage diff --git a/pkg/innerring/processors/frostfs/handlers_test.go b/pkg/innerring/processors/frostfs/handlers_test.go index c1541ca40..72310f6f9 100644 --- a/pkg/innerring/processors/frostfs/handlers_test.go +++ b/pkg/innerring/processors/frostfs/handlers_test.go @@ -226,7 +226,7 @@ type testAlphabetState struct { isAlphabet bool } -func (s *testAlphabetState) IsAlphabet() bool { +func (s *testAlphabetState) IsAlphabet(context.Context) bool { return s.isAlphabet } @@ -242,17 +242,17 @@ type testBalaceClient struct { burn []balance.BurnPrm } -func (c *testBalaceClient) Mint(p balance.MintPrm) error { +func (c *testBalaceClient) Mint(_ context.Context, p balance.MintPrm) error { c.mint = append(c.mint, p) return nil } -func (c *testBalaceClient) Lock(p balance.LockPrm) error { +func (c *testBalaceClient) Lock(_ context.Context, p balance.LockPrm) error { c.lock = append(c.lock, p) return nil } -func (c *testBalaceClient) Burn(p balance.BurnPrm) error { +func (c *testBalaceClient) Burn(_ context.Context, p balance.BurnPrm) error { c.burn = append(c.burn, p) return nil } @@ -261,7 +261,7 @@ type testNetmapClient struct { config []nmClient.SetConfigPrm } -func (c *testNetmapClient) SetConfig(p nmClient.SetConfigPrm) error { +func (c *testNetmapClient) SetConfig(_ context.Context, p nmClient.SetConfigPrm) error { c.config = append(c.config, p) 
return nil } diff --git a/pkg/innerring/processors/frostfs/process_assets.go b/pkg/innerring/processors/frostfs/process_assets.go index 3bee6ed96..ee824ea31 100644 --- a/pkg/innerring/processors/frostfs/process_assets.go +++ b/pkg/innerring/processors/frostfs/process_assets.go @@ -17,9 +17,9 @@ const ( // Process deposit event by invoking a balance contract and sending native // gas in the sidechain. -func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool { - if !np.alphabetState.IsAlphabet() { - np.log.Info(context.Background(), logs.FrostFSNonAlphabetModeIgnoreDeposit) +func (np *Processor) processDeposit(ctx context.Context, deposit frostfsEvent.Deposit) bool { + if !np.alphabetState.IsAlphabet(ctx) { + np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreDeposit) return true } @@ -30,9 +30,9 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool { prm.SetID(deposit.ID()) // send transferX to a balance contract - err := np.balanceClient.Mint(prm) + err := np.balanceClient.Mint(ctx, prm) if err != nil { - np.log.Error(context.Background(), logs.FrostFSCantTransferAssetsToBalanceContract, zap.Error(err)) + np.log.Error(ctx, logs.FrostFSCantTransferAssetsToBalanceContract, zap.Error(err)) } curEpoch := np.epochState.EpochCounter() @@ -46,7 +46,7 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool { val, ok := np.mintEmitCache.Get(receiver.String()) if ok && val+np.mintEmitThreshold >= curEpoch { - np.log.Warn(context.Background(), logs.FrostFSDoubleMintEmissionDeclined, + np.log.Warn(ctx, logs.FrostFSDoubleMintEmissionDeclined, zap.Stringer("receiver", receiver), zap.Uint64("last_emission", val), zap.Uint64("current_epoch", curEpoch)) @@ -58,12 +58,12 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool { // before gas transfer check if the balance is greater than the threshold balance, err := np.morphClient.GasBalance() if err != nil { - np.log.Error(context.Background(), logs.FrostFSCantGetGasBalanceOfTheNode, zap.Error(err)) + np.log.Error(ctx, logs.FrostFSCantGetGasBalanceOfTheNode, zap.Error(err)) return false } if balance < np.gasBalanceThreshold { - np.log.Warn(context.Background(), logs.FrostFSGasBalanceThresholdHasBeenReached, + np.log.Warn(ctx, logs.FrostFSGasBalanceThresholdHasBeenReached, zap.Int64("balance", balance), zap.Int64("threshold", np.gasBalanceThreshold)) @@ -72,7 +72,7 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool { err = np.morphClient.TransferGas(receiver, np.mintEmitValue) if err != nil { - np.log.Error(context.Background(), logs.FrostFSCantTransferNativeGasToReceiver, + np.log.Error(ctx, logs.FrostFSCantTransferNativeGasToReceiver, zap.String("error", err.Error())) return false @@ -84,16 +84,16 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool { } // Process withdraw event by locking assets in the balance account. 
-func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool { - if !np.alphabetState.IsAlphabet() { - np.log.Info(context.Background(), logs.FrostFSNonAlphabetModeIgnoreWithdraw) +func (np *Processor) processWithdraw(ctx context.Context, withdraw frostfsEvent.Withdraw) bool { + if !np.alphabetState.IsAlphabet(ctx) { + np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreWithdraw) return true } // create lock account lock, err := util.Uint160DecodeBytesBE(withdraw.ID()[:util.Uint160Size]) if err != nil { - np.log.Error(context.Background(), logs.FrostFSCantCreateLockAccount, zap.Error(err)) + np.log.Error(ctx, logs.FrostFSCantCreateLockAccount, zap.Error(err)) return false } @@ -107,9 +107,9 @@ func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool { prm.SetAmount(np.converter.ToBalancePrecision(withdraw.Amount())) prm.SetDueEpoch(int64(curEpoch + lockAccountLifetime)) - err = np.balanceClient.Lock(prm) + err = np.balanceClient.Lock(ctx, prm) if err != nil { - np.log.Error(context.Background(), logs.FrostFSCantLockAssetsForWithdraw, zap.Error(err)) + np.log.Error(ctx, logs.FrostFSCantLockAssetsForWithdraw, zap.Error(err)) return false } @@ -118,9 +118,9 @@ func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool { // Process cheque event by transferring assets from the lock account back to // the reserve account. -func (np *Processor) processCheque(cheque frostfsEvent.Cheque) bool { - if !np.alphabetState.IsAlphabet() { - np.log.Info(context.Background(), logs.FrostFSNonAlphabetModeIgnoreCheque) +func (np *Processor) processCheque(ctx context.Context, cheque frostfsEvent.Cheque) bool { + if !np.alphabetState.IsAlphabet(ctx) { + np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreCheque) return true } @@ -130,9 +130,9 @@ func (np *Processor) processCheque(cheque frostfsEvent.Cheque) bool { prm.SetAmount(np.converter.ToBalancePrecision(cheque.Amount())) prm.SetID(cheque.ID()) - err := np.balanceClient.Burn(prm) + err := np.balanceClient.Burn(ctx, prm) if err != nil { - np.log.Error(context.Background(), logs.FrostFSCantTransferAssetsToFedContract, zap.Error(err)) + np.log.Error(ctx, logs.FrostFSCantTransferAssetsToFedContract, zap.Error(err)) return false } diff --git a/pkg/innerring/processors/frostfs/process_config.go b/pkg/innerring/processors/frostfs/process_config.go index 814dd40b4..dc579f6bb 100644 --- a/pkg/innerring/processors/frostfs/process_config.go +++ b/pkg/innerring/processors/frostfs/process_config.go @@ -11,9 +11,9 @@ import ( // Process config event by setting configuration value from the mainchain in // the sidechain. 
-func (np *Processor) processConfig(config frostfsEvent.Config) bool { - if !np.alphabetState.IsAlphabet() { - np.log.Info(context.Background(), logs.FrostFSNonAlphabetModeIgnoreConfig) +func (np *Processor) processConfig(ctx context.Context, config frostfsEvent.Config) bool { + if !np.alphabetState.IsAlphabet(ctx) { + np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreConfig) return true } @@ -24,9 +24,9 @@ func (np *Processor) processConfig(config frostfsEvent.Config) bool { prm.SetValue(config.Value()) prm.SetHash(config.TxHash()) - err := np.netmapClient.SetConfig(prm) + err := np.netmapClient.SetConfig(ctx, prm) if err != nil { - np.log.Error(context.Background(), logs.FrostFSCantRelaySetConfigEvent, zap.Error(err)) + np.log.Error(ctx, logs.FrostFSCantRelaySetConfigEvent, zap.Error(err)) return false } diff --git a/pkg/innerring/processors/frostfs/processor.go b/pkg/innerring/processors/frostfs/processor.go index fdc31d82e..6c29d330d 100644 --- a/pkg/innerring/processors/frostfs/processor.go +++ b/pkg/innerring/processors/frostfs/processor.go @@ -6,7 +6,6 @@ import ( "fmt" "sync" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance" nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" @@ -17,7 +16,6 @@ import ( "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/panjf2000/ants/v2" - "go.uber.org/zap" ) type ( @@ -28,7 +26,7 @@ type ( // AlphabetState is a callback interface for inner ring global state. AlphabetState interface { - IsAlphabet() bool + IsAlphabet(context.Context) bool } // PrecisionConverter converts balance amount values. 
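The interface hunks that follow show the mechanical half of the refactoring: each client method gains ctx context.Context as its first parameter, and the matching test doubles accept it with a blank identifier. A short self-contained sketch of that shape (the Client/SendPrm names are illustrative, not from this repository):

	package main

	import (
		"context"
		"fmt"
	)

	type SendPrm struct{ Amount int64 }

	// After the change, every client method threads ctx explicitly.
	type Client interface {
		Send(ctx context.Context, p SendPrm) error
	}

	// testClient records calls and discards ctx with a blank identifier,
	// mirroring the updated mocks in the *_test.go hunks.
	type testClient struct{ sent []SendPrm }

	func (c *testClient) Send(_ context.Context, p SendPrm) error {
		c.sent = append(c.sent, p)
		return nil
	}

	func main() {
		var c Client = &testClient{}
		_ = c.Send(context.Background(), SendPrm{Amount: 1})
		fmt.Println(c.(*testClient).sent) // [{1}]
	}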
@@ -37,13 +35,13 @@ type ( } BalanceClient interface { - Mint(p balance.MintPrm) error - Lock(p balance.LockPrm) error - Burn(p balance.BurnPrm) error + Mint(ctx context.Context, p balance.MintPrm) error + Lock(ctx context.Context, p balance.LockPrm) error + Burn(ctx context.Context, p balance.BurnPrm) error } NetmapClient interface { - SetConfig(p nmClient.SetConfigPrm) error + SetConfig(ctx context.Context, p nmClient.SetConfigPrm) error } MorphClient interface { @@ -111,8 +109,6 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/frostfs: balance precision converter is not set") } - p.Log.Debug(context.Background(), logs.FrostFSFrostfsWorkerPool, zap.Int("size", p.PoolSize)) - pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { return nil, fmt.Errorf("ir/frostfs: can't create worker pool: %w", err) diff --git a/pkg/innerring/processors/governance/handlers_test.go b/pkg/innerring/processors/governance/handlers_test.go index 286935129..5a6126249 100644 --- a/pkg/innerring/processors/governance/handlers_test.go +++ b/pkg/innerring/processors/governance/handlers_test.go @@ -219,7 +219,7 @@ type testAlphabetState struct { isAlphabet bool } -func (s *testAlphabetState) IsAlphabet() bool { +func (s *testAlphabetState) IsAlphabet(context.Context) bool { return s.isAlphabet } @@ -251,12 +251,12 @@ func (c *testMorphClient) Committee() (res keys.PublicKeys, err error) { return c.commiteeKeys, nil } -func (c *testMorphClient) UpdateNeoFSAlphabetList(prm client.UpdateAlphabetListPrm) error { +func (c *testMorphClient) UpdateNeoFSAlphabetList(_ context.Context, prm client.UpdateAlphabetListPrm) error { c.alphabetUpdates = append(c.alphabetUpdates, prm) return nil } -func (c *testMorphClient) UpdateNotaryList(prm client.UpdateNotaryListPrm) error { +func (c *testMorphClient) UpdateNotaryList(_ context.Context, prm client.UpdateNotaryListPrm) error { c.notaryUpdates = append(c.notaryUpdates, prm) return nil } @@ -278,7 +278,7 @@ type testFrostFSClient struct { updates []frostfscontract.AlphabetUpdatePrm } -func (c *testFrostFSClient) AlphabetUpdate(p frostfscontract.AlphabetUpdatePrm) error { +func (c *testFrostFSClient) AlphabetUpdate(_ context.Context, p frostfscontract.AlphabetUpdatePrm) error { c.updates = append(c.updates, p) return nil } diff --git a/pkg/innerring/processors/governance/process_update.go b/pkg/innerring/processors/governance/process_update.go index fdfdfa479..73d21a7d2 100644 --- a/pkg/innerring/processors/governance/process_update.go +++ b/pkg/innerring/processors/governance/process_update.go @@ -20,7 +20,7 @@ const ( ) func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint256) bool { - if !gp.alphabetState.IsAlphabet() { + if !gp.alphabetState.IsAlphabet(ctx) { gp.log.Info(ctx, logs.GovernanceNonAlphabetModeIgnoreAlphabetSync) return true } @@ -69,13 +69,13 @@ func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint25 } // 2. Update NeoFSAlphabet role in the sidechain. - gp.updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAlphabet, txHash) + gp.updateNeoFSAlphabetRoleInSidechain(ctx, sidechainAlphabet, newAlphabet, txHash) // 3. Update notary role in the sidechain. - gp.updateNotaryRoleInSidechain(newAlphabet, txHash) + gp.updateNotaryRoleInSidechain(ctx, newAlphabet, txHash) // 4. Update FrostFS contract in the mainnet. 
- gp.updateFrostFSContractInMainnet(newAlphabet) + gp.updateFrostFSContractInMainnet(ctx, newAlphabet) gp.log.Info(ctx, logs.GovernanceFinishedAlphabetListUpdate) @@ -94,24 +94,24 @@ func prettyKeys(keys keys.PublicKeys) string { return strings.TrimRight(sb.String(), delimiter) } -func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) { +func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) { innerRing, err := gp.irFetcher.InnerRingKeys() if err != nil { - gp.log.Error(context.Background(), logs.GovernanceCantFetchInnerRingListFromSideChain, + gp.log.Error(ctx, logs.GovernanceCantFetchInnerRingListFromSideChain, zap.String("error", err.Error())) return } newInnerRing, err := updateInnerRing(innerRing, sidechainAlphabet, newAlphabet) if err != nil { - gp.log.Error(context.Background(), logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys, + gp.log.Error(ctx, logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys, zap.String("error", err.Error())) return } sort.Sort(newInnerRing) - gp.log.Info(context.Background(), logs.GovernanceUpdateOfTheInnerRingList, + gp.log.Info(ctx, logs.GovernanceUpdateOfTheInnerRingList, zap.String("before", prettyKeys(innerRing)), zap.String("after", prettyKeys(newInnerRing)), ) @@ -120,26 +120,26 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAl updPrm.SetList(newInnerRing) updPrm.SetHash(txHash) - if err = gp.morphClient.UpdateNeoFSAlphabetList(updPrm); err != nil { - gp.log.Error(context.Background(), logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys, + if err = gp.morphClient.UpdateNeoFSAlphabetList(ctx, updPrm); err != nil { + gp.log.Error(ctx, logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys, zap.String("error", err.Error())) } } -func (gp *Processor) updateNotaryRoleInSidechain(newAlphabet keys.PublicKeys, txHash util.Uint256) { +func (gp *Processor) updateNotaryRoleInSidechain(ctx context.Context, newAlphabet keys.PublicKeys, txHash util.Uint256) { updPrm := client.UpdateNotaryListPrm{} updPrm.SetList(newAlphabet) updPrm.SetHash(txHash) - err := gp.morphClient.UpdateNotaryList(updPrm) + err := gp.morphClient.UpdateNotaryList(ctx, updPrm) if err != nil { - gp.log.Error(context.Background(), logs.GovernanceCantUpdateListOfNotaryNodesInSideChain, + gp.log.Error(ctx, logs.GovernanceCantUpdateListOfNotaryNodesInSideChain, zap.String("error", err.Error())) } } -func (gp *Processor) updateFrostFSContractInMainnet(newAlphabet keys.PublicKeys) { +func (gp *Processor) updateFrostFSContractInMainnet(ctx context.Context, newAlphabet keys.PublicKeys) { epoch := gp.epochState.EpochCounter() buf := make([]byte, 8) @@ -152,9 +152,9 @@ func (gp *Processor) updateFrostFSContractInMainnet(newAlphabet keys.PublicKeys) prm.SetID(id) prm.SetPubs(newAlphabet) - err := gp.frostfsClient.AlphabetUpdate(prm) + err := gp.frostfsClient.AlphabetUpdate(ctx, prm) if err != nil { - gp.log.Error(context.Background(), logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract, + gp.log.Error(ctx, logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract, zap.String("error", err.Error())) } } diff --git a/pkg/innerring/processors/governance/processor.go b/pkg/innerring/processors/governance/processor.go index eaadfdb4f..565f4c27d 100644 --- a/pkg/innerring/processors/governance/processor.go +++ b/pkg/innerring/processors/governance/processor.go @@ -26,7 +26,7 @@ 
const ProcessorPoolSize = 1 type ( // AlphabetState is a callback interface for innerring global state. AlphabetState interface { - IsAlphabet() bool + IsAlphabet(context.Context) bool } ) @@ -56,7 +56,7 @@ type ( } FrostFSClient interface { - AlphabetUpdate(p frostfscontract.AlphabetUpdatePrm) error + AlphabetUpdate(ctx context.Context, p frostfscontract.AlphabetUpdatePrm) error } NetmapClient interface { @@ -70,8 +70,8 @@ type ( MorphClient interface { Committee() (res keys.PublicKeys, err error) - UpdateNeoFSAlphabetList(prm client.UpdateAlphabetListPrm) error - UpdateNotaryList(prm client.UpdateNotaryListPrm) error + UpdateNeoFSAlphabetList(ctx context.Context, prm client.UpdateAlphabetListPrm) error + UpdateNotaryList(ctx context.Context, prm client.UpdateNotaryListPrm) error } // Processor of events related to governance in the network. diff --git a/pkg/innerring/processors/netmap/handlers.go b/pkg/innerring/processors/netmap/handlers.go index 61547e0ba..4c7199a49 100644 --- a/pkg/innerring/processors/netmap/handlers.go +++ b/pkg/innerring/processors/netmap/handlers.go @@ -14,14 +14,14 @@ import ( func (np *Processor) HandleNewEpochTick(ctx context.Context, ev event.Event) { _ = ev.(timerEvent.NewEpochTick) - np.log.Info(context.Background(), logs.NetmapTick, zap.String("type", "epoch")) + np.log.Info(ctx, logs.NetmapTick, zap.String("type", "epoch")) // send an event to the worker pool err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch_tick", func() bool { return np.processNewEpochTick(ctx) }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn(context.Background(), logs.NetmapNetmapWorkerPoolDrained, + np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } @@ -54,7 +54,7 @@ func (np *Processor) handleAddPeer(ctx context.Context, ev event.Event) { // send an event to the worker pool err := processors.SubmitEvent(np.pool, np.metrics, "netmap_add_peer", func() bool { - return np.processAddPeer(newPeer) + return np.processAddPeer(ctx, newPeer) }) if err != nil { // there system can be moved into controlled degradation stage @@ -72,7 +72,7 @@ func (np *Processor) handleUpdateState(ctx context.Context, ev event.Event) { // send event to the worker pool err := processors.SubmitEvent(np.pool, np.metrics, "netmap_update_peer", func() bool { - return np.processUpdatePeer(updPeer) + return np.processUpdatePeer(ctx, updPeer) }) if err != nil { // there system can be moved into controlled degradation stage @@ -94,7 +94,7 @@ func (np *Processor) handleCleanupTick(ctx context.Context, ev event.Event) { // send event to the worker pool err := processors.SubmitEvent(np.pool, np.metrics, "netmap_cleanup_tick", func() bool { - return np.processNetmapCleanupTick(cleanup) + return np.processNetmapCleanupTick(ctx, cleanup) }) if err != nil { // there system can be moved into controlled degradation stage diff --git a/pkg/innerring/processors/netmap/handlers_test.go b/pkg/innerring/processors/netmap/handlers_test.go index 50a67c12d..d780cf25b 100644 --- a/pkg/innerring/processors/netmap/handlers_test.go +++ b/pkg/innerring/processors/netmap/handlers_test.go @@ -351,7 +351,7 @@ type testAlphabetState struct { isAlphabet bool } -func (s *testAlphabetState) IsAlphabet() bool { +func (s *testAlphabetState) IsAlphabet(context.Context) bool { return s.isAlphabet } @@ -359,7 +359,7 @@ type testContainerClient struct { estimations []cntClient.StartEstimationPrm } -func (c *testContainerClient) StartEstimation(p 
cntClient.StartEstimationPrm) error { +func (c *testContainerClient) StartEstimation(_ context.Context, p cntClient.StartEstimationPrm) error { c.estimations = append(c.estimations, p) return nil } @@ -384,7 +384,7 @@ type testNetmapClient struct { invokedTxs []*transaction.Transaction } -func (c *testNetmapClient) MorphNotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error { +func (c *testNetmapClient) MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error { c.notaryInvokes = append(c.notaryInvokes, notaryInvoke{ contract: contract, fee: fee, @@ -415,7 +415,7 @@ func (c *testNetmapClient) NetMap() (*netmap.NetMap, error) { return c.netmap, nil } -func (c *testNetmapClient) NewEpoch(epoch uint64) error { +func (c *testNetmapClient) NewEpoch(_ context.Context, epoch uint64) error { c.newEpochs = append(c.newEpochs, epoch) return nil } diff --git a/pkg/innerring/processors/netmap/process_cleanup.go b/pkg/innerring/processors/netmap/process_cleanup.go index 8df78abce..79fe09d91 100644 --- a/pkg/innerring/processors/netmap/process_cleanup.go +++ b/pkg/innerring/processors/netmap/process_cleanup.go @@ -9,9 +9,9 @@ import ( "go.uber.org/zap" ) -func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool { - if !np.alphabetState.IsAlphabet() { - np.log.Info(context.Background(), logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick) +func (np *Processor) processNetmapCleanupTick(ctx context.Context, ev netmapCleanupTick) bool { + if !np.alphabetState.IsAlphabet(ctx) { + np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick) return true } @@ -19,13 +19,13 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool { err := np.netmapSnapshot.forEachRemoveCandidate(ev.epoch, func(s string) error { key, err := keys.NewPublicKeyFromString(s) if err != nil { - np.log.Warn(context.Background(), logs.NetmapCantDecodePublicKeyOfNetmapNode, + np.log.Warn(ctx, logs.NetmapCantDecodePublicKeyOfNetmapNode, zap.String("key", s)) return nil } - np.log.Info(context.Background(), logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", s)) + np.log.Info(ctx, logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", s)) // In notary environments we call UpdateStateIR method instead of UpdateState. // It differs from UpdateState only by name, so we can do this in the same form. 
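Handlers no longer hand bare method values to the worker pool; they wrap the process step in a closure that captures the event's ctx (as in func() bool { return np.processNetmapCleanupTick(ctx, cleanup) } above), so cancellation and tracing metadata survive the hop onto the pool. A minimal sketch of the idea, with a synchronous submit standing in for the non-blocking, ants-backed processors.SubmitEvent:

	package main

	import (
		"context"
		"fmt"
	)

	// submit stands in for processors.SubmitEvent; the real version hands the
	// task to a non-blocking ants pool, this sketch just runs it inline.
	func submit(task func() bool) error {
		task()
		return nil
	}

	type processor struct{}

	func (p *processor) processCleanup(ctx context.Context, epoch uint64) bool {
		// ctx now reaches logging and contract calls instead of context.Background()
		fmt.Println("cleanup tick, epoch", epoch, "ctx err:", ctx.Err())
		return true
	}

	func (p *processor) handleCleanup(ctx context.Context, epoch uint64) {
		// the closure captures ctx, so the pooled task inherits the event's context
		_ = submit(func() bool { return p.processCleanup(ctx, epoch) })
	}

	func main() {
		(&processor{}).handleCleanup(context.Background(), 42)
	}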
@@ -33,6 +33,7 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool { const methodUpdateStateNotary = "updateStateIR" err = np.netmapClient.MorphNotaryInvoke( + ctx, np.netmapClient.ContractAddress(), 0, uint32(ev.epoch), @@ -41,13 +42,13 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool { int64(v2netmap.Offline), key.Bytes(), ) if err != nil { - np.log.Error(context.Background(), logs.NetmapCantInvokeNetmapUpdateState, zap.Error(err)) + np.log.Error(ctx, logs.NetmapCantInvokeNetmapUpdateState, zap.Error(err)) } return nil }) if err != nil { - np.log.Warn(context.Background(), logs.NetmapCantIterateOnNetmapCleanerCache, + np.log.Warn(ctx, logs.NetmapCantIterateOnNetmapCleanerCache, zap.String("error", err.Error())) return false } diff --git a/pkg/innerring/processors/netmap/process_epoch.go b/pkg/innerring/processors/netmap/process_epoch.go index 5fbeeb809..8c29656b5 100644 --- a/pkg/innerring/processors/netmap/process_epoch.go +++ b/pkg/innerring/processors/netmap/process_epoch.go @@ -51,8 +51,8 @@ func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoc prm.SetEpoch(epoch - 1) prm.SetHash(ev.TxHash()) - if epoch > 0 && np.alphabetState.IsAlphabet() { // estimates are invalid in genesis epoch - err = np.containerWrp.StartEstimation(prm) + if epoch > 0 && np.alphabetState.IsAlphabet(ctx) { // estimates are invalid in genesis epoch + err = np.containerWrp.StartEstimation(ctx, prm) if err != nil { np.log.Warn(ctx, logs.NetmapCantStartContainerSizeEstimation, zap.Uint64("epoch", epoch), @@ -70,7 +70,7 @@ func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoc // Process new epoch tick by invoking new epoch method in network map contract. func (np *Processor) processNewEpochTick(ctx context.Context) bool { - if !np.alphabetState.IsAlphabet() { + if !np.alphabetState.IsAlphabet(ctx) { np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewEpochTick) return true } @@ -78,7 +78,7 @@ func (np *Processor) processNewEpochTick(ctx context.Context) bool { nextEpoch := np.epochState.EpochCounter() + 1 np.log.Debug(ctx, logs.NetmapNextEpoch, zap.Uint64("value", nextEpoch)) - err := np.netmapClient.NewEpoch(nextEpoch) + err := np.netmapClient.NewEpoch(ctx, nextEpoch) if err != nil { np.log.Error(ctx, logs.NetmapCantInvokeNetmapNewEpoch, zap.Error(err)) return false diff --git a/pkg/innerring/processors/netmap/process_peers.go b/pkg/innerring/processors/netmap/process_peers.go index 42d1b5ec6..72aa08f76 100644 --- a/pkg/innerring/processors/netmap/process_peers.go +++ b/pkg/innerring/processors/netmap/process_peers.go @@ -13,9 +13,9 @@ import ( // Process add peer notification by sanity check of new node // local epoch timer. 
-func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool { - if !np.alphabetState.IsAlphabet() { - np.log.Info(context.Background(), logs.NetmapNonAlphabetModeIgnoreNewPeerNotification) +func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer) bool { + if !np.alphabetState.IsAlphabet(ctx) { + np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewPeerNotification) return true } @@ -23,7 +23,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool { tx := ev.NotaryRequest().MainTransaction ok, err := np.netmapClient.MorphIsValidScript(tx.Script, tx.Signers) if err != nil || !ok { - np.log.Warn(context.Background(), logs.NetmapNonhaltNotaryTransaction, + np.log.Warn(ctx, logs.NetmapNonhaltNotaryTransaction, zap.String("method", "netmap.AddPeer"), zap.String("hash", tx.Hash().StringLE()), zap.Error(err)) @@ -34,14 +34,14 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool { var nodeInfo netmap.NodeInfo if err := nodeInfo.Unmarshal(ev.Node()); err != nil { // it will be nice to have tx id at event structure to log it - np.log.Warn(context.Background(), logs.NetmapCantParseNetworkMapCandidate) + np.log.Warn(ctx, logs.NetmapCantParseNetworkMapCandidate) return false } // validate and update node info err = np.nodeValidator.VerifyAndUpdate(&nodeInfo) if err != nil { - np.log.Warn(context.Background(), logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate, + np.log.Warn(ctx, logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate, zap.String("error", err.Error()), ) @@ -64,7 +64,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool { // That is why we need to perform `addPeerIR` only in case when node is online, // because in scope of this method, contract set state `ONLINE` for the node. if updated && nodeInfo.Status().IsOnline() { - np.log.Info(context.Background(), logs.NetmapApprovingNetworkMapCandidate, + np.log.Info(ctx, logs.NetmapApprovingNetworkMapCandidate, zap.String("key", keyString)) prm := netmapclient.AddPeerPrm{} @@ -77,6 +77,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool { // create new notary request with the original nonce err = np.netmapClient.MorphNotaryInvoke( + ctx, np.netmapClient.ContractAddress(), 0, ev.NotaryRequest().MainTransaction.Nonce, @@ -85,7 +86,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool { nodeInfoBinary, ) if err != nil { - np.log.Error(context.Background(), logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err)) + np.log.Error(ctx, logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err)) return false } } @@ -94,9 +95,9 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool { } // Process update peer notification by sending approval tx to the smart contract. 
-func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool { - if !np.alphabetState.IsAlphabet() { - np.log.Info(context.Background(), logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification) +func (np *Processor) processUpdatePeer(ctx context.Context, ev netmapEvent.UpdatePeer) bool { + if !np.alphabetState.IsAlphabet(ctx) { + np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification) return true } @@ -109,7 +110,7 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool { if ev.Maintenance() { err = np.nodeStateSettings.MaintenanceModeAllowed() if err != nil { - np.log.Info(context.Background(), logs.NetmapPreventSwitchingNodeToMaintenanceState, + np.log.Info(ctx, logs.NetmapPreventSwitchingNodeToMaintenanceState, zap.Error(err), ) @@ -118,7 +119,7 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool { } if err = np.netmapClient.MorphNotarySignAndInvokeTX(ev.NotaryRequest().MainTransaction); err != nil { - np.log.Error(context.Background(), logs.NetmapCantInvokeNetmapUpdatePeer, zap.Error(err)) + np.log.Error(ctx, logs.NetmapCantInvokeNetmapUpdatePeer, zap.Error(err)) return false } diff --git a/pkg/innerring/processors/netmap/processor.go b/pkg/innerring/processors/netmap/processor.go index 621a9ab47..ac5205439 100644 --- a/pkg/innerring/processors/netmap/processor.go +++ b/pkg/innerring/processors/netmap/processor.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state" cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" @@ -18,7 +17,6 @@ import ( "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/panjf2000/ants/v2" - "go.uber.org/zap" ) type ( @@ -37,7 +35,7 @@ type ( // AlphabetState is a callback interface for inner ring global state. 
AlphabetState interface { - IsAlphabet() bool + IsAlphabet(context.Context) bool } // NodeValidator wraps basic method of checking the correctness @@ -56,18 +54,18 @@ type ( } Client interface { - MorphNotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error + MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error ContractAddress() util.Uint160 EpochDuration() (uint64, error) MorphTxHeight(h util.Uint256) (res uint32, err error) NetMap() (*netmap.NetMap, error) - NewEpoch(epoch uint64) error + NewEpoch(ctx context.Context, epoch uint64) error MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error) MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error } ContainerClient interface { - StartEstimation(p cntClient.StartEstimationPrm) error + StartEstimation(ctx context.Context, p cntClient.StartEstimationPrm) error } // Processor of events produced by network map contract @@ -142,8 +140,6 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/netmap: node state settings is not set") } - p.Log.Debug(context.Background(), logs.NetmapNetmapWorkerPool, zap.Int("size", p.PoolSize)) - pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { return nil, fmt.Errorf("ir/netmap: can't create worker pool: %w", err) diff --git a/pkg/innerring/processors/netmap/wrappers.go b/pkg/innerring/processors/netmap/wrappers.go index e75fdaf40..9cd71ae48 100644 --- a/pkg/innerring/processors/netmap/wrappers.go +++ b/pkg/innerring/processors/netmap/wrappers.go @@ -1,6 +1,8 @@ package netmap import ( + "context" + netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "github.com/nspcc-dev/neo-go/pkg/core/transaction" @@ -18,13 +20,13 @@ type netmapClientWrapper struct { netmapClient *netmapclient.Client } -func (w *netmapClientWrapper) UpdatePeerState(p netmapclient.UpdatePeerPrm) error { - _, err := w.netmapClient.UpdatePeerState(p) +func (w *netmapClientWrapper) UpdatePeerState(ctx context.Context, p netmapclient.UpdatePeerPrm) error { + _, err := w.netmapClient.UpdatePeerState(ctx, p) return err } -func (w *netmapClientWrapper) MorphNotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error { - _, err := w.netmapClient.Morph().NotaryInvoke(contract, fee, nonce, vub, method, args...) +func (w *netmapClientWrapper) MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error { + _, err := w.netmapClient.Morph().NotaryInvoke(ctx, contract, fee, nonce, vub, method, args...) 
return err } @@ -44,16 +46,16 @@ func (w *netmapClientWrapper) NetMap() (*netmap.NetMap, error) { return w.netmapClient.NetMap() } -func (w *netmapClientWrapper) NewEpoch(epoch uint64) error { - return w.netmapClient.NewEpoch(epoch) +func (w *netmapClientWrapper) NewEpoch(ctx context.Context, epoch uint64) error { + return w.netmapClient.NewEpoch(ctx, epoch) } func (w *netmapClientWrapper) MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error) { return w.netmapClient.Morph().IsValidScript(script, signers) } -func (w *netmapClientWrapper) AddPeer(p netmapclient.AddPeerPrm) error { - return w.netmapClient.AddPeer(p) +func (w *netmapClientWrapper) AddPeer(ctx context.Context, p netmapclient.AddPeerPrm) error { + return w.netmapClient.AddPeer(ctx, p) } func (w *netmapClientWrapper) MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error { diff --git a/pkg/innerring/state.go b/pkg/innerring/state.go index 85f332fb6..77c2af2ce 100644 --- a/pkg/innerring/state.go +++ b/pkg/innerring/state.go @@ -53,8 +53,8 @@ func (s *Server) IsActive(ctx context.Context) bool { } // IsAlphabet is a getter for a global alphabet flag state. -func (s *Server) IsAlphabet() bool { - return s.AlphabetIndex() >= 0 +func (s *Server) IsAlphabet(ctx context.Context) bool { + return s.AlphabetIndex(ctx) >= 0 } // InnerRingIndex is a getter for a global index of node in inner ring list. Negative @@ -83,10 +83,10 @@ func (s *Server) InnerRingSize(ctx context.Context) int { // AlphabetIndex is a getter for a global index of node in alphabet list. // Negative index means that node is not in the alphabet list. -func (s *Server) AlphabetIndex() int { +func (s *Server) AlphabetIndex(ctx context.Context) int { index, err := s.statusIndex.AlphabetIndex() if err != nil { - s.log.Error(context.Background(), logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error())) + s.log.Error(ctx, logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error())) return -1 } @@ -127,7 +127,7 @@ func (s *Server) voteForSidechainValidator(ctx context.Context, prm governance.V } s.contracts.alphabet.iterate(func(letter GlagoliticLetter, contract util.Uint160) { - _, err := s.morphClient.NotaryInvoke(contract, s.feeConfig.SideChainFee(), nonce, vubP, voteMethod, epoch, validators) + _, err := s.morphClient.NotaryInvoke(ctx, contract, s.feeConfig.SideChainFee(), nonce, vubP, voteMethod, epoch, validators) if err != nil { s.log.Warn(ctx, logs.InnerringCantInvokeVoteMethodInAlphabetContract, zap.Int8("alphabet_index", int8(letter)), diff --git a/pkg/innerring/state_test.go b/pkg/innerring/state_test.go index 17ab995af..f60ca87c4 100644 --- a/pkg/innerring/state_test.go +++ b/pkg/innerring/state_test.go @@ -47,8 +47,8 @@ func TestServerState(t *testing.T) { require.Equal(t, healthStatus, srv.HealthStatus(), "invalid health status") require.True(t, srv.IsActive(context.Background()), "invalid IsActive result") - require.True(t, srv.IsAlphabet(), "invalid IsAlphabet result") + require.True(t, srv.IsAlphabet(context.Background()), "invalid IsAlphabet result") require.Equal(t, 0, srv.InnerRingIndex(context.Background()), "invalid IR index") require.Equal(t, 1, srv.InnerRingSize(context.Background()), "invalid IR index") - require.Equal(t, 0, srv.AlphabetIndex(), "invalid alphabet index") + require.Equal(t, 0, srv.AlphabetIndex(context.Background()), "invalid alphabet index") } diff --git a/pkg/local_object_storage/blobovnicza/blobovnicza_test.go b/pkg/local_object_storage/blobovnicza/blobovnicza_test.go 
index 10cb6f368..95fdd844b 100644 --- a/pkg/local_object_storage/blobovnicza/blobovnicza_test.go +++ b/pkg/local_object_storage/blobovnicza/blobovnicza_test.go @@ -72,7 +72,7 @@ func TestBlobovnicza(t *testing.T) { require.NoError(t, blz.Open(context.Background())) // initialize Blobovnicza - require.NoError(t, blz.Init()) + require.NoError(t, blz.Init(context.Background())) // try to read non-existent address testGet(t, blz, oidtest.Address(), nil, client.IsErrObjectNotFound) @@ -102,5 +102,5 @@ func TestBlobovnicza(t *testing.T) { return err == nil }, nil) - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) } diff --git a/pkg/local_object_storage/blobovnicza/control.go b/pkg/local_object_storage/blobovnicza/control.go index a317279a4..d0e71a876 100644 --- a/pkg/local_object_storage/blobovnicza/control.go +++ b/pkg/local_object_storage/blobovnicza/control.go @@ -56,7 +56,7 @@ func (b *Blobovnicza) Open(ctx context.Context) error { // // If Blobovnicza is already initialized, no action is taken. // Blobovnicza must be open, otherwise an error will return. -func (b *Blobovnicza) Init() error { +func (b *Blobovnicza) Init(ctx context.Context) error { b.controlMtx.Lock() defer b.controlMtx.Unlock() @@ -64,7 +64,7 @@ func (b *Blobovnicza) Init() error { return errors.New("blobovnicza is not open") } - b.log.Debug(context.Background(), logs.BlobovniczaInitializing, + b.log.Debug(ctx, logs.BlobovniczaInitializing, zap.Uint64("object size limit", b.objSizeLimit), zap.Uint64("storage size limit", b.fullSizeLimit), ) @@ -72,7 +72,7 @@ func (b *Blobovnicza) Init() error { size := b.dataSize.Load() items := b.itemsCount.Load() if size != 0 || items != 0 { - b.log.Debug(context.Background(), logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size), zap.Uint64("items", items)) + b.log.Debug(ctx, logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size), zap.Uint64("items", items)) return nil } @@ -82,7 +82,7 @@ func (b *Blobovnicza) Init() error { // create size range bucket rangeStr := stringifyBounds(lower, upper) - b.log.Debug(context.Background(), logs.BlobovniczaCreatingBucketForSizeRange, + b.log.Debug(ctx, logs.BlobovniczaCreatingBucketForSizeRange, zap.String("range", rangeStr)) _, err := tx.CreateBucketIfNotExists(key) @@ -99,14 +99,14 @@ func (b *Blobovnicza) Init() error { } } - return b.initializeCounters() + return b.initializeCounters(ctx) } func (b *Blobovnicza) ObjectsCount() uint64 { return b.itemsCount.Load() } -func (b *Blobovnicza) initializeCounters() error { +func (b *Blobovnicza) initializeCounters(ctx context.Context) error { var size uint64 var items uint64 var sizeExists bool @@ -132,17 +132,17 @@ func (b *Blobovnicza) initializeCounters() error { return fmt.Errorf("can't determine DB size: %w", err) } if (!sizeExists || !itemsCountExists) && !b.boltOptions.ReadOnly { - b.log.Debug(context.Background(), logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items)) + b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items)) if err := b.boltDB.Update(func(tx *bbolt.Tx) error { if err := saveDataSize(tx, size); err != nil { return err } return saveItemsCount(tx, items) }); err != nil { - b.log.Debug(context.Background(), logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items)) + b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items)) return fmt.Errorf("can't 
save blobovnicza's size and items count: %w", err) } - b.log.Debug(context.Background(), logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items)) + b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items)) } b.dataSize.Store(size) @@ -155,7 +155,7 @@ func (b *Blobovnicza) initializeCounters() error { // Close releases all internal database resources. // // If blobovnicza is already closed, does nothing. -func (b *Blobovnicza) Close() error { +func (b *Blobovnicza) Close(ctx context.Context) error { b.controlMtx.Lock() defer b.controlMtx.Unlock() @@ -163,7 +163,7 @@ func (b *Blobovnicza) Close() error { return nil } - b.log.Debug(context.Background(), logs.BlobovniczaClosingBoltDB, + b.log.Debug(ctx, logs.BlobovniczaClosingBoltDB, zap.String("path", b.path), ) diff --git a/pkg/local_object_storage/blobovnicza/get_test.go b/pkg/local_object_storage/blobovnicza/get_test.go index 8c435af89..5a382c159 100644 --- a/pkg/local_object_storage/blobovnicza/get_test.go +++ b/pkg/local_object_storage/blobovnicza/get_test.go @@ -14,11 +14,11 @@ func TestBlobovnicza_Get(t *testing.T) { filename := filepath.Join(t.TempDir(), "blob") var blz *Blobovnicza - defer func() { require.NoError(t, blz.Close()) }() + defer func() { require.NoError(t, blz.Close(context.Background())) }() fnInit := func(szLimit uint64) { if blz != nil { - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) } blz = New( @@ -27,7 +27,7 @@ func TestBlobovnicza_Get(t *testing.T) { ) require.NoError(t, blz.Open(context.Background())) - require.NoError(t, blz.Init()) + require.NoError(t, blz.Init(context.Background())) } // initial distribution: [0:32K] (32K:64K] diff --git a/pkg/local_object_storage/blobovnicza/iterate_test.go b/pkg/local_object_storage/blobovnicza/iterate_test.go index bec23bb96..717274781 100644 --- a/pkg/local_object_storage/blobovnicza/iterate_test.go +++ b/pkg/local_object_storage/blobovnicza/iterate_test.go @@ -16,7 +16,7 @@ func TestBlobovniczaIterate(t *testing.T) { filename := filepath.Join(t.TempDir(), "blob") b := New(WithPath(filename)) require.NoError(t, b.Open(context.Background())) - require.NoError(t, b.Init()) + require.NoError(t, b.Init(context.Background())) data := [][]byte{{0, 1, 2, 3}, {5, 6, 7, 8}} addr := oidtest.Address() diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/active.go b/pkg/local_object_storage/blobstor/blobovniczatree/active.go index 0ac15df82..dbaa7387a 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/active.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/active.go @@ -18,8 +18,8 @@ func (db *activeDB) Blobovnicza() *blobovnicza.Blobovnicza { return db.blz } -func (db *activeDB) Close() { - db.shDB.Close() +func (db *activeDB) Close(ctx context.Context) { + db.shDB.Close(ctx) } func (db *activeDB) SystemPath() string { @@ -73,12 +73,12 @@ func (m *activeDBManager) Open() { m.closed = false } -func (m *activeDBManager) Close() { +func (m *activeDBManager) Close(ctx context.Context) { m.levelToActiveDBGuard.Lock() defer m.levelToActiveDBGuard.Unlock() for _, db := range m.levelToActiveDB { - db.Close() + db.Close(ctx) } m.levelToActiveDB = make(map[string]*sharedDB) m.closed = true @@ -103,7 +103,7 @@ func (m *activeDBManager) getCurrentActiveIfOk(ctx context.Context, lvlPath stri } if blz.IsFull() { - db.Close() + db.Close(ctx) return nil, nil } @@ -168,10 +168,10 @@ func (m *activeDBManager) getNextSharedDB(ctx 
context.Context, lvlPath string) ( previous, updated := m.replace(lvlPath, next) if !updated && next != nil { - next.Close() // manager is closed, so don't hold active DB open + next.Close(ctx) // manager is closed, so don't hold active DB open } if updated && previous != nil { - previous.Close() + previous.Close(ctx) } return next, nil } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go index c909113c7..d9e99d0d1 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go @@ -167,7 +167,7 @@ func (b *Blobovniczas) Compressor() *compression.Config { } // SetReportErrorFunc implements common.Storage. -func (b *Blobovniczas) SetReportErrorFunc(f func(string, error)) { +func (b *Blobovniczas) SetReportErrorFunc(f func(context.Context, string, error)) { b.reportError = f } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/cache.go b/pkg/local_object_storage/blobstor/blobovniczatree/cache.go index e8016781a..04ff5120c 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/cache.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/cache.go @@ -32,7 +32,7 @@ func newDBCache(parentCtx context.Context, size int, ch := cache.NewCache[string, *sharedDB](). WithTTL(ttl).WithLRU().WithMaxKeys(size). WithOnEvicted(func(_ string, db *sharedDB) { - db.Close() + db.Close(parentCtx) }) ctx, cancel := context.WithCancel(parentCtx) res := &dbCache{ @@ -138,7 +138,7 @@ func (c *dbCache) create(ctx context.Context, path string) *sharedDB { return value } if added := c.put(path, value); !added { - value.Close() + value.Close(ctx) } return value } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go index cc8a52d03..ec9743b57 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go @@ -27,7 +27,7 @@ func TestBlobovniczaTree_Concurrency(t *testing.T) { require.NoError(t, st.Open(mode.ComponentReadWrite)) require.NoError(t, st.Init()) defer func() { - require.NoError(t, st.Close()) + require.NoError(t, st.Close(context.Background())) }() objGen := &testutil.SeqObjGenerator{ObjSize: 1} diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control.go b/pkg/local_object_storage/blobstor/blobovniczatree/control.go index 7c0a9edd6..c77df63bf 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/control.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/control.go @@ -50,7 +50,7 @@ func (b *Blobovniczas) initializeDBs(ctx context.Context) error { if err != nil { return err } - defer shBlz.Close() + defer shBlz.Close(egCtx) moveInfo, err := blz.ListMoveInfo(egCtx) if err != nil { @@ -80,9 +80,9 @@ func (b *Blobovniczas) openManagers() { } // Close implements common.Storage. 
-func (b *Blobovniczas) Close() error { +func (b *Blobovniczas) Close(ctx context.Context) error { b.dbCache.Close() // order important - b.activeDBManager.Close() + b.activeDBManager.Close(ctx) b.commondbManager.Close() return nil diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go index f0a32ded1..b26323bd0 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go @@ -51,7 +51,7 @@ func TestObjectsAvailableAfterDepthAndWidthEdit(t *testing.T) { require.NoError(t, err) require.EqualValues(t, obj35, gRes.Object) - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) // change depth and width blz = NewBlobovniczaTree( @@ -89,7 +89,7 @@ func TestObjectsAvailableAfterDepthAndWidthEdit(t *testing.T) { }) require.NoError(t, err) - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) // change depth and width back blz = NewBlobovniczaTree( @@ -127,5 +127,5 @@ func TestObjectsAvailableAfterDepthAndWidthEdit(t *testing.T) { require.NoError(t, err) require.EqualValues(t, obj52, gRes.Object) - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/count.go b/pkg/local_object_storage/blobstor/blobovniczatree/count.go index 1137b9eb2..b83849c77 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/count.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/count.go @@ -26,7 +26,7 @@ func (b *Blobovniczas) ObjectsCount(ctx context.Context) (uint64, error) { if err != nil { return true, err } - defer shDB.Close() + defer shDB.Close(ctx) result += blz.ObjectsCount() return false, nil diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go index b26e44144..8c2d7aa67 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go @@ -66,7 +66,7 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co if err != nil { return res, err } - defer shBlz.Close() + defer shBlz.Close(ctx) if res, err = b.deleteObject(ctx, blz, bPrm); err == nil { success = true @@ -114,7 +114,7 @@ func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicz if err != nil { return common.DeleteRes{}, err } - defer shBlz.Close() + defer shBlz.Close(ctx) return b.deleteObject(ctx, blz, prm) } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go index 528dbfed7..63d2f21e1 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go @@ -42,7 +42,7 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common if err != nil { return common.ExistsRes{}, err } - defer shBlz.Close() + defer shBlz.Close(ctx) exists, err := blz.Exists(ctx, prm.Address) return common.ExistsRes{Exists: exists}, err diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go index d6ffd8bce..5414140f0 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go 
@@ -27,7 +27,7 @@ func TestExistsInvalidStorageID(t *testing.T) { WithBlobovniczaSize(1<<20)) require.NoError(t, b.Open(mode.ComponentReadWrite)) require.NoError(t, b.Init()) - defer func() { require.NoError(t, b.Close()) }() + defer func() { require.NoError(t, b.Close(context.Background())) }() obj := blobstortest.NewObject(1024) addr := object.AddressOf(obj) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get.go b/pkg/local_object_storage/blobstor/blobovniczatree/get.go index fc017f22d..b7ef8d8a5 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/get.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/get.go @@ -53,7 +53,7 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G if err != nil { return res, err } - defer shBlz.Close() + defer shBlz.Close(ctx) res, err = b.getObject(ctx, blz, bPrm) if err == nil { @@ -100,7 +100,7 @@ func (b *Blobovniczas) getObjectFromLevel(ctx context.Context, prm blobovnicza.G if err != nil { return common.GetRes{}, err } - defer shBlz.Close() + defer shBlz.Close(ctx) return b.getObject(ctx, blz, prm) } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go index 384544d7b..b24f1b881 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go @@ -52,7 +52,7 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re if err != nil { return common.GetRangeRes{}, err } - defer shBlz.Close() + defer shBlz.Close(ctx) res, err := b.getObjectRange(ctx, blz, prm) if err == nil { @@ -108,7 +108,7 @@ func (b *Blobovniczas) getRangeFromLevel(ctx context.Context, prm common.GetRang if err != nil { return common.GetRangeRes{}, err } - defer shBlz.Close() + defer shBlz.Close(ctx) return b.getObjectRange(ctx, blz, prm) } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go index 049a61d72..b120c22f7 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go @@ -84,7 +84,7 @@ func (b *Blobovniczas) iterateBlobovniczas(ctx context.Context, ignoreErrors boo } return false, fmt.Errorf("could not open blobovnicza %s: %w", p, err) } - defer shBlz.Close() + defer shBlz.Close(ctx) err = f(p, blz) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go index 502202d68..b35e052cf 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go @@ -71,7 +71,7 @@ func (b *sharedDB) Open(ctx context.Context) (*blobovnicza.Blobovnicza, error) { if err := blz.Open(ctx); err != nil { return nil, fmt.Errorf("could not open blobovnicza %s: %w", b.path, err) } - if err := blz.Init(); err != nil { + if err := blz.Init(ctx); err != nil { return nil, fmt.Errorf("could not init blobovnicza %s: %w", b.path, err) } @@ -82,20 +82,20 @@ func (b *sharedDB) Open(ctx context.Context) (*blobovnicza.Blobovnicza, error) { return blz, nil } -func (b *sharedDB) Close() { +func (b *sharedDB) Close(ctx context.Context) { b.cond.L.Lock() defer b.cond.L.Unlock() if b.refCount == 0 { - b.log.Error(context.Background(), logs.AttemtToCloseAlreadyClosedBlobovnicza, zap.String("id", b.path)) + b.log.Error(ctx, 
logs.AttemtToCloseAlreadyClosedBlobovnicza, zap.String("id", b.path)) b.cond.Broadcast() return } if b.refCount == 1 { b.refCount = 0 - if err := b.blcza.Close(); err != nil { - b.log.Error(context.Background(), logs.BlobovniczatreeCouldNotCloseBlobovnicza, + if err := b.blcza.Close(ctx); err != nil { + b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza, zap.String("id", b.path), zap.String("error", err.Error()), ) @@ -111,7 +111,7 @@ func (b *sharedDB) Close() { } } -func (b *sharedDB) CloseAndRemoveFile() error { +func (b *sharedDB) CloseAndRemoveFile(ctx context.Context) error { b.cond.L.Lock() if b.refCount > 1 { b.cond.Wait() @@ -122,8 +122,8 @@ func (b *sharedDB) CloseAndRemoveFile() error { return errClosingClosedBlobovnicza } - if err := b.blcza.Close(); err != nil { - b.log.Error(context.Background(), logs.BlobovniczatreeCouldNotCloseBlobovnicza, + if err := b.blcza.Close(ctx); err != nil { + b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza, zap.String("id", b.path), zap.String("error", err.Error()), ) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/option.go b/pkg/local_object_storage/blobstor/blobovniczatree/option.go index b56251772..0e1b2022e 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/option.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/option.go @@ -1,6 +1,7 @@ package blobovniczatree import ( + "context" "io/fs" "time" @@ -20,7 +21,7 @@ type cfg struct { blzShallowWidth uint64 compression *compression.Config blzOpts []blobovnicza.Option - reportError func(string, error) // reportError is the function called when encountering disk errors. + reportError func(context.Context, string, error) // reportError is the function called when encountering disk errors. metrics Metrics waitBeforeDropDB time.Duration blzInitWorkerCount int @@ -54,7 +55,7 @@ func initConfig(c *cfg) { openedCacheExpInterval: defaultOpenedCacheInterval, blzShallowDepth: defaultBlzShallowDepth, blzShallowWidth: defaultBlzShallowWidth, - reportError: func(string, error) {}, + reportError: func(context.Context, string, error) {}, metrics: &noopMetrics{}, waitBeforeDropDB: defaultWaitBeforeDropDB, blzInitWorkerCount: defaultBlzInitWorkerCount, diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/put.go b/pkg/local_object_storage/blobstor/blobovniczatree/put.go index 8dff436d3..1678e578c 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/put.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/put.go @@ -80,7 +80,7 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error) active, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(ctx, lvlPath) if err != nil { if !isLogical(err) { - i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err) + i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err) } else { i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, zap.String("error", err.Error()), @@ -95,14 +95,14 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error) zap.String("trace_id", tracingPkg.GetTraceID(ctx))) return false, nil } - defer active.Close() + defer active.Close(ctx) i.AllFull = false _, err = active.Blobovnicza().Put(ctx, i.PutPrm) if err != nil { if !isLogical(err) { - i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err) + i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err) } else { i.B.log.Debug(ctx, 
logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", active.SystemPath()), diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go index e137bdd99..16ef2b180 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go @@ -186,7 +186,7 @@ func (b *Blobovniczas) rebuildBySize(ctx context.Context, path string, targetFil if err != nil { return false, err } - defer shDB.Close() + defer shDB.Close(ctx) fp := blz.FillPercent() // accepted fill percent defines as // |----|+++++++++++++++++|+++++++++++++++++|--------------- @@ -206,9 +206,9 @@ func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.M if shDBClosed { return } - shDB.Close() + shDB.Close(ctx) }() - dropTempFile, err := b.addRebuildTempFile(path) + dropTempFile, err := b.addRebuildTempFile(ctx, path) if err != nil { return 0, err } @@ -224,7 +224,7 @@ func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.M return migratedObjects, err } -func (b *Blobovniczas) addRebuildTempFile(path string) (func(), error) { +func (b *Blobovniczas) addRebuildTempFile(ctx context.Context, path string) (func(), error) { sysPath := filepath.Join(b.rootPath, path) sysPath = sysPath + rebuildSuffix _, err := os.OpenFile(sysPath, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, b.perm) @@ -233,7 +233,7 @@ func (b *Blobovniczas) addRebuildTempFile(path string) (func(), error) { } return func() { if err := os.Remove(sysPath); err != nil { - b.log.Warn(context.Background(), logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err)) + b.log.Warn(ctx, logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err)) } }, nil } @@ -330,7 +330,7 @@ func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDb *sharedDB) b.dbFilesGuard.Lock() defer b.dbFilesGuard.Unlock() - if err := shDb.CloseAndRemoveFile(); err != nil { + if err := shDb.CloseAndRemoveFile(ctx); err != nil { return false, err } b.commondbManager.CleanResources(path) @@ -370,7 +370,7 @@ func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore co if err != nil { return true, err } - defer shDB.Close() + defer shDB.Close(ctx) incompletedMoves, err := blz.ListMoveInfo(ctx) if err != nil { @@ -403,7 +403,7 @@ func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blob if err != nil { return err } - defer targetDB.Close() + defer targetDB.Close(ctx) existsInSource := true var gPrm blobovnicza.GetPrm @@ -480,7 +480,7 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool, target, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(ctx, lvlPath) if err != nil { if !isLogical(err) { - i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err) + i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err) } else { i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, zap.Error(err)) } @@ -491,7 +491,7 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool, i.B.log.Warn(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath)) return false, nil } - defer target.Close() + defer target.Close(ctx) i.AllFull = false @@ -503,7 +503,7 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool, TargetStorageID: targetStorageID.Bytes(), }); err != nil { if !isLogical(err) { - 
i.B.reportError(logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, err) + i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, err) } else { i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, zap.String("path", i.SourceSysPath), zap.Error(err)) } @@ -519,7 +519,7 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool, _, err = target.Blobovnicza().Put(ctx, putPrm) if err != nil { if !isLogical(err) { - i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err) + i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err) } else { i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", target.SystemPath()), zap.Error(err)) } @@ -535,7 +535,7 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool, deletePrm.SetAddress(i.Address) if _, err = i.Source.Delete(ctx, deletePrm); err != nil { if !isLogical(err) { - i.B.reportError(logs.BlobovniczatreeCouldNotDeleteFromSource, err) + i.B.reportError(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, err) } else { i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", i.SourceSysPath), zap.Error(err)) } @@ -544,7 +544,7 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool, if err = i.Source.DropMoveInfo(ctx, i.Address); err != nil { if !isLogical(err) { - i.B.reportError(logs.BlobovniczatreeCouldNotDropMoveInfo, err) + i.B.reportError(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, err) } else { i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", i.SourceSysPath), zap.Error(err)) } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go index bfea97afe..2f58624aa 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go @@ -36,7 +36,7 @@ func testRebuildFailoverOnlyMoveInfoSaved(t *testing.T) { blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db"))) require.NoError(t, blz.Open(context.Background())) - require.NoError(t, blz.Init()) + require.NoError(t, blz.Init(context.Background())) obj := blobstortest.NewObject(1024) data, err := obj.Marshal() @@ -53,7 +53,7 @@ func testRebuildFailoverOnlyMoveInfoSaved(t *testing.T) { TargetStorageID: []byte("0/0/0"), })) - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) _, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm) require.NoError(t, err) @@ -66,7 +66,7 @@ func testRebuildFailoverObjectSavedToTarget(t *testing.T) { blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db"))) require.NoError(t, blz.Open(context.Background())) - require.NoError(t, blz.Init()) + require.NoError(t, blz.Init(context.Background())) obj := blobstortest.NewObject(1024) data, err := obj.Marshal() @@ -83,19 +83,19 @@ func testRebuildFailoverObjectSavedToTarget(t *testing.T) { TargetStorageID: []byte("0/0/0"), })) - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) _, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm) require.NoError(t, err) blz = 
blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db"))) require.NoError(t, blz.Open(context.Background())) - require.NoError(t, blz.Init()) + require.NoError(t, blz.Init(context.Background())) _, err = blz.Put(context.Background(), pPrm) require.NoError(t, err) - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) testRebuildFailoverValidate(t, dir, obj, true) } @@ -106,7 +106,7 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) { blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db"))) require.NoError(t, blz.Open(context.Background())) - require.NoError(t, blz.Init()) + require.NoError(t, blz.Init(context.Background())) obj := blobstortest.NewObject(1024) data, err := obj.Marshal() @@ -117,14 +117,14 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) { TargetStorageID: []byte("0/0/0"), })) - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) _, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm) require.NoError(t, err) blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db"))) require.NoError(t, blz.Open(context.Background())) - require.NoError(t, blz.Init()) + require.NoError(t, blz.Init(context.Background())) var pPrm blobovnicza.PutPrm pPrm.SetAddress(object.AddressOf(obj)) @@ -132,7 +132,7 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) { _, err = blz.Put(context.Background(), pPrm) require.NoError(t, err) - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) testRebuildFailoverValidate(t, dir, obj, false) } @@ -170,11 +170,11 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object require.Equal(t, uint64(1), rRes.ObjectsMoved) require.Equal(t, uint64(0), rRes.FilesRemoved) - require.NoError(t, b.Close()) + require.NoError(t, b.Close(context.Background())) blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db"))) require.NoError(t, blz.Open(context.Background())) - require.NoError(t, blz.Init()) + require.NoError(t, blz.Init(context.Background())) moveInfo, err := blz.ListMoveInfo(context.Background()) require.NoError(t, err) @@ -185,11 +185,11 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object _, err = blz.Get(context.Background(), gPrm) require.True(t, client.IsErrObjectNotFound(err)) - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db"))) require.NoError(t, blz.Open(context.Background())) - require.NoError(t, blz.Init()) + require.NoError(t, blz.Init(context.Background())) moveInfo, err = blz.ListMoveInfo(context.Background()) require.NoError(t, err) @@ -203,7 +203,7 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object require.True(t, bytes.Equal([]byte("0/0/0"), metaStub.storageIDs[object.AddressOf(obj)])) } - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) _, err = os.Stat(filepath.Join(dir, "0", "0", "1.db.rebuild")) require.True(t, os.IsNotExist(err)) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go index dff4e9024..aae72b5ff 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go +++ 
b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go @@ -93,7 +93,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { require.NoError(t, err) } - require.NoError(t, b.Close()) + require.NoError(t, b.Close(context.Background())) }) t.Run("no rebuild single db", func(t *testing.T) { @@ -145,7 +145,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { require.NoError(t, err) } - require.NoError(t, b.Close()) + require.NoError(t, b.Close(context.Background())) }) t.Run("rebuild by fill percent", func(t *testing.T) { @@ -214,7 +214,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { require.NoError(t, err) } - require.NoError(t, b.Close()) + require.NoError(t, b.Close(context.Background())) }) t.Run("rebuild by overflow", func(t *testing.T) { @@ -251,7 +251,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { storageIDs: storageIDs, guard: &sync.Mutex{}, } - require.NoError(t, b.Close()) + require.NoError(t, b.Close(context.Background())) b = NewBlobovniczaTree( context.Background(), WithLogger(test.NewLogger(t)), @@ -284,7 +284,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { require.NoError(t, err) } - require.NoError(t, b.Close()) + require.NoError(t, b.Close(context.Background())) }) } @@ -318,7 +318,7 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) { storageIDs := make(map[oid.Address][]byte) storageIDs[prm.Address] = res.StorageID - require.NoError(t, b.Close()) + require.NoError(t, b.Close(context.Background())) b = NewBlobovniczaTree( context.Background(), @@ -355,7 +355,7 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) { require.NoError(t, err) } - require.NoError(t, b.Close()) + require.NoError(t, b.Close(context.Background())) } func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, targetDepth, targetWidth uint64, shouldMigrate bool) { @@ -399,7 +399,7 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta } require.NoError(t, eg.Wait()) - require.NoError(t, b.Close()) + require.NoError(t, b.Close(context.Background())) b = NewBlobovniczaTree( context.Background(), @@ -444,7 +444,7 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta require.NoError(t, err) } - require.NoError(t, b.Close()) + require.NoError(t, b.Close(context.Background())) } type storageIDUpdateStub struct { diff --git a/pkg/local_object_storage/blobstor/blobstor.go b/pkg/local_object_storage/blobstor/blobstor.go index 41c6cf161..f850f48b4 100644 --- a/pkg/local_object_storage/blobstor/blobstor.go +++ b/pkg/local_object_storage/blobstor/blobstor.go @@ -1,6 +1,7 @@ package blobstor import ( + "context" "sync" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" @@ -139,7 +140,7 @@ func WithUncompressableContentTypes(values []string) Option { // SetReportErrorFunc allows to provide a function to be called on disk errors. // This function MUST be called before Open. 
-func (b *BlobStor) SetReportErrorFunc(f func(string, error)) { +func (b *BlobStor) SetReportErrorFunc(f func(context.Context, string, error)) { for i := range b.storage { b.storage[i].Storage.SetReportErrorFunc(f) } diff --git a/pkg/local_object_storage/blobstor/blobstor_test.go b/pkg/local_object_storage/blobstor/blobstor_test.go index bed5e0eb9..6cc56fa3b 100644 --- a/pkg/local_object_storage/blobstor/blobstor_test.go +++ b/pkg/local_object_storage/blobstor/blobstor_test.go @@ -54,7 +54,7 @@ func TestCompression(t *testing.T) { WithCompressObjects(compress), WithStorages(defaultStorages(dir, smallSizeLimit))) require.NoError(t, bs.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, bs.Init()) + require.NoError(t, bs.Init(context.Background())) return bs } @@ -91,20 +91,20 @@ func TestCompression(t *testing.T) { blobStor := newBlobStor(t, false) testPut(t, blobStor, 0) testGet(t, blobStor, 0) - require.NoError(t, blobStor.Close()) + require.NoError(t, blobStor.Close(context.Background())) blobStor = newBlobStor(t, true) testGet(t, blobStor, 0) // get uncompressed object with compress enabled testPut(t, blobStor, 1) testGet(t, blobStor, 1) - require.NoError(t, blobStor.Close()) + require.NoError(t, blobStor.Close(context.Background())) blobStor = newBlobStor(t, false) testGet(t, blobStor, 0) // get old uncompressed object testGet(t, blobStor, 1) // get compressed object with compression disabled testPut(t, blobStor, 2) testGet(t, blobStor, 2) - require.NoError(t, blobStor.Close()) + require.NoError(t, blobStor.Close(context.Background())) } func TestBlobstor_needsCompression(t *testing.T) { @@ -130,7 +130,7 @@ func TestBlobstor_needsCompression(t *testing.T) { }, })) require.NoError(t, bs.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, bs.Init()) + require.NoError(t, bs.Init(context.Background())) return bs } @@ -192,7 +192,7 @@ func TestConcurrentPut(t *testing.T) { blobStor := New( WithStorages(defaultStorages(dir, smallSizeLimit))) require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, blobStor.Init()) + require.NoError(t, blobStor.Init(context.Background())) testGet := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) { res, err := b.Get(context.Background(), common.GetPrm{Address: object.AddressOf(obj)}) @@ -272,7 +272,7 @@ func TestConcurrentDelete(t *testing.T) { blobStor := New( WithStorages(defaultStorages(dir, smallSizeLimit))) require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, blobStor.Init()) + require.NoError(t, blobStor.Init(context.Background())) testPut := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) { var prm common.PutPrm diff --git a/pkg/local_object_storage/blobstor/common/storage.go b/pkg/local_object_storage/blobstor/common/storage.go index 4f3a20993..6ecef48cd 100644 --- a/pkg/local_object_storage/blobstor/common/storage.go +++ b/pkg/local_object_storage/blobstor/common/storage.go @@ -12,7 +12,7 @@ import ( type Storage interface { Open(mode mode.ComponentMode) error Init() error - Close() error + Close(context.Context) error Type() string Path() string @@ -23,7 +23,7 @@ type Storage interface { // SetReportErrorFunc allows to provide a function to be called on disk errors. // This function MUST be called before Open. 
- SetReportErrorFunc(f func(string, error)) + SetReportErrorFunc(f func(context.Context, string, error)) SetParentID(parentID string) Get(context.Context, GetPrm) (GetRes, error) diff --git a/pkg/local_object_storage/blobstor/control.go b/pkg/local_object_storage/blobstor/control.go index 43436b4eb..44685524f 100644 --- a/pkg/local_object_storage/blobstor/control.go +++ b/pkg/local_object_storage/blobstor/control.go @@ -50,8 +50,8 @@ var ErrInitBlobovniczas = errors.New("failure on blobovnicza initialization stag // If BlobStor is already initialized, no action is taken. // // Returns wrapped ErrInitBlobovniczas on blobovnicza tree's initializaiton failure. -func (b *BlobStor) Init() error { - b.log.Debug(context.Background(), logs.BlobstorInitializing) +func (b *BlobStor) Init(ctx context.Context) error { + b.log.Debug(ctx, logs.BlobstorInitializing) if err := b.compression.Init(); err != nil { return err @@ -67,14 +67,14 @@ func (b *BlobStor) Init() error { } // Close releases all internal resources of BlobStor. -func (b *BlobStor) Close() error { - b.log.Debug(context.Background(), logs.BlobstorClosing) +func (b *BlobStor) Close(ctx context.Context) error { + b.log.Debug(ctx, logs.BlobstorClosing) var firstErr error for i := range b.storage { - err := b.storage[i].Storage.Close() + err := b.storage[i].Storage.Close(ctx) if err != nil { - b.log.Info(context.Background(), logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error())) + b.log.Info(ctx, logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error())) if firstErr == nil { firstErr = err } diff --git a/pkg/local_object_storage/blobstor/exists_test.go b/pkg/local_object_storage/blobstor/exists_test.go index 783c198b2..7eb7d49bf 100644 --- a/pkg/local_object_storage/blobstor/exists_test.go +++ b/pkg/local_object_storage/blobstor/exists_test.go @@ -22,7 +22,7 @@ func TestExists(t *testing.T) { b := New(WithStorages(storages)) require.NoError(t, b.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, b.Init()) + require.NoError(t, b.Init(context.Background())) objects := []*objectSDK.Object{ testObject(smallSizeLimit / 2), diff --git a/pkg/local_object_storage/blobstor/fstree/control.go b/pkg/local_object_storage/blobstor/fstree/control.go index c21d79f09..2544729f7 100644 --- a/pkg/local_object_storage/blobstor/fstree/control.go +++ b/pkg/local_object_storage/blobstor/fstree/control.go @@ -1,6 +1,8 @@ package fstree import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" ) @@ -28,7 +30,7 @@ func (t *FSTree) Init() error { } // Close implements common.Storage. -func (t *FSTree) Close() error { +func (t *FSTree) Close(_ context.Context) error { t.metrics.Close() return nil } diff --git a/pkg/local_object_storage/blobstor/fstree/fstree.go b/pkg/local_object_storage/blobstor/fstree/fstree.go index 7f52762a7..53eb0395a 100644 --- a/pkg/local_object_storage/blobstor/fstree/fstree.go +++ b/pkg/local_object_storage/blobstor/fstree/fstree.go @@ -606,7 +606,7 @@ func (t *FSTree) Compressor() *compression.Config { } // SetReportErrorFunc implements common.Storage. -func (t *FSTree) SetReportErrorFunc(_ func(string, error)) { +func (t *FSTree) SetReportErrorFunc(_ func(context.Context, string, error)) { // Do nothing, FSTree can encounter only one error which is returned. 
} diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_test.go b/pkg/local_object_storage/blobstor/fstree/fstree_test.go index eb2126b6c..50dae46a7 100644 --- a/pkg/local_object_storage/blobstor/fstree/fstree_test.go +++ b/pkg/local_object_storage/blobstor/fstree/fstree_test.go @@ -52,7 +52,7 @@ func TestObjectCounter(t *testing.T) { require.Equal(t, uint64(0), size) defer func() { - require.NoError(t, fst.Close()) + require.NoError(t, fst.Close(context.Background())) }() addr := oidtest.Address() diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/control.go b/pkg/local_object_storage/blobstor/internal/blobstortest/control.go index 21c80b089..b8e88f84a 100644 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/control.go +++ b/pkg/local_object_storage/blobstor/internal/blobstortest/control.go @@ -19,7 +19,7 @@ func TestControl(t *testing.T, cons Constructor, minSize, maxSize uint64) { require.NoError(t, s.Init()) objects := prepare(t, 10, s, minSize, maxSize) - require.NoError(t, s.Close()) + require.NoError(t, s.Close(context.Background())) require.NoError(t, s.Open(mode.ComponentReadOnly)) for i := range objects { diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go b/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go index cf4e76513..3a163f6b1 100644 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go +++ b/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go @@ -15,7 +15,7 @@ func TestDelete(t *testing.T, cons Constructor, minSize, maxSize uint64) { s := cons(t) require.NoError(t, s.Open(mode.ComponentReadWrite)) require.NoError(t, s.Init()) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() objects := prepare(t, 4, s, minSize, maxSize) diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go b/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go index 08465ed5e..f34fe5f97 100644 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go +++ b/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go @@ -14,7 +14,7 @@ func TestExists(t *testing.T, cons Constructor, minSize, maxSize uint64) { s := cons(t) require.NoError(t, s.Open(mode.ComponentReadWrite)) require.NoError(t, s.Init()) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() objects := prepare(t, 1, s, minSize, maxSize) diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/get.go b/pkg/local_object_storage/blobstor/internal/blobstortest/get.go index d1f709b0c..af0f4b45d 100644 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/get.go +++ b/pkg/local_object_storage/blobstor/internal/blobstortest/get.go @@ -15,7 +15,7 @@ func TestGet(t *testing.T, cons Constructor, minSize, maxSize uint64) { s := cons(t) require.NoError(t, s.Open(mode.ComponentReadWrite)) require.NoError(t, s.Init()) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() objects := prepare(t, 2, s, minSize, maxSize) diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go b/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go index fcbeddac7..13032048c 100644 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go +++ b/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go @@ -17,7 +17,7 @@ func 
TestGetRange(t *testing.T, cons Constructor, minSize, maxSize uint64) { s := cons(t) require.NoError(t, s.Open(mode.ComponentReadWrite)) require.NoError(t, s.Init()) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() objects := prepare(t, 1, s, minSize, maxSize) diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go index 3a6c8b699..36b2c33f8 100644 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go +++ b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go @@ -14,7 +14,7 @@ func TestIterate(t *testing.T, cons Constructor, minSize, maxSize uint64) { s := cons(t) require.NoError(t, s.Open(mode.ComponentReadWrite)) require.NoError(t, s.Init()) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() objects := prepare(t, 10, s, minSize, maxSize) diff --git a/pkg/local_object_storage/blobstor/iterate_test.go b/pkg/local_object_storage/blobstor/iterate_test.go index 3c9457db2..ccfa510fe 100644 --- a/pkg/local_object_storage/blobstor/iterate_test.go +++ b/pkg/local_object_storage/blobstor/iterate_test.go @@ -33,9 +33,9 @@ func TestIterateObjects(t *testing.T) { require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite)) // initialize Blobstor - require.NoError(t, blobStor.Init()) + require.NoError(t, blobStor.Init(context.Background())) - defer blobStor.Close() + defer blobStor.Close(context.Background()) const objNum = 5 @@ -118,7 +118,7 @@ func TestIterate_IgnoreErrors(t *testing.T) { })} bs := New(bsOpts...) require.NoError(t, bs.Open(ctx, mode.ReadWrite)) - require.NoError(t, bs.Init()) + require.NoError(t, bs.Init(ctx)) nopHandler := func(e common.IterationElement) error { return nil diff --git a/pkg/local_object_storage/blobstor/memstore/control.go b/pkg/local_object_storage/blobstor/memstore/control.go index 83da52eb7..95a916662 100644 --- a/pkg/local_object_storage/blobstor/memstore/control.go +++ b/pkg/local_object_storage/blobstor/memstore/control.go @@ -1,6 +1,8 @@ package memstore import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" ) @@ -10,11 +12,11 @@ func (s *memstoreImpl) Open(mod mode.ComponentMode) error { return nil } -func (s *memstoreImpl) Init() error { return nil } -func (s *memstoreImpl) Close() error { return nil } -func (s *memstoreImpl) Type() string { return Type } -func (s *memstoreImpl) Path() string { return s.rootPath } -func (s *memstoreImpl) SetCompressor(cc *compression.Config) { s.compression = cc } -func (s *memstoreImpl) Compressor() *compression.Config { return s.compression } -func (s *memstoreImpl) SetReportErrorFunc(func(string, error)) {} -func (s *memstoreImpl) SetParentID(string) {} +func (s *memstoreImpl) Init() error { return nil } +func (s *memstoreImpl) Close(context.Context) error { return nil } +func (s *memstoreImpl) Type() string { return Type } +func (s *memstoreImpl) Path() string { return s.rootPath } +func (s *memstoreImpl) SetCompressor(cc *compression.Config) { s.compression = cc } +func (s *memstoreImpl) Compressor() *compression.Config { return s.compression } +func (s *memstoreImpl) SetReportErrorFunc(func(context.Context, string, error)) {} +func (s *memstoreImpl) SetParentID(string) {} diff --git 
a/pkg/local_object_storage/blobstor/memstore/memstore_test.go b/pkg/local_object_storage/blobstor/memstore/memstore_test.go index dd130e5f9..f904d4232 100644 --- a/pkg/local_object_storage/blobstor/memstore/memstore_test.go +++ b/pkg/local_object_storage/blobstor/memstore/memstore_test.go @@ -16,7 +16,7 @@ func TestSimpleLifecycle(t *testing.T) { s := New( WithRootPath("memstore"), ) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() require.NoError(t, s.Open(mode.ComponentReadWrite)) require.NoError(t, s.Init()) diff --git a/pkg/local_object_storage/blobstor/mode.go b/pkg/local_object_storage/blobstor/mode.go index f081ff645..af19e398e 100644 --- a/pkg/local_object_storage/blobstor/mode.go +++ b/pkg/local_object_storage/blobstor/mode.go @@ -20,10 +20,10 @@ func (b *BlobStor) SetMode(ctx context.Context, m mode.Mode) error { return nil } - err := b.Close() + err := b.Close(ctx) if err == nil { if err = b.openBlobStor(ctx, m); err == nil { - err = b.Init() + err = b.Init(ctx) } } if err != nil { diff --git a/pkg/local_object_storage/blobstor/perf_test.go b/pkg/local_object_storage/blobstor/perf_test.go index 1ac769e36..64e3c8da1 100644 --- a/pkg/local_object_storage/blobstor/perf_test.go +++ b/pkg/local_object_storage/blobstor/perf_test.go @@ -106,7 +106,7 @@ func BenchmarkSubstorageReadPerf(b *testing.B) { b.Run(fmt.Sprintf("%s-%s", stEntry.desc, tt.desc), func(b *testing.B) { objGen := tt.objGen() st := stEntry.open(b) - defer func() { require.NoError(b, st.Close()) }() + defer func() { require.NoError(b, st.Close(context.Background())) }() // Fill database var errG errgroup.Group @@ -161,7 +161,7 @@ func BenchmarkSubstorageWritePerf(b *testing.B) { b.Run(fmt.Sprintf("%s-%s", stEntry.desc, genEntry.desc), func(b *testing.B) { gen := genEntry.create() st := stEntry.open(b) - defer func() { require.NoError(b, st.Close()) }() + defer func() { require.NoError(b, st.Close(context.Background())) }() b.ResetTimer() b.RunParallel(func(pb *testing.PB) { @@ -200,7 +200,7 @@ func BenchmarkSubstorageIteratePerf(b *testing.B) { b.Run(fmt.Sprintf("%s-%s", stEntry.desc, tt.desc), func(b *testing.B) { objGen := tt.objGen() st := stEntry.open(b) - defer func() { require.NoError(b, st.Close()) }() + defer func() { require.NoError(b, st.Close(context.Background())) }() // Fill database for range tt.size { diff --git a/pkg/local_object_storage/blobstor/teststore/option.go b/pkg/local_object_storage/blobstor/teststore/option.go index bc0bed49d..fb1188751 100644 --- a/pkg/local_object_storage/blobstor/teststore/option.go +++ b/pkg/local_object_storage/blobstor/teststore/option.go @@ -1,6 +1,8 @@ package teststore import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" @@ -17,7 +19,7 @@ type cfg struct { Path func() string SetCompressor func(cc *compression.Config) Compressor func() *compression.Config - SetReportErrorFunc func(f func(string, error)) + SetReportErrorFunc func(f func(context.Context, string, error)) Get func(common.GetPrm) (common.GetRes, error) GetRange func(common.GetRangePrm) (common.GetRangeRes, error) @@ -51,7 +53,7 @@ func WithCompressor(f func() *compression.Config) Option { return func(c *cfg) { c.overrides.Compressor = f } } -func WithReportErrorFunc(f func(func(string, error))) Option { +func 
WithReportErrorFunc(f func(func(context.Context, string, error))) Option { return func(c *cfg) { c.overrides.SetReportErrorFunc = f } } diff --git a/pkg/local_object_storage/blobstor/teststore/teststore.go b/pkg/local_object_storage/blobstor/teststore/teststore.go index fea4a2d49..626ba0023 100644 --- a/pkg/local_object_storage/blobstor/teststore/teststore.go +++ b/pkg/local_object_storage/blobstor/teststore/teststore.go @@ -77,14 +77,14 @@ func (s *TestStore) Init() error { } } -func (s *TestStore) Close() error { +func (s *TestStore) Close(ctx context.Context) error { s.mu.RLock() defer s.mu.RUnlock() switch { case s.overrides.Close != nil: return s.overrides.Close() case s.st != nil: - return s.st.Close() + return s.st.Close(ctx) default: panic("unexpected storage call: Close()") } @@ -142,7 +142,7 @@ func (s *TestStore) Compressor() *compression.Config { } } -func (s *TestStore) SetReportErrorFunc(f func(string, error)) { +func (s *TestStore) SetReportErrorFunc(f func(context.Context, string, error)) { s.mu.RLock() defer s.mu.RUnlock() switch { diff --git a/pkg/local_object_storage/engine/container.go b/pkg/local_object_storage/engine/container.go index 6def02f12..cdee02159 100644 --- a/pkg/local_object_storage/engine/container.go +++ b/pkg/local_object_storage/engine/container.go @@ -44,9 +44,9 @@ func (r ListContainersRes) Containers() []cid.ID { // ContainerSize returns the sum of estimation container sizes among all shards. // // Returns an error if executions are blocked (see BlockExecution). -func (e *StorageEngine) ContainerSize(prm ContainerSizePrm) (res ContainerSizeRes, err error) { +func (e *StorageEngine) ContainerSize(ctx context.Context, prm ContainerSizePrm) (res ContainerSizeRes, err error) { err = e.execIfNotBlocked(func() error { - res, err = e.containerSize(prm) + res, err = e.containerSize(ctx, prm) return err }) @@ -54,12 +54,12 @@ func (e *StorageEngine) ContainerSize(prm ContainerSizePrm) (res ContainerSizeRe } // ContainerSize calls ContainerSize method on engine to calculate sum of estimation container sizes among all shards. 
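The ContainerSize hunks above show the convention this patch applies across the engine API: the exported entry point gains context.Context as its first parameter and forwards it to the unexported implementation, so nothing further down has to fall back to context.Background(). A self-contained sketch of that shape, using illustrative names rather than the real engine types:

package main

import (
	"context"
	"fmt"
)

type engine struct{}

// containerSize stands in for the unexported implementation; ctx now
// reaches the error-reporting path instead of being recreated there.
func (e *engine) containerSize(ctx context.Context, id string) (uint64, error) {
	if err := ctx.Err(); err != nil {
		return 0, err // cancellation propagates from the caller
	}
	return 42, nil
}

// ContainerSize mirrors the updated convention: ctx first, then the rest.
func ContainerSize(ctx context.Context, e *engine, id string) (uint64, error) {
	return e.containerSize(ctx, id)
}

func main() {
	size, err := ContainerSize(context.Background(), &engine{}, "cnr")
	fmt.Println(size, err)
}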
-func ContainerSize(e *StorageEngine, id cid.ID) (uint64, error) { +func ContainerSize(ctx context.Context, e *StorageEngine, id cid.ID) (uint64, error) { var prm ContainerSizePrm prm.SetContainerID(id) - res, err := e.ContainerSize(prm) + res, err := e.ContainerSize(ctx, prm) if err != nil { return 0, err } @@ -67,7 +67,7 @@ func ContainerSize(e *StorageEngine, id cid.ID) (uint64, error) { return res.Size(), nil } -func (e *StorageEngine) containerSize(prm ContainerSizePrm) (res ContainerSizeRes, err error) { +func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm) (res ContainerSizeRes, err error) { defer elapsed("EstimateContainerSize", e.metrics.AddMethodDuration)() e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { @@ -76,7 +76,7 @@ func (e *StorageEngine) containerSize(prm ContainerSizePrm) (res ContainerSizeRe csRes, err := sh.Shard.ContainerSize(csPrm) if err != nil { - e.reportShardError(sh, "can't get container size", err, + e.reportShardError(ctx, sh, "can't get container size", err, zap.Stringer("container_id", prm.cnr)) return false } @@ -121,7 +121,7 @@ func (e *StorageEngine) listContainers(ctx context.Context) (ListContainersRes, e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { res, err := sh.Shard.ListContainers(ctx, shard.ListContainersPrm{}) if err != nil { - e.reportShardError(sh, "can't get list of containers", err) + e.reportShardError(ctx, sh, "can't get list of containers", err) return false } diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go index e3cb21664..3d1ec1c93 100644 --- a/pkg/local_object_storage/engine/delete.go +++ b/pkg/local_object_storage/engine/delete.go @@ -98,7 +98,7 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, e return false } else { if !client.IsErrObjectNotFound(err) { - e.reportShardError(sh, "could not check object existence", err, zap.Stringer("address", prm.addr)) + e.reportShardError(ctx, sh, "could not check object existence", err, zap.Stringer("address", prm.addr)) } return false } @@ -114,7 +114,7 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, e _, err = sh.Inhume(ctx, shPrm) if err != nil { - e.reportShardError(sh, "could not inhume object in shard", err, zap.Stringer("address", prm.addr)) + e.reportShardError(ctx, sh, "could not inhume object in shard", err, zap.Stringer("address", prm.addr)) var target *apistatus.ObjectLocked locked.is = errors.As(err, &target) @@ -189,7 +189,7 @@ func (e *StorageEngine) deleteChunks( var objID oid.ID err := objID.ReadFromV2(chunk.ID) if err != nil { - e.reportShardError(sh, "could not delete EC chunk", err, zap.Stringer("address", prm.addr)) + e.reportShardError(ctx, sh, "could not delete EC chunk", err, zap.Stringer("address", prm.addr)) } addr.SetObject(objID) inhumePrm.MarkAsGarbage(addr) diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go index 6e30ee9de..029904046 100644 --- a/pkg/local_object_storage/engine/engine.go +++ b/pkg/local_object_storage/engine/engine.go @@ -99,24 +99,24 @@ func (e *StorageEngine) moveToDegraded(ctx context.Context, sh *shard.Shard, err if isMeta { err := sh.SetMode(ctx, mode.DegradedReadOnly) if err == nil { - log.Info(context.Background(), logs.EngineShardIsMovedInDegradedModeDueToErrorThreshold) + log.Info(ctx, logs.EngineShardIsMovedInDegradedModeDueToErrorThreshold) return } - log.Error(context.Background(), 
logs.EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly, + log.Error(ctx, logs.EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly, zap.Error(err)) } err := sh.SetMode(ctx, mode.ReadOnly) if err != nil { - log.Error(context.Background(), logs.EngineFailedToMoveShardInReadonlyMode, zap.Error(err)) + log.Error(ctx, logs.EngineFailedToMoveShardInReadonlyMode, zap.Error(err)) return } - log.Info(context.Background(), logs.EngineShardIsMovedInReadonlyModeDueToErrorThreshold) + log.Info(ctx, logs.EngineShardIsMovedInReadonlyModeDueToErrorThreshold) } // reportShardErrorByID increases shard error counter and logs an error. -func (e *StorageEngine) reportShardErrorByID(id string, msg string, err error) { +func (e *StorageEngine) reportShardErrorByID(ctx context.Context, id string, msg string, err error) { e.mtx.RLock() sh, ok := e.shards[id] e.mtx.RUnlock() @@ -125,19 +125,20 @@ func (e *StorageEngine) reportShardErrorByID(id string, msg string, err error) { return } - e.reportShardError(sh, msg, err) + e.reportShardError(ctx, sh, msg, err) } // reportShardError checks that the amount of errors doesn't exceed the configured threshold. // If it does, shard is set to read-only mode. func (e *StorageEngine) reportShardError( + ctx context.Context, sh hashedShard, msg string, err error, fields ...zap.Field, ) { if isLogical(err) { - e.log.Warn(context.Background(), msg, + e.log.Warn(ctx, msg, zap.Stringer("shard_id", sh.ID()), zap.String("error", err.Error())) return @@ -147,7 +148,7 @@ func (e *StorageEngine) reportShardError( e.metrics.IncErrorCounter(sh.ID().String()) sid := sh.ID() - e.log.Warn(context.Background(), msg, append([]zap.Field{ + e.log.Warn(ctx, msg, append([]zap.Field{ zap.Stringer("shard_id", sid), zap.Uint32("error count", errCount), zap.String("error", err.Error()), @@ -168,7 +169,7 @@ func (e *StorageEngine) reportShardError( default: // For background workers we can have a lot of such errors, // thus logging is done with DEBUG level. 
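reportShardError is the hub of this file's changes: once it accepts ctx, every log call inside it can use the caller's context rather than context.Background(), which is exactly what the contextcheck linter flags and what lets trace identifiers reach the log output. A minimal sketch of a context-aware reporter; the logger here is a stand-in, not the project's logger type:

package main

import (
	"context"
	"fmt"
)

type traceKey struct{}

type logger struct{}

// Warn reads trace information from ctx; with context.Background()
// this field would always be empty.
func (l logger) Warn(ctx context.Context, msg string) {
	trace, _ := ctx.Value(traceKey{}).(string)
	fmt.Printf("WARN trace_id=%q msg=%q\n", trace, msg)
}

func reportShardError(ctx context.Context, log logger, msg string, err error) {
	log.Warn(ctx, fmt.Sprintf("%s: %v", msg, err))
}

func main() {
	ctx := context.WithValue(context.Background(), traceKey{}, "abc123")
	reportShardError(ctx, logger{}, "could not get object from shard", fmt.Errorf("boom"))
}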
- e.log.Debug(context.Background(), logs.EngineModeChangeIsInProgressIgnoringSetmodeRequest, + e.log.Debug(ctx, logs.EngineModeChangeIsInProgressIgnoringSetmodeRequest, zap.Stringer("shard_id", sid), zap.Uint32("error_count", errCount)) } diff --git a/pkg/local_object_storage/engine/exists.go b/pkg/local_object_storage/engine/exists.go index d98101306..9d2b1c1b7 100644 --- a/pkg/local_object_storage/engine/exists.go +++ b/pkg/local_object_storage/engine/exists.go @@ -37,7 +37,7 @@ func (e *StorageEngine) exists(ctx context.Context, shPrm shard.ExistsPrm) (bool } if !client.IsErrObjectNotFound(err) { - e.reportShardError(sh, "could not check existence of object in shard", err, zap.Stringer("address", shPrm.Address)) + e.reportShardError(ctx, sh, "could not check existence of object in shard", err, zap.Stringer("address", shPrm.Address)) } return false } diff --git a/pkg/local_object_storage/engine/get.go b/pkg/local_object_storage/engine/get.go index 68b298a90..7396cd0ef 100644 --- a/pkg/local_object_storage/engine/get.go +++ b/pkg/local_object_storage/engine/get.go @@ -184,7 +184,7 @@ func (i *getShardIterator) tryGetWithMeta(ctx context.Context) { i.ObjectExpired = true return true default: - i.Engine.reportShardError(sh, "could not get object from shard", err, zap.Stringer("address", i.Address)) + i.Engine.reportShardError(ctx, sh, "could not get object from shard", err, zap.Stringer("address", i.Address)) return false } }) diff --git a/pkg/local_object_storage/engine/head.go b/pkg/local_object_storage/engine/head.go index d2e3cfd99..d6892f129 100644 --- a/pkg/local_object_storage/engine/head.go +++ b/pkg/local_object_storage/engine/head.go @@ -117,7 +117,7 @@ func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error) outError = new(apistatus.ObjectNotFound) return true default: - e.reportShardError(sh, "could not head object from shard", err, zap.Stringer("address", prm.addr)) + e.reportShardError(ctx, sh, "could not head object from shard", err, zap.Stringer("address", prm.addr)) return false } } diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go index 3c2159f0c..194946542 100644 --- a/pkg/local_object_storage/engine/inhume.go +++ b/pkg/local_object_storage/engine/inhume.go @@ -152,7 +152,7 @@ func (e *StorageEngine) inhumeAddr(ctx context.Context, addr oid.Address, prm sh var siErr *objectSDK.SplitInfoError var ecErr *objectSDK.ECInfoError if !(errors.As(err, &siErr) || errors.As(err, &ecErr)) { - e.reportShardError(sh, "could not check for presents in shard", err, zap.Stringer("address", addr)) + e.reportShardError(ctx, sh, "could not check for presents in shard", err, zap.Stringer("address", addr)) return } @@ -177,7 +177,7 @@ func (e *StorageEngine) inhumeAddr(ctx context.Context, addr oid.Address, prm sh return true } - e.reportShardError(sh, "could not inhume object in shard", err, zap.Stringer("address", addr)) + e.reportShardError(ctx, sh, "could not inhume object in shard", err, zap.Stringer("address", addr)) return false } @@ -203,7 +203,7 @@ func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, e e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) { locked, err = h.Shard.IsLocked(ctx, addr) if err != nil { - e.reportShardError(h, "can't check object's lockers", err, zap.Stringer("address", addr), + e.reportShardError(ctx, h, "can't check object's lockers", err, zap.Stringer("address", addr), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) outErr = err return false @@ 
-233,7 +233,7 @@ func (e *StorageEngine) GetLocked(ctx context.Context, addr oid.Address) ([]oid. e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) { ld, err := h.Shard.GetLocked(ctx, addr) if err != nil { - e.reportShardError(h, logs.EngineInterruptGettingLockers, err, zap.Stringer("address", addr), + e.reportShardError(ctx, h, logs.EngineInterruptGettingLockers, err, zap.Stringer("address", addr), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) outErr = err } @@ -275,7 +275,7 @@ func (e *StorageEngine) processExpiredLocks(ctx context.Context, epoch uint64, l func (e *StorageEngine) processDeletedLocks(ctx context.Context, lockers []oid.Address) { e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { - sh.HandleDeletedLocks(lockers) + sh.HandleDeletedLocks(ctx, lockers) select { case <-ctx.Done(): diff --git a/pkg/local_object_storage/engine/lock.go b/pkg/local_object_storage/engine/lock.go index ac8fa9c6f..07ab0a209 100644 --- a/pkg/local_object_storage/engine/lock.go +++ b/pkg/local_object_storage/engine/lock.go @@ -88,7 +88,7 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo var objID oid.ID err = objID.ReadFromV2(chunk.ID) if err != nil { - e.reportShardError(sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), + e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked)) return false } @@ -96,7 +96,7 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo } err = sh.Lock(ctx, idCnr, locker, eclocked) if err != nil { - e.reportShardError(sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), + e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked)) return false } @@ -108,7 +108,7 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo // do not lock it return true } - e.reportShardError(sh, "could not check locked object for presence in shard", err, zap.Stringer("container_id", idCnr), + e.reportShardError(ctx, sh, "could not check locked object for presence in shard", err, zap.Stringer("container_id", idCnr), zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked)) return } @@ -121,7 +121,7 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo err := sh.Lock(ctx, idCnr, locker, []oid.ID{locked}) if err != nil { - e.reportShardError(sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), + e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked)) var errIrregular *apistatus.LockNonRegularObject diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go index 7a6304384..66cf7e73b 100644 --- a/pkg/local_object_storage/engine/put.go +++ b/pkg/local_object_storage/engine/put.go @@ -180,7 +180,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti return } - e.reportShardError(sh, "could not put object to shard", err, zap.Stringer("address", addr)) + e.reportShardError(ctx, sh, "could not put object to shard", err, zap.Stringer("address", addr)) return } diff --git a/pkg/local_object_storage/engine/range.go b/pkg/local_object_storage/engine/range.go index 
45dbabba7..1cd75fb27 100644 --- a/pkg/local_object_storage/engine/range.go +++ b/pkg/local_object_storage/engine/range.go @@ -206,7 +206,7 @@ func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) { return true // stop, return it back default: - i.Engine.reportShardError(sh, "could not get object from shard", err, zap.Stringer("address", i.Address)) + i.Engine.reportShardError(ctx, sh, "could not get object from shard", err, zap.Stringer("address", i.Address)) return false } }) diff --git a/pkg/local_object_storage/engine/select.go b/pkg/local_object_storage/engine/select.go index a85891f0c..a16314211 100644 --- a/pkg/local_object_storage/engine/select.go +++ b/pkg/local_object_storage/engine/select.go @@ -75,7 +75,7 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { res, err := sh.Select(ctx, shPrm) if err != nil { - e.reportShardError(sh, "could not select objects from shard", err) + e.reportShardError(ctx, sh, "could not select objects from shard", err) return false } @@ -118,7 +118,7 @@ func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, erro e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { res, err := sh.List(ctx) // consider limit result of shard iterator if err != nil { - e.reportShardError(sh, "could not select objects from shard", err) + e.reportShardError(ctx, sh, "could not select objects from shard", err) } else { for _, addr := range res.AddressList() { // save only unique values if _, ok := uniqueMap[addr.EncodeToString()]; !ok { diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go index 2b94103e9..898f685ec 100644 --- a/pkg/local_object_storage/engine/shards.go +++ b/pkg/local_object_storage/engine/shards.go @@ -351,7 +351,7 @@ func (e *StorageEngine) DetachShards(ctx context.Context, ids []*shard.ID) error return logicerr.New("ids must be non-empty") } - deletedShards, err := e.deleteShards(ids) + deletedShards, err := e.deleteShards(ctx, ids) if err != nil { return err } @@ -400,7 +400,7 @@ func (e *StorageEngine) closeShards(ctx context.Context, deletedShards []hashedS // deleteShards deletes shards with specified ids from engine shard list // and releases all engine resources associated with shards. // Returns deleted shards or error if some shard could not be deleted. 
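DetachShards already received a context; extending deleteShards simply lengthens the chain so the final "shard has been removed" log line is written with the request context. A compact sketch of the resulting call chain, with illustrative types and the standard library's slog standing in for the project logger:

package main

import (
	"context"
	"log/slog"
)

type engine struct{ log *slog.Logger }

// deleteShards previously logged with context.Background(); now the
// caller's ctx travels one level deeper.
func (e *engine) deleteShards(ctx context.Context, ids []string) {
	for _, id := range ids {
		e.log.InfoContext(ctx, "shard has been removed", "id", id)
	}
}

func (e *engine) DetachShards(ctx context.Context, ids []string) error {
	e.deleteShards(ctx, ids)
	return nil
}

func main() {
	e := &engine{log: slog.Default()}
	_ = e.DetachShards(context.Background(), []string{"shard-1"})
}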
-func (e *StorageEngine) deleteShards(ids []*shard.ID) ([]hashedShard, error) { +func (e *StorageEngine) deleteShards(ctx context.Context, ids []*shard.ID) ([]hashedShard, error) { ss := make([]hashedShard, 0, len(ids)) e.mtx.Lock() @@ -432,7 +432,7 @@ func (e *StorageEngine) deleteShards(ids []*shard.ID) ([]hashedShard, error) { delete(e.shardPools, idStr) } - e.log.Info(context.Background(), logs.EngineShardHasBeenRemoved, + e.log.Info(ctx, logs.EngineShardHasBeenRemoved, zap.String("id", idStr)) } diff --git a/pkg/local_object_storage/engine/tree.go b/pkg/local_object_storage/engine/tree.go index 39122628f..23d4531f2 100644 --- a/pkg/local_object_storage/engine/tree.go +++ b/pkg/local_object_storage/engine/tree.go @@ -37,7 +37,7 @@ func (e *StorageEngine) TreeMove(ctx context.Context, d pilorama.CIDDescriptor, lm, err := lst[index].TreeMove(ctx, d, treeID, m) if err != nil { if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { - e.reportShardError(lst[index], "can't perform `TreeMove`", err, + e.reportShardError(ctx, lst[index], "can't perform `TreeMove`", err, zap.Stringer("cid", d.CID), zap.String("tree", treeID), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -71,7 +71,7 @@ func (e *StorageEngine) TreeAddByPath(ctx context.Context, d pilorama.CIDDescrip lm, err := lst[index].TreeAddByPath(ctx, d, treeID, attr, path, m) if err != nil { if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { - e.reportShardError(lst[index], "can't perform `TreeAddByPath`", err, + e.reportShardError(ctx, lst[index], "can't perform `TreeAddByPath`", err, zap.Stringer("cid", d.CID), zap.String("tree", treeID), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -100,7 +100,7 @@ func (e *StorageEngine) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID str err = lst[index].TreeApply(ctx, cnr, treeID, m, backgroundSync) if err != nil { if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { - e.reportShardError(lst[index], "can't perform `TreeApply`", err, + e.reportShardError(ctx, lst[index], "can't perform `TreeApply`", err, zap.Stringer("cid", cnr), zap.String("tree", treeID), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -132,7 +132,7 @@ func (e *StorageEngine) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID break } if !errors.Is(err, pilorama.ErrTreeNotFound) { - e.reportShardError(sh, "can't perform `TreeGetByPath`", err, + e.reportShardError(ctx, sh, "can't perform `TreeGetByPath`", err, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -165,7 +165,7 @@ func (e *StorageEngine) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID s break } if !errors.Is(err, pilorama.ErrTreeNotFound) { - e.reportShardError(sh, "can't perform `TreeGetMeta`", err, + e.reportShardError(ctx, sh, "can't perform `TreeGetMeta`", err, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -197,7 +197,7 @@ func (e *StorageEngine) TreeGetChildren(ctx context.Context, cid cidSDK.ID, tree break } if !errors.Is(err, pilorama.ErrTreeNotFound) { - e.reportShardError(sh, "can't perform `TreeGetChildren`", err, + e.reportShardError(ctx, sh, "can't perform `TreeGetChildren`", err, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -229,7 +229,7 @@ func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, break } if !errors.Is(err, 
pilorama.ErrTreeNotFound) { - e.reportShardError(sh, "can't perform `TreeSortedByFilename`", err, + e.reportShardError(ctx, sh, "can't perform `TreeSortedByFilename`", err, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -261,7 +261,7 @@ func (e *StorageEngine) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID break } if !errors.Is(err, pilorama.ErrTreeNotFound) { - e.reportShardError(sh, "can't perform `TreeGetOpLog`", err, + e.reportShardError(ctx, sh, "can't perform `TreeGetOpLog`", err, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -291,7 +291,7 @@ func (e *StorageEngine) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID stri break } if !errors.Is(err, pilorama.ErrTreeNotFound) && !errors.Is(err, shard.ErrReadOnlyMode) { - e.reportShardError(sh, "can't perform `TreeDrop`", err, + e.reportShardError(ctx, sh, "can't perform `TreeDrop`", err, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -321,7 +321,7 @@ func (e *StorageEngine) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, return nil, err } - e.reportShardError(sh, "can't perform `TreeList`", err, + e.reportShardError(ctx, sh, "can't perform `TreeList`", err, zap.Stringer("cid", cid), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -387,7 +387,7 @@ func (e *StorageEngine) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK err = lst[index].TreeUpdateLastSyncHeight(ctx, cid, treeID, height) if err != nil && !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { - e.reportShardError(lst[index], "can't update tree synchronization height", err, + e.reportShardError(ctx, lst[index], "can't update tree synchronization height", err, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -414,7 +414,7 @@ func (e *StorageEngine) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, t break } if !errors.Is(err, pilorama.ErrTreeNotFound) { - e.reportShardError(sh, "can't read tree synchronization height", err, + e.reportShardError(ctx, sh, "can't read tree synchronization height", err, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) diff --git a/pkg/local_object_storage/internal/storagetest/storage.go b/pkg/local_object_storage/internal/storagetest/storage.go index d8ac106dd..d46365296 100644 --- a/pkg/local_object_storage/internal/storagetest/storage.go +++ b/pkg/local_object_storage/internal/storagetest/storage.go @@ -12,8 +12,8 @@ import ( type Component interface { Open(context.Context, mode.Mode) error SetMode(context.Context, mode.Mode) error - Init() error - Close() error + Init(context.Context) error + Close(context.Context) error } // Constructor constructs storage component. @@ -59,18 +59,18 @@ func TestCloseAfterOpen(t *testing.T, cons Constructor) { // Use-case: irrecoverable error on some components, close everything. s := cons(t) require.NoError(t, s.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, s.Close()) + require.NoError(t, s.Close(context.Background())) }) t.Run("RO", func(t *testing.T) { // Use-case: irrecoverable error on some components, close everything. // Open in read-only must be done after the db is here. 
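The Component interface above is the contract behind most of the test churn in this patch: once Init and Close require a context, every storage exercised by these generic tests changes in lockstep. A toy implementation of the updated interface, hypothetical and for illustration only:

package main

import (
	"context"
	"fmt"
)

type Mode int

// Component mirrors the updated storagetest interface.
type Component interface {
	Open(context.Context, Mode) error
	SetMode(context.Context, Mode) error
	Init(context.Context) error
	Close(context.Context) error
}

// nopComponent ignores its context, which is fine: the point of the
// signature change is that callers can pass one, not that every
// implementation must use it.
type nopComponent struct{ closed bool }

func (c *nopComponent) Open(context.Context, Mode) error    { return nil }
func (c *nopComponent) SetMode(context.Context, Mode) error { return nil }
func (c *nopComponent) Init(context.Context) error          { return nil }
func (c *nopComponent) Close(context.Context) error         { c.closed = true; return nil }

func main() {
	var c Component = &nopComponent{}
	fmt.Println(c.Close(context.Background()))
}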
s := cons(t) require.NoError(t, s.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, s.Init()) - require.NoError(t, s.Close()) + require.NoError(t, s.Init(context.Background())) + require.NoError(t, s.Close(context.Background())) require.NoError(t, s.Open(context.Background(), mode.ReadOnly)) - require.NoError(t, s.Close()) + require.NoError(t, s.Close(context.Background())) }) } @@ -79,9 +79,9 @@ func TestCloseTwice(t *testing.T, cons Constructor) { // Use-case: move to maintenance mode twice, first time failed. s := cons(t) require.NoError(t, s.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, s.Init()) - require.NoError(t, s.Close()) - require.NoError(t, s.Close()) // already closed, no-op + require.NoError(t, s.Init(context.Background())) + require.NoError(t, s.Close(context.Background())) + require.NoError(t, s.Close(context.Background())) // already closed, no-op } // TestSetMode checks that any mode transition can be done safely. @@ -94,20 +94,20 @@ func TestSetMode(t *testing.T, cons Constructor, m mode.Mode) { require.NoError(t, s.SetMode(context.Background(), m)) t.Run("after open in RO", func(t *testing.T) { - require.NoError(t, s.Close()) + require.NoError(t, s.Close(context.Background())) require.NoError(t, s.Open(context.Background(), mode.ReadOnly)) require.NoError(t, s.SetMode(context.Background(), m)) }) - require.NoError(t, s.Close()) + require.NoError(t, s.Close(context.Background())) }) t.Run("after init", func(t *testing.T) { s := cons(t) // Use-case: normal node operation. require.NoError(t, s.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, s.Init()) + require.NoError(t, s.Init(context.Background())) require.NoError(t, s.SetMode(context.Background(), m)) - require.NoError(t, s.Close()) + require.NoError(t, s.Close(context.Background())) }) } @@ -115,8 +115,8 @@ func TestModeTransition(t *testing.T, cons Constructor, from, to mode.Mode) { // Use-case: normal node operation.
s := cons(t) require.NoError(t, s.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, s.Init()) + require.NoError(t, s.Init(context.Background())) require.NoError(t, s.SetMode(context.Background(), from)) require.NoError(t, s.SetMode(context.Background(), to)) - require.NoError(t, s.Close()) + require.NoError(t, s.Close(context.Background())) } diff --git a/pkg/local_object_storage/metabase/containers_test.go b/pkg/local_object_storage/metabase/containers_test.go index 110be68ad..8d8d91dc7 100644 --- a/pkg/local_object_storage/metabase/containers_test.go +++ b/pkg/local_object_storage/metabase/containers_test.go @@ -18,7 +18,7 @@ func TestDB_Containers(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() const N = 10 @@ -79,7 +79,7 @@ func TestDB_ContainersCount(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() const R, T, SG, L = 10, 11, 12, 13 // amount of object per type @@ -116,7 +116,7 @@ func TestDB_ContainerSize(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() const ( C = 3 diff --git a/pkg/local_object_storage/metabase/control.go b/pkg/local_object_storage/metabase/control.go index 54bea4204..07fa7e9cf 100644 --- a/pkg/local_object_storage/metabase/control.go +++ b/pkg/local_object_storage/metabase/control.go @@ -57,7 +57,7 @@ func (db *DB) openDB(ctx context.Context, mode mode.Mode) error { return fmt.Errorf("can't create dir %s for metabase: %w", db.info.Path, err) } - db.log.Debug(context.Background(), logs.MetabaseCreatedDirectoryForMetabase, zap.String("path", db.info.Path)) + db.log.Debug(ctx, logs.MetabaseCreatedDirectoryForMetabase, zap.String("path", db.info.Path)) if db.boltOptions == nil { opts := *bbolt.DefaultOptions @@ -78,9 +78,9 @@ func (db *DB) openBolt(ctx context.Context) error { db.boltDB.MaxBatchDelay = db.boltBatchDelay db.boltDB.MaxBatchSize = db.boltBatchSize - db.log.Debug(context.Background(), logs.MetabaseOpenedBoltDBInstanceForMetabase) + db.log.Debug(ctx, logs.MetabaseOpenedBoltDBInstanceForMetabase) - db.log.Debug(context.Background(), logs.MetabaseCheckingMetabaseVersion) + db.log.Debug(ctx, logs.MetabaseCheckingMetabaseVersion) return db.boltDB.View(func(tx *bbolt.Tx) error { // The safest way to check if the metabase is fresh is to check if it has no buckets. // However, shard info can be present. So here we check that the number of buckets is @@ -109,7 +109,7 @@ func (db *DB) openBolt(ctx context.Context) error { // // Does nothing if metabase has already been initialized and filled. To roll back the database to its initial state, // use Reset. -func (db *DB) Init() error { +func (db *DB) Init(_ context.Context) error { return metaerr.Wrap(db.init(false)) } @@ -205,7 +205,7 @@ func (db *DB) SyncCounters() error { // Close closes boltDB instance // and reports metabase metric. 
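Here the metabase accepts the context only to satisfy the shared storage signature: Init binds it to the blank identifier and Close (just below) leaves the parameter unnamed, both signalling that nothing inside uses it yet. A small sketch of the same idiom:

package main

import (
	"context"
	"fmt"
)

type DB struct{ open bool }

// Init takes a context to match the common interface but deliberately
// discards it; the blank name documents that choice.
func (db *DB) Init(_ context.Context) error {
	db.open = true
	return nil
}

// An unnamed parameter expresses the same thing.
func (db *DB) Close(context.Context) error {
	db.open = false
	return nil
}

func main() {
	db := &DB{}
	fmt.Println(db.Init(context.Background()), db.Close(context.Background()))
}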
-func (db *DB) Close() error { +func (db *DB) Close(context.Context) error { var err error if db.boltDB != nil { err = db.close() @@ -236,7 +236,7 @@ func (db *DB) Reload(ctx context.Context, opts ...Option) (bool, error) { defer db.modeMtx.Unlock() if db.mode.NoMetabase() || c.info.Path != "" && filepath.Clean(db.info.Path) != filepath.Clean(c.info.Path) { - if err := db.Close(); err != nil { + if err := db.Close(ctx); err != nil { return false, err } diff --git a/pkg/local_object_storage/metabase/control_test.go b/pkg/local_object_storage/metabase/control_test.go index 2a64881cb..d26402675 100644 --- a/pkg/local_object_storage/metabase/control_test.go +++ b/pkg/local_object_storage/metabase/control_test.go @@ -15,7 +15,7 @@ import ( func TestReset(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() err := db.Reset() require.NoError(t, err) diff --git a/pkg/local_object_storage/metabase/counter_test.go b/pkg/local_object_storage/metabase/counter_test.go index dccccd456..950385a29 100644 --- a/pkg/local_object_storage/metabase/counter_test.go +++ b/pkg/local_object_storage/metabase/counter_test.go @@ -22,7 +22,7 @@ func TestCounters(t *testing.T) { t.Run("defaults", func(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() c, err := db.ObjectCounters() require.NoError(t, err) require.Zero(t, c.Phy) @@ -37,7 +37,7 @@ func TestCounters(t *testing.T) { t.Run("put", func(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() oo := make([]*objectSDK.Object, 0, objCount) for range objCount { oo = append(oo, testutil.GenerateObject()) @@ -75,7 +75,7 @@ func TestCounters(t *testing.T) { t.Run("delete", func(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() oo := putObjs(t, db, objCount, false) exp := make(map[cid.ID]meta.ObjectCounters) @@ -120,7 +120,7 @@ func TestCounters(t *testing.T) { t.Run("inhume", func(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() oo := putObjs(t, db, objCount, false) exp := make(map[cid.ID]meta.ObjectCounters) @@ -185,7 +185,7 @@ func TestCounters(t *testing.T) { t.Run("put_split", func(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() parObj := testutil.GenerateObject() exp := make(map[cid.ID]meta.ObjectCounters) @@ -223,7 +223,7 @@ func TestCounters(t *testing.T) { t.Run("delete_split", func(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() oo := putObjs(t, db, objCount, true) exp := make(map[cid.ID]meta.ObjectCounters) @@ -265,7 +265,7 @@ func TestCounters(t *testing.T) { t.Run("inhume_split", func(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() oo := putObjs(t, db, objCount, true) exp := make(map[cid.ID]meta.ObjectCounters) @@ -329,7 +329,7 @@ func 
TestCounters(t *testing.T) { func TestDoublePut(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() obj := testutil.GenerateObject() exp := make(map[cid.ID]meta.ObjectCounters) @@ -387,7 +387,7 @@ func TestCounters_Expired(t *testing.T) { es := &epochState{epoch} db := newDB(t, meta.WithEpochState(es)) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() oo := make([]oid.Address, objCount) for i := range oo { diff --git a/pkg/local_object_storage/metabase/db_test.go b/pkg/local_object_storage/metabase/db_test.go index 0abb5ea89..40aedf489 100644 --- a/pkg/local_object_storage/metabase/db_test.go +++ b/pkg/local_object_storage/metabase/db_test.go @@ -61,7 +61,7 @@ func newDB(t testing.TB, opts ...meta.Option) *meta.DB { ) require.NoError(t, bdb.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, bdb.Init()) + require.NoError(t, bdb.Init(context.Background())) return bdb } diff --git a/pkg/local_object_storage/metabase/delete_ec_test.go b/pkg/local_object_storage/metabase/delete_ec_test.go index a25627990..9f1f91e14 100644 --- a/pkg/local_object_storage/metabase/delete_ec_test.go +++ b/pkg/local_object_storage/metabase/delete_ec_test.go @@ -30,8 +30,8 @@ func TestDeleteECObject_WithoutSplit(t *testing.T) { ) require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init()) - defer func() { require.NoError(t, db.Close()) }() + require.NoError(t, db.Init(context.Background())) + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() ecChunk := oidtest.ID() @@ -194,8 +194,8 @@ func testDeleteECObjectWithSplit(t *testing.T, chunksCount int, withLinking bool ) require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init()) - defer func() { require.NoError(t, db.Close()) }() + require.NoError(t, db.Init(context.Background())) + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() ecChunks := make([]oid.ID, chunksCount) diff --git a/pkg/local_object_storage/metabase/delete_meta_test.go b/pkg/local_object_storage/metabase/delete_meta_test.go index cdfe2a203..0329e3a73 100644 --- a/pkg/local_object_storage/metabase/delete_meta_test.go +++ b/pkg/local_object_storage/metabase/delete_meta_test.go @@ -23,8 +23,8 @@ func TestPutDeleteIndexAttributes(t *testing.T) { }...) 
require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init()) - defer func() { require.NoError(t, db.Close()) }() + require.NoError(t, db.Init(context.Background())) + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() obj1 := testutil.GenerateObjectWithCID(cnr) diff --git a/pkg/local_object_storage/metabase/delete_test.go b/pkg/local_object_storage/metabase/delete_test.go index fe5f7833b..c0762a377 100644 --- a/pkg/local_object_storage/metabase/delete_test.go +++ b/pkg/local_object_storage/metabase/delete_test.go @@ -18,7 +18,7 @@ import ( func TestDB_Delete(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() parent := testutil.GenerateObjectWithCID(cnr) @@ -65,7 +65,7 @@ func TestDB_Delete(t *testing.T) { func TestDeleteAllChildren(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -103,7 +103,7 @@ func TestDeleteAllChildren(t *testing.T) { func TestGraveOnlyDelete(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() addr := oidtest.Address() @@ -116,7 +116,7 @@ func TestGraveOnlyDelete(t *testing.T) { func TestExpiredObject(t *testing.T) { db := newDB(t, meta.WithEpochState(epochState{currEpoch})) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() checkExpiredObjects(t, db, func(exp, nonExp *objectSDK.Object) { // removing expired object should be error-free @@ -128,7 +128,7 @@ func TestExpiredObject(t *testing.T) { func TestDelete(t *testing.T) { db := newDB(t, meta.WithEpochState(epochState{currEpoch})) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() for range 10 { @@ -170,7 +170,7 @@ func TestDelete(t *testing.T) { func TestDeleteDropsGCMarkIfObjectNotFound(t *testing.T) { db := newDB(t, meta.WithEpochState(epochState{currEpoch})) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() addr := oidtest.Address() diff --git a/pkg/local_object_storage/metabase/exists_test.go b/pkg/local_object_storage/metabase/exists_test.go index 1e4148eba..3045e17f1 100644 --- a/pkg/local_object_storage/metabase/exists_test.go +++ b/pkg/local_object_storage/metabase/exists_test.go @@ -1,6 +1,7 @@ package meta_test import ( + "context" "errors" "testing" @@ -18,7 +19,7 @@ const currEpoch = 1000 func TestDB_Exists(t *testing.T) { db := newDB(t, meta.WithEpochState(epochState{currEpoch})) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() t.Run("no object", func(t *testing.T) { nonExist := testutil.GenerateObject() diff --git a/pkg/local_object_storage/metabase/expired_test.go b/pkg/local_object_storage/metabase/expired_test.go index bb98745ee..495c1eee7 100644 --- a/pkg/local_object_storage/metabase/expired_test.go +++ b/pkg/local_object_storage/metabase/expired_test.go @@ -13,7 +13,7 @@ import ( func TestDB_SelectExpired(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() 
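Each metabase test now ends with the same defer wrapping db.Close(context.Background()). If that repetition grows tiresome, a hypothetical helper (not part of this patch) could register the close via t.Cleanup instead:

package metatest

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
)

type closer interface {
	Close(context.Context) error
}

// closeOnCleanup is a hypothetical convenience wrapper; it asserts the
// Close error after the test and all of its subtests have finished.
func closeOnCleanup(t *testing.T, c closer) {
	t.Helper()
	t.Cleanup(func() {
		require.NoError(t, c.Close(context.Background()))
	})
}

With such a helper, a constructor like newDB could call closeOnCleanup(t, db) once and the per-test defers would disappear.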
containerID1 := cidtest.ID() diff --git a/pkg/local_object_storage/metabase/get_test.go b/pkg/local_object_storage/metabase/get_test.go index f0caaea70..c93d2c992 100644 --- a/pkg/local_object_storage/metabase/get_test.go +++ b/pkg/local_object_storage/metabase/get_test.go @@ -25,7 +25,7 @@ import ( func TestDB_Get(t *testing.T) { db := newDB(t, meta.WithEpochState(epochState{currEpoch})) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() raw := testutil.GenerateObject() @@ -219,7 +219,7 @@ func benchmarkGet(b *testing.B, numOfObj int) { meta.WithMaxBatchSize(batchSize), meta.WithMaxBatchDelay(10*time.Millisecond), ) - defer func() { require.NoError(b, db.Close()) }() + defer func() { require.NoError(b, db.Close(context.Background())) }() addrs := make([]oid.Address, 0, numOfObj) for range numOfObj { @@ -253,7 +253,7 @@ func benchmarkGet(b *testing.B, numOfObj int) { }) }) - require.NoError(b, db.Close()) + require.NoError(b, db.Close(context.Background())) require.NoError(b, os.RemoveAll(b.Name())) db, addrs = prepareDb(1) diff --git a/pkg/local_object_storage/metabase/graveyard_test.go b/pkg/local_object_storage/metabase/graveyard_test.go index b9c6ce28c..99794e609 100644 --- a/pkg/local_object_storage/metabase/graveyard_test.go +++ b/pkg/local_object_storage/metabase/graveyard_test.go @@ -15,7 +15,7 @@ import ( func TestDB_IterateDeletedObjects_EmptyDB(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() var counter int var iterGravePRM meta.GraveyardIterationPrm @@ -42,7 +42,7 @@ func TestDB_IterateDeletedObjects_EmptyDB(t *testing.T) { func TestDB_Iterate_OffsetNotFound(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() obj1 := testutil.GenerateObject() obj2 := testutil.GenerateObject() @@ -113,7 +113,7 @@ func TestDB_Iterate_OffsetNotFound(t *testing.T) { func TestDB_IterateDeletedObjects(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() // generate and put 4 objects @@ -202,7 +202,7 @@ func TestDB_IterateDeletedObjects(t *testing.T) { func TestDB_IterateOverGraveyard_Offset(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() // generate and put 4 objects @@ -303,7 +303,7 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) { func TestDB_IterateOverGarbage_Offset(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() // generate and put 4 objects obj1 := testutil.GenerateObject() @@ -395,7 +395,7 @@ func TestDB_IterateOverGarbage_Offset(t *testing.T) { func TestDB_DropGraves(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() // generate and put 2 objects diff --git a/pkg/local_object_storage/metabase/inhume_ec_test.go b/pkg/local_object_storage/metabase/inhume_ec_test.go index 32e412c79..180713287 100644 --- a/pkg/local_object_storage/metabase/inhume_ec_test.go +++ b/pkg/local_object_storage/metabase/inhume_ec_test.go @@ -25,8 +25,8 @@ func 
TestInhumeECObject(t *testing.T) { ) require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init()) - defer func() { require.NoError(t, db.Close()) }() + require.NoError(t, db.Init(context.Background())) + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() ecChunk := oidtest.ID() diff --git a/pkg/local_object_storage/metabase/inhume_test.go b/pkg/local_object_storage/metabase/inhume_test.go index 277316f7b..786d10396 100644 --- a/pkg/local_object_storage/metabase/inhume_test.go +++ b/pkg/local_object_storage/metabase/inhume_test.go @@ -17,7 +17,7 @@ import ( func TestDB_Inhume(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() raw := testutil.GenerateObject() testutil.AddAttribute(raw, "foo", "bar") @@ -37,7 +37,7 @@ func TestDB_Inhume(t *testing.T) { func TestInhumeTombOnTomb(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() var ( err error @@ -107,7 +107,7 @@ func TestInhumeTombOnTomb(t *testing.T) { func TestInhumeLocked(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() locked := oidtest.Address() diff --git a/pkg/local_object_storage/metabase/iterators_test.go b/pkg/local_object_storage/metabase/iterators_test.go index 777a94a6f..53c47af6b 100644 --- a/pkg/local_object_storage/metabase/iterators_test.go +++ b/pkg/local_object_storage/metabase/iterators_test.go @@ -18,7 +18,7 @@ import ( func TestDB_IterateExpired(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() const epoch = 13 @@ -70,7 +70,7 @@ func putWithExpiration(t *testing.T, db *meta.DB, typ objectSDK.Type, expiresAt func TestDB_IterateCoveredByTombstones(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() ts := oidtest.Address() diff --git a/pkg/local_object_storage/metabase/list_test.go b/pkg/local_object_storage/metabase/list_test.go index 203802ec0..6f6463071 100644 --- a/pkg/local_object_storage/metabase/list_test.go +++ b/pkg/local_object_storage/metabase/list_test.go @@ -33,7 +33,7 @@ func listWithCursorPrepareDB(b *testing.B) *meta.DB { db := newDB(b, meta.WithMaxBatchSize(1), meta.WithBoltDBOptions(&bbolt.Options{ NoSync: true, })) // faster single-thread generation - defer func() { require.NoError(b, db.Close()) }() + defer func() { require.NoError(b, db.Close(context.Background())) }() obj := testutil.GenerateObject() for i := range 100_000 { // should be a multiple of all batch sizes @@ -71,7 +71,7 @@ func TestLisObjectsWithCursor(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() const ( containers = 5 @@ -163,7 +163,7 @@ func TestAddObjectDuringListingWithCursor(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() const total = 5 @@ -225,7 +225,7 @@ func TestIterateOver(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { 
require.NoError(t, db.Close(context.Background())) }() const total uint64 = 5 for _, typ := range []objectSDK.Type{objectSDK.TypeRegular, objectSDK.TypeTombstone, objectSDK.TypeLock} { diff --git a/pkg/local_object_storage/metabase/lock_test.go b/pkg/local_object_storage/metabase/lock_test.go index 9601cb2be..341ff9ad1 100644 --- a/pkg/local_object_storage/metabase/lock_test.go +++ b/pkg/local_object_storage/metabase/lock_test.go @@ -21,7 +21,7 @@ func TestDB_Lock(t *testing.T) { cnr := cidtest.ID() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() t.Run("empty locked list", func(t *testing.T) { require.Panics(t, func() { _ = db.Lock(context.Background(), cnr, oid.ID{}, nil) }) @@ -187,7 +187,7 @@ func TestDB_Lock_Expired(t *testing.T) { es := &epochState{e: 123} db := newDB(t, meta.WithEpochState(es)) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() // put an object addr := putWithExpiration(t, db, objectSDK.TypeRegular, 124) @@ -209,7 +209,7 @@ func TestDB_IsLocked(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() // existing and locked objs diff --git a/pkg/local_object_storage/metabase/mode.go b/pkg/local_object_storage/metabase/mode.go index f99262be4..ce6ae1004 100644 --- a/pkg/local_object_storage/metabase/mode.go +++ b/pkg/local_object_storage/metabase/mode.go @@ -18,7 +18,7 @@ func (db *DB) SetMode(ctx context.Context, m mode.Mode) error { } if !db.mode.NoMetabase() { - if err := db.Close(); err != nil { + if err := db.Close(ctx); err != nil { return fmt.Errorf("can't set metabase mode (old=%s, new=%s): %w", db.mode, m, err) } } @@ -28,7 +28,7 @@ func (db *DB) SetMode(ctx context.Context, m mode.Mode) error { } else { err := db.openDB(ctx, m) if err == nil && !m.ReadOnly() { - err = db.Init() + err = db.Init(ctx) } if err != nil { return fmt.Errorf("can't set metabase mode (old=%s, new=%s): %w", db.mode, m, err) diff --git a/pkg/local_object_storage/metabase/mode_test.go b/pkg/local_object_storage/metabase/mode_test.go index 1b9f60055..28b42283f 100644 --- a/pkg/local_object_storage/metabase/mode_test.go +++ b/pkg/local_object_storage/metabase/mode_test.go @@ -25,13 +25,13 @@ func Test_Mode(t *testing.T) { require.NoError(t, bdb.Open(context.Background(), mode.DegradedReadOnly)) require.Nil(t, bdb.boltDB) - require.NoError(t, bdb.Init()) + require.NoError(t, bdb.Init(context.Background())) require.Nil(t, bdb.boltDB) - require.NoError(t, bdb.Close()) + require.NoError(t, bdb.Close(context.Background())) require.NoError(t, bdb.Open(context.Background(), mode.Degraded)) require.Nil(t, bdb.boltDB) - require.NoError(t, bdb.Init()) + require.NoError(t, bdb.Init(context.Background())) require.Nil(t, bdb.boltDB) - require.NoError(t, bdb.Close()) + require.NoError(t, bdb.Close(context.Background())) } diff --git a/pkg/local_object_storage/metabase/put_test.go b/pkg/local_object_storage/metabase/put_test.go index 914f5ef06..f37ed4cf2 100644 --- a/pkg/local_object_storage/metabase/put_test.go +++ b/pkg/local_object_storage/metabase/put_test.go @@ -46,7 +46,7 @@ func BenchmarkPut(b *testing.B) { db := newDB(b, meta.WithMaxBatchDelay(time.Millisecond*10), meta.WithMaxBatchSize(runtime.NumCPU())) - defer func() { require.NoError(b, db.Close()) }() + defer func() { require.NoError(b, db.Close(context.Background())) }() 
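The SetMode hunk above completes a cycle that now runs under one context: close the current instance, reopen in the new mode, re-init, with the same ctx threaded through every step so tracing and cancellation span the whole transition. A compact sketch of that close/reopen/init sequence, again with illustrative types:

package main

import (
	"context"
	"fmt"
)

type Mode string

type DB struct{ mode Mode }

func (db *DB) openDB(ctx context.Context, m Mode) error { db.mode = m; return nil }
func (db *DB) Init(ctx context.Context) error           { return nil }
func (db *DB) Close(ctx context.Context) error          { return nil }

// SetMode mirrors the updated flow: one ctx drives Close, openDB and Init.
func (db *DB) SetMode(ctx context.Context, m Mode) error {
	if err := db.Close(ctx); err != nil {
		return fmt.Errorf("can't set mode (old=%s, new=%s): %w", db.mode, m, err)
	}
	if err := db.openDB(ctx, m); err != nil {
		return err
	}
	return db.Init(ctx)
}

func main() {
	db := &DB{mode: "read-write"}
	fmt.Println(db.SetMode(context.Background(), "read-only"))
}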
// Ensure the benchmark is bound by CPU and not waiting batch-delay time. b.SetParallelism(1) @@ -68,7 +68,7 @@ func BenchmarkPut(b *testing.B) { db := newDB(b, meta.WithMaxBatchDelay(time.Millisecond*10), meta.WithMaxBatchSize(1)) - defer func() { require.NoError(b, db.Close()) }() + defer func() { require.NoError(b, db.Close(context.Background())) }() var index atomic.Int64 index.Store(-1) objs := prepareObjects(b.N) @@ -84,7 +84,7 @@ func BenchmarkPut(b *testing.B) { func TestDB_PutBlobovniczaUpdate(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() raw1 := testutil.GenerateObject() storageID := []byte{1, 2, 3, 4} diff --git a/pkg/local_object_storage/metabase/reset_test.go b/pkg/local_object_storage/metabase/reset_test.go index 993079dce..45faecc13 100644 --- a/pkg/local_object_storage/metabase/reset_test.go +++ b/pkg/local_object_storage/metabase/reset_test.go @@ -30,9 +30,9 @@ func TestResetDropsContainerBuckets(t *testing.T) { ) require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init()) + require.NoError(t, db.Init(context.Background())) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() for idx := range 100 { var putPrm PutPrm diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go index 5cc25a9f6..251a2b2e9 100644 --- a/pkg/local_object_storage/metabase/select_test.go +++ b/pkg/local_object_storage/metabase/select_test.go @@ -38,7 +38,7 @@ func testSelectUserAttributes(t *testing.T, index bool) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -200,7 +200,7 @@ func TestDB_SelectRootPhyParent(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -354,7 +354,7 @@ func TestDB_SelectInhume(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -385,7 +385,7 @@ func TestDB_SelectPayloadHash(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -456,7 +456,7 @@ func TestDB_SelectWithSlowFilters(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -564,7 +564,7 @@ func TestDB_SelectObjectID(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -680,7 +680,7 @@ func TestDB_SelectOwnerID(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -786,7 +786,7 @@ func TestDB_SelectECWithFastAndSlowFilters(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() 
ecChunk1 := oidtest.ID()
@@ -865,7 +865,7 @@ func TestDB_RawHead_SplitInfo(t *testing.T) {
)
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -906,7 +906,7 @@ func testGetRawSplitInfo(t *testing.T, cnr cidSDK.ID, ids *transformer.AccessIde
t.Run("first last, then linking", func(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
require.NoError(t, metaPut(db, lastPart, nil))
require.NoError(t, metaPut(db, linking, nil))
@@ -930,7 +930,7 @@ func testGetRawSplitInfo(t *testing.T, cnr cidSDK.ID, ids *transformer.AccessIde
})
t.Run("first linking, then last", func(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
require.NoError(t, metaPut(db, linking, nil))
require.NoError(t, metaPut(db, lastPart, nil))
@@ -954,7 +954,7 @@ func testGetRawSplitInfo(t *testing.T, cnr cidSDK.ID, ids *transformer.AccessIde
})
t.Run("only last part", func(t *testing.T) {
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
require.NoError(t, metaPut(db, lastPart, nil))
@@ -984,7 +984,7 @@ func TestDB_SelectSplitID_EC(t *testing.T) {
)
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -1052,7 +1052,7 @@ func TestDB_SelectSplitID(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -1109,7 +1109,7 @@ func TestDB_SelectContainerID(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
cnr := cidtest.ID()
@@ -1157,7 +1157,7 @@ func TestDB_SelectContainerID(t *testing.T) {
func BenchmarkSelect(b *testing.B) {
const objCount = 1000
db := newDB(b)
- defer func() { require.NoError(b, db.Close()) }()
+ defer func() { require.NoError(b, db.Close(context.Background())) }()
cid := cidtest.ID()
@@ -1199,7 +1199,7 @@ func TestExpiredObjects(t *testing.T) {
t.Parallel()
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
checkExpiredObjects(t, db, func(exp, nonExp *objectSDK.Object) {
cidExp, _ := exp.ContainerID()
diff --git a/pkg/local_object_storage/metabase/storage_id_test.go b/pkg/local_object_storage/metabase/storage_id_test.go
index a86e42bd2..fef680159 100644
--- a/pkg/local_object_storage/metabase/storage_id_test.go
+++ b/pkg/local_object_storage/metabase/storage_id_test.go
@@ -15,7 +15,7 @@ func TestDB_StorageID(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
raw1 := testutil.GenerateObject()
raw2 := testutil.GenerateObject()
@@ -79,7 +79,7 @@ func TestPutWritecacheDataRace(t *testing.T) {
t.Parallel()
db := newDB(t)
- defer func() { require.NoError(t, db.Close()) }()
+ defer func() { require.NoError(t, db.Close(context.Background())) }()
putStorageID := []byte{1, 2, 3}
wcStorageID := []byte{1, 2, 3, 4, 5}
diff --git a/pkg/local_object_storage/metabase/upgrade_test.go b/pkg/local_object_storage/metabase/upgrade_test.go
index 70c94adc5..3f546376b 100644
--- a/pkg/local_object_storage/metabase/upgrade_test.go
+++ b/pkg/local_object_storage/metabase/upgrade_test.go
@@ -34,12 +34,12 @@ func TestUpgradeV2ToV3(t *testing.T) {
}()
db := New(WithPath(path), WithEpochState(epochState{e: 1000}), WithLogger(test.NewLogger(t)))
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.ErrorIs(t, db.Init(), ErrOutdatedVersion)
- require.NoError(t, db.Close())
+ require.ErrorIs(t, db.Init(context.Background()), ErrOutdatedVersion)
+ require.NoError(t, db.Close(context.Background()))
require.NoError(t, Upgrade(context.Background(), path, true, &testContainerInfoProvider{}, t.Log))
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init())
- require.NoError(t, db.Close())
+ require.NoError(t, db.Init(context.Background()))
+ require.NoError(t, db.Close(context.Background()))
fmt.Println()
}
@@ -87,7 +87,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
db.boltDB.AllocSize = allocSize
db.boltDB.NoSync = true
- require.NoError(t, db.Init())
+ require.NoError(t, db.Init(context.Background()))
containers := make([]cid.ID, containersCount)
for i := range containers {
containers[i] = cidtest.ID()
@@ -218,5 +218,5 @@ func TestGenerateMetabaseFile(t *testing.T) {
require.NoError(t, eg.Wait())
db.log.Info(ctx, "simple objects locked by locks generated")
require.NoError(t, db.boltDB.Sync())
- require.NoError(t, db.Close())
+ require.NoError(t, db.Close(context.Background()))
}
diff --git a/pkg/local_object_storage/metabase/version_test.go b/pkg/local_object_storage/metabase/version_test.go
index 509e72479..b373fb32e 100644
--- a/pkg/local_object_storage/metabase/version_test.go
+++ b/pkg/local_object_storage/metabase/version_test.go
@@ -45,15 +45,15 @@ func TestVersion(t *testing.T) {
t.Run("simple", func(t *testing.T) {
db := newDB(t)
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init())
+ require.NoError(t, db.Init(context.Background()))
check(t, db)
- require.NoError(t, db.Close())
+ require.NoError(t, db.Close(context.Background()))
t.Run("reopen", func(t *testing.T) {
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init())
+ require.NoError(t, db.Init(context.Background()))
check(t, db)
- require.NoError(t, db.Close())
+ require.NoError(t, db.Close(context.Background()))
})
})
t.Run("old data", func(t *testing.T) {
db := newDB(t)
require.NoError(t, db.SetShardID(context.Background(), []byte{1, 2, 3, 4}, mode.ReadWrite))
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init())
+ require.NoError(t, db.Init(context.Background()))
check(t, db)
- require.NoError(t, db.Close())
+ require.NoError(t, db.Close(context.Background()))
})
t.Run("invalid version", func(t *testing.T) {
db := newDB(t)
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error {
return updateVersion(tx, version+1)
}))
- require.NoError(t, db.Close())
+ require.NoError(t, db.Close(context.Background()))
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.Error(t, db.Init())
- require.NoError(t, db.Close())
+ require.Error(t, db.Init(context.Background()))
+ require.NoError(t, db.Close(context.Background()))
t.Run("reset", func(t *testing.T) {
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
require.NoError(t, db.Reset())
check(t, db)
- require.NoError(t, db.Close())
+ require.NoError(t, db.Close(context.Background()))
})
})
t.Run("incompleted upgrade", func(t *testing.T) {
db := newDB(t)
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, db.Init())
- require.NoError(t, db.Close())
+ require.NoError(t, db.Init(context.Background()))
+ require.NoError(t, db.Close(context.Background()))
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error {
return tx.Bucket(shardInfoBucket).Put(upgradeKey, zeroValue)
}))
- require.ErrorIs(t, db.Init(), ErrIncompletedUpgrade)
- require.NoError(t, db.Close())
+ require.ErrorIs(t, db.Init(context.Background()), ErrIncompletedUpgrade)
+ require.NoError(t, db.Close(context.Background()))
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error {
return tx.Bucket(shardInfoBucket).Delete(upgradeKey)
}))
- require.NoError(t, db.Init())
- require.NoError(t, db.Close())
+ require.NoError(t, db.Init(context.Background()))
+ require.NoError(t, db.Close(context.Background()))
})
}
diff --git a/pkg/local_object_storage/pilorama/bench_test.go b/pkg/local_object_storage/pilorama/bench_test.go
index 22b951a41..3156751f2 100644
--- a/pkg/local_object_storage/pilorama/bench_test.go
+++ b/pkg/local_object_storage/pilorama/bench_test.go
@@ -28,8 +28,8 @@ func BenchmarkCreate(b *testing.B) {
WithPath(filepath.Join(tmpDir, "test.db")),
WithMaxBatchSize(runtime.GOMAXPROCS(0)))
require.NoError(b, f.Open(context.Background(), mode.ReadWrite))
- require.NoError(b, f.Init())
- defer func() { require.NoError(b, f.Close()) }()
+ require.NoError(b, f.Init(context.Background()))
+ defer func() { require.NoError(b, f.Close(context.Background())) }()
b.Cleanup(func() {
require.NoError(b, os.RemoveAll(tmpDir))
diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go
index a370fb36a..a60d5bfc8 100644
--- a/pkg/local_object_storage/pilorama/boltdb.go
+++ b/pkg/local_object_storage/pilorama/boltdb.go
@@ -91,7 +91,7 @@ func NewBoltForest(opts ...Option) ForestStorage {
return &b
}
-func (t *boltForest) SetMode(_ context.Context, m mode.Mode) error {
+func (t *boltForest) SetMode(ctx context.Context, m mode.Mode) error {
t.modeMtx.Lock()
defer t.modeMtx.Unlock()
@@ -99,10 +99,10 @@ func (t *boltForest) SetMode(_ context.Context, m mode.Mode) error {
return nil
}
- err := t.Close()
+ err := t.Close(ctx)
if err == nil && !m.NoMetabase() {
if err = t.openBolt(m); err == nil {
- err = t.Init()
+ err = t.Init(ctx)
}
}
if err != nil {
@@ -148,7 +148,7 @@ func (t *boltForest) openBolt(m mode.Mode) error {
return nil
}
-func (t *boltForest) Init() error {
+func (t *boltForest) Init(context.Context) error {
if t.mode.NoMetabase() || t.db.IsReadOnly() {
return nil
}
@@ -162,7 +162,7 @@ func (t *boltForest) Init() error {
})
}
-func (t *boltForest) Close() error {
+func (t *boltForest) Close(context.Context) error {
var err error
if t.db != nil {
err = t.db.Close()
diff --git a/pkg/local_object_storage/pilorama/forest.go b/pkg/local_object_storage/pilorama/forest.go
index c27694795..ed603287a 100644
--- a/pkg/local_object_storage/pilorama/forest.go
+++ b/pkg/local_object_storage/pilorama/forest.go
@@ -111,7 +111,7 @@ func (f *memoryForest) TreeApply(_ context.Context, cnr cid.ID, treeID string, o
return s.Apply(op)
}
-func (f *memoryForest) Init() error {
+func (f *memoryForest) Init(context.Context) error {
return nil
}
@@ -123,7 +123,7 @@ func (f *memoryForest) SetMode(context.Context, mode.Mode) error {
return nil
}
-func (f *memoryForest) Close() error {
+func (f *memoryForest) Close(context.Context) error {
return nil
}
func (f *memoryForest) SetParentID(string) {}
diff --git a/pkg/local_object_storage/pilorama/forest_test.go b/pkg/local_object_storage/pilorama/forest_test.go
index fbcc53fb3..de56fc82b 100644
--- a/pkg/local_object_storage/pilorama/forest_test.go
+++ b/pkg/local_object_storage/pilorama/forest_test.go
@@ -30,7 +30,7 @@ var providers = []struct {
{"inmemory", func(t testing.TB, _ ...Option) ForestStorage {
f := NewMemoryForest()
require.NoError(t, f.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, f.Init())
+ require.NoError(t, f.Init(context.Background()))
return f
}},
{"bbolt", func(t testing.TB, opts ...Option) ForestStorage {
@@ -40,7 +40,7 @@ var providers = []struct {
WithMaxBatchSize(1),
}, opts...)...)
require.NoError(t, f.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, f.Init())
+ require.NoError(t, f.Init(context.Background()))
return f
}},
}
@@ -61,7 +61,7 @@ func TestForest_TreeMove(t *testing.T) {
}
func testForestTreeMove(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
@@ -125,7 +125,7 @@ func TestMemoryForest_TreeGetChildren(t *testing.T) {
}
func testForestTreeGetChildren(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
@@ -247,7 +247,7 @@ func TestForest_TreeSortedIterationBugWithSkip(t *testing.T) {
}
func testForestTreeSortedIterationBugWithSkip(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
@@ -302,7 +302,7 @@ func TestForest_TreeSortedIteration(t *testing.T) {
}
func testForestTreeSortedIteration(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
@@ -361,7 +361,7 @@ func TestForest_TreeSortedFilename(t *testing.T) {
}
func testForestTreeSortedByFilename(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
const controlAttr = "control_attr"
cid := cidtest.ID()
@@ -453,7 +453,7 @@ func TestForest_TreeDrop(t *testing.T) {
}
func testForestTreeDrop(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
const cidsSize = 3
var cids [cidsSize]cidSDK.ID
@@ -523,7 +523,7 @@ func TestForest_TreeAdd(t *testing.T) {
}
func testForestTreeAdd(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
@@ -571,7 +571,7 @@ func TestForest_TreeAddByPath(t *testing.T) {
}
func testForestTreeAddByPath(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
@@ -709,7 +709,7 @@ func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Optio
t.Run("add a child, then insert a parent removal", func(t *testing.T) {
s := constructor(t)
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
testApply(t, s, 10, 0, Meta{Time: 1, Items: []KeyValue{{"grand", []byte{1}}}})
@@ -722,7 +722,7 @@ func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Optio
})
t.Run("add a child to non-existent parent, then add a parent", func(t *testing.T) {
s := constructor(t)
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
meta := Meta{Time: 1, Items: []KeyValue{{"child", []byte{3}}}}
testApply(t, s, 11, 10, meta)
@@ -792,7 +792,7 @@ func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _
t.Run("expected", func(t *testing.T) {
s := constructor(t)
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
for i := range logs {
require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[i], false))
@@ -801,7 +801,7 @@ func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _
})
s := constructor(t, WithMaxBatchSize(batchSize))
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[0], false))
for range batchSize {
@@ -842,7 +842,7 @@ func testForestTreeGetOpLog(t *testing.T, constructor func(t testing.TB, _ ...Op
}
s := constructor(t)
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
t.Run("empty log, no panic", func(t *testing.T) {
_, err := s.TreeGetOpLog(context.Background(), cid, treeID, 0)
@@ -883,7 +883,7 @@ func TestForest_TreeExists(t *testing.T) {
func testForestTreeExists(t *testing.T, constructor func(t testing.TB, opts ...Option) ForestStorage) {
s := constructor(t)
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
checkExists := func(t *testing.T, expected bool, cid cidSDK.ID, treeID string) {
actual, err := s.TreeExists(context.Background(), cid, treeID)
@@ -942,7 +942,7 @@ func TestApplyTricky1(t *testing.T) {
for i := range providers {
t.Run(providers[i].name, func(t *testing.T) {
s := providers[i].construct(t)
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
for i := range ops {
require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false))
@@ -1005,7 +1005,7 @@ func TestApplyTricky2(t *testing.T) {
for i := range providers {
t.Run(providers[i].name, func(t *testing.T) {
s := providers[i].construct(t)
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
for i := range ops {
require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false))
@@ -1115,7 +1115,7 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _
treeID := "version"
expected := constructor(t, WithNoSync(true))
- defer func() { require.NoError(t, expected.Close()) }()
+ defer func() { require.NoError(t, expected.Close(context.Background())) }()
for i := range ops {
require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false))
@@ -1145,7 +1145,7 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _
wg.Wait()
compareForests(t, expected, actual, cid, treeID, nodeCount)
- require.NoError(t, actual.Close())
+ require.NoError(t, actual.Close(context.Background()))
}
}
@@ -1163,7 +1163,7 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ..
treeID := "version"
expected := constructor(t, WithNoSync(true))
- defer func() { require.NoError(t, expected.Close()) }()
+ defer func() { require.NoError(t, expected.Close(context.Background())) }()
for i := range ops {
require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false))
@@ -1179,7 +1179,7 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ..
require.NoError(t, actual.TreeApply(context.Background(), cid, treeID, &ops[i], false))
}
compareForests(t, expected, actual, cid, treeID, nodeCount)
- require.NoError(t, actual.Close())
+ require.NoError(t, actual.Close(context.Background()))
}
}
@@ -1197,7 +1197,7 @@ func BenchmarkApplySequential(b *testing.B) {
b.Run("batchsize="+strconv.Itoa(bs), func(b *testing.B) {
r := mrand.New(mrand.NewSource(time.Now().Unix()))
s := providers[i].construct(b, WithMaxBatchSize(bs))
- defer func() { require.NoError(b, s.Close()) }()
+ defer func() { require.NoError(b, s.Close(context.Background())) }()
benchmarkApply(b, s, func(opCount int) []Move {
ops := make([]Move, opCount)
@@ -1233,7 +1233,7 @@ func BenchmarkApplyReorderLast(b *testing.B) {
b.Run("batchsize="+strconv.Itoa(bs), func(b *testing.B) {
r := mrand.New(mrand.NewSource(time.Now().Unix()))
s := providers[i].construct(b, WithMaxBatchSize(bs))
- defer func() { require.NoError(b, s.Close()) }()
+ defer func() { require.NoError(b, s.Close(context.Background())) }()
benchmarkApply(b, s, func(opCount int) []Move {
ops := make([]Move, opCount)
@@ -1290,7 +1290,7 @@ func TestTreeGetByPath(t *testing.T) {
}
func testTreeGetByPath(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
cid := cidtest.ID()
treeID := "version"
@@ -1369,7 +1369,7 @@ func TestGetTrees(t *testing.T) {
}
func testTreeGetTrees(t *testing.T, s ForestStorage) {
- defer func() { require.NoError(t, s.Close()) }()
+ defer func() { require.NoError(t, s.Close(context.Background())) }()
cids := []cidSDK.ID{cidtest.ID(), cidtest.ID()}
d := CIDDescriptor{Position: 0, Size: 1}
@@ -1415,7 +1415,7 @@ func TestTreeLastSyncHeight(t *testing.T) {
}
func testTreeLastSyncHeight(t *testing.T, f ForestStorage) {
- defer func() { require.NoError(t, f.Close()) }()
+ defer func() { require.NoError(t, f.Close(context.Background())) }()
cnr := cidtest.ID()
treeID := "someTree"
diff --git a/pkg/local_object_storage/pilorama/interface.go b/pkg/local_object_storage/pilorama/interface.go
index d905a0197..d0948b2b0 100644
--- a/pkg/local_object_storage/pilorama/interface.go
+++ b/pkg/local_object_storage/pilorama/interface.go
@@ -60,9 +60,9 @@ type Forest interface {
type ForestStorage interface {
// DumpInfo returns information about the pilorama.
DumpInfo() Info
- Init() error
+ Init(context.Context) error
Open(context.Context, mode.Mode) error
- Close() error
+ Close(context.Context) error
SetMode(context.Context, mode.Mode) error
SetParentID(id string)
Forest
diff --git a/pkg/local_object_storage/pilorama/mode_test.go b/pkg/local_object_storage/pilorama/mode_test.go
index 01d3da9f0..0c042aa56 100644
--- a/pkg/local_object_storage/pilorama/mode_test.go
+++ b/pkg/local_object_storage/pilorama/mode_test.go
@@ -19,13 +19,13 @@ func Test_Mode(t *testing.T) {
require.NoError(t, f.Open(context.Background(), mode.DegradedReadOnly))
require.Nil(t, f.(*boltForest).db)
- require.NoError(t, f.Init())
+ require.NoError(t, f.Init(context.Background()))
require.Nil(t, f.(*boltForest).db)
- require.NoError(t, f.Close())
+ require.NoError(t, f.Close(context.Background()))
require.NoError(t, f.Open(context.Background(), mode.Degraded))
require.Nil(t, f.(*boltForest).db)
- require.NoError(t, f.Init())
+ require.NoError(t, f.Init(context.Background()))
require.Nil(t, f.(*boltForest).db)
- require.NoError(t, f.Close())
+ require.NoError(t, f.Close(context.Background()))
}
diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go
index 056737a9d..5a9e26155 100644
--- a/pkg/local_object_storage/shard/control.go
+++ b/pkg/local_object_storage/shard/control.go
@@ -91,8 +91,8 @@ func (s *Shard) Open(ctx context.Context) error {
type metabaseSynchronizer Shard
-func (x *metabaseSynchronizer) Init() error {
- ctx, span := tracing.StartSpanFromContext(context.TODO(), "metabaseSynchronizer.Init")
+func (x *metabaseSynchronizer) Init(ctx context.Context) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "metabaseSynchronizer.Init")
defer span.End()
return (*Shard)(x).refillMetabase(ctx)
@@ -140,7 +140,7 @@ func (s *Shard) Init(ctx context.Context) error {
func (s *Shard) initializeComponents(ctx context.Context, m mode.Mode) error {
type initializer interface {
- Init() error
+ Init(context.Context) error
}
var components []initializer
@@ -170,7 +170,7 @@ func (s *Shard) initializeComponents(ctx context.Context, m mode.Mode) error {
}
for _, component := range components {
- if err := component.Init(); err != nil {
+ if err := component.Init(ctx); err != nil {
if component == s.metaBase {
if errors.Is(err, meta.ErrOutdatedVersion) || errors.Is(err, meta.ErrIncompletedUpgrade) {
return fmt.Errorf("metabase initialization: %w", err)
@@ -368,7 +368,7 @@ func (s *Shard) Close(ctx context.Context) error {
if s.rb != nil {
s.rb.Stop(ctx, s.log)
}
- var components []interface{ Close() error }
+ var components []interface{ Close(context.Context) error }
if s.pilorama != nil {
components = append(components, s.pilorama)
@@ -384,7 +384,7 @@ func (s *Shard) Close(ctx context.Context) error {
var lastErr error
for _, component := range components {
- if err := component.Close(); err != nil {
+ if err := component.Close(ctx); err != nil {
lastErr = err
s.log.Error(ctx, logs.ShardCouldNotCloseShardComponent, zap.Error(err))
}
@@ -392,7 +392,7 @@ func (s *Shard) Close(ctx context.Context) error {
// If Init/Open was unsuccessful, gc can be nil.
if s.gc != nil {
- s.gc.stop()
+ s.gc.stop(ctx)
}
return lastErr
@@ -437,7 +437,7 @@ func (s *Shard) Reload(ctx context.Context, opts ...Option) error {
// config after the node was updated.
err = s.refillMetabase(ctx)
} else {
- err = s.metaBase.Init()
+ err = s.metaBase.Init(ctx)
}
if err != nil {
s.log.Error(ctx, logs.ShardCantInitializeMetabaseMoveToADegradedreadonlyMode, zap.Error(err))
diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go
index 6fabf7103..a987d3d14 100644
--- a/pkg/local_object_storage/shard/gc.go
+++ b/pkg/local_object_storage/shard/gc.go
@@ -213,7 +213,7 @@ func (gc *gc) handleEvent(ctx context.Context, event Event) {
}
}
-func (gc *gc) releaseResources() {
+func (gc *gc) releaseResources(ctx context.Context) {
if gc.workerPool != nil {
gc.workerPool.Release()
}
@@ -222,7 +222,7 @@ func (gc *gc) releaseResources() {
// because it is possible that we close it earlier than we stop writing.
// It is ok to keep it open.
- gc.log.Debug(context.Background(), logs.ShardGCIsStopped)
+ gc.log.Debug(ctx, logs.ShardGCIsStopped)
}
func (gc *gc) tickRemover(ctx context.Context) {
@@ -236,10 +236,10 @@ func (gc *gc) tickRemover(ctx context.Context) {
case <-ctx.Done():
// Context canceled earlier than we start to close shards.
// It makes sense to stop collecting garbage by context too.
- gc.releaseResources()
+ gc.releaseResources(ctx)
return
case <-gc.stopChannel:
- gc.releaseResources()
+ gc.releaseResources(ctx)
return
case <-timer.C:
startedAt := time.Now()
@@ -258,12 +258,12 @@ func (gc *gc) tickRemover(ctx context.Context) {
}
}
-func (gc *gc) stop() {
+func (gc *gc) stop(ctx context.Context) {
gc.onceStop.Do(func() {
close(gc.stopChannel)
})
- gc.log.Info(context.Background(), logs.ShardWaitingForGCWorkersToStop)
+ gc.log.Info(ctx, logs.ShardWaitingForGCWorkersToStop)
gc.wg.Wait()
}
@@ -730,14 +730,14 @@ func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unloc
}
// HandleDeletedLocks unlocks all objects which were locked by lockers.
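// Illustrative sketch, not part of the patch (the wiring mirrors the test
// diffs just below, and the names are theirs): with the new signature, shard
// options simply forward the caller's context instead of letting the handler
// fall back to context.Background():
//
//	WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) {
//		sh.HandleDeletedLocks(ctx, addresses)
//	}),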
-func (s *Shard) HandleDeletedLocks(lockers []oid.Address) {
+func (s *Shard) HandleDeletedLocks(ctx context.Context, lockers []oid.Address) {
if s.GetMode().NoMetabase() {
return
}
_, err := s.metaBase.FreeLockedBy(lockers)
if err != nil {
- s.log.Warn(context.Background(), logs.ShardFailureToUnlockObjects,
+ s.log.Warn(ctx, logs.ShardFailureToUnlockObjects,
zap.String("error", err.Error()),
)
diff --git a/pkg/local_object_storage/shard/gc_internal_test.go b/pkg/local_object_storage/shard/gc_internal_test.go
index 39073a529..9998bbae2 100644
--- a/pkg/local_object_storage/shard/gc_internal_test.go
+++ b/pkg/local_object_storage/shard/gc_internal_test.go
@@ -61,8 +61,8 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
meta.WithEpochState(epochState{}),
),
WithPiloramaOptions(pilorama.WithPath(filepath.Join(rootPath, "pilorama"))),
- WithDeletedLockCallback(func(_ context.Context, addresses []oid.Address) {
- sh.HandleDeletedLocks(addresses)
+ WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) {
+ sh.HandleDeletedLocks(ctx, addresses)
}),
WithExpiredLocksCallback(func(ctx context.Context, epoch uint64, a []oid.Address) {
sh.HandleExpiredLocks(ctx, epoch, a)
diff --git a/pkg/local_object_storage/shard/lock_test.go b/pkg/local_object_storage/shard/lock_test.go
index 7da8b8c28..5caf3641f 100644
--- a/pkg/local_object_storage/shard/lock_test.go
+++ b/pkg/local_object_storage/shard/lock_test.go
@@ -53,8 +53,8 @@ func TestShard_Lock(t *testing.T) {
meta.WithPath(filepath.Join(rootPath, "meta")),
meta.WithEpochState(epochState{}),
),
- WithDeletedLockCallback(func(_ context.Context, addresses []oid.Address) {
- sh.HandleDeletedLocks(addresses)
+ WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) {
+ sh.HandleDeletedLocks(ctx, addresses)
}),
}
diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go
index 3a06fe8a7..1eb7f14d0 100644
--- a/pkg/local_object_storage/shard/shard.go
+++ b/pkg/local_object_storage/shard/shard.go
@@ -95,7 +95,7 @@ type cfg struct {
metricsWriter MetricsWriter
- reportErrorFunc func(selfID string, message string, err error)
+ reportErrorFunc func(ctx context.Context, selfID string, message string, err error)
containerInfo container.InfoProvider
}
@@ -105,7 +105,7 @@ func defaultCfg() *cfg {
rmBatchSize: 100,
log: logger.NewLoggerWrapper(zap.L()),
gcCfg: defaultGCCfg(),
- reportErrorFunc: func(string, string, error) {},
+ reportErrorFunc: func(context.Context, string, string, error) {},
zeroSizeContainersCallback: func(context.Context, []cid.ID) {},
zeroCountContainersCallback: func(context.Context, []cid.ID) {},
metricsWriter: noopMetrics{},
@@ -130,8 +130,8 @@ func New(opts ...Option) *Shard {
tsSource: c.tsSource,
}
- reportFunc := func(msg string, err error) {
- s.reportErrorFunc(s.ID().String(), msg, err)
+ reportFunc := func(ctx context.Context, msg string, err error) {
+ s.reportErrorFunc(ctx, s.ID().String(), msg, err)
}
s.blobStor.SetReportErrorFunc(reportFunc)
@@ -317,7 +317,7 @@ func WithGCMetrics(v GCMectrics) Option {
// WithReportErrorFunc returns an option to specify a callback for handling storage-related errors
// in the background workers.
-func WithReportErrorFunc(f func(selfID string, message string, err error)) Option {
+func WithReportErrorFunc(f func(ctx context.Context, selfID string, message string, err error)) Option {
return func(c *cfg) {
c.reportErrorFunc = f
}
diff --git a/pkg/local_object_storage/shard/shard_test.go b/pkg/local_object_storage/shard/shard_test.go
index 73ba2e82b..f9ee34488 100644
--- a/pkg/local_object_storage/shard/shard_test.go
+++ b/pkg/local_object_storage/shard/shard_test.go
@@ -89,8 +89,8 @@ func newCustomShard(t testing.TB, enableWriteCache bool, o shardOptions) *Shard
WithPiloramaOptions(pilorama.WithPath(filepath.Join(o.rootPath, "pilorama"))),
WithWriteCache(enableWriteCache),
WithWriteCacheOptions(o.wcOpts),
- WithDeletedLockCallback(func(_ context.Context, addresses []oid.Address) {
- sh.HandleDeletedLocks(addresses)
+ WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) {
+ sh.HandleDeletedLocks(ctx, addresses)
}),
WithExpiredLocksCallback(func(ctx context.Context, epoch uint64, a []oid.Address) {
sh.HandleExpiredLocks(ctx, epoch, a)
diff --git a/pkg/local_object_storage/writecache/benchmark/writecache_test.go b/pkg/local_object_storage/writecache/benchmark/writecache_test.go
index 79ab7d9c6..fd85b4501 100644
--- a/pkg/local_object_storage/writecache/benchmark/writecache_test.go
+++ b/pkg/local_object_storage/writecache/benchmark/writecache_test.go
@@ -43,12 +43,12 @@ func BenchmarkWriteAfterDelete(b *testing.B) {
b.SetParallelism(parallel)
benchmarkRunPar(b, cache, payloadSize)
})
- require.NoError(b, cache.Close())
+ require.NoError(b, cache.Close(context.Background()))
}
func benchmarkPutSeq(b *testing.B, cache writecache.Cache, size uint64) {
benchmarkPutPrepare(b, cache)
- defer func() { require.NoError(b, cache.Close()) }()
+ defer func() { require.NoError(b, cache.Close(context.Background())) }()
ctx := context.Background()
objGen := testutil.RandObjGenerator{ObjSize: size}
@@ -71,7 +71,7 @@ func benchmarkPutSeq(b *testing.B, cache writecache.Cache, size uint64) {
func benchmarkPutPar(b *testing.B, cache writecache.Cache, size uint64) {
benchmarkPutPrepare(b, cache)
- defer func() { require.NoError(b, cache.Close()) }()
+ defer func() { require.NoError(b, cache.Close(context.Background())) }()
benchmarkRunPar(b, cache, size)
}
@@ -100,7 +100,7 @@ func benchmarkRunPar(b *testing.B, cache writecache.Cache, size uint64) {
func benchmarkPutPrepare(b *testing.B, cache writecache.Cache) {
require.NoError(b, cache.Open(context.Background(), mode.ReadWrite), "opening")
- require.NoError(b, cache.Init(), "initializing")
+ require.NoError(b, cache.Init(context.Background()), "initializing")
}
type testMetabase struct{}
diff --git a/pkg/local_object_storage/writecache/cache.go b/pkg/local_object_storage/writecache/cache.go
index 098872e08..e829d013c 100644
--- a/pkg/local_object_storage/writecache/cache.go
+++ b/pkg/local_object_storage/writecache/cache.go
@@ -98,19 +98,19 @@ func (c *cache) Open(_ context.Context, mod mode.Mode) error {
}
// Init runs necessary services.
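// Reviewer's sketch of the lifecycle pattern in the hunk below (names as in
// the hunk; this is a summary, not the implementation): Init derives the
// flush-loop context from the caller's ctx but detaches cancelation, because
// the loop must outlive the Init call and is stopped by Close instead:
//
//	ctx, cancel := context.WithCancel(context.WithoutCancel(ctx))
//	c.cancel.Store(cancel) // Close invokes this cancel func
//	c.runFlushLoop(ctx)
//
// context.WithoutCancel (Go 1.21+) keeps context values such as trace IDs
// while dropping the parent's cancelation and deadline.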
-func (c *cache) Init() error {
+func (c *cache) Init(ctx context.Context) error {
c.metrics.SetMode(mode.ConvertToComponentModeDegraded(c.mode))
- if err := c.flushAndDropBBoltDB(context.Background()); err != nil {
+ if err := c.flushAndDropBBoltDB(ctx); err != nil {
return fmt.Errorf("flush previous version write-cache database: %w", err)
}
- ctx, cancel := context.WithCancel(context.Background())
+ ctx, cancel := context.WithCancel(context.WithoutCancel(ctx)) // canceling performed by cache
c.cancel.Store(cancel)
c.runFlushLoop(ctx)
return nil
}
// Close closes db connection and stops services. Executes ObjectCounters.FlushAndClose op.
-func (c *cache) Close() error {
+func (c *cache) Close(ctx context.Context) error {
if cancelValue := c.cancel.Swap(dummyCanceler); cancelValue != nil {
cancelValue.(context.CancelFunc)()
}
@@ -127,7 +127,7 @@ func (c *cache) Close() error {
var err error
if c.fsTree != nil {
- err = c.fsTree.Close()
+ err = c.fsTree.Close(ctx)
if err != nil {
c.fsTree = nil
}
diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go
index 123eb4abc..d9e34ceab 100644
--- a/pkg/local_object_storage/writecache/flush.go
+++ b/pkg/local_object_storage/writecache/flush.go
@@ -112,7 +112,7 @@ func (c *cache) flushIfAnObjectExistsWorker(ctx context.Context, objInfo objectI
})
if err != nil {
if !client.IsErrObjectNotFound(err) {
- c.reportFlushError(logs.WritecacheCantGetObject, objInfo.addr.EncodeToString(), metaerr.Wrap(err))
+ c.reportFlushError(ctx, logs.WritecacheCantGetObject, objInfo.addr.EncodeToString(), metaerr.Wrap(err))
}
return
}
@@ -126,11 +126,11 @@ func (c *cache) flushIfAnObjectExistsWorker(ctx context.Context, objInfo objectI
c.deleteFromDisk(ctx, objInfo.addr, uint64(len(res.RawData)))
}
-func (c *cache) reportFlushError(msg string, addr string, err error) {
+func (c *cache) reportFlushError(ctx context.Context, msg string, addr string, err error) {
if c.reportError != nil {
- c.reportError(msg, err)
+ c.reportError(ctx, msg, err)
} else {
- c.log.Error(context.Background(), msg,
+ c.log.Error(ctx, msg,
zap.String("address", addr),
zap.Error(err))
}
@@ -145,7 +145,7 @@ func (c *cache) flushFSTree(ctx context.Context, ignoreErrors bool) error {
var obj objectSDK.Object
err := obj.Unmarshal(e.ObjectData)
if err != nil {
- c.reportFlushError(logs.FSTreeCantUnmarshalObject, sAddr, metaerr.Wrap(err))
+ c.reportFlushError(ctx, logs.FSTreeCantUnmarshalObject, sAddr, metaerr.Wrap(err))
if ignoreErrors {
return nil
}
@@ -183,7 +183,7 @@ func (c *cache) flushObject(ctx context.Context, obj *objectSDK.Object, data []b
if err != nil {
if !errors.Is(err, common.ErrNoSpace) && !errors.Is(err, common.ErrReadOnly) &&
!errors.Is(err, blobstor.ErrNoPlaceFound) {
- c.reportFlushError(logs.FSTreeCantFushObjectBlobstor,
+ c.reportFlushError(ctx, logs.FSTreeCantFushObjectBlobstor,
addr.EncodeToString(), err)
}
return err
@@ -195,7 +195,7 @@ func (c *cache) flushObject(ctx context.Context, obj *objectSDK.Object, data []b
_, err = c.metabase.UpdateStorageID(ctx, updPrm)
if err != nil {
- c.reportFlushError(logs.FSTreeCantUpdateID,
+ c.reportFlushError(ctx, logs.FSTreeCantUpdateID,
addr.EncodeToString(), err)
}
return err
diff --git a/pkg/local_object_storage/writecache/flush_test.go b/pkg/local_object_storage/writecache/flush_test.go
index 92fb493e0..7fc84657c 100644
--- a/pkg/local_object_storage/writecache/flush_test.go
+++ b/pkg/local_object_storage/writecache/flush_test.go
@@ -38,9 +38,9 @@ func TestFlush(t *testing.T) {
errCountOpt := func() (Option, *atomic.Uint32) {
cnt := &atomic.Uint32{}
- return WithReportErrorFunc(func(msg string, err error) {
+ return WithReportErrorFunc(func(ctx context.Context, msg string, err error) {
cnt.Add(1)
- testlogger.Warn(context.Background(), msg, zap.Uint32("error_count", cnt.Load()), zap.Error(err))
+ testlogger.Warn(ctx, msg, zap.Uint32("error_count", cnt.Load()), zap.Error(err))
}), cnt
}
@@ -114,7 +114,7 @@ func runFlushTest[Option any](
) {
t.Run("no errors", func(t *testing.T) {
wc, bs, mb := newCache(t, createCacheFn)
- defer func() { require.NoError(t, wc.Close()) }()
+ defer func() { require.NoError(t, wc.Close(context.Background())) }()
objects := putObjects(t, wc)
require.NoError(t, bs.SetMode(context.Background(), mode.ReadWrite))
@@ -127,7 +127,7 @@ func runFlushTest[Option any](
t.Run("flush on moving to degraded mode", func(t *testing.T) {
wc, bs, mb := newCache(t, createCacheFn)
- defer func() { require.NoError(t, wc.Close()) }()
+ defer func() { require.NoError(t, wc.Close(context.Background())) }()
objects := putObjects(t, wc)
// Blobstor is read-only, so we expect an error from `flush` here.
@@ -145,7 +145,7 @@ func runFlushTest[Option any](
t.Run(f.Desc, func(t *testing.T) {
errCountOpt, errCount := errCountOption()
wc, bs, mb := newCache(t, createCacheFn, errCountOpt)
- defer func() { require.NoError(t, wc.Close()) }()
+ defer func() { require.NoError(t, wc.Close(context.Background())) }()
objects := putObjects(t, wc)
f.InjectFn(t, wc)
@@ -173,7 +173,7 @@ func newCache[Option any](
meta.WithPath(filepath.Join(dir, "meta")),
meta.WithEpochState(dummyEpoch{}))
require.NoError(t, mb.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, mb.Init())
+ require.NoError(t, mb.Init(context.Background()))
bs := blobstor.New(blobstor.WithStorages([]blobstor.SubStorage{
{
@@ -184,11 +184,11 @@ func newCache[Option any](
},
}))
require.NoError(t, bs.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, bs.Init())
+ require.NoError(t, bs.Init(context.Background()))
wc := createCacheFn(t, mb, bs, opts...)
require.NoError(t, wc.Open(context.Background(), mode.ReadWrite))
- require.NoError(t, wc.Init())
+ require.NoError(t, wc.Init(context.Background()))
// First set mode for metabase and blobstor to prevent background flushes.
require.NoError(t, mb.SetMode(context.Background(), mode.ReadOnly))
diff --git a/pkg/local_object_storage/writecache/mode.go b/pkg/local_object_storage/writecache/mode.go
index db789d994..73d12fd33 100644
--- a/pkg/local_object_storage/writecache/mode.go
+++ b/pkg/local_object_storage/writecache/mode.go
@@ -82,7 +82,7 @@ func (c *cache) closeStorage(ctx context.Context, shrink bool) error {
return nil
}
if !shrink {
- if err := c.fsTree.Close(); err != nil {
+ if err := c.fsTree.Close(ctx); err != nil {
return fmt.Errorf("can't close write-cache storage: %w", err)
}
return nil
@@ -101,7 +101,7 @@ func (c *cache) closeStorage(ctx context.Context, shrink bool) error {
return fmt.Errorf("failed to check write-cache items: %w", err)
}
}
- if err := c.fsTree.Close(); err != nil {
+ if err := c.fsTree.Close(ctx); err != nil {
return fmt.Errorf("can't close write-cache storage: %w", err)
}
if empty {
diff --git a/pkg/local_object_storage/writecache/mode_test.go b/pkg/local_object_storage/writecache/mode_test.go
index 70cfe8382..4fbadbc64 100644
--- a/pkg/local_object_storage/writecache/mode_test.go
+++ b/pkg/local_object_storage/writecache/mode_test.go
@@ -18,13 +18,13 @@ func TestMode(t *testing.T) {
require.NoError(t, wc.Open(context.Background(), mode.DegradedReadOnly))
require.Nil(t, wc.(*cache).fsTree)
- require.NoError(t, wc.Init())
+ require.NoError(t, wc.Init(context.Background()))
require.Nil(t, wc.(*cache).fsTree)
- require.NoError(t, wc.Close())
+ require.NoError(t, wc.Close(context.Background()))
require.NoError(t, wc.Open(context.Background(), mode.Degraded))
require.Nil(t, wc.(*cache).fsTree)
- require.NoError(t, wc.Init())
+ require.NoError(t, wc.Init(context.Background()))
require.Nil(t, wc.(*cache).fsTree)
- require.NoError(t, wc.Close())
+ require.NoError(t, wc.Close(context.Background()))
}
diff --git a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/options.go
index 25c1694a8..f2957fe98 100644
--- a/pkg/local_object_storage/writecache/options.go
+++ b/pkg/local_object_storage/writecache/options.go
@@ -1,6 +1,8 @@
package writecache
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
)
@@ -29,7 +31,7 @@ type options struct {
// noSync is true iff FSTree allows unsynchronized writes.
noSync bool
// reportError is the function called when encountering disk errors in background workers.
- reportError func(string, error)
+ reportError func(context.Context, string, error)
// metrics is the metrics implementation
metrics Metrics
// disableBackgroundFlush is for testing purposes only.
@@ -108,7 +110,7 @@ func WithNoSync(noSync bool) Option {
}
// WithReportErrorFunc sets error reporting function.
-func WithReportErrorFunc(f func(string, error)) Option {
+func WithReportErrorFunc(f func(context.Context, string, error)) Option {
return func(o *options) {
o.reportError = f
}
diff --git a/pkg/local_object_storage/writecache/writecache.go b/pkg/local_object_storage/writecache/writecache.go
index d07220b68..70b17eb8e 100644
--- a/pkg/local_object_storage/writecache/writecache.go
+++ b/pkg/local_object_storage/writecache/writecache.go
@@ -44,9 +44,9 @@ type Cache interface {
Flush(context.Context, bool, bool) error
Seal(context.Context, SealPrm) error
- Init() error
+ Init(context.Context) error
Open(ctx context.Context, mode mode.Mode) error
- Close() error
+ Close(context.Context) error
GetMetrics() Metrics
}
diff --git a/pkg/morph/client/balance/burn.go b/pkg/morph/client/balance/burn.go
index 4befbef45..f4685b0ab 100644
--- a/pkg/morph/client/balance/burn.go
+++ b/pkg/morph/client/balance/burn.go
@@ -1,6 +1,8 @@
package balance
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/util"
)
@@ -30,12 +32,12 @@ func (b *BurnPrm) SetID(id []byte) {
}
// Burn destroys funds from the account.
-func (c *Client) Burn(p BurnPrm) error {
+func (c *Client) Burn(ctx context.Context, p BurnPrm) error {
prm := client.InvokePrm{}
prm.SetMethod(burnMethod)
prm.SetArgs(p.to, p.amount, p.id)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(prm)
+ _, err := c.client.Invoke(ctx, prm)
return err
}
diff --git a/pkg/morph/client/balance/lock.go b/pkg/morph/client/balance/lock.go
index a5b206799..83e8b0586 100644
--- a/pkg/morph/client/balance/lock.go
+++ b/pkg/morph/client/balance/lock.go
@@ -1,6 +1,8 @@
package balance
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/util"
)
@@ -42,12 +44,12 @@ func (l *LockPrm) SetDueEpoch(dueEpoch int64) {
}
// Lock locks funds on the user account.
-func (c *Client) Lock(p LockPrm) error {
+func (c *Client) Lock(ctx context.Context, p LockPrm) error {
prm := client.InvokePrm{}
prm.SetMethod(lockMethod)
prm.SetArgs(p.id, p.user, p.lock, p.amount, p.dueEpoch)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(prm)
+ _, err := c.client.Invoke(ctx, prm)
return err
}
diff --git a/pkg/morph/client/balance/mint.go b/pkg/morph/client/balance/mint.go
index 73448da31..082ade85e 100644
--- a/pkg/morph/client/balance/mint.go
+++ b/pkg/morph/client/balance/mint.go
@@ -1,6 +1,8 @@
package balance
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/util"
)
@@ -30,12 +32,12 @@ func (m *MintPrm) SetID(id []byte) {
}
// Mint sends funds to the account.
-func (c *Client) Mint(p MintPrm) error {
+func (c *Client) Mint(ctx context.Context, p MintPrm) error {
prm := client.InvokePrm{}
prm.SetMethod(mintMethod)
prm.SetArgs(p.to, p.amount, p.id)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(prm)
+ _, err := c.client.Invoke(ctx, prm)
return err
}
diff --git a/pkg/morph/client/balance/transfer.go b/pkg/morph/client/balance/transfer.go
index 08fb05289..65a0b70a6 100644
--- a/pkg/morph/client/balance/transfer.go
+++ b/pkg/morph/client/balance/transfer.go
@@ -1,6 +1,7 @@
package balance
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -23,7 +24,7 @@ type TransferPrm struct {
// with details p.Details through direct smart contract call.
//
// If TryNotary is provided, calls notary contract.
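// Illustrative caller-side sketch (the helper name settleTransfer is
// hypothetical, not from this patch): each balance wrapper now threads the
// caller's context down to client.Invoke, so one trace can span an
// invocation end to end:
//
//	func settleTransfer(ctx context.Context, c *balance.Client, p balance.TransferPrm) error {
//		return c.TransferX(ctx, p) // previously c.TransferX(p), with Background() used internally
//	}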
-func (c *Client) TransferX(p TransferPrm) error {
+func (c *Client) TransferX(ctx context.Context, p TransferPrm) error {
from, err := address.StringToUint160(p.From.EncodeToString())
if err != nil {
return err
@@ -39,7 +40,7 @@ func (c *Client) TransferX(p TransferPrm) error {
prm.SetArgs(from, to, p.Amount, p.Details)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err = c.client.Invoke(prm)
+ _, err = c.client.Invoke(ctx, prm)
if err != nil {
return fmt.Errorf("could not invoke method (%s): %w", transferXMethod, err)
}
diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go
index ef6a009e4..f61c6e9f9 100644
--- a/pkg/morph/client/client.go
+++ b/pkg/morph/client/client.go
@@ -180,7 +180,7 @@ func wrapFrostFSError(err error) error {
// Invoke invokes a contract method by sending a transaction to the blockchain.
// Returns valid until block value.
// Supported argument types: int64, string, util.Uint160, []byte and bool.
-func (c *Client) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error) {
+func (c *Client) Invoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error) {
start := time.Now()
success := false
defer func() {
@@ -199,7 +199,7 @@ func (c *Client) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string,
return 0, fmt.Errorf("could not invoke %s: %w", method, err)
}
- c.logger.Debug(context.Background(), logs.ClientNeoClientInvoke,
+ c.logger.Debug(ctx, logs.ClientNeoClientInvoke,
zap.String("method", method),
zap.Uint32("vub", vub),
zap.Stringer("tx_hash", txHash.Reverse()))
diff --git a/pkg/morph/client/container/delete.go b/pkg/morph/client/container/delete.go
index 20351b570..5696645b2 100644
--- a/pkg/morph/client/container/delete.go
+++ b/pkg/morph/client/container/delete.go
@@ -1,6 +1,7 @@
package container
import (
+ "context"
"crypto/sha256"
"fmt"
@@ -12,7 +13,7 @@ import (
// along with signature and session token.
//
// Returns error if container ID is nil.
-func Delete(c *Client, witness core.RemovalWitness) error {
+func Delete(ctx context.Context, c *Client, witness core.RemovalWitness) error {
binCnr := make([]byte, sha256.Size)
witness.ContainerID.Encode(binCnr)
@@ -26,7 +27,7 @@ func Delete(c *Client, witness core.RemovalWitness) error {
prm.SetToken(tok.Marshal())
}
- _, err := c.Delete(prm)
+ _, err := c.Delete(ctx, prm)
return err
}
@@ -67,7 +68,7 @@ func (d *DeletePrm) SetKey(key []byte) {
// the removal to interrupt.
//
// If TryNotary is provided, calls notary contract.
-func (c *Client) Delete(p DeletePrm) (uint32, error) {
+func (c *Client) Delete(ctx context.Context, p DeletePrm) (uint32, error) {
if len(p.signature) == 0 && !p.IsControl() {
return 0, errNilArgument
}
@@ -77,7 +78,7 @@ func (c *Client) Delete(p DeletePrm) (uint32, error) {
prm.SetArgs(p.cnr, p.signature, p.key, p.token)
prm.InvokePrmOptional = p.InvokePrmOptional
- res, err := c.client.Invoke(prm)
+ res, err := c.client.Invoke(ctx, prm)
if err != nil {
return 0, fmt.Errorf("could not invoke method (%s): %w", deleteMethod, err)
}
diff --git a/pkg/morph/client/container/estimations.go b/pkg/morph/client/container/estimations.go
index f288c63cf..77f46d519 100644
--- a/pkg/morph/client/container/estimations.go
+++ b/pkg/morph/client/container/estimations.go
@@ -1,6 +1,7 @@
package container
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -28,26 +29,26 @@ func (p *commonEstimationPrm) SetEpoch(epoch uint64) {
}
// StartEstimation votes to produce start estimation notification.
-func (c *Client) StartEstimation(p StartEstimationPrm) error {
+func (c *Client) StartEstimation(ctx context.Context, p StartEstimationPrm) error {
prm := client.InvokePrm{}
prm.SetMethod(startEstimationMethod)
prm.SetArgs(p.epoch)
prm.InvokePrmOptional = p.InvokePrmOptional
- if _, err := c.client.Invoke(prm); err != nil {
+ if _, err := c.client.Invoke(ctx, prm); err != nil {
return fmt.Errorf("could not invoke method (%s): %w", startEstimationMethod, err)
}
return nil
}
// StopEstimation votes to produce stop estimation notification.
-func (c *Client) StopEstimation(p StopEstimationPrm) error {
+func (c *Client) StopEstimation(ctx context.Context, p StopEstimationPrm) error {
prm := client.InvokePrm{}
prm.SetMethod(stopEstimationMethod)
prm.SetArgs(p.epoch)
prm.InvokePrmOptional = p.InvokePrmOptional
- if _, err := c.client.Invoke(prm); err != nil {
+ if _, err := c.client.Invoke(ctx, prm); err != nil {
return fmt.Errorf("could not invoke method (%s): %w", stopEstimationMethod, err)
}
return nil
diff --git a/pkg/morph/client/container/put.go b/pkg/morph/client/container/put.go
index ee323af00..a9d28699a 100644
--- a/pkg/morph/client/container/put.go
+++ b/pkg/morph/client/container/put.go
@@ -1,6 +1,7 @@
package container
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
@@ -14,7 +15,7 @@ import (
// along with sig.Key() and sig.Sign().
//
// Returns error if container is nil.
-func Put(c *Client, cnr containercore.Container) (*cid.ID, error) {
+func Put(ctx context.Context, c *Client, cnr containercore.Container) (*cid.ID, error) {
data := cnr.Value.Marshal()
d := container.ReadDomain(cnr.Value)
@@ -35,7 +36,7 @@ func Put(c *Client, cnr containercore.Container) (*cid.ID, error) {
prm.SetKey(sigV2.GetKey())
prm.SetSignature(sigV2.GetSign())
- err := c.Put(prm)
+ err := c.Put(ctx, prm)
if err != nil {
return nil, err
}
@@ -95,7 +96,7 @@ func (p *PutPrm) SetZone(zone string) {
// encountered that caused the saving to interrupt.
//
// If TryNotary is provided, calls notary contract.
-func (c *Client) Put(p PutPrm) error {
+func (c *Client) Put(ctx context.Context, p PutPrm) error {
if len(p.sig) == 0 || len(p.key) == 0 {
return errNilArgument
}
@@ -116,7 +117,7 @@ func (c *Client) Put(p PutPrm) error {
prm.SetMethod(method)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(prm)
+ _, err := c.client.Invoke(ctx, prm)
if err != nil {
return fmt.Errorf("could not invoke method (%s): %w", method, err)
}
diff --git a/pkg/morph/client/frostfs/cheque.go b/pkg/morph/client/frostfs/cheque.go
index 016b56f8f..d3eba7639 100644
--- a/pkg/morph/client/frostfs/cheque.go
+++ b/pkg/morph/client/frostfs/cheque.go
@@ -1,6 +1,8 @@
package frostfscontract
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -37,13 +39,13 @@ func (c *ChequePrm) SetLock(lock util.Uint160) {
}
// Cheque invokes `cheque` method of FrostFS contract.
-func (x *Client) Cheque(p ChequePrm) error {
+func (x *Client) Cheque(ctx context.Context, p ChequePrm) error {
prm := client.InvokePrm{}
prm.SetMethod(chequeMethod)
prm.SetArgs(p.id, p.user, p.amount, p.lock)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := x.client.Invoke(prm)
+ _, err := x.client.Invoke(ctx, prm)
return err
}
@@ -66,12 +68,12 @@ func (a *AlphabetUpdatePrm) SetPubs(pubs keys.PublicKeys) {
}
// AlphabetUpdate updates the list of alphabet nodes.
-func (x *Client) AlphabetUpdate(p AlphabetUpdatePrm) error {
+func (x *Client) AlphabetUpdate(ctx context.Context, p AlphabetUpdatePrm) error {
prm := client.InvokePrm{}
prm.SetMethod(alphabetUpdateMethod)
prm.SetArgs(p.id, p.pubs)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := x.client.Invoke(prm)
+ _, err := x.client.Invoke(ctx, prm)
return err
}
diff --git a/pkg/morph/client/netmap/config.go b/pkg/morph/client/netmap/config.go
index 2d19a8193..0a3c351db 100644
--- a/pkg/morph/client/netmap/config.go
+++ b/pkg/morph/client/netmap/config.go
@@ -1,6 +1,7 @@
package netmap
import (
+ "context"
"errors"
"fmt"
@@ -155,13 +156,13 @@ func (s *SetConfigPrm) SetValue(value any) {
}
// SetConfig sets config field.
-func (c *Client) SetConfig(p SetConfigPrm) error {
+func (c *Client) SetConfig(ctx context.Context, p SetConfigPrm) error {
prm := client.InvokePrm{}
prm.SetMethod(setConfigMethod)
prm.SetArgs(p.id, p.key, p.value)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(prm)
+ _, err := c.client.Invoke(ctx, prm)
return err
}
diff --git a/pkg/morph/client/netmap/innerring.go b/pkg/morph/client/netmap/innerring.go
index d6f8c56b2..c9dc7d2fc 100644
--- a/pkg/morph/client/netmap/innerring.go
+++ b/pkg/morph/client/netmap/innerring.go
@@ -1,6 +1,7 @@
package netmap
import (
+ "context"
"crypto/elliptic"
"fmt"
@@ -23,7 +24,7 @@ func (u *UpdateIRPrm) SetKeys(keys keys.PublicKeys) {
}
// UpdateInnerRing updates inner ring keys.
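// Illustrative sketch (setEpochDuration and the config key are assumptions
// for demonstration, not from this patch): configuration updates follow the
// same pattern, propagating ctx into the Netmap contract invocation:
//
//	func setEpochDuration(ctx context.Context, c *netmap.Client, blocks int64) error {
//		var p netmap.SetConfigPrm
//		p.SetKey([]byte("EpochDuration"))
//		p.SetValue(blocks)
//		return c.SetConfig(ctx, p)
//	}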
-func (c *Client) UpdateInnerRing(p UpdateIRPrm) error {
+func (c *Client) UpdateInnerRing(ctx context.Context, p UpdateIRPrm) error {
args := make([][]byte, len(p.keys))
for i := range args {
args[i] = p.keys[i].Bytes()
@@ -34,7 +35,7 @@ func (c *Client) UpdateInnerRing(p UpdateIRPrm) error {
prm.SetArgs(args)
prm.InvokePrmOptional = p.InvokePrmOptional
- _, err := c.client.Invoke(prm)
+ _, err := c.client.Invoke(ctx, prm)
return err
}
diff --git a/pkg/morph/client/netmap/new_epoch.go b/pkg/morph/client/netmap/new_epoch.go
index ded386c86..efcdfd7b6 100644
--- a/pkg/morph/client/netmap/new_epoch.go
+++ b/pkg/morph/client/netmap/new_epoch.go
@@ -1,6 +1,7 @@
package netmap
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@@ -8,12 +9,12 @@ import (
// NewEpoch updates FrostFS epoch number through
// Netmap contract call.
-func (c *Client) NewEpoch(epoch uint64) error {
+func (c *Client) NewEpoch(ctx context.Context, epoch uint64) error {
prm := client.InvokePrm{}
prm.SetMethod(newEpochMethod)
prm.SetArgs(epoch)
- _, err := c.client.Invoke(prm)
+ _, err := c.client.Invoke(ctx, prm)
if err != nil {
return fmt.Errorf("could not invoke method (%s): %w", newEpochMethod, err)
}
@@ -24,14 +25,14 @@ func (c *Client) NewEpoch(epoch uint64) error {
// control notary transaction internally to ensure all
// nodes produce the same transaction with high probability.
// If vub > 0, vub will be used as valid until block value.
-func (c *Client) NewEpochControl(epoch uint64, vub uint32) (uint32, error) {
+func (c *Client) NewEpochControl(ctx context.Context, epoch uint64, vub uint32) (uint32, error) {
prm := client.InvokePrm{}
prm.SetMethod(newEpochMethod)
prm.SetArgs(epoch)
prm.SetControlTX(true)
prm.SetVUB(vub)
- res, err := c.client.Invoke(prm)
+ res, err := c.client.Invoke(ctx, prm)
if err != nil {
return 0, fmt.Errorf("could not invoke method (%s): %w", newEpochMethod, err)
}
diff --git a/pkg/morph/client/netmap/peer.go b/pkg/morph/client/netmap/peer.go
index 764bbc899..9617d018c 100644
--- a/pkg/morph/client/netmap/peer.go
+++ b/pkg/morph/client/netmap/peer.go
@@ -1,6 +1,7 @@
package netmap
import (
+ "context"
"errors"
"fmt"
@@ -24,7 +25,7 @@ func (a *AddPeerPrm) SetNodeInfo(nodeInfo netmap.NodeInfo) {
}
// AddPeer registers peer in FrostFS network through
// Netmap contract call.
-func (c *Client) AddPeer(p AddPeerPrm) error {
+func (c *Client) AddPeer(ctx context.Context, p AddPeerPrm) error {
method := addPeerMethod
if c.client.WithNotary() && c.client.IsAlpha() {
@@ -39,7 +40,7 @@ func (c *Client) AddPeer(p AddPeerPrm) error {
prm.SetArgs(p.nodeInfo.Marshal())
prm.InvokePrmOptional = p.InvokePrmOptional
- if _, err := c.client.Invoke(prm); err != nil {
+ if _, err := c.client.Invoke(ctx, prm); err != nil {
return fmt.Errorf("could not invoke method (%s): %w", method, err)
}
return nil
@@ -47,7 +48,7 @@ func (c *Client) AddPeer(p AddPeerPrm) error {
// ForceRemovePeer marks the given peer as offline via a notary control transaction.
// If vub > 0, vub will be used as valid until block value.
-func (c *Client) ForceRemovePeer(nodeInfo netmap.NodeInfo, vub uint32) (uint32, error) {
+func (c *Client) ForceRemovePeer(ctx context.Context, nodeInfo netmap.NodeInfo, vub uint32) (uint32, error) {
if !c.client.WithNotary() {
return 0, errFailedToRemovePeerWithoutNotary
}
@@ -57,7 +58,7 @@ func (c *Client) ForceRemovePeer(nodeInfo netmap.NodeInfo, vub uint32) (uint32,
prm.SetControlTX(true)
prm.SetVUB(vub)
- vub, err := c.UpdatePeerState(prm)
+ vub, err := c.UpdatePeerState(ctx, prm)
if err != nil {
return 0, fmt.Errorf("updating peer state: %v", err)
}
diff --git a/pkg/morph/client/netmap/update_state.go b/pkg/morph/client/netmap/update_state.go
index 7c3a4e8cd..971a55d33 100644
--- a/pkg/morph/client/netmap/update_state.go
+++ b/pkg/morph/client/netmap/update_state.go
@@ -1,6 +1,7 @@
package netmap
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-contract/netmap"
@@ -36,7 +37,7 @@ func (u *UpdatePeerPrm) SetMaintenance() {
}
// UpdatePeerState changes peer status through Netmap contract call.
-func (c *Client) UpdatePeerState(p UpdatePeerPrm) (uint32, error) {
+func (c *Client) UpdatePeerState(ctx context.Context, p UpdatePeerPrm) (uint32, error) {
method := updateStateMethod
if c.client.WithNotary() && c.client.IsAlpha() {
@@ -55,7 +56,7 @@ func (c *Client) UpdatePeerState(p UpdatePeerPrm) (uint32, error) {
prm.SetArgs(int64(p.state), p.key)
prm.InvokePrmOptional = p.InvokePrmOptional
- res, err := c.client.Invoke(prm)
+ res, err := c.client.Invoke(ctx, prm)
if err != nil {
return 0, fmt.Errorf("could not invoke smart contract: %w", err)
}
diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go
index 58c417fb1..65a5e77a6 100644
--- a/pkg/morph/client/notary.go
+++ b/pkg/morph/client/notary.go
@@ -141,7 +141,7 @@ func (c *Client) ProbeNotary() (res bool) {
// use this function.
//
// This function must be invoked with notary enabled otherwise it throws panic.
-func (c *Client) DepositNotary(amount fixedn.Fixed8, delta uint32) (util.Uint256, error) {
+func (c *Client) DepositNotary(ctx context.Context, amount fixedn.Fixed8, delta uint32) (util.Uint256, error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -164,7 +164,7 @@ func (c *Client) DepositNotary(amount fixedn.Fixed8, delta uint32) (util.Uint256
}
till := max(int64(bc+delta), currentTill)
- res, _, err := c.depositNotary(amount, till)
+ res, _, err := c.depositNotary(ctx, amount, till)
return res, err
}
@@ -173,7 +173,7 @@ func (c *Client) DepositNotary(amount fixedn.Fixed8, delta uint32) (util.Uint256
// This allows avoiding ValidAfterDeposit failures.
//
// This function must be invoked with notary enabled otherwise it throws panic.
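// Illustrative start-up wiring (assumed for demonstration, not from this
// patch): the context given to the deposit call is what its log lines are
// now bound to, instead of a fresh context.Background():
//
//	if _, _, err := morphClient.DepositEndlessNotary(ctx, fixedn.Fixed8FromInt64(1)); err != nil {
//		return fmt.Errorf("notary deposit: %w", err)
//	}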
-func (c *Client) DepositEndlessNotary(amount fixedn.Fixed8) (util.Uint256, uint32, error) {
+func (c *Client) DepositEndlessNotary(ctx context.Context, amount fixedn.Fixed8) (util.Uint256, uint32, error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -186,10 +186,10 @@ func (c *Client) DepositEndlessNotary(amount fixedn.Fixed8) (util.Uint256, uint3
}
// till value refers to a block height and it is uint32 value in neo-go
- return c.depositNotary(amount, math.MaxUint32)
+ return c.depositNotary(ctx, amount, math.MaxUint32)
}
-func (c *Client) depositNotary(amount fixedn.Fixed8, till int64) (util.Uint256, uint32, error) {
+func (c *Client) depositNotary(ctx context.Context, amount fixedn.Fixed8, till int64) (util.Uint256, uint32, error) {
txHash, vub, err := c.gasToken.Transfer(
c.accAddr,
c.notary.notary,
@@ -202,7 +202,7 @@ func (c *Client) depositNotary(amount fixedn.Fixed8, till int64) (util.Uint256,
// Transaction is already in mempool waiting to be processed.
// This is an expected situation if we restart the service.
- c.logger.Info(context.Background(), logs.ClientNotaryDepositHasAlreadyBeenMade,
+ c.logger.Info(ctx, logs.ClientNotaryDepositHasAlreadyBeenMade,
zap.Int64("amount", int64(amount)),
zap.Int64("expire_at", till),
zap.Uint32("vub", vub),
@@ -210,7 +210,7 @@ func (c *Client) depositNotary(amount fixedn.Fixed8, till int64) (util.Uint256,
return util.Uint256{}, 0, nil
}
- c.logger.Info(context.Background(), logs.ClientNotaryDepositInvoke,
+ c.logger.Info(ctx, logs.ClientNotaryDepositInvoke,
zap.Int64("amount", int64(amount)),
zap.Int64("expire_at", till),
zap.Uint32("vub", vub),
@@ -275,7 +275,7 @@ func (u *UpdateNotaryListPrm) SetHash(hash util.Uint256) {
// committee multi signature.
//
// This function must be invoked with notary enabled otherwise it throws panic.
-func (c *Client) UpdateNotaryList(prm UpdateNotaryListPrm) error {
+func (c *Client) UpdateNotaryList(ctx context.Context, prm UpdateNotaryListPrm) error {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -293,6 +293,7 @@ func (c *Client) UpdateNotaryList(prm UpdateNotaryListPrm) error {
}
return c.notaryInvokeAsCommittee(
+ ctx,
setDesignateMethod,
nonce,
vub,
@@ -323,7 +324,7 @@ func (u *UpdateAlphabetListPrm) SetHash(hash util.Uint256) {
// Requires committee multi signature.
//
// This function must be invoked with notary enabled otherwise it throws panic.
-func (c *Client) UpdateNeoFSAlphabetList(prm UpdateAlphabetListPrm) error {
+func (c *Client) UpdateNeoFSAlphabetList(ctx context.Context, prm UpdateAlphabetListPrm) error {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -341,6 +342,7 @@ func (c *Client) UpdateNeoFSAlphabetList(prm UpdateAlphabetListPrm) error {
}
return c.notaryInvokeAsCommittee(
+ ctx,
setDesignateMethod,
nonce,
vub,
@@ -356,7 +358,7 @@ func (c *Client) UpdateNeoFSAlphabetList(prm UpdateAlphabetListPrm) error {
// Returns valid until block value.
//
// `nonce` and `vub` are used only if notary is enabled.
-func (c *Client) NotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) (uint32, error) {
+func (c *Client) NotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) (uint32, error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
@@ -365,10 +367,10 @@ func (c *Client) NotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce ui
}
if c.notary == nil {
- return c.Invoke(contract, fee, method, args...)
+ return c.Invoke(ctx, contract, fee, method, args...) } - return c.notaryInvoke(false, true, contract, nonce, vub, method, args...) + return c.notaryInvoke(ctx, false, true, contract, nonce, vub, method, args...) } // NotaryInvokeNotAlpha does the same as NotaryInvoke but does not use client's @@ -376,7 +378,7 @@ func (c *Client) NotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce ui // not expected to be signed by the current node. // // Considered to be used by non-IR nodes. -func (c *Client) NotaryInvokeNotAlpha(contract util.Uint160, fee fixedn.Fixed8, vubP *uint32, method string, args ...any) (uint32, error) { +func (c *Client) NotaryInvokeNotAlpha(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, vubP *uint32, method string, args ...any) (uint32, error) { c.switchLock.RLock() defer c.switchLock.RUnlock() @@ -385,10 +387,10 @@ func (c *Client) NotaryInvokeNotAlpha(contract util.Uint160, fee fixedn.Fixed8, } if c.notary == nil { - return c.Invoke(contract, fee, method, args...) + return c.Invoke(ctx, contract, fee, method, args...) } - return c.notaryInvoke(false, false, contract, rand.Uint32(), vubP, method, args...) + return c.notaryInvoke(ctx, false, false, contract, rand.Uint32(), vubP, method, args...) } // NotarySignAndInvokeTX signs and sends notary request that was received from @@ -438,13 +440,13 @@ func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error { return nil } -func (c *Client) notaryInvokeAsCommittee(method string, nonce, vub uint32, args ...any) error { +func (c *Client) notaryInvokeAsCommittee(ctx context.Context, method string, nonce, vub uint32, args ...any) error { designate := c.GetDesignateHash() - _, err := c.notaryInvoke(true, true, designate, nonce, &vub, method, args...) + _, err := c.notaryInvoke(ctx, true, true, designate, nonce, &vub, method, args...) return err } -func (c *Client) notaryInvoke(committee, invokedByAlpha bool, contract util.Uint160, nonce uint32, vub *uint32, method string, args ...any) (uint32, error) { +func (c *Client) notaryInvoke(ctx context.Context, committee, invokedByAlpha bool, contract util.Uint160, nonce uint32, vub *uint32, method string, args ...any) (uint32, error) { start := time.Now() success := false defer func() { @@ -486,7 +488,7 @@ func (c *Client) notaryInvoke(committee, invokedByAlpha bool, contract util.Uint return 0, err } - c.logger.Debug(context.Background(), logs.ClientNotaryRequestInvoked, + c.logger.Debug(ctx, logs.ClientNotaryRequestInvoked, zap.String("method", method), zap.Uint32("valid_until_block", untilActual), zap.String("tx_hash", mainH.StringLE()), diff --git a/pkg/morph/client/static.go b/pkg/morph/client/static.go index dfcf62b83..1e091936f 100644 --- a/pkg/morph/client/static.go +++ b/pkg/morph/client/static.go @@ -1,6 +1,7 @@ package client import ( + "context" "fmt" "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" @@ -140,7 +141,7 @@ type InvokeRes struct { // // If fee for the operation executed using specified method is customized, then StaticClient uses it. // Otherwise, default fee is used. -func (s StaticClient) Invoke(prm InvokePrm) (InvokeRes, error) { +func (s StaticClient) Invoke(ctx context.Context, prm InvokePrm) (InvokeRes, error) { var res InvokeRes var err error var vubP *uint32 @@ -169,7 +170,7 @@ func (s StaticClient) Invoke(prm InvokePrm) (InvokeRes, error) { vubP = &prm.vub } - res.VUB, err = s.client.NotaryInvoke(s.scScriptHash, s.fee, nonce, vubP, prm.method, prm.args...) 
+ res.VUB, err = s.client.NotaryInvoke(ctx, s.scScriptHash, s.fee, nonce, vubP, prm.method, prm.args...) return res, err } @@ -177,11 +178,12 @@ func (s StaticClient) Invoke(prm InvokePrm) (InvokeRes, error) { vubP = &prm.vub } - res.VUB, err = s.client.NotaryInvokeNotAlpha(s.scScriptHash, s.fee, vubP, prm.method, prm.args...) + res.VUB, err = s.client.NotaryInvokeNotAlpha(ctx, s.scScriptHash, s.fee, vubP, prm.method, prm.args...) return res, err } res.VUB, err = s.client.Invoke( + ctx, s.scScriptHash, s.fee, prm.method, diff --git a/pkg/morph/event/handlers.go b/pkg/morph/event/handlers.go index bda83ba54..822335329 100644 --- a/pkg/morph/event/handlers.go +++ b/pkg/morph/event/handlers.go @@ -10,7 +10,7 @@ import ( type Handler func(context.Context, Event) // BlockHandler is a chain block processing function. -type BlockHandler func(*block.Block) +type BlockHandler func(context.Context, *block.Block) // NotificationHandlerInfo is a structure that groups // the parameters of the handler of particular diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go index eeec46540..6e6184e77 100644 --- a/pkg/morph/event/listener.go +++ b/pkg/morph/event/listener.go @@ -291,18 +291,18 @@ loop: continue loop } - l.handleBlockEvent(b) + l.handleBlockEvent(ctx, b) } } } -func (l *listener) handleBlockEvent(b *block.Block) { +func (l *listener) handleBlockEvent(ctx context.Context, b *block.Block) { if err := l.pool.Submit(func() { for i := range l.blockHandlers { - l.blockHandlers[i](b) + l.blockHandlers[i](ctx, b) } }); err != nil { - l.log.Warn(context.Background(), logs.EventListenerWorkerPoolDrained, + l.log.Warn(ctx, logs.EventListenerWorkerPoolDrained, zap.Int("capacity", l.pool.Cap())) } } diff --git a/pkg/morph/event/listener_test.go b/pkg/morph/event/listener_test.go index 214daf694..c0f9722d7 100644 --- a/pkg/morph/event/listener_test.go +++ b/pkg/morph/event/listener_test.go @@ -34,7 +34,7 @@ func TestEventHandling(t *testing.T) { blockHandled := make(chan bool) handledBlocks := make([]*block.Block, 0) - l.RegisterBlockHandler(func(b *block.Block) { + l.RegisterBlockHandler(func(_ context.Context, b *block.Block) { handledBlocks = append(handledBlocks, b) blockHandled <- true }) @@ -137,7 +137,7 @@ func TestErrorPassing(t *testing.T) { WorkerPoolCapacity: 10, }) require.NoError(t, err, "failed to create listener") - l.RegisterBlockHandler(func(b *block.Block) {}) + l.RegisterBlockHandler(func(context.Context, *block.Block) {}) errCh := make(chan error) diff --git a/pkg/network/transport/object/grpc/service.go b/pkg/network/transport/object/grpc/service.go index d55e3d87f..874612e1d 100644 --- a/pkg/network/transport/object/grpc/service.go +++ b/pkg/network/transport/object/grpc/service.go @@ -26,7 +26,7 @@ func New(c objectSvc.ServiceServer) *Server { // Patch opens internal Object patch stream and feeds it by the data read from gRPC stream. func (s *Server) Patch(gStream objectGRPC.ObjectService_PatchServer) error { - stream, err := s.srv.Patch() + stream, err := s.srv.Patch(gStream.Context()) if err != nil { return err } @@ -68,7 +68,7 @@ func (s *Server) Patch(gStream objectGRPC.ObjectService_PatchServer) error { // Put opens internal Object service Put stream and overtakes data from gRPC stream to it. 
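The recurring shape of the event-listener change above, shown once as a minimal self-contained sketch. Block, listener and the wiring here are simplified stand-ins for the neo-go and frostfs-node types, not the real ones: handlers now take the caller's context as their first argument, so work fanned out by the listener inherits cancellation and trace data instead of falling back to context.Background().

	package sketch

	import "context"

	// Block stands in for neo-go's block.Block.
	type Block struct{ Index uint32 }

	// BlockHandler mirrors the patched signature: context first.
	type BlockHandler func(context.Context, *Block)

	type listener struct{ handlers []BlockHandler }

	// handleBlock forwards the caller's ctx to every handler, the same
	// shape as listener.handleBlockEvent in the hunk above.
	func (l *listener) handleBlock(ctx context.Context, b *Block) {
		for _, h := range l.handlers {
			h(ctx, b)
		}
	}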
diff --git a/pkg/network/transport/object/grpc/service.go b/pkg/network/transport/object/grpc/service.go
index d55e3d87f..874612e1d 100644
--- a/pkg/network/transport/object/grpc/service.go
+++ b/pkg/network/transport/object/grpc/service.go
@@ -26,7 +26,7 @@ func New(c objectSvc.ServiceServer) *Server {
 
 // Patch opens internal Object patch stream and feeds it by the data read from gRPC stream.
 func (s *Server) Patch(gStream objectGRPC.ObjectService_PatchServer) error {
-	stream, err := s.srv.Patch()
+	stream, err := s.srv.Patch(gStream.Context())
 	if err != nil {
 		return err
 	}
@@ -68,7 +68,7 @@ func (s *Server) Patch(gStream objectGRPC.ObjectService_PatchServer) error {
 
 // Put opens internal Object service Put stream and overtakes data from gRPC stream to it.
 func (s *Server) Put(gStream objectGRPC.ObjectService_PutServer) error {
-	stream, err := s.srv.Put()
+	stream, err := s.srv.Put(gStream.Context())
 	if err != nil {
 		return err
 	}
diff --git a/pkg/services/apemanager/audit.go b/pkg/services/apemanager/audit.go
index d132ae7db..c0853af14 100644
--- a/pkg/services/apemanager/audit.go
+++ b/pkg/services/apemanager/audit.go
@@ -33,7 +33,7 @@ func (a *auditService) AddChain(ctx context.Context, req *apemanager.AddChainReq
 		return res, err
 	}
 
-	audit.LogRequest(a.log, ape_grpc.APEManagerService_AddChain_FullMethodName, req,
+	audit.LogRequest(ctx, a.log, ape_grpc.APEManagerService_AddChain_FullMethodName, req,
 		audit.TargetFromChainID(req.GetBody().GetTarget().GetTargetType().String(),
 			req.GetBody().GetTarget().GetName(),
 			res.GetBody().GetChainID()),
@@ -49,7 +49,7 @@ func (a *auditService) ListChains(ctx context.Context, req *apemanager.ListChain
 		return res, err
 	}
 
-	audit.LogRequest(a.log, ape_grpc.APEManagerService_ListChains_FullMethodName, req,
+	audit.LogRequest(ctx, a.log, ape_grpc.APEManagerService_ListChains_FullMethodName, req,
 		audit.TargetFromChainID(req.GetBody().GetTarget().GetTargetType().String(),
 			req.GetBody().GetTarget().GetName(),
 			nil),
@@ -65,7 +65,7 @@ func (a *auditService) RemoveChain(ctx context.Context, req *apemanager.RemoveCh
 		return res, err
 	}
 
-	audit.LogRequest(a.log, ape_grpc.APEManagerService_RemoveChain_FullMethodName, req,
+	audit.LogRequest(ctx, a.log, ape_grpc.APEManagerService_RemoveChain_FullMethodName, req,
 		audit.TargetFromChainID(req.GetBody().GetTarget().GetTargetType().String(),
 			req.GetBody().GetTarget().GetName(),
 			req.GetBody().GetChainID()),
diff --git a/pkg/services/container/audit.go b/pkg/services/container/audit.go
index b257272f5..e2e37e346 100644
--- a/pkg/services/container/audit.go
+++ b/pkg/services/container/audit.go
@@ -35,7 +35,7 @@ func (a *auditService) Delete(ctx context.Context, req *container.DeleteRequest)
 		return res, err
 	}
 
-	audit.LogRequest(a.log, container_grpc.ContainerService_Delete_FullMethodName, req,
+	audit.LogRequest(ctx, a.log, container_grpc.ContainerService_Delete_FullMethodName, req,
 		audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil)
 
 	return res, err
@@ -47,7 +47,7 @@ func (a *auditService) Get(ctx context.Context, req *container.GetRequest) (*con
 	if !a.enabled.Load() {
 		return res, err
 	}
-	audit.LogRequest(a.log, container_grpc.ContainerService_Get_FullMethodName, req,
+	audit.LogRequest(ctx, a.log, container_grpc.ContainerService_Get_FullMethodName, req,
 		audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil)
 	return res, err
 }
@@ -58,7 +58,7 @@ func (a *auditService) List(ctx context.Context, req *container.ListRequest) (*c
 	if !a.enabled.Load() {
 		return res, err
 	}
-	audit.LogRequest(a.log, container_grpc.ContainerService_List_FullMethodName, req,
+	audit.LogRequest(ctx, a.log, container_grpc.ContainerService_List_FullMethodName, req,
 		audit.TargetFromRef(req.GetBody().GetOwnerID(), &user.ID{}), err == nil)
 	return res, err
 }
@@ -69,7 +69,7 @@ func (a *auditService) Put(ctx context.Context, req *container.PutRequest) (*con
 	if !a.enabled.Load() {
 		return res, err
 	}
-	audit.LogRequest(a.log, container_grpc.ContainerService_Put_FullMethodName, req,
+	audit.LogRequest(ctx, a.log, container_grpc.ContainerService_Put_FullMethodName, req,
 		audit.TargetFromRef(res.GetBody().GetContainerID(), &cid.ID{}), err == nil)
 	return res, err
 }
diff --git a/pkg/services/container/morph/executor.go b/pkg/services/container/morph/executor.go
index 05d8749cf..9d1b79d06 100644
--- a/pkg/services/container/morph/executor.go
+++ b/pkg/services/container/morph/executor.go
@@ -36,9 +36,9 @@ type Reader interface {
 // Writer is an interface of container storage updater.
 type Writer interface {
 	// Put stores specified container in the side chain.
-	Put(containercore.Container) (*cid.ID, error)
+	Put(context.Context, containercore.Container) (*cid.ID, error)
 	// Delete removes specified container from the side chain.
-	Delete(containercore.RemovalWitness) error
+	Delete(context.Context, containercore.RemovalWitness) error
 }
 
 func NewExecutor(rdr Reader, wrt Writer) containerSvc.ServiceExecutor {
@@ -48,7 +48,7 @@ func NewExecutor(rdr Reader, wrt Writer) containerSvc.ServiceExecutor {
 	}
 }
 
-func (s *morphExecutor) Put(_ context.Context, tokV2 *sessionV2.Token, body *container.PutRequestBody) (*container.PutResponseBody, error) {
+func (s *morphExecutor) Put(ctx context.Context, tokV2 *sessionV2.Token, body *container.PutRequestBody) (*container.PutResponseBody, error) {
 	sigV2 := body.GetSignature()
 	if sigV2 == nil {
 		// TODO(@cthulhu-rider): #468 use "const" error
@@ -81,7 +81,7 @@ func (s *morphExecutor) Put(_ context.Context, tokV2 *sessionV2.Token, body *con
 		}
 	}
 
-	idCnr, err := s.wrt.Put(cnr)
+	idCnr, err := s.wrt.Put(ctx, cnr)
 	if err != nil {
 		return nil, err
 	}
@@ -95,7 +95,7 @@ func (s *morphExecutor) Put(_ context.Context, tokV2 *sessionV2.Token, body *con
 	return res, nil
 }
 
-func (s *morphExecutor) Delete(_ context.Context, tokV2 *sessionV2.Token, body *container.DeleteRequestBody) (*container.DeleteResponseBody, error) {
+func (s *morphExecutor) Delete(ctx context.Context, tokV2 *sessionV2.Token, body *container.DeleteRequestBody) (*container.DeleteResponseBody, error) {
 	idV2 := body.GetContainerID()
 	if idV2 == nil {
 		return nil, errors.New("missing container ID")
@@ -125,7 +125,7 @@ func (s *morphExecutor) Delete(_ context.Context, tokV2 *sessionV2.Token, body *
 	rmWitness.Signature = body.GetSignature()
 	rmWitness.SessionToken = tok
 
-	err = s.wrt.Delete(rmWitness)
+	err = s.wrt.Delete(ctx, rmWitness)
 	if err != nil {
 		return nil, err
 	}
diff --git a/pkg/services/container/morph/executor_test.go b/pkg/services/container/morph/executor_test.go
index c64310eb3..1d711f4d7 100644
--- a/pkg/services/container/morph/executor_test.go
+++ b/pkg/services/container/morph/executor_test.go
@@ -24,11 +24,11 @@ type mock struct {
 	containerSvcMorph.Reader
 }
 
-func (m mock) Put(_ containerCore.Container) (*cid.ID, error) {
+func (m mock) Put(_ context.Context, _ containerCore.Container) (*cid.ID, error) {
 	return new(cid.ID), nil
 }
 
-func (m mock) Delete(_ containerCore.RemovalWitness) error {
+func (m mock) Delete(_ context.Context, _ containerCore.RemovalWitness) error {
 	return nil
 }
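When a context parameter is added to an interface such as Writer above, every implementation has to change in lockstep, including test doubles. A minimal sketch of that ripple, with simplified stand-in types rather than the real containercore ones:

	package sketch

	import "context"

	type (
		Container      struct{}
		RemovalWitness struct{}
		ID             struct{}
	)

	// Writer mirrors the updated interface shape: context.Context first,
	// domain arguments after.
	type Writer interface {
		Put(context.Context, Container) (*ID, error)
		Delete(context.Context, RemovalWitness) error
	}

	// mockWriter is the matching test double; it ignores the context but
	// must still accept it to satisfy the interface.
	type mockWriter struct{}

	func (mockWriter) Put(context.Context, Container) (*ID, error)  { return new(ID), nil }
	func (mockWriter) Delete(context.Context, RemovalWitness) error { return nil }

	var _ Writer = mockWriter{}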
diff --git a/pkg/services/control/ir/server/audit.go b/pkg/services/control/ir/server/audit.go
index 9f7a8b879..4fcde4cbd 100644
--- a/pkg/services/control/ir/server/audit.go
+++ b/pkg/services/control/ir/server/audit.go
@@ -36,7 +36,7 @@ func (a *auditService) HealthCheck(ctx context.Context, req *control.HealthCheck
 	if !a.enabled.Load() {
 		return res, err
 	}
-	audit.LogRequestWithKey(a.log, control.ControlService_HealthCheck_FullMethodName, req.GetSignature().GetKey(), nil, err == nil)
+	audit.LogRequestWithKey(ctx, a.log, control.ControlService_HealthCheck_FullMethodName, req.GetSignature().GetKey(), nil, err == nil)
 	return res, err
 }
 
@@ -79,7 +79,7 @@ func (a *auditService) RemoveContainer(ctx context.Context, req *control.RemoveC
 		}
 	}
 
-	audit.LogRequestWithKey(a.log, control.ControlService_RemoveContainer_FullMethodName, req.GetSignature().GetKey(), sb, err == nil)
+	audit.LogRequestWithKey(ctx, a.log, control.ControlService_RemoveContainer_FullMethodName, req.GetSignature().GetKey(), sb, err == nil)
 	return res, err
 }
 
@@ -90,7 +90,7 @@ func (a *auditService) RemoveNode(ctx context.Context, req *control.RemoveNodeRe
 		return res, err
 	}
 
-	audit.LogRequestWithKey(a.log, control.ControlService_RemoveNode_FullMethodName, req.GetSignature().GetKey(),
+	audit.LogRequestWithKey(ctx, a.log, control.ControlService_RemoveNode_FullMethodName, req.GetSignature().GetKey(),
 		audit.TargetFromString(hex.EncodeToString(req.GetBody().GetKey())), err == nil)
 	return res, err
 }
@@ -102,7 +102,7 @@ func (a *auditService) TickEpoch(ctx context.Context, req *control.TickEpochRequ
 		return res, err
 	}
 
-	audit.LogRequestWithKey(a.log, control.ControlService_TickEpoch_FullMethodName, req.GetSignature().GetKey(),
+	audit.LogRequestWithKey(ctx, a.log, control.ControlService_TickEpoch_FullMethodName, req.GetSignature().GetKey(),
 		nil, err == nil)
 	return res, err
 }
diff --git a/pkg/services/control/ir/server/calls.go b/pkg/services/control/ir/server/calls.go
index 642932c91..b3b9578b1 100644
--- a/pkg/services/control/ir/server/calls.go
+++ b/pkg/services/control/ir/server/calls.go
@@ -40,7 +40,7 @@ func (s *Server) HealthCheck(_ context.Context, req *control.HealthCheckRequest)
 // TickEpoch forces a new epoch.
 //
 // If request is not signed with a key from white list, permission error returns.
-func (s *Server) TickEpoch(_ context.Context, req *control.TickEpochRequest) (*control.TickEpochResponse, error) {
+func (s *Server) TickEpoch(ctx context.Context, req *control.TickEpochRequest) (*control.TickEpochResponse, error) {
 	if err := s.isValidRequest(req); err != nil {
 		return nil, status.Error(codes.PermissionDenied, err.Error())
 	}
@@ -53,7 +53,7 @@ func (s *Server) TickEpoch(_ context.Context, req *control.TickEpochRequest) (*c
 		return nil, fmt.Errorf("getting current epoch: %w", err)
 	}
 
-	vub, err := s.netmapClient.NewEpochControl(epoch+1, req.GetBody().GetVub())
+	vub, err := s.netmapClient.NewEpochControl(ctx, epoch+1, req.GetBody().GetVub())
 	if err != nil {
 		return nil, fmt.Errorf("forcing new epoch: %w", err)
 	}
@@ -69,7 +69,7 @@ func (s *Server) TickEpoch(_ context.Context, req *control.TickEpochRequest) (*c
 // RemoveNode forces a node removal.
 //
 // If request is not signed with a key from white list, permission error returns.
-func (s *Server) RemoveNode(_ context.Context, req *control.RemoveNodeRequest) (*control.RemoveNodeResponse, error) {
+func (s *Server) RemoveNode(ctx context.Context, req *control.RemoveNodeRequest) (*control.RemoveNodeResponse, error) {
 	if err := s.isValidRequest(req); err != nil {
 		return nil, status.Error(codes.PermissionDenied, err.Error())
 	}
@@ -95,7 +95,7 @@ func (s *Server) RemoveNode(_ context.Context, req *control.RemoveNodeRequest) (
 		return nil, status.Error(codes.FailedPrecondition, "node is already offline")
 	}
 
-	vub, err := s.netmapClient.ForceRemovePeer(nodeInfo, req.GetBody().GetVub())
+	vub, err := s.netmapClient.ForceRemovePeer(ctx, nodeInfo, req.GetBody().GetVub())
 	if err != nil {
 		return nil, fmt.Errorf("forcing node removal: %w", err)
 	}
@@ -109,7 +109,7 @@ func (s *Server) RemoveNode(_ context.Context, req *control.RemoveNodeRequest) (
 }
 
 // RemoveContainer forces a container removal.
-func (s *Server) RemoveContainer(_ context.Context, req *control.RemoveContainerRequest) (*control.RemoveContainerResponse, error) {
+func (s *Server) RemoveContainer(ctx context.Context, req *control.RemoveContainerRequest) (*control.RemoveContainerResponse, error) {
 	if err := s.isValidRequest(req); err != nil {
 		return nil, status.Error(codes.PermissionDenied, err.Error())
 	}
@@ -124,7 +124,7 @@ func (s *Server) RemoveContainer(_ context.Context, req *control.RemoveContainer
 			return nil, status.Error(codes.InvalidArgument, "failed to parse container ID: "+err.Error())
 		}
 		var err error
-		vub, err = s.removeContainer(containerID, req.GetBody().GetVub())
+		vub, err = s.removeContainer(ctx, containerID, req.GetBody().GetVub())
 		if err != nil {
 			return nil, err
 		}
@@ -144,7 +144,7 @@ func (s *Server) RemoveContainer(_ context.Context, req *control.RemoveContainer
 		}
 
 		for _, containerID := range cids {
-			vub, err = s.removeContainer(containerID, req.GetBody().GetVub())
+			vub, err = s.removeContainer(ctx, containerID, req.GetBody().GetVub())
 			if err != nil {
 				return nil, err
 			}
@@ -162,13 +162,13 @@ func (s *Server) RemoveContainer(_ context.Context, req *control.RemoveContainer
 	return resp, nil
 }
 
-func (s *Server) removeContainer(containerID cid.ID, vub uint32) (uint32, error) {
+func (s *Server) removeContainer(ctx context.Context, containerID cid.ID, vub uint32) (uint32, error) {
 	var prm container.DeletePrm
 	prm.SetCID(containerID[:])
 	prm.SetControlTX(true)
 	prm.SetVUB(vub)
 
-	vub, err := s.containerClient.Delete(prm)
+	vub, err := s.containerClient.Delete(ctx, prm)
 	if err != nil {
 		return 0, fmt.Errorf("forcing container removal: %w", err)
 	}
diff --git a/pkg/services/control/server/server.go b/pkg/services/control/server/server.go
index b6fdcb246..94aa1ff5b 100644
--- a/pkg/services/control/server/server.go
+++ b/pkg/services/control/server/server.go
@@ -1,6 +1,7 @@
 package control
 
 import (
+	"context"
 	"crypto/ecdsa"
 	"sync/atomic"
 
@@ -45,11 +46,11 @@ type NodeState interface {
 	//
 	// If status is control.NetmapStatus_MAINTENANCE and maintenance is allowed
 	// in the network settings, the node additionally starts local maintenance.
-	SetNetmapStatus(st control.NetmapStatus) error
+	SetNetmapStatus(ctx context.Context, st control.NetmapStatus) error
 
 	// ForceMaintenance works like SetNetmapStatus(control.NetmapStatus_MAINTENANCE)
 	// but starts local maintenance regardless of the network settings.
-	ForceMaintenance() error
+	ForceMaintenance(ctx context.Context) error
 
 	GetNetmapStatus() (control.NetmapStatus, uint64, error)
 }
diff --git a/pkg/services/control/server/set_netmap_status.go b/pkg/services/control/server/set_netmap_status.go
index 3fd69df12..529041dca 100644
--- a/pkg/services/control/server/set_netmap_status.go
+++ b/pkg/services/control/server/set_netmap_status.go
@@ -12,7 +12,7 @@ import (
 // SetNetmapStatus sets node status in FrostFS network.
 //
 // If request is unsigned or signed by disallowed key, permission error returns.
-func (s *Server) SetNetmapStatus(_ context.Context, req *control.SetNetmapStatusRequest) (*control.SetNetmapStatusResponse, error) {
+func (s *Server) SetNetmapStatus(ctx context.Context, req *control.SetNetmapStatusRequest) (*control.SetNetmapStatusResponse, error) {
 	// verify request
 	if err := s.isValidRequest(req); err != nil {
 		return nil, status.Error(codes.PermissionDenied, err.Error())
@@ -29,9 +29,9 @@ func (s *Server) SetNetmapStatus(_ context.Context, req *control.SetNetmapStatus
 				"force_maintenance MUST be set for %s status only", control.NetmapStatus_MAINTENANCE)
 		}
 
-		err = s.nodeState.ForceMaintenance()
+		err = s.nodeState.ForceMaintenance(ctx)
 	} else {
-		err = s.nodeState.SetNetmapStatus(st)
+		err = s.nodeState.SetNetmapStatus(ctx, st)
 	}
 
 	if err != nil {
diff --git a/pkg/services/object/acl/v2/service.go b/pkg/services/object/acl/v2/service.go
index 7954cb175..ebeb67242 100644
--- a/pkg/services/object/acl/v2/service.go
+++ b/pkg/services/object/acl/v2/service.go
@@ -224,7 +224,7 @@ func (b Service) Get(request *objectV2.GetRequest, stream object.GetObjectStream
 		src: request,
 	}
 
-	reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectGet)
+	reqInfo, err := b.findRequestInfo(stream.Context(), req, cnr, acl.OpObjectGet)
 	if err != nil {
 		return err
 	}
@@ -246,8 +246,8 @@ func (b Service) Get(request *objectV2.GetRequest, stream object.GetObjectStream
 	})
 }
 
-func (b Service) Put() (object.PutObjectStream, error) {
-	streamer, err := b.next.Put()
+func (b Service) Put(ctx context.Context) (object.PutObjectStream, error) {
+	streamer, err := b.next.Put(ctx)
 
 	return putStreamBasicChecker{
 		source: &b,
@@ -255,8 +255,8 @@
 	}, err
 }
 
-func (b Service) Patch() (object.PatchObjectStream, error) {
-	streamer, err := b.next.Patch()
+func (b Service) Patch(ctx context.Context) (object.PatchObjectStream, error) {
+	streamer, err := b.next.Patch(ctx)
 
 	return &patchStreamBasicChecker{
 		source: &b,
@@ -302,7 +302,7 @@ func (b Service) Head(
 		src: request,
 	}
 
-	reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectHead)
+	reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectHead)
 	if err != nil {
 		return nil, err
 	}
@@ -357,7 +357,7 @@ func (b Service) Search(request *objectV2.SearchRequest, stream object.SearchStr
 		src: request,
 	}
 
-	reqInfo, err := b.findRequestInfo(req, id, acl.OpObjectSearch)
+	reqInfo, err := b.findRequestInfo(stream.Context(), req, id, acl.OpObjectSearch)
 	if err != nil {
 		return err
 	}
@@ -415,7 +415,7 @@ func (b Service) Delete(
 		src: request,
 	}
 
-	reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectDelete)
+	reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectDelete)
 	if err != nil {
 		return nil, err
 	}
@@ -468,7 +468,7 @@ func (b Service) GetRange(request *objectV2.GetRangeRequest, stream object.GetOb
 		src: request,
 	}
 
-	reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectRange)
+	reqInfo, err := b.findRequestInfo(stream.Context(), req, cnr, acl.OpObjectRange)
 	if err != nil {
 		return err
 	}
@@ -539,7 +539,7 @@ func (b Service) GetRangeHash(
 		src: request,
 	}
 
-	reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectHash)
+	reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectHash)
 	if err != nil {
 		return nil, err
 	}
@@ -598,7 +598,7 @@ func (b Service) PutSingle(ctx context.Context, request *objectV2.PutSingleReque
 		src: request,
 	}
 
-	reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectPut)
+	reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectPut)
 	if err != nil {
 		return nil, err
 	}
@@ -672,7 +672,7 @@ func (p putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRe
 		src: request,
 	}
 
-	reqInfo, err := p.source.findRequestInfo(req, cnr, acl.OpObjectPut)
+	reqInfo, err := p.source.findRequestInfo(ctx, req, cnr, acl.OpObjectPut)
 	if err != nil {
 		return err
 	}
@@ -791,7 +791,7 @@ func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.Pa
 		src: request,
 	}
 
-	reqInfo, err := p.source.findRequestInfoWithoutACLOperationAssert(req, cnr)
+	reqInfo, err := p.source.findRequestInfoWithoutACLOperationAssert(ctx, req, cnr)
 	if err != nil {
 		return err
 	}
@@ -808,7 +808,7 @@ func (p patchStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.Pa
 	return p.next.CloseAndRecv(ctx)
 }
 
-func (b Service) findRequestInfo(req MetaWithToken, idCnr cid.ID, op acl.Op) (info RequestInfo, err error) {
+func (b Service) findRequestInfo(ctx context.Context, req MetaWithToken, idCnr cid.ID, op acl.Op) (info RequestInfo, err error) {
 	cnr, err := b.containers.Get(idCnr) // fetch actual container
 	if err != nil {
 		return info, err
@@ -837,7 +837,7 @@ func (b Service) findRequestInfo(req MetaWithToken, idCnr cid.ID, op acl.Op) (in
 	if err != nil {
 		return info, err
 	}
-	res, err := b.c.Classify(ownerID, ownerKey, idCnr, cnr.Value)
+	res, err := b.c.Classify(ctx, ownerID, ownerKey, idCnr, cnr.Value)
 	if err != nil {
 		return info, err
 	}
@@ -866,7 +866,7 @@ func (b Service) findRequestInfo(req MetaWithToken, idCnr cid.ID, op acl.Op) (in
 }
 
 // findRequestInfoWithoutACLOperationAssert is findRequestInfo without session token verb assert.
-func (b Service) findRequestInfoWithoutACLOperationAssert(req MetaWithToken, idCnr cid.ID) (info RequestInfo, err error) {
+func (b Service) findRequestInfoWithoutACLOperationAssert(ctx context.Context, req MetaWithToken, idCnr cid.ID) (info RequestInfo, err error) {
 	cnr, err := b.containers.Get(idCnr) // fetch actual container
 	if err != nil {
 		return info, err
@@ -891,7 +891,7 @@ func (b Service) findRequestInfoWithoutACLOperationAssert(req MetaWithToken, idC
 	if err != nil {
 		return info, err
 	}
-	res, err := b.c.Classify(ownerID, ownerKey, idCnr, cnr.Value)
+	res, err := b.c.Classify(ctx, ownerID, ownerKey, idCnr, cnr.Value)
 	if err != nil {
 		return info, err
 	}
diff --git a/pkg/services/object/ape/service.go b/pkg/services/object/ape/service.go
index 6eedaf99e..3173fe5e5 100644
--- a/pkg/services/object/ape/service.go
+++ b/pkg/services/object/ape/service.go
@@ -191,8 +191,8 @@ func (p putStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PutR
 	return p.next.CloseAndRecv(ctx)
 }
 
-func (c *Service) Put() (objectSvc.PutObjectStream, error) {
-	streamer, err := c.next.Put()
+func (c *Service) Put(ctx context.Context) (objectSvc.PutObjectStream, error) {
+	streamer, err := c.next.Put(ctx)
 
 	return &putStreamBasicChecker{
 		apeChecker: c.apeChecker,
@@ -247,8 +247,8 @@ func (p patchStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.Pa
 	return p.next.CloseAndRecv(ctx)
 }
 
-func (c *Service) Patch() (objectSvc.PatchObjectStream, error) {
-	streamer, err := c.next.Patch()
+func (c *Service) Patch(ctx context.Context) (objectSvc.PatchObjectStream, error) {
+	streamer, err := c.next.Patch(ctx)
 
 	return &patchStreamBasicChecker{
 		apeChecker: c.apeChecker,
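Server-streaming RPC methods have no context argument of their own; the per-request context lives on the stream, which is why the hunks above call b.findRequestInfo(stream.Context(), ...) in Get, Search and GetRange but pass the plain ctx in unary methods. A sketch of the idiom, where serverStream and checkRequest are illustrative stand-ins rather than the real types:

	package sketch

	import "context"

	// serverStream stands in for a gRPC server stream: the per-RPC
	// context is only reachable through Context().
	type serverStream interface {
		Context() context.Context
	}

	// checkRequest is where a ctx-aware helper, like findRequestInfo,
	// would run its per-request checks.
	func checkRequest(ctx context.Context, op string) error {
		return ctx.Err()
	}

	// handleGet derives the request context from the stream.
	func handleGet(stream serverStream) error {
		return checkRequest(stream.Context(), "GET")
	}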
diff --git a/pkg/services/object/audit.go b/pkg/services/object/audit.go
index 39e1f9f2d..44d7016c0 100644
--- a/pkg/services/object/audit.go
+++ b/pkg/services/object/audit.go
@@ -37,7 +37,7 @@ func (a *auditService) Delete(ctx context.Context, req *object.DeleteRequest) (*
 	if !a.enabled.Load() {
 		return res, err
 	}
-	audit.LogRequest(a.log, objectGRPC.ObjectService_Delete_FullMethodName, req,
+	audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Delete_FullMethodName, req,
 		audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
 	return res, err
 }
@@ -48,7 +48,7 @@ func (a *auditService) Get(req *object.GetRequest, stream GetObjectStream) error
 	if !a.enabled.Load() {
 		return err
 	}
-	audit.LogRequest(a.log, objectGRPC.ObjectService_Get_FullMethodName, req,
+	audit.LogRequest(stream.Context(), a.log, objectGRPC.ObjectService_Get_FullMethodName, req,
 		audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
 	return err
 }
@@ -59,7 +59,7 @@ func (a *auditService) GetRange(req *object.GetRangeRequest, stream GetObjectRan
 	if !a.enabled.Load() {
 		return err
 	}
-	audit.LogRequest(a.log, objectGRPC.ObjectService_GetRange_FullMethodName, req,
+	audit.LogRequest(stream.Context(), a.log, objectGRPC.ObjectService_GetRange_FullMethodName, req,
 		audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
 	return err
 }
@@ -70,7 +70,7 @@ func (a *auditService) GetRangeHash(ctx context.Context, req *object.GetRangeHas
 	if !a.enabled.Load() {
 		return resp, err
 	}
-	audit.LogRequest(a.log, objectGRPC.ObjectService_GetRangeHash_FullMethodName, req,
+	audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_GetRangeHash_FullMethodName, req,
 		audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
 	return resp, err
 }
@@ -81,19 +81,19 @@ func (a *auditService) Head(ctx context.Context, req *object.HeadRequest) (*obje
 	if !a.enabled.Load() {
 		return resp, err
 	}
-	audit.LogRequest(a.log, objectGRPC.ObjectService_Head_FullMethodName, req,
+	audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Head_FullMethodName, req,
 		audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil)
 	return resp, err
 }
 
 // Put implements ServiceServer.
-func (a *auditService) Put() (PutObjectStream, error) {
-	res, err := a.next.Put()
+func (a *auditService) Put(ctx context.Context) (PutObjectStream, error) {
+	res, err := a.next.Put(ctx)
 	if !a.enabled.Load() {
 		return res, err
 	}
 	if err != nil {
-		audit.LogRequest(a.log, objectGRPC.ObjectService_Put_FullMethodName, nil, nil, false)
+		audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, nil, nil, false)
 		return res, err
 	}
 	return &auditPutStream{
@@ -108,7 +108,7 @@ func (a *auditService) PutSingle(ctx context.Context, req *object.PutSingleReque
 	if !a.enabled.Load() {
 		return resp, err
 	}
-	audit.LogRequest(a.log, objectGRPC.ObjectService_PutSingle_FullMethodName, req,
+	audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_PutSingle_FullMethodName, req,
 		audit.TargetFromContainerIDObjectID(req.GetBody().GetObject().GetHeader().GetContainerID(),
 			req.GetBody().GetObject().GetObjectID()),
 		err == nil)
@@ -121,7 +121,7 @@ func (a *auditService) Search(req *object.SearchRequest, stream SearchStream) er
 	if !a.enabled.Load() {
 		return err
 	}
-	audit.LogRequest(a.log, objectGRPC.ObjectService_Search_FullMethodName, req,
+	audit.LogRequest(stream.Context(), a.log, objectGRPC.ObjectService_Search_FullMethodName, req,
 		audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil)
 	return err
 }
@@ -145,7 +145,7 @@ func (a *auditPutStream) CloseAndRecv(ctx context.Context) (*object.PutResponse,
 		a.failed = true
 	}
 	a.objectID = resp.GetBody().GetObjectID()
-	audit.LogRequestWithKey(a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key,
+	audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key,
 		audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
 		!a.failed)
 	return resp, err
@@ -164,7 +164,7 @@ func (a *auditPutStream) Send(ctx context.Context, req *object.PutRequest) error
 		a.failed = true
 	}
 	if !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here
-		audit.LogRequestWithKey(a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key,
+		audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key,
 			audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
 			!a.failed)
 	}
@@ -183,13 +183,13 @@ type auditPatchStream struct {
 	nonFirstSend bool
 }
 
-func (a *auditService) Patch() (PatchObjectStream, error) {
-	res, err := a.next.Patch()
+func (a *auditService) Patch(ctx context.Context) (PatchObjectStream, error) {
+	res, err := a.next.Patch(ctx)
 	if !a.enabled.Load() {
 		return res, err
 	}
 	if err != nil {
-		audit.LogRequest(a.log, objectGRPC.ObjectService_Patch_FullMethodName, nil, nil, false)
+		audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, nil, nil, false)
 		return res, err
 	}
 	return &auditPatchStream{
@@ -205,7 +205,7 @@ func (a *auditPatchStream) CloseAndRecv(ctx context.Context) (*object.PatchRespo
 		a.failed = true
 	}
 	a.objectID = resp.GetBody().GetObjectID()
-	audit.LogRequestWithKey(a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key,
+	audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key,
 		audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
 		!a.failed)
 	return resp, err
@@ -225,7 +225,7 @@ func (a *auditPatchStream) Send(ctx context.Context, req *object.PatchRequest) e
 		a.failed = true
 	}
 	if !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here
-		audit.LogRequestWithKey(a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key,
+		audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key,
 			audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
 			!a.failed)
 	}
diff --git a/pkg/services/object/common.go b/pkg/services/object/common.go
index f48cc5b3d..bc2378efd 100644
--- a/pkg/services/object/common.go
+++ b/pkg/services/object/common.go
@@ -40,20 +40,20 @@ func (x *Common) Get(req *objectV2.GetRequest, stream GetObjectStream) error {
 	return x.nextHandler.Get(req, stream)
 }
 
-func (x *Common) Put() (PutObjectStream, error) {
+func (x *Common) Put(ctx context.Context) (PutObjectStream, error) {
 	if x.state.IsMaintenance() {
 		return nil, new(apistatus.NodeUnderMaintenance)
 	}
 
-	return x.nextHandler.Put()
+	return x.nextHandler.Put(ctx)
 }
 
-func (x *Common) Patch() (PatchObjectStream, error) {
+func (x *Common) Patch(ctx context.Context) (PatchObjectStream, error) {
 	if x.state.IsMaintenance() {
 		return nil, new(apistatus.NodeUnderMaintenance)
 	}
 
-	return x.nextHandler.Patch()
+	return x.nextHandler.Patch(ctx)
 }
 
 func (x *Common) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV2.HeadResponse, error) {
diff --git a/pkg/services/object/common/writer/common.go b/pkg/services/object/common/writer/common.go
index 8b92d34ed..3b68efab4 100644
--- a/pkg/services/object/common/writer/common.go
+++ b/pkg/services/object/common/writer/common.go
@@ -89,7 +89,7 @@ func (n *NodeIterator) forEachAddress(ctx context.Context, traverser *placement.
 			err := f(ctx, NodeDescriptor{Local: isLocal, Info: addr})
 			if err != nil {
 				resErr.Store(err)
-				svcutil.LogServiceError(n.cfg.Logger, "PUT", addr.Addresses(), err)
+				svcutil.LogServiceError(ctx, n.cfg.Logger, "PUT", addr.Addresses(), err)
 				return
 			}
 
@@ -97,7 +97,7 @@ func (n *NodeIterator) forEachAddress(ctx context.Context, traverser *placement.
 			*item = true
 		}); err != nil {
 			wg.Done()
-			svcutil.LogWorkerPoolError(n.cfg.Logger, "PUT", err)
+			svcutil.LogWorkerPoolError(ctx, n.cfg.Logger, "PUT", err)
 			return true
 		}
diff --git a/pkg/services/object/common/writer/ec.go b/pkg/services/object/common/writer/ec.go
index 64115b86b..fdaa569da 100644
--- a/pkg/services/object/common/writer/ec.go
+++ b/pkg/services/object/common/writer/ec.go
@@ -154,7 +154,7 @@ func (e *ECWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index
 			err = e.Relay(ctx, info, c)
 		}); poolErr != nil {
 			close(completed)
-			svcutil.LogWorkerPoolError(e.Config.Logger, "PUT", poolErr)
+			svcutil.LogWorkerPoolError(ctx, e.Config.Logger, "PUT", poolErr)
 			return poolErr
 		}
 		<-completed
diff --git a/pkg/services/object/get/assembleec.go b/pkg/services/object/get/assembleec.go
index 8ab423c87..59dd7fd93 100644
--- a/pkg/services/object/get/assembleec.go
+++ b/pkg/services/object/get/assembleec.go
@@ -37,7 +37,7 @@ func (r *request) assembleEC(ctx context.Context) {
 	r.log.Debug(ctx, logs.GetTryingToAssembleTheECObject)
 
 	// initialize epoch number
-	ok := r.initEpoch()
+	ok := r.initEpoch(ctx)
 	if !ok {
 		return
 	}
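Worker pools run submitted tasks on their own goroutines, so the request context has to be captured in the closure, and a submission failure is best reported against that same context. A sketch of the pattern behind the LogWorkerPoolError call sites above, where pool and the logging are simplified assumptions, not the real frostfs-node types:

	package sketch

	import (
		"context"
		"log"
	)

	// pool models a worker pool whose Submit can fail when it is drained.
	type pool interface {
		Submit(func()) error
	}

	// submitWithCtx captures ctx in the task closure and reuses it when
	// reporting a pool failure, instead of context.Background().
	func submitWithCtx(ctx context.Context, p pool, task func(context.Context)) {
		if err := p.Submit(func() { task(ctx) }); err != nil {
			log.Printf("worker pool drained: %v (ctx err: %v)", err, ctx.Err())
		}
	}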
diff --git a/pkg/services/object/get/container.go b/pkg/services/object/get/container.go
index 2b84c5b32..0ee8aed53 100644
--- a/pkg/services/object/get/container.go
+++ b/pkg/services/object/get/container.go
@@ -21,7 +21,7 @@ func (r *request) executeOnContainer(ctx context.Context) {
 	)
 
 	// initialize epoch number
-	ok := r.initEpoch()
+	ok := r.initEpoch(ctx)
 	if !ok {
 		return
 	}
@@ -50,7 +50,7 @@ func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool
 		zap.Uint64("number", r.curProcEpoch),
 	)
 
-	traverser, ok := r.generateTraverser(r.address())
+	traverser, ok := r.generateTraverser(ctx, r.address())
 	if !ok {
 		return true
 	}
diff --git a/pkg/services/object/get/remote.go b/pkg/services/object/get/remote.go
index b6a83fd0c..78ca5b5e3 100644
--- a/pkg/services/object/get/remote.go
+++ b/pkg/services/object/get/remote.go
@@ -20,7 +20,7 @@ func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool {
 	r.log.Debug(ctx, logs.ProcessingNode, zap.String("node_key", hex.EncodeToString(info.PublicKey())))
 
-	rs, ok := r.getRemoteStorage(info)
+	rs, ok := r.getRemoteStorage(ctx, info)
 	if !ok {
 		return true
 	}
diff --git a/pkg/services/object/get/request.go b/pkg/services/object/get/request.go
index bba767d2d..be0950c60 100644
--- a/pkg/services/object/get/request.go
+++ b/pkg/services/object/get/request.go
@@ -116,7 +116,7 @@ func (r *request) netmapLookupDepth() uint64 {
 	return r.prm.common.NetmapLookupDepth()
 }
 
-func (r *request) initEpoch() bool {
+func (r *request) initEpoch(ctx context.Context) bool {
 	r.curProcEpoch = r.netmapEpoch()
 	if r.curProcEpoch > 0 {
 		return true
@@ -129,7 +129,7 @@ func (r *request) initEpoch() bool {
 		r.status = statusUndefined
 		r.err = err
 
-		r.log.Debug(context.Background(), logs.CouldNotGetCurrentEpochNumber, zap.Error(err))
+		r.log.Debug(ctx, logs.CouldNotGetCurrentEpochNumber, zap.Error(err))
 
 		return false
 	case err == nil:
@@ -138,7 +138,7 @@ func (r *request) initEpoch() bool {
 	}
 }
 
-func (r *request) generateTraverser(addr oid.Address) (*placement.Traverser, bool) {
+func (r *request) generateTraverser(ctx context.Context, addr oid.Address) (*placement.Traverser, bool) {
 	obj := addr.Object()
 
 	t, _, err := r.traverserGenerator.GenerateTraverser(addr.Container(), &obj, r.curProcEpoch)
@@ -148,7 +148,7 @@ func (r *request) generateTraverser(addr oid.Address) (*placement.Traverser, boo
 		r.status = statusUndefined
 		r.err = err
 
-		r.log.Debug(context.Background(), logs.GetCouldNotGenerateContainerTraverser, zap.Error(err))
+		r.log.Debug(ctx, logs.GetCouldNotGenerateContainerTraverser, zap.Error(err))
 
 		return nil, false
 	case err == nil:
@@ -156,13 +156,13 @@ func (r *request) generateTraverser(addr oid.Address) (*placement.Traverser, boo
 	}
 }
 
-func (r *request) getRemoteStorage(info clientcore.NodeInfo) (remoteStorage, bool) {
+func (r *request) getRemoteStorage(ctx context.Context, info clientcore.NodeInfo) (remoteStorage, bool) {
 	rs, err := r.remoteStorageConstructor.Get(info)
 	if err != nil {
 		r.status = statusUndefined
 		r.err = err
 
-		r.log.Debug(context.Background(), logs.GetCouldNotConstructRemoteNodeClient)
+		r.log.Debug(ctx, logs.GetCouldNotConstructRemoteNodeClient)
 
 		return nil, false
 	}
diff --git a/pkg/services/object/metrics.go b/pkg/services/object/metrics.go
index 61aed5003..ec47456de 100644
--- a/pkg/services/object/metrics.go
+++ b/pkg/services/object/metrics.go
@@ -64,11 +64,11 @@ func (m MetricCollector) Get(req *object.GetRequest, stream GetObjectStream) (er
 	return
 }
 
-func (m MetricCollector) Put() (PutObjectStream, error) {
+func (m MetricCollector) Put(ctx context.Context) (PutObjectStream, error) {
 	if m.enabled {
 		t := time.Now()
 
-		stream, err := m.next.Put()
+		stream, err := m.next.Put(ctx)
 		if err != nil {
 			return nil, err
 		}
@@ -79,14 +79,14 @@ func (m MetricCollector) Put() (PutObjectStream, error) {
 			start: t,
 		}, nil
 	}
-	return m.next.Put()
+	return m.next.Put(ctx)
 }
 
-func (m MetricCollector) Patch() (PatchObjectStream, error) {
+func (m MetricCollector) Patch(ctx context.Context) (PatchObjectStream, error) {
 	if m.enabled {
 		t := time.Now()
 
-		stream, err := m.next.Patch()
+		stream, err := m.next.Patch(ctx)
 		if err != nil {
 			return nil, err
 		}
@@ -97,7 +97,7 @@ func (m MetricCollector) Patch() (PatchObjectStream, error) {
 			start: t,
 		}, nil
 	}
-	return m.next.Patch()
+	return m.next.Patch(ctx)
 }
 
 func (m MetricCollector) PutSingle(ctx context.Context, request *object.PutSingleRequest) (*object.PutSingleResponse, error) {
diff --git a/pkg/services/object/response.go b/pkg/services/object/response.go
index d7ba9f843..720641455 100644
--- a/pkg/services/object/response.go
+++ b/pkg/services/object/response.go
@@ -80,8 +80,8 @@ func (s *putStreamResponser) CloseAndRecv(ctx context.Context) (*object.PutRespo
 	return r, nil
 }
 
-func (s *ResponseService) Put() (PutObjectStream, error) {
-	stream, err := s.svc.Put()
+func (s *ResponseService) Put(ctx context.Context) (PutObjectStream, error) {
+	stream, err := s.svc.Put(ctx)
 	if err != nil {
 		return nil, fmt.Errorf("could not create Put object streamer: %w", err)
 	}
@@ -109,8 +109,8 @@ func (s *patchStreamResponser) CloseAndRecv(ctx context.Context) (*object.PatchR
 	return r, nil
 }
 
-func (s *ResponseService) Patch() (PatchObjectStream, error) {
-	stream, err := s.svc.Patch()
+func (s *ResponseService) Patch(ctx context.Context) (PatchObjectStream, error) {
+	stream, err := s.svc.Patch(ctx)
 	if err != nil {
 		return nil, fmt.Errorf("could not create Put object streamer: %w", err)
 	}
diff --git a/pkg/services/object/search/search.go b/pkg/services/object/search/search.go
index bb5c720ff..e24da975d 100644
--- a/pkg/services/object/search/search.go
+++ b/pkg/services/object/search/search.go
@@ -23,7 +23,7 @@ func (exec *execCtx) execute(ctx context.Context) error {
 	exec.log.Debug(ctx, logs.ServingRequest)
 
 	err := exec.executeLocal(ctx)
-	exec.logResult(err)
+	exec.logResult(ctx, err)
 
 	if exec.isLocal() {
 		exec.log.Debug(ctx, logs.SearchReturnResultDirectly)
@@ -31,15 +31,15 @@ func (exec *execCtx) execute(ctx context.Context) error {
 	}
 
 	err = exec.executeOnContainer(ctx)
-	exec.logResult(err)
+	exec.logResult(ctx, err)
 	return err
 }
 
-func (exec *execCtx) logResult(err error) {
+func (exec *execCtx) logResult(ctx context.Context, err error) {
 	switch {
 	default:
-		exec.log.Debug(context.Background(), logs.OperationFinishedWithError, zap.String("error", err.Error()))
+		exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.String("error", err.Error()))
 	case err == nil:
-		exec.log.Debug(context.Background(), logs.OperationFinishedSuccessfully)
+		exec.log.Debug(ctx, logs.OperationFinishedSuccessfully)
 	}
 }
diff --git a/pkg/services/object/server.go b/pkg/services/object/server.go
index da98ce245..6d93ea460 100644
--- a/pkg/services/object/server.go
+++ b/pkg/services/object/server.go
@@ -41,8 +41,8 @@ type PatchObjectStream interface {
 // serving v2 Object service.
 type ServiceServer interface {
 	Get(*object.GetRequest, GetObjectStream) error
-	Put() (PutObjectStream, error)
-	Patch() (PatchObjectStream, error)
+	Put(context.Context) (PutObjectStream, error)
+	Patch(context.Context) (PatchObjectStream, error)
 	Head(context.Context, *object.HeadRequest) (*object.HeadResponse, error)
 	Search(*object.SearchRequest, SearchStream) error
 	Delete(context.Context, *object.DeleteRequest) (*object.DeleteResponse, error)
diff --git a/pkg/services/object/sign.go b/pkg/services/object/sign.go
index f5ae97b62..e3822ab12 100644
--- a/pkg/services/object/sign.go
+++ b/pkg/services/object/sign.go
@@ -103,8 +103,8 @@ func (s *putStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.PutRes
 	return resp, s.sigSvc.SignResponse(resp, err)
 }
 
-func (s *SignService) Put() (PutObjectStream, error) {
-	stream, err := s.svc.Put()
+func (s *SignService) Put(ctx context.Context) (PutObjectStream, error) {
+	stream, err := s.svc.Put(ctx)
 	if err != nil {
 		return nil, fmt.Errorf("could not create Put object streamer: %w", err)
 	}
@@ -139,8 +139,8 @@ func (s *patchStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.Patc
 	return resp, s.sigSvc.SignResponse(resp, err)
 }
 
-func (s *SignService) Patch() (PatchObjectStream, error) {
-	stream, err := s.svc.Patch()
+func (s *SignService) Patch(ctx context.Context) (PatchObjectStream, error) {
+	stream, err := s.svc.Patch(ctx)
 	if err != nil {
 		return nil, fmt.Errorf("could not create Put object streamer: %w", err)
 	}
diff --git a/pkg/services/object/transport_splitter.go b/pkg/services/object/transport_splitter.go
index e560d6d8c..2bd8fff28 100644
--- a/pkg/services/object/transport_splitter.go
+++ b/pkg/services/object/transport_splitter.go
@@ -87,12 +87,12 @@ func (c *TransportSplitter) Get(req *object.GetRequest, stream GetObjectStream)
 	})
 }
 
-func (c TransportSplitter) Put() (PutObjectStream, error) {
-	return c.next.Put()
+func (c TransportSplitter) Put(ctx context.Context) (PutObjectStream, error) {
+	return c.next.Put(ctx)
 }
 
-func (c TransportSplitter) Patch() (PatchObjectStream, error) {
-	return c.next.Patch()
+func (c TransportSplitter) Patch(ctx context.Context) (PatchObjectStream, error) {
+	return c.next.Patch(ctx)
 }
 
 func (c TransportSplitter) Head(ctx context.Context, request *object.HeadRequest) (*object.HeadResponse, error) {
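The object service is assembled as a chain of decorators that all implement ServiceServer, so once the interface gains a context parameter each layer simply forwards it, as Common, MetricCollector, SignService and TransportSplitter do above. A stripped-down sketch of one such layer, with stand-in types:

	package sketch

	import "context"

	// PutStream stands in for PutObjectStream.
	type PutStream interface{}

	// Service mirrors the reworked ServiceServer shape: stream factories
	// now take the parent context.
	type Service interface {
		Put(context.Context) (PutStream, error)
	}

	// middleware does its layer-specific work, then passes ctx through
	// to the next layer untouched.
	type middleware struct{ next Service }

	func (m middleware) Put(ctx context.Context) (PutStream, error) {
		// layer-specific checks (metrics, signing, ACL) would go here
		return m.next.Put(ctx)
	}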
diff --git a/pkg/services/object/util/log.go b/pkg/services/object/util/log.go
index 5075344a4..a9f875d8d 100644
--- a/pkg/services/object/util/log.go
+++ b/pkg/services/object/util/log.go
@@ -10,8 +10,8 @@ import (
 )
 
 // LogServiceError writes error message of object service to provided logger.
-func LogServiceError(l *logger.Logger, req string, node network.AddressGroup, err error) {
-	l.Error(context.Background(), logs.UtilObjectServiceError,
+func LogServiceError(ctx context.Context, l *logger.Logger, req string, node network.AddressGroup, err error) {
+	l.Error(ctx, logs.UtilObjectServiceError,
 		zap.String("node", network.StringifyGroup(node)),
 		zap.String("request", req),
 		zap.String("error", err.Error()),
@@ -19,8 +19,8 @@ func LogServiceError(l *logger.Logger, req string, node network.AddressGroup, er
 }
 
 // LogWorkerPoolError writes debug error message of object worker pool to provided logger.
-func LogWorkerPoolError(l *logger.Logger, req string, err error) {
-	l.Error(context.Background(), logs.UtilCouldNotPushTaskToWorkerPool,
+func LogWorkerPoolError(ctx context.Context, l *logger.Logger, req string, err error) {
+	l.Error(ctx, logs.UtilCouldNotPushTaskToWorkerPool,
 		zap.String("request", req),
 		zap.String("error", err.Error()),
 	)
diff --git a/pkg/services/object_manager/tombstone/checker.go b/pkg/services/object_manager/tombstone/checker.go
index 4dbfdea91..e39863601 100644
--- a/pkg/services/object_manager/tombstone/checker.go
+++ b/pkg/services/object_manager/tombstone/checker.go
@@ -63,7 +63,7 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr
 		)
 	} else {
 		if ts != nil {
-			return g.handleTS(addrStr, ts, epoch)
+			return g.handleTS(ctx, addrStr, ts, epoch)
 		}
 	}
 
@@ -72,12 +72,12 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr
 	return false
 }
 
-func (g *ExpirationChecker) handleTS(addr string, ts *objectSDK.Object, reqEpoch uint64) bool {
+func (g *ExpirationChecker) handleTS(ctx context.Context, addr string, ts *objectSDK.Object, reqEpoch uint64) bool {
 	for _, atr := range ts.Attributes() {
 		if atr.Key() == objectV2.SysAttributeExpEpoch {
 			epoch, err := strconv.ParseUint(atr.Value(), 10, 64)
 			if err != nil {
-				g.log.Warn(context.Background(),
+				g.log.Warn(ctx,
 					logs.TombstoneExpirationParseFailure,
 					zap.Error(err),
 				)
diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go
index c82680a1e..2e5e54dfd 100644
--- a/pkg/services/policer/check.go
+++ b/pkg/services/policer/check.go
@@ -122,7 +122,7 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe
 			shortage--
 		} else if nodes[i].Status().IsMaintenance() {
-			shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies)
+			shortage, uncheckedCopies = p.handleMaintenance(ctx, nodes[i], checkedNodes, shortage, uncheckedCopies)
 		} else {
 			if status := checkedNodes.processStatus(nodes[i]); status.Processed() {
 				if status == nodeHoldsObject {
@@ -149,7 +149,7 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe
 				checkedNodes.submitReplicaCandidate(nodes[i])
 				continue
 			} else if client.IsErrNodeUnderMaintenance(err) {
-				shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies)
+				shortage, uncheckedCopies = p.handleMaintenance(ctx, nodes[i], checkedNodes, shortage, uncheckedCopies)
 			} else {
 				p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance,
 					zap.Stringer("object", addr),
@@ -173,12 +173,12 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe
 // prevent spam with new replicas.
 // However, additional copies should not be removed in this case,
 // because we can remove the only copy this way.
-func (p *Policer) handleMaintenance(node netmap.NodeInfo, checkedNodes nodeCache, shortage uint32, uncheckedCopies int) (uint32, int) {
+func (p *Policer) handleMaintenance(ctx context.Context, node netmap.NodeInfo, checkedNodes nodeCache, shortage uint32, uncheckedCopies int) (uint32, int) {
 	checkedNodes.submitReplicaHolder(node)
 	shortage--
 	uncheckedCopies++
 
-	p.log.Debug(context.Background(), logs.PolicerConsiderNodeUnderMaintenanceAsOK,
+	p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK,
 		zap.String("node", netmap.StringifyPublicKey(node)),
 	)
 	return shortage, uncheckedCopies
diff --git a/pkg/services/tree/getsubtree_test.go b/pkg/services/tree/getsubtree_test.go
index 95bdda34b..e7a13827e 100644
--- a/pkg/services/tree/getsubtree_test.go
+++ b/pkg/services/tree/getsubtree_test.go
@@ -131,7 +131,7 @@ func TestGetSubTreeOrderAsc(t *testing.T) {
 	t.Run("boltdb forest", func(t *testing.T) {
 		p := pilorama.NewBoltForest(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama")))
 		require.NoError(t, p.Open(context.Background(), 0o644))
-		require.NoError(t, p.Init())
+		require.NoError(t, p.Init(context.Background()))
 		testGetSubTreeOrderAsc(t, p)
 	})
 }
diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go
index 9ed0bf75f..9fd25351d 100644
--- a/pkg/services/tree/sync.go
+++ b/pkg/services/tree/sync.go
@@ -396,7 +396,7 @@ func (s *Service) syncLoop(ctx context.Context) {
 				break
 			}
 
-			newMap, cnrsToSync := s.containersToSync(cnrs)
+			newMap, cnrsToSync := s.containersToSync(ctx, cnrs)
 
 			s.syncContainers(ctx, cnrsToSync)
 
@@ -483,14 +483,14 @@ func (s *Service) removeContainers(ctx context.Context, newContainers map[cid.ID
 	}
 }
 
-func (s *Service) containersToSync(cnrs []cid.ID) (map[cid.ID]struct{}, []cid.ID) {
+func (s *Service) containersToSync(ctx context.Context, cnrs []cid.ID) (map[cid.ID]struct{}, []cid.ID) {
 	newMap := make(map[cid.ID]struct{}, len(s.cnrMap))
 	cnrsToSync := make([]cid.ID, 0, len(cnrs))
 
 	for _, cnr := range cnrs {
 		_, pos, err := s.getContainerNodes(cnr)
 		if err != nil {
-			s.log.Error(context.Background(), logs.TreeCouldNotCalculateContainerNodes,
+			s.log.Error(ctx, logs.TreeCouldNotCalculateContainerNodes,
 				zap.Stringer("cid", cnr),
 				zap.Error(err))
 			continue
diff --git a/pkg/util/http/calls.go b/pkg/util/http/calls.go
index a9877e007..8569ec734 100644
--- a/pkg/util/http/calls.go
+++ b/pkg/util/http/calls.go
@@ -32,8 +32,8 @@ func (x *Server) Serve() error {
 //
 // Once Shutdown has been called on a server, it may not be reused;
 // future calls to Serve method will have no effect.
-func (x *Server) Shutdown() error {
-	ctx, cancel := context.WithTimeout(context.Background(), x.shutdownTimeout)
+func (x *Server) Shutdown(ctx context.Context) error {
+	ctx, cancel := context.WithTimeout(context.WithoutCancel(ctx), x.shutdownTimeout)
 
 	err := x.srv.Shutdown(ctx)
diff --git a/scripts/populate-metabase/main.go b/scripts/populate-metabase/main.go
index 6f6b233cf..8c4ea41ad 100644
--- a/scripts/populate-metabase/main.go
+++ b/scripts/populate-metabase/main.go
@@ -91,15 +91,15 @@ func populate() (err error) {
 		return fmt.Errorf("couldn't open the metabase: %w", err)
 	}
 	defer func() {
-		if errOnClose := db.Close(); errOnClose != nil {
+		if errOnClose := db.Close(ctx); errOnClose != nil {
 			err = errors.Join(
 				err,
-				fmt.Errorf("couldn't close the metabase: %w", db.Close()),
+				fmt.Errorf("couldn't close the metabase: %w", errOnClose),
 			)
 		}
 	}()
 
-	if err = db.Init(); err != nil {
+	if err = db.Init(ctx); err != nil {
 		return fmt.Errorf("couldn't init the metabase: %w", err)
 	}
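One detail in the Shutdown hunk above worth spelling out: context.WithoutCancel (Go 1.21+) keeps the values of the incoming context, such as trace identifiers, while dropping its cancellation, so shutdown still gets its full timeout even when the caller's context is already cancelled. A sketch of the idiom against net/http; the defer cancel() is an assumption, since it sits outside the lines shown in the hunk:

	package sketch

	import (
		"context"
		"net/http"
		"time"
	)

	// shutdown drains srv within timeout regardless of whether ctx has
	// already been cancelled, while still propagating its values.
	func shutdown(ctx context.Context, srv *http.Server, timeout time.Duration) error {
		sctx, cancel := context.WithTimeout(context.WithoutCancel(ctx), timeout)
		defer cancel()
		return srv.Shutdown(sctx)
	}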