From 6db46257c0d30ddba71138de248aa86c18611730 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov <d.stepanov@yadro.com>
Date: Mon, 21 Oct 2024 10:22:54 +0300
Subject: [PATCH] [#1437] node: Use ctx for logging

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
---
 cmd/frostfs-ir/config.go                      | 25 +++----
 cmd/frostfs-ir/httpcomponent.go               | 15 ++--
 cmd/frostfs-ir/main.go                        | 12 ++--
 cmd/frostfs-ir/pprof.go                       |  7 +-
 cmd/frostfs-node/config.go                    | 68 +++++++++---------
 cmd/frostfs-node/container.go                 |  6 +-
 cmd/frostfs-node/control.go                   |  6 +-
 cmd/frostfs-node/grpc.go                      | 29 ++++----
 cmd/frostfs-node/httpcomponent.go             |  4 +-
 cmd/frostfs-node/main.go                      | 20 +++---
 cmd/frostfs-node/morph.go                     | 22 +++---
 cmd/frostfs-node/netmap.go                    | 10 +--
 cmd/frostfs-node/object.go                    |  6 +-
 cmd/frostfs-node/runtime.go                   |  5 +-
 cmd/frostfs-node/tracing.go                   |  6 +-
 cmd/frostfs-node/tree.go                      | 10 +--
 internal/audit/request.go                     |  4 +-
 pkg/core/object/fmt_test.go                   | 12 ++--
 pkg/core/object/sender_classifier.go          |  5 +-
 pkg/innerring/initialization.go               | 12 ++--
 pkg/innerring/innerring.go                    | 14 ++--
 pkg/innerring/notary.go                       |  8 +--
 pkg/innerring/processors/alphabet/handlers.go |  6 +-
 .../processors/alphabet/process_emit.go       | 19 ++---
 .../processors/alphabet/processor.go          |  3 +-
 pkg/innerring/processors/balance/handlers.go  |  5 +-
 .../processors/balance/process_assets.go      |  6 +-
 pkg/innerring/processors/balance/processor.go |  3 +-
 .../processors/container/handlers.go          |  9 +--
 .../processors/container/process_container.go | 13 ++--
 .../processors/container/processor.go         |  3 +-
 pkg/innerring/processors/frostfs/handlers.go  | 17 ++---
 .../processors/frostfs/process_assets.go      | 24 ++++---
 .../processors/frostfs/process_config.go      |  6 +-
 pkg/innerring/processors/frostfs/processor.go |  3 +-
 .../processors/governance/handlers.go         |  6 +-
 .../processors/governance/process_update.go   | 29 ++++----
 pkg/innerring/processors/netmap/handlers.go   | 23 +++---
 .../processors/netmap/process_cleanup.go      | 12 ++--
 .../processors/netmap/process_epoch.go        | 16 +++--
 .../processors/netmap/process_peers.go        | 19 ++---
 pkg/innerring/processors/netmap/processor.go  |  3 +-
 pkg/innerring/state.go                        | 15 ++--
 .../blobovnicza/blobovnicza.go                |  4 +-
 .../blobovnicza/control.go                    | 19 ++---
 .../blobovnicza/delete.go                     |  2 +-
 .../blobstor/blobovniczatree/control.go       |  6 +-
 .../blobstor/blobovniczatree/delete.go        |  2 +-
 .../blobstor/blobovniczatree/exists.go        |  2 +-
 .../blobstor/blobovniczatree/get.go           |  2 +-
 .../blobstor/blobovniczatree/get_range.go     |  2 +-
 .../blobstor/blobovniczatree/iterate.go       |  4 +-
 .../blobstor/blobovniczatree/manager.go       |  7 +-
 .../blobstor/blobovniczatree/option.go        |  2 +-
 .../blobstor/blobovniczatree/put.go           |  6 +-
 .../blobstor/blobovniczatree/rebuild.go       | 50 ++++++-------
 pkg/local_object_storage/blobstor/blobstor.go |  4 +-
 pkg/local_object_storage/blobstor/control.go  |  8 +--
 pkg/local_object_storage/blobstor/delete.go   |  4 +-
 pkg/local_object_storage/blobstor/exists.go   |  2 +-
 .../blobstor/fstree/fstree.go                 |  6 +-
 .../blobstor/fstree/option.go                 |  2 +-
 pkg/local_object_storage/blobstor/iterate.go  |  2 +-
 pkg/local_object_storage/blobstor/logger.go   |  6 +-
 pkg/local_object_storage/blobstor/put.go      |  2 +-
 pkg/local_object_storage/blobstor/rebuild.go  |  4 +-
 pkg/local_object_storage/engine/control.go    | 14 ++--
 pkg/local_object_storage/engine/delete.go     |  6 +-
 pkg/local_object_storage/engine/engine.go     | 20 +++---
 pkg/local_object_storage/engine/evacuate.go   | 28 ++++----
 pkg/local_object_storage/engine/get.go        |  2 +-
 pkg/local_object_storage/engine/inhume.go     | 24 +++----
 pkg/local_object_storage/engine/put.go        |  8 +--
 pkg/local_object_storage/engine/range.go      |  2 +-
 .../engine/remove_copies.go                   |  8 +--
 pkg/local_object_storage/engine/shards.go     | 16 ++---
 pkg/local_object_storage/internal/log/log.go  |  6 +-
 pkg/local_object_storage/metabase/control.go  |  6 +-
 pkg/local_object_storage/metabase/db.go       |  2 +-
 pkg/local_object_storage/metabase/delete.go   |  2 +-
 pkg/local_object_storage/metabase/inhume.go   |  2 +-
 pkg/local_object_storage/metabase/put.go      |  2 +-
 .../metabase/upgrade_test.go                  | 10 +--
 pkg/local_object_storage/shard/control.go     | 16 ++---
 pkg/local_object_storage/shard/delete.go      |  6 +-
 pkg/local_object_storage/shard/gc.go          | 70 +++++++++----------
 pkg/local_object_storage/shard/get.go         |  6 +-
 pkg/local_object_storage/shard/id.go          |  3 +-
 pkg/local_object_storage/shard/inhume.go      |  2 +-
 pkg/local_object_storage/shard/list.go        |  2 +-
 pkg/local_object_storage/shard/lock_test.go   |  2 +-
 pkg/local_object_storage/shard/mode.go        |  6 +-
 pkg/local_object_storage/shard/put.go         |  2 +-
 pkg/local_object_storage/shard/rebuild.go     |  8 +--
 pkg/local_object_storage/shard/shard.go       | 10 +--
 pkg/local_object_storage/shard/writecache.go  |  6 +-
 pkg/local_object_storage/writecache/cache.go  |  2 +-
 pkg/local_object_storage/writecache/delete.go |  2 +-
 pkg/local_object_storage/writecache/flush.go  |  4 +-
 .../writecache/flush_test.go                  |  2 +-
 pkg/local_object_storage/writecache/mode.go   |  4 +-
 .../writecache/options.go                     |  2 +-
 pkg/local_object_storage/writecache/put.go    |  2 +-
 .../writecache/storage.go                     |  4 +-
 pkg/morph/client/client.go                    | 10 +--
 pkg/morph/client/constructor.go               |  6 +-
 pkg/morph/client/multi.go                     |  8 +--
 pkg/morph/client/notary.go                    |  9 +--
 pkg/morph/event/listener.go                   | 70 +++++++++----------
 pkg/morph/event/utils.go                      |  3 +-
 pkg/morph/subscriber/subscriber.go            | 10 +--
 pkg/services/apemanager/executor.go           |  2 +-
 pkg/services/object/acl/v2/service.go         |  2 +-
 pkg/services/object/common/writer/common.go   |  2 +-
 pkg/services/object/common/writer/ec.go       |  8 +--
 pkg/services/object/delete/delete.go          |  6 +-
 pkg/services/object/delete/exec.go            | 18 ++---
 pkg/services/object/delete/local.go           |  6 +-
 pkg/services/object/delete/service.go         |  4 +-
 pkg/services/object/get/assemble.go           | 10 +--
 pkg/services/object/get/assembleec.go         | 10 +--
 pkg/services/object/get/assemblerec.go        | 18 ++---
 pkg/services/object/get/container.go          | 12 ++--
 pkg/services/object/get/get.go                | 14 ++--
 pkg/services/object/get/local.go              |  2 +-
 pkg/services/object/get/remote.go             |  4 +-
 pkg/services/object/get/request.go            | 14 ++--
 pkg/services/object/get/service.go            |  4 +-
 pkg/services/object/get/v2/get_range_hash.go  |  8 +--
 pkg/services/object/get/v2/service.go         |  4 +-
 pkg/services/object/put/service.go            |  2 +-
 pkg/services/object/put/single.go             |  2 +-
 pkg/services/object/search/container.go       | 16 ++---
 pkg/services/object/search/exec.go            |  4 +-
 pkg/services/object/search/local.go           |  2 +-
 pkg/services/object/search/search.go          |  8 +--
 pkg/services/object/search/service.go         |  4 +-
 pkg/services/object/util/log.go               |  6 +-
 .../object_manager/tombstone/checker.go       |  4 +-
 .../object_manager/tombstone/constructor.go   |  2 +-
 pkg/services/policer/check.go                 | 10 +--
 pkg/services/policer/ec.go                    | 40 +++++------
 pkg/services/policer/option.go                |  2 +-
 pkg/services/policer/policer.go               |  3 +-
 pkg/services/policer/process.go               |  8 +--
 pkg/services/replicator/process.go            |  8 +--
 pkg/services/replicator/pull.go               |  8 +--
 pkg/services/replicator/put.go                |  6 +-
 pkg/services/replicator/replicator.go         |  2 +-
 pkg/services/session/executor.go              |  2 +-
 .../session/storage/persistent/options.go     |  2 +-
 .../session/storage/persistent/storage.go     |  7 +-
 pkg/services/tree/redirect.go                 |  2 +-
 pkg/services/tree/replicator.go               |  8 +--
 pkg/services/tree/service.go                  |  2 +-
 pkg/services/tree/sync.go                     | 36 +++++-----
 pkg/util/logger/logger.go                     |  4 ++
 157 files changed, 764 insertions(+), 713 deletions(-)

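Note: with this change every Logger call takes a context.Context as its first
argument; call sites pass the request or application context where one is
available and fall back to context.Background() otherwise. The sketch below
illustrates, under stated assumptions, how such a context-aware wrapper could
use that ctx (for example, to attach a trace ID to each record). It is not
part of this patch -- the real wrapper lives in pkg/util/logger/logger.go and
is not shown in this excerpt -- and every name in the sketch is hypothetical.

package logger

import (
	"context"

	"go.uber.org/zap"
)

// Logger wraps a zap logger; sketch only, the real type differs.
type Logger struct {
	z *zap.Logger
}

// Info logs at INFO level and, if the context carries request-scoped
// values (here: a hypothetical trace ID), adds them as zap fields.
func (l *Logger) Info(ctx context.Context, msg string, fields ...zap.Field) {
	l.z.Info(msg, appendContextFields(ctx, fields)...)
}

// appendContextFields is a hypothetical helper that turns values stored
// in ctx into structured log fields.
func appendContextFields(ctx context.Context, fields []zap.Field) []zap.Field {
	if traceID, ok := ctx.Value(traceIDKey{}).(string); ok && traceID != "" {
		fields = append(fields, zap.String("trace_id", traceID))
	}
	return fields
}

// traceIDKey is a hypothetical context key used only by this sketch.
type traceIDKey struct{}

Under these assumptions, a call such as
c.log.Info(ctx, logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id))
would carry the trace ID of the surrounding operation, while calls made with
context.Background() simply log without extra fields.
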
diff --git a/cmd/frostfs-ir/config.go b/cmd/frostfs-ir/config.go
index 137e764ed..34d9d5595 100644
--- a/cmd/frostfs-ir/config.go
+++ b/cmd/frostfs-ir/config.go
@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"os"
 	"os/signal"
 	"syscall"
@@ -58,13 +59,13 @@ func watchForSignal(cancel func()) {
 		// signals causing application to shut down should have priority over
 		// reconfiguration signal
 		case <-ch:
-			log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
+			log.Info(context.Background(), logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
 			cancel()
 			shutdown()
-			log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
+			log.Info(context.Background(), logs.FrostFSNodeTerminationSignalProcessingIsComplete)
 			return
 		case err := <-intErr: // internal application error
-			log.Info(logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
+			log.Info(context.Background(), logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
 			cancel()
 			shutdown()
 			return
@@ -72,35 +73,35 @@ func watchForSignal(cancel func()) {
 			// block until any signal is receieved
 			select {
 			case <-ch:
-				log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
+				log.Info(context.Background(), logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
 				cancel()
 				shutdown()
-				log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
+				log.Info(context.Background(), logs.FrostFSNodeTerminationSignalProcessingIsComplete)
 				return
 			case err := <-intErr: // internal application error
-				log.Info(logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
+				log.Info(context.Background(), logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
 				cancel()
 				shutdown()
 				return
 			case <-sighupCh:
-				log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
+				log.Info(context.Background(), logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
 				if !innerRing.CompareAndSwapHealthStatus(control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
-					log.Info(logs.FrostFSNodeSIGHUPSkip)
+					log.Info(context.Background(), logs.FrostFSNodeSIGHUPSkip)
 					break
 				}
 				err := reloadConfig()
 				if err != nil {
-					log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
+					log.Error(context.Background(), logs.FrostFSNodeConfigurationReading, zap.Error(err))
 				}
 				pprofCmp.reload()
 				metricsCmp.reload()
-				log.Info(logs.FrostFSIRReloadExtraWallets)
+				log.Info(context.Background(), logs.FrostFSIRReloadExtraWallets)
 				err = innerRing.SetExtraWallets(cfg)
 				if err != nil {
-					log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
+					log.Error(context.Background(), logs.FrostFSNodeConfigurationReading, zap.Error(err))
 				}
 				innerRing.CompareAndSwapHealthStatus(control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
-				log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
+				log.Info(context.Background(), logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
 			}
 		}
 	}
diff --git a/cmd/frostfs-ir/httpcomponent.go b/cmd/frostfs-ir/httpcomponent.go
index 2792c3548..685ef61ad 100644
--- a/cmd/frostfs-ir/httpcomponent.go
+++ b/cmd/frostfs-ir/httpcomponent.go
@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"net/http"
 	"time"
 
@@ -25,7 +26,7 @@ const (
 )
 
 func (c *httpComponent) init() {
-	log.Info("init " + c.name)
+	log.Info(context.Background(), "init "+c.name)
 	c.enabled = cfg.GetBool(c.name + enabledKeyPostfix)
 	c.address = cfg.GetString(c.name + addressKeyPostfix)
 	c.shutdownDur = cfg.GetDuration(c.name + shutdownTimeoutKeyPostfix)
@@ -39,14 +40,14 @@ func (c *httpComponent) init() {
 			httputil.WithShutdownTimeout(c.shutdownDur),
 		)
 	} else {
-		log.Info(c.name + " is disabled, skip")
+		log.Info(context.Background(), c.name+" is disabled, skip")
 		c.srv = nil
 	}
 }
 
 func (c *httpComponent) start() {
 	if c.srv != nil {
-		log.Info("start " + c.name)
+		log.Info(context.Background(), "start "+c.name)
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
@@ -57,7 +58,7 @@ func (c *httpComponent) start() {
 
 func (c *httpComponent) shutdown() error {
 	if c.srv != nil {
-		log.Info("shutdown " + c.name)
+		log.Info(context.Background(), "shutdown "+c.name)
 		return c.srv.Shutdown()
 	}
 	return nil
@@ -71,11 +72,11 @@ func (c *httpComponent) needReload() bool {
 }
 
 func (c *httpComponent) reload() {
-	log.Info("reload " + c.name)
+	log.Info(context.Background(), "reload "+c.name)
 	if c.needReload() {
-		log.Info(c.name + " config updated")
+		log.Info(context.Background(), c.name+" config updated")
 		if err := c.shutdown(); err != nil {
-			log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
+			log.Debug(context.Background(), logs.FrostFSIRCouldNotShutdownHTTPServer,
 				zap.String("error", err.Error()),
 			)
 		} else {
diff --git a/cmd/frostfs-ir/main.go b/cmd/frostfs-ir/main.go
index 4bc5923a0..55a8ce00d 100644
--- a/cmd/frostfs-ir/main.go
+++ b/cmd/frostfs-ir/main.go
@@ -103,32 +103,32 @@ func main() {
 	err = innerRing.Start(ctx, intErr)
 	exitErr(err)
 
-	log.Info(logs.CommonApplicationStarted,
+	log.Info(ctx, logs.CommonApplicationStarted,
 		zap.String("version", misc.Version))
 
 	watchForSignal(cancel)
 
 	<-ctx.Done() // graceful shutdown
-	log.Debug(logs.FrostFSNodeWaitingForAllProcessesToStop)
+	log.Debug(ctx, logs.FrostFSNodeWaitingForAllProcessesToStop)
 	wg.Wait()
 
-	log.Info(logs.FrostFSIRApplicationStopped)
+	log.Info(ctx, logs.FrostFSIRApplicationStopped)
 }
 
 func shutdown() {
 	innerRing.Stop()
 	if err := metricsCmp.shutdown(); err != nil {
-		log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
+		log.Debug(context.Background(), logs.FrostFSIRCouldNotShutdownHTTPServer,
 			zap.String("error", err.Error()),
 		)
 	}
 	if err := pprofCmp.shutdown(); err != nil {
-		log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
+		log.Debug(context.Background(), logs.FrostFSIRCouldNotShutdownHTTPServer,
 			zap.String("error", err.Error()),
 		)
 	}
 
 	if err := sdnotify.ClearStatus(); err != nil {
-		log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
+		log.Error(context.Background(), logs.FailedToReportStatusToSystemd, zap.Error(err))
 	}
 }
diff --git a/cmd/frostfs-ir/pprof.go b/cmd/frostfs-ir/pprof.go
index ff5642008..e95fd117f 100644
--- a/cmd/frostfs-ir/pprof.go
+++ b/cmd/frostfs-ir/pprof.go
@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"runtime"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -52,11 +53,11 @@ func (c *pprofComponent) needReload() bool {
 }
 
 func (c *pprofComponent) reload() {
-	log.Info("reload " + c.name)
+	log.Info(context.Background(), "reload "+c.name)
 	if c.needReload() {
-		log.Info(c.name + " config updated")
+		log.Info(context.Background(), c.name+" config updated")
 		if err := c.shutdown(); err != nil {
-			log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
+			log.Debug(context.Background(), logs.FrostFSIRCouldNotShutdownHTTPServer,
 				zap.String("error", err.Error()))
 			return
 		}
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index 902187560..bd1b99095 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -400,13 +400,13 @@ type internals struct {
 func (c *cfg) startMaintenance() {
 	c.isMaintenance.Store(true)
 	c.cfgNetmap.state.setControlNetmapStatus(control.NetmapStatus_MAINTENANCE)
-	c.log.Info(logs.FrostFSNodeStartedLocalNodesMaintenance)
+	c.log.Info(context.Background(), logs.FrostFSNodeStartedLocalNodesMaintenance)
 }
 
 // stops node's maintenance.
 func (c *internals) stopMaintenance() {
 	if c.isMaintenance.CompareAndSwap(true, false) {
-		c.log.Info(logs.FrostFSNodeStoppedLocalNodesMaintenance)
+		c.log.Info(context.Background(), logs.FrostFSNodeStoppedLocalNodesMaintenance)
 	}
 }
 
@@ -705,7 +705,7 @@ func initCfg(appCfg *config.Config) *cfg {
 	log, err := logger.NewLogger(logPrm)
 	fatalOnErr(err)
 	if loggerconfig.ToLokiConfig(appCfg).Enabled {
-		log.Logger = log.Logger.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
+		log.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
 			lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(appCfg))
 			return lokiCore
 		}))
@@ -1103,10 +1103,10 @@ func initLocalStorage(ctx context.Context, c *cfg) {
 			shard.WithTombstoneSource(c.createTombstoneSource()),
 			shard.WithContainerInfoProvider(c.createContainerInfoProvider(ctx)))...)
 		if err != nil {
-			c.log.Error(logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err))
+			c.log.Error(ctx, logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err))
 		} else {
 			shardsAttached++
-			c.log.Info(logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id))
+			c.log.Info(ctx, logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id))
 		}
 	}
 	if shardsAttached == 0 {
@@ -1116,15 +1116,15 @@ func initLocalStorage(ctx context.Context, c *cfg) {
 	c.cfgObject.cfgLocalStorage.localStorage = ls
 
 	c.onShutdown(func() {
-		c.log.Info(logs.FrostFSNodeClosingComponentsOfTheStorageEngine)
+		c.log.Info(ctx, logs.FrostFSNodeClosingComponentsOfTheStorageEngine)
 
 		err := ls.Close(context.WithoutCancel(ctx))
 		if err != nil {
-			c.log.Info(logs.FrostFSNodeStorageEngineClosingFailure,
+			c.log.Info(ctx, logs.FrostFSNodeStorageEngineClosingFailure,
 				zap.String("error", err.Error()),
 			)
 		} else {
-			c.log.Info(logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
+			c.log.Info(ctx, logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
 		}
 	})
 }
@@ -1132,7 +1132,7 @@ func initLocalStorage(ctx context.Context, c *cfg) {
 func initAccessPolicyEngine(_ context.Context, c *cfg) {
 	var localOverrideDB chainbase.LocalOverrideDatabase
 	if nodeconfig.PersistentPolicyRules(c.appCfg).Path() == "" {
-		c.log.Warn(logs.FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed)
+		c.log.Warn(context.Background(), logs.FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed)
 		localOverrideDB = chainbase.NewInmemoryLocalOverrideDatabase()
 	} else {
 		localOverrideDB = chainbase.NewBoltLocalOverrideDatabase(
@@ -1157,7 +1157,7 @@ func initAccessPolicyEngine(_ context.Context, c *cfg) {
 
 	c.onShutdown(func() {
 		if err := ape.LocalOverrideDatabaseCore().Close(); err != nil {
-			c.log.Warn(logs.FrostFSNodeAccessPolicyEngineClosingFailure,
+			c.log.Warn(context.Background(), logs.FrostFSNodeAccessPolicyEngineClosingFailure,
 				zap.Error(err),
 			)
 		}
@@ -1209,7 +1209,7 @@ func (c *cfg) setContractNodeInfo(ni *netmap.NodeInfo) {
 func (c *cfg) updateContractNodeInfo(epoch uint64) {
 	ni, err := c.netmapLocalNodeState(epoch)
 	if err != nil {
-		c.log.Error(logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
+		c.log.Error(context.Background(), logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
 			zap.Uint64("epoch", epoch),
 			zap.String("error", err.Error()))
 		return
@@ -1245,13 +1245,13 @@ func (c *cfg) bootstrap() error {
 	// switch to online except when under maintenance
 	st := c.cfgNetmap.state.controlNetmapStatus()
 	if st == control.NetmapStatus_MAINTENANCE {
-		c.log.Info(logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
+		c.log.Info(context.Background(), logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
 		return c.bootstrapWithState(func(ni *netmap.NodeInfo) {
 			ni.SetStatus(netmap.Maintenance)
 		})
 	}
 
-	c.log.Info(logs.FrostFSNodeBootstrappingWithOnlineState,
+	c.log.Info(context.Background(), logs.FrostFSNodeBootstrappingWithOnlineState,
 		zap.Stringer("previous", st),
 	)
 
@@ -1280,19 +1280,19 @@ func (c *cfg) signalWatcher(ctx context.Context) {
 		// signals causing application to shut down should have priority over
 		// reconfiguration signal
 		case <-ch:
-			c.log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
+			c.log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
 
 			c.shutdown()
 
-			c.log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
+			c.log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
 			return
 		case err := <-c.internalErr: // internal application error
-			c.log.Warn(logs.FrostFSNodeInternalApplicationError,
+			c.log.Warn(ctx, logs.FrostFSNodeInternalApplicationError,
 				zap.String("message", err.Error()))
 
 			c.shutdown()
 
-			c.log.Info(logs.FrostFSNodeInternalErrorProcessingIsComplete)
+			c.log.Info(ctx, logs.FrostFSNodeInternalErrorProcessingIsComplete)
 			return
 		default:
 			// block until any signal is receieved
@@ -1300,19 +1300,19 @@ func (c *cfg) signalWatcher(ctx context.Context) {
 			case <-sighupCh:
 				c.reloadConfig(ctx)
 			case <-ch:
-				c.log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
+				c.log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
 
 				c.shutdown()
 
-				c.log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
+				c.log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
 				return
 			case err := <-c.internalErr: // internal application error
-				c.log.Warn(logs.FrostFSNodeInternalApplicationError,
+				c.log.Warn(ctx, logs.FrostFSNodeInternalApplicationError,
 					zap.String("message", err.Error()))
 
 				c.shutdown()
 
-				c.log.Info(logs.FrostFSNodeInternalErrorProcessingIsComplete)
+				c.log.Info(ctx, logs.FrostFSNodeInternalErrorProcessingIsComplete)
 				return
 			}
 		}
@@ -1320,17 +1320,17 @@ func (c *cfg) signalWatcher(ctx context.Context) {
 }
 
 func (c *cfg) reloadConfig(ctx context.Context) {
-	c.log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
+	c.log.Info(ctx, logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
 
 	if !c.compareAndSwapHealthStatus(control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
-		c.log.Info(logs.FrostFSNodeSIGHUPSkip)
+		c.log.Info(ctx, logs.FrostFSNodeSIGHUPSkip)
 		return
 	}
 	defer c.compareAndSwapHealthStatus(control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
 
 	err := c.reloadAppConfig()
 	if err != nil {
-		c.log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
+		c.log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err))
 		return
 	}
 
@@ -1341,7 +1341,7 @@ func (c *cfg) reloadConfig(ctx context.Context) {
 
 	logPrm, err := c.loggerPrm()
 	if err != nil {
-		c.log.Error(logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err))
+		c.log.Error(ctx, logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err))
 		return
 	}
 
@@ -1362,25 +1362,25 @@ func (c *cfg) reloadConfig(ctx context.Context) {
 
 	err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg)
 	if err != nil {
-		c.log.Error(logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err))
+		c.log.Error(ctx, logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err))
 		return
 	}
 
 	for _, component := range components {
 		err = component.reloadFunc()
 		if err != nil {
-			c.log.Error(logs.FrostFSNodeUpdatedConfigurationApplying,
+			c.log.Error(ctx, logs.FrostFSNodeUpdatedConfigurationApplying,
 				zap.String("component", component.name),
 				zap.Error(err))
 		}
 	}
 
 	if err := c.dialerSource.Update(internalNetConfig(c.appCfg, c.metricsCollector.MultinetMetrics())); err != nil {
-		c.log.Error(logs.FailedToUpdateMultinetConfiguration, zap.Error(err))
+		c.log.Error(ctx, logs.FailedToUpdateMultinetConfiguration, zap.Error(err))
 		return
 	}
 
-	c.log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
+	c.log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
 }
 
 func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
@@ -1403,7 +1403,7 @@ func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
 		}
 		updated, err := tracing.Setup(ctx, *traceConfig)
 		if updated {
-			c.log.Info(logs.FrostFSNodeTracingConfigationUpdated)
+			c.log.Info(ctx, logs.FrostFSNodeTracingConfigationUpdated)
 		}
 		return err
 	}})
@@ -1438,7 +1438,7 @@ func (c *cfg) reloadPools() error {
 func (c *cfg) reloadPool(p *ants.Pool, newSize int, name string) {
 	oldSize := p.Cap()
 	if oldSize != newSize {
-		c.log.Info(logs.FrostFSNodePoolConfigurationUpdate, zap.String("field", name),
+		c.log.Info(context.Background(), logs.FrostFSNodePoolConfigurationUpdate, zap.String("field", name),
 			zap.Int("old", oldSize), zap.Int("new", newSize))
 		p.Tune(newSize)
 	}
@@ -1477,11 +1477,11 @@ func (c *cfg) createContainerInfoProvider(ctx context.Context) container.InfoPro
 func (c *cfg) shutdown() {
 	old := c.swapHealthStatus(control.HealthStatus_SHUTTING_DOWN)
 	if old == control.HealthStatus_SHUTTING_DOWN {
-		c.log.Info(logs.FrostFSNodeShutdownSkip)
+		c.log.Info(context.Background(), logs.FrostFSNodeShutdownSkip)
 		return
 	}
 	if old == control.HealthStatus_STARTING {
-		c.log.Warn(logs.FrostFSNodeShutdownWhenNotReady)
+		c.log.Warn(context.Background(), logs.FrostFSNodeShutdownWhenNotReady)
 	}
 
 	c.ctxCancel()
@@ -1491,6 +1491,6 @@ func (c *cfg) shutdown() {
 	}
 
 	if err := sdnotify.ClearStatus(); err != nil {
-		c.log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
+		c.log.Error(context.Background(), logs.FailedToReportStatusToSystemd, zap.Error(err))
 	}
 }
diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go
index cc38876ee..1a54f9ffc 100644
--- a/cmd/frostfs-node/container.go
+++ b/cmd/frostfs-node/container.go
@@ -102,13 +102,13 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
 				} else {
 					// unlike removal, we expect successful receive of the container
 					// after successful creation, so logging can be useful
-					c.log.Error(logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification,
+					c.log.Error(context.Background(), logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification,
 						zap.Stringer("id", ev.ID),
 						zap.Error(err),
 					)
 				}
 
-				c.log.Debug(logs.FrostFSNodeContainerCreationEventsReceipt,
+				c.log.Debug(context.Background(), logs.FrostFSNodeContainerCreationEventsReceipt,
 					zap.Stringer("id", ev.ID),
 				)
 			})
@@ -116,7 +116,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
 			subscribeToContainerRemoval(c, func(e event.Event) {
 				ev := e.(containerEvent.DeleteSuccess)
 				containerCache.handleRemoval(ev.ID)
-				c.log.Debug(logs.FrostFSNodeContainerRemovalEventsReceipt,
+				c.log.Debug(context.Background(), logs.FrostFSNodeContainerRemovalEventsReceipt,
 					zap.Stringer("id", ev.ID),
 				)
 			})
diff --git a/cmd/frostfs-node/control.go b/cmd/frostfs-node/control.go
index e1e6e3ac9..ffac23eec 100644
--- a/cmd/frostfs-node/control.go
+++ b/cmd/frostfs-node/control.go
@@ -46,7 +46,7 @@ func initControlService(c *cfg) {
 
 	lis, err := net.Listen("tcp", endpoint)
 	if err != nil {
-		c.log.Error(logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err))
+		c.log.Error(context.Background(), logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err))
 		return
 	}
 
@@ -60,7 +60,7 @@ func initControlService(c *cfg) {
 
 	c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) {
 		runAndLog(ctx, c, serviceNameControl, false, func(context.Context, *cfg) {
-			c.log.Info(logs.FrostFSNodeStartListeningEndpoint,
+			c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint,
 				zap.String("service", serviceNameControl),
 				zap.String("endpoint", endpoint))
 			fatalOnErr(c.cfgControlService.server.Serve(lis))
@@ -113,6 +113,6 @@ func (c *cfg) notifySystemd(st control.HealthStatus) {
 		err = sdnotify.Status(fmt.Sprintf("%v", st))
 	}
 	if err != nil {
-		c.log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
+		c.log.Error(context.Background(), logs.FailedToReportStatusToSystemd, zap.Error(err))
 	}
 }
diff --git a/cmd/frostfs-node/grpc.go b/cmd/frostfs-node/grpc.go
index 3a38b2cca..271810ee6 100644
--- a/cmd/frostfs-node/grpc.go
+++ b/cmd/frostfs-node/grpc.go
@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"crypto/tls"
 	"errors"
 	"net"
@@ -30,7 +31,7 @@ func initGRPC(c *cfg) {
 		lis, err := net.Listen("tcp", sc.Endpoint())
 		if err != nil {
 			c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(sc.Endpoint())
-			c.log.Error(logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
+			c.log.Error(context.Background(), logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
 			endpointsToReconnect = append(endpointsToReconnect, sc.Endpoint())
 			return
 		}
@@ -76,19 +77,19 @@ func scheduleReconnect(endpoint string, c *cfg) {
 }
 
 func tryReconnect(endpoint string, c *cfg) bool {
-	c.log.Info(logs.FrostFSNodeGRPCReconnecting, zap.String("endpoint", endpoint))
+	c.log.Info(context.Background(), logs.FrostFSNodeGRPCReconnecting, zap.String("endpoint", endpoint))
 
 	serverOpts, found := getGRPCEndpointOpts(endpoint, c)
 	if !found {
-		c.log.Warn(logs.FrostFSNodeGRPCServerConfigNotFound, zap.String("endpoint", endpoint))
+		c.log.Warn(context.Background(), logs.FrostFSNodeGRPCServerConfigNotFound, zap.String("endpoint", endpoint))
 		return true
 	}
 
 	lis, err := net.Listen("tcp", endpoint)
 	if err != nil {
 		c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(endpoint)
-		c.log.Error(logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
-		c.log.Warn(logs.FrostFSNodeGRPCReconnectFailed, zap.Duration("next_try_in", c.cfgGRPC.reconnectTimeout))
+		c.log.Error(context.Background(), logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
+		c.log.Warn(context.Background(), logs.FrostFSNodeGRPCReconnectFailed, zap.Duration("next_try_in", c.cfgGRPC.reconnectTimeout))
 		return false
 	}
 	c.metricsCollector.GrpcServerMetrics().MarkHealthy(endpoint)
@@ -101,7 +102,7 @@ func tryReconnect(endpoint string, c *cfg) bool {
 
 	c.cfgGRPC.appendAndHandle(endpoint, lis, srv)
 
-	c.log.Info(logs.FrostFSNodeGRPCReconnectedSuccessfully, zap.String("endpoint", endpoint))
+	c.log.Info(context.Background(), logs.FrostFSNodeGRPCReconnectedSuccessfully, zap.String("endpoint", endpoint))
 	return true
 }
 
@@ -143,7 +144,7 @@ func getGrpcServerOpts(c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool
 	if tlsCfg != nil {
 		cert, err := tls.LoadX509KeyPair(tlsCfg.CertificateFile(), tlsCfg.KeyFile())
 		if err != nil {
-			c.log.Error(logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err))
+			c.log.Error(context.Background(), logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err))
 			return nil, false
 		}
 
@@ -180,21 +181,21 @@ func serveGRPC(c *cfg) {
 
 		go func() {
 			defer func() {
-				c.log.Info(logs.FrostFSNodeStopListeningGRPCEndpoint,
+				c.log.Info(context.Background(), logs.FrostFSNodeStopListeningGRPCEndpoint,
 					zap.Stringer("endpoint", l.Addr()),
 				)
 
 				c.wg.Done()
 			}()
 
-			c.log.Info(logs.FrostFSNodeStartListeningEndpoint,
+			c.log.Info(context.Background(), logs.FrostFSNodeStartListeningEndpoint,
 				zap.String("service", "gRPC"),
 				zap.Stringer("endpoint", l.Addr()),
 			)
 
 			if err := s.Serve(l); err != nil {
 				c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(e)
-				c.log.Error(logs.FrostFSNodeGRPCServerError, zap.Error(err))
+				c.log.Error(context.Background(), logs.FrostFSNodeGRPCServerError, zap.Error(err))
 				c.cfgGRPC.dropConnection(e)
 				scheduleReconnect(e, c)
 			}
@@ -203,9 +204,9 @@ func serveGRPC(c *cfg) {
 }
 
 func stopGRPC(name string, s *grpc.Server, l *logger.Logger) {
-	l = &logger.Logger{Logger: l.With(zap.String("name", name))}
+	l = l.With(zap.String("name", name))
 
-	l.Info(logs.FrostFSNodeStoppingGRPCServer)
+	l.Info(context.Background(), logs.FrostFSNodeStoppingGRPCServer)
 
 	// GracefulStop() may freeze forever, see #1270
 	done := make(chan struct{})
@@ -217,9 +218,9 @@ func stopGRPC(name string, s *grpc.Server, l *logger.Logger) {
 	select {
 	case <-done:
 	case <-time.After(1 * time.Minute):
-		l.Info(logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop)
+		l.Info(context.Background(), logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop)
 		s.Stop()
 	}
 
-	l.Info(logs.FrostFSNodeGRPCServerStoppedSuccessfully)
+	l.Info(context.Background(), logs.FrostFSNodeGRPCServerStoppedSuccessfully)
 }
diff --git a/cmd/frostfs-node/httpcomponent.go b/cmd/frostfs-node/httpcomponent.go
index 2ec20d848..a699a72a1 100644
--- a/cmd/frostfs-node/httpcomponent.go
+++ b/cmd/frostfs-node/httpcomponent.go
@@ -22,7 +22,7 @@ type httpComponent struct {
 
 func (cmp *httpComponent) init(c *cfg) {
 	if !cmp.enabled {
-		c.log.Info(cmp.name + " is disabled")
+		c.log.Info(context.Background(), cmp.name+" is disabled")
 		return
 	}
 	// Init server with parameters
@@ -39,7 +39,7 @@ func (cmp *httpComponent) init(c *cfg) {
 	go func() {
 		defer c.wg.Done()
 
-		c.log.Info(logs.FrostFSNodeStartListeningEndpoint,
+		c.log.Info(context.Background(), logs.FrostFSNodeStartListeningEndpoint,
 			zap.String("service", cmp.name),
 			zap.String("endpoint", cmp.address))
 		fatalOnErr(srv.Serve())
diff --git a/cmd/frostfs-node/main.go b/cmd/frostfs-node/main.go
index e4f0a434c..cd42d5f1d 100644
--- a/cmd/frostfs-node/main.go
+++ b/cmd/frostfs-node/main.go
@@ -73,9 +73,9 @@ func main() {
 }
 
 func initAndLog(c *cfg, name string, initializer func(*cfg)) {
-	c.log.Info(fmt.Sprintf("initializing %s service...", name))
+	c.log.Info(context.Background(), fmt.Sprintf("initializing %s service...", name))
 	initializer(c)
-	c.log.Info(name + " service has been successfully initialized")
+	c.log.Info(context.Background(), name+" service has been successfully initialized")
 }
 
 func initApp(ctx context.Context, c *cfg) {
@@ -120,25 +120,25 @@ func initApp(ctx context.Context, c *cfg) {
 }
 
 func runAndLog(ctx context.Context, c *cfg, name string, logSuccess bool, starter func(context.Context, *cfg)) {
-	c.log.Info(fmt.Sprintf("starting %s service...", name))
+	c.log.Info(ctx, fmt.Sprintf("starting %s service...", name))
 	starter(ctx, c)
 
 	if logSuccess {
-		c.log.Info(name + " service started successfully")
+		c.log.Info(ctx, name+" service started successfully")
 	}
 }
 
 func stopAndLog(c *cfg, name string, stopper func() error) {
-	c.log.Debug(fmt.Sprintf("shutting down %s service", name))
+	c.log.Debug(context.Background(), fmt.Sprintf("shutting down %s service", name))
 
 	err := stopper()
 	if err != nil {
-		c.log.Debug(fmt.Sprintf("could not shutdown %s server", name),
+		c.log.Debug(context.Background(), fmt.Sprintf("could not shutdown %s server", name),
 			zap.String("error", err.Error()),
 		)
 	}
 
-	c.log.Debug(name + " service has been stopped")
+	c.log.Debug(context.Background(), name+" service has been stopped")
 }
 
 func bootUp(ctx context.Context, c *cfg) {
@@ -150,7 +150,7 @@ func bootUp(ctx context.Context, c *cfg) {
 }
 
 func wait(c *cfg) {
-	c.log.Info(logs.CommonApplicationStarted,
+	c.log.Info(context.Background(), logs.CommonApplicationStarted,
 		zap.String("version", misc.Version))
 
 	<-c.done // graceful shutdown
@@ -160,12 +160,12 @@ func wait(c *cfg) {
 	go func() {
 		defer drain.Done()
 		for err := range c.internalErr {
-			c.log.Warn(logs.FrostFSNodeInternalApplicationError,
+			c.log.Warn(context.Background(), logs.FrostFSNodeInternalApplicationError,
 				zap.String("message", err.Error()))
 		}
 	}()
 
-	c.log.Debug(logs.FrostFSNodeWaitingForAllProcessesToStop)
+	c.log.Debug(context.Background(), logs.FrostFSNodeWaitingForAllProcessesToStop)
 
 	c.wg.Wait()
 
diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go
index 0969f5579..3e010b181 100644
--- a/cmd/frostfs-node/morph.go
+++ b/cmd/frostfs-node/morph.go
@@ -48,7 +48,7 @@ func (c *cfg) initMorphComponents(ctx context.Context) {
 		fatalOnErr(err)
 	}
 
-	c.log.Info(logs.FrostFSNodeNotarySupport,
+	c.log.Info(ctx, logs.FrostFSNodeNotarySupport,
 		zap.Bool("sidechain_enabled", c.cfgMorph.notaryEnabled),
 	)
 
@@ -64,7 +64,7 @@ func (c *cfg) initMorphComponents(ctx context.Context) {
 		msPerBlock, err := c.cfgMorph.client.MsPerBlock()
 		fatalOnErr(err)
 		c.cfgMorph.cacheTTL = time.Duration(msPerBlock) * time.Millisecond
-		c.log.Debug(logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL))
+		c.log.Debug(ctx, logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL))
 	}
 
 	if c.cfgMorph.cacheTTL < 0 {
@@ -102,7 +102,7 @@ func initMorphClient(ctx context.Context, c *cfg) {
 		client.WithDialerSource(c.dialerSource),
 	)
 	if err != nil {
-		c.log.Info(logs.FrostFSNodeFailedToCreateNeoRPCClient,
+		c.log.Info(ctx, logs.FrostFSNodeFailedToCreateNeoRPCClient,
 			zap.Any("endpoints", addresses),
 			zap.String("error", err.Error()),
 		)
@@ -111,12 +111,12 @@ func initMorphClient(ctx context.Context, c *cfg) {
 	}
 
 	c.onShutdown(func() {
-		c.log.Info(logs.FrostFSNodeClosingMorphComponents)
+		c.log.Info(ctx, logs.FrostFSNodeClosingMorphComponents)
 		cli.Close()
 	})
 
 	if err := cli.SetGroupSignerScope(); err != nil {
-		c.log.Info(logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err))
+		c.log.Info(ctx, logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err))
 	}
 
 	c.cfgMorph.client = cli
@@ -136,7 +136,7 @@ func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) {
 		// non-error deposit with an empty TX hash means
 		// that the deposit has already been made; no
 		// need to wait it.
-		c.log.Info(logs.FrostFSNodeNotaryDepositHasAlreadyBeenMade)
+		c.log.Info(ctx, logs.FrostFSNodeNotaryDepositHasAlreadyBeenMade)
 		return
 	}
 
@@ -202,7 +202,7 @@ func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256, vub uint32)
 		return fmt.Errorf("could not wait for notary deposit persists in chain: %w", err)
 	}
 	if res.Execution.VMState.HasFlag(vmstate.Halt) {
-		c.log.Info(logs.ClientNotaryDepositTransactionWasSuccessfullyPersisted)
+		c.log.Info(ctx, logs.ClientNotaryDepositTransactionWasSuccessfullyPersisted)
 		return nil
 	}
 	return errNotaryDepositFail
@@ -217,7 +217,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
 	fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey)
 	if err != nil {
 		fromSideChainBlock = 0
-		c.log.Warn(logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
+		c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
 	}
 
 	subs, err = subscriber.New(ctx, &subscriber.Params{
@@ -246,7 +246,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
 	setNetmapNotificationParser(c, newEpochNotification, func(src *state.ContainedNotificationEvent) (event.Event, error) {
 		res, err := netmapEvent.ParseNewEpoch(src)
 		if err == nil {
-			c.log.Info(logs.FrostFSNodeNewEpochEventFromSidechain,
+			c.log.Info(ctx, logs.FrostFSNodeNewEpochEventFromSidechain,
 				zap.Uint64("number", res.(netmapEvent.NewEpoch).EpochNumber()),
 			)
 		}
@@ -257,11 +257,11 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
 	registerNotificationHandlers(c.cfgContainer.scriptHash, lis, c.cfgContainer.parsers, c.cfgContainer.subscribers)
 
 	registerBlockHandler(lis, func(block *block.Block) {
-		c.log.Debug(logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index))
+		c.log.Debug(ctx, logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index))
 
 		err = c.persistate.SetUInt32(persistateSideChainLastBlockKey, block.Index)
 		if err != nil {
-			c.log.Warn(logs.FrostFSNodeCantUpdatePersistentState,
+			c.log.Warn(ctx, logs.FrostFSNodeCantUpdatePersistentState,
 				zap.String("chain", "side"),
 				zap.Uint32("block_index", block.Index))
 		}
diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go
index 73871bfc9..18667e636 100644
--- a/cmd/frostfs-node/netmap.go
+++ b/cmd/frostfs-node/netmap.go
@@ -189,7 +189,7 @@ func addNewEpochNotificationHandlers(c *cfg) {
 		}
 
 		if err := c.bootstrap(); err != nil {
-			c.log.Warn(logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err))
+			c.log.Warn(context.Background(), logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err))
 		}
 	})
 
@@ -197,7 +197,7 @@ func addNewEpochNotificationHandlers(c *cfg) {
 		addNewEpochAsyncNotificationHandler(c, func(_ event.Event) {
 			_, _, err := makeNotaryDeposit(c)
 			if err != nil {
-				c.log.Error(logs.FrostFSNodeCouldNotMakeNotaryDeposit,
+				c.log.Error(context.Background(), logs.FrostFSNodeCouldNotMakeNotaryDeposit,
 					zap.String("error", err.Error()),
 				)
 			}
@@ -210,7 +210,7 @@ func addNewEpochNotificationHandlers(c *cfg) {
 func bootstrapNode(c *cfg) {
 	if c.needBootstrap() {
 		if c.IsMaintenance() {
-			c.log.Info(logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap)
+			c.log.Info(context.Background(), logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap)
 			return
 		}
 		err := c.bootstrap()
@@ -250,7 +250,7 @@ func initNetmapState(c *cfg) {
 
 	stateWord := nodeState(ni)
 
-	c.log.Info(logs.FrostFSNodeInitialNetworkState,
+	c.log.Info(context.Background(), logs.FrostFSNodeInitialNetworkState,
 		zap.Uint64("epoch", epoch),
 		zap.String("state", stateWord),
 	)
@@ -307,7 +307,7 @@ func (c *cfg) netmapInitLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error
 	if nmState != candidateState {
 		// This happens when the node was switched to maintenance without epoch tick.
 		// We expect it to continue staying in maintenance.
-		c.log.Info(logs.CandidateStatusPriority,
+		c.log.Info(context.Background(), logs.CandidateStatusPriority,
 			zap.String("netmap", nmState),
 			zap.String("candidate", candidateState))
 	}
diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go
index aeab1d6cb..cad6d5ee3 100644
--- a/cmd/frostfs-node/object.go
+++ b/cmd/frostfs-node/object.go
@@ -58,7 +58,7 @@ type objectSvc struct {
 func (c *cfg) MaxObjectSize() uint64 {
 	sz, err := c.cfgNetmap.wrapper.MaxObjectSize()
 	if err != nil {
-		c.log.Error(logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
+		c.log.Error(context.Background(), logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
 			zap.String("error", err.Error()),
 		)
 	}
@@ -223,7 +223,7 @@ func initObjectService(c *cfg) {
 
 func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.ClientCache) {
 	if policerconfig.UnsafeDisable(c.appCfg) {
-		c.log.Warn(logs.FrostFSNodePolicerIsDisabled)
+		c.log.Warn(context.Background(), logs.FrostFSNodePolicerIsDisabled)
 		return
 	}
 
@@ -287,7 +287,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
 
 			_, err := ls.Inhume(ctx, inhumePrm)
 			if err != nil {
-				c.log.Warn(logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
+				c.log.Warn(ctx, logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
 					zap.String("error", err.Error()),
 				)
 			}
diff --git a/cmd/frostfs-node/runtime.go b/cmd/frostfs-node/runtime.go
index d858ba490..0b1000e70 100644
--- a/cmd/frostfs-node/runtime.go
+++ b/cmd/frostfs-node/runtime.go
@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"os"
 	"runtime/debug"
 
@@ -12,14 +13,14 @@ import (
 func setRuntimeParameters(c *cfg) {
 	if len(os.Getenv("GOMEMLIMIT")) != 0 {
 		// default limit < yaml limit < app env limit < GOMEMLIMIT
-		c.log.Warn(logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT)
+		c.log.Warn(context.Background(), logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT)
 		return
 	}
 
 	memLimitBytes := runtime.GCMemoryLimitBytes(c.appCfg)
 	previous := debug.SetMemoryLimit(memLimitBytes)
 	if memLimitBytes != previous {
-		c.log.Info(logs.RuntimeSoftMemoryLimitUpdated,
+		c.log.Info(context.Background(), logs.RuntimeSoftMemoryLimitUpdated,
 			zap.Int64("new_value", memLimitBytes),
 			zap.Int64("old_value", previous))
 	}
diff --git a/cmd/frostfs-node/tracing.go b/cmd/frostfs-node/tracing.go
index f550dd882..65f5aec15 100644
--- a/cmd/frostfs-node/tracing.go
+++ b/cmd/frostfs-node/tracing.go
@@ -13,12 +13,12 @@ import (
 func initTracing(ctx context.Context, c *cfg) {
 	conf, err := tracingconfig.ToTracingConfig(c.appCfg)
 	if err != nil {
-		c.log.Error(logs.FrostFSNodeFailedInitTracing, zap.Error(err))
+		c.log.Error(ctx, logs.FrostFSNodeFailedInitTracing, zap.Error(err))
 		return
 	}
 	_, err = tracing.Setup(ctx, *conf)
 	if err != nil {
-		c.log.Error(logs.FrostFSNodeFailedInitTracing, zap.Error(err))
+		c.log.Error(ctx, logs.FrostFSNodeFailedInitTracing, zap.Error(err))
 		return
 	}
 
@@ -29,7 +29,7 @@ func initTracing(ctx context.Context, c *cfg) {
 			defer cancel()
 			err := tracing.Shutdown(ctx) // cfg context cancels before close
 			if err != nil {
-				c.log.Error(logs.FrostFSNodeFailedShutdownTracing, zap.Error(err))
+				c.log.Error(ctx, logs.FrostFSNodeFailedShutdownTracing, zap.Error(err))
 			}
 		},
 	})
diff --git a/cmd/frostfs-node/tree.go b/cmd/frostfs-node/tree.go
index a92979daf..59923ee2f 100644
--- a/cmd/frostfs-node/tree.go
+++ b/cmd/frostfs-node/tree.go
@@ -44,7 +44,7 @@ func (c cnrSource) List() ([]cid.ID, error) {
 func initTreeService(c *cfg) {
 	treeConfig := treeconfig.Tree(c.appCfg)
 	if !treeConfig.Enabled() {
-		c.log.Info(logs.FrostFSNodeTreeServiceIsNotEnabledSkipInitialization)
+		c.log.Info(context.Background(), logs.FrostFSNodeTreeServiceIsNotEnabledSkipInitialization)
 		return
 	}
 
@@ -83,7 +83,7 @@ func initTreeService(c *cfg) {
 		addNewEpochNotificationHandler(c, func(_ event.Event) {
 			err := c.treeService.SynchronizeAll()
 			if err != nil {
-				c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
+				c.log.Error(context.Background(), logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
 			}
 		})
 	} else {
@@ -94,7 +94,7 @@ func initTreeService(c *cfg) {
 			for range tick.C {
 				err := c.treeService.SynchronizeAll()
 				if err != nil {
-					c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
+					c.log.Error(context.Background(), logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
 					if errors.Is(err, tree.ErrShuttingDown) {
 						return
 					}
@@ -107,11 +107,11 @@ func initTreeService(c *cfg) {
 		ev := e.(containerEvent.DeleteSuccess)
 
 		// This is executed asynchronously, so we don't care about the operation taking some time.
-		c.log.Debug(logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID))
+		c.log.Debug(context.Background(), logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID))
 		err := c.treeService.DropTree(context.Background(), ev.ID, "")
 		if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
 			// Ignore pilorama.ErrTreeNotFound but other errors, including shard.ErrReadOnly, should be logged.
-			c.log.Error(logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved,
+			c.log.Error(context.Background(), logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved,
 				zap.Stringer("cid", ev.ID),
 				zap.String("error", err.Error()))
 		}
diff --git a/internal/audit/request.go b/internal/audit/request.go
index 3355087f1..15a4a7960 100644
--- a/internal/audit/request.go
+++ b/internal/audit/request.go
@@ -1,6 +1,8 @@
 package audit
 
 import (
+	"context"
+
 	crypto "git.frostfs.info/TrueCloudLab/frostfs-crypto"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
@@ -37,7 +39,7 @@ func LogRequestWithKey(log *logger.Logger, operation string, key []byte, target
 		object = target.String()
 	}
 
-	log.Info(logs.AuditEventLogRecord,
+	log.Info(context.Background(), logs.AuditEventLogRecord,
 		zap.String("operation", operation),
 		zap.String("object", object),
 		zap.String("subject", subject),
diff --git a/pkg/core/object/fmt_test.go b/pkg/core/object/fmt_test.go
index b428b56da..20560cf3a 100644
--- a/pkg/core/object/fmt_test.go
+++ b/pkg/core/object/fmt_test.go
@@ -65,7 +65,7 @@ func TestFormatValidator_Validate(t *testing.T) {
 			epoch: curEpoch,
 		}),
 		WithLockSource(ls),
-		WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
+		WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
 	)
 
 	ownerKey, err := keys.NewPrivateKey()
@@ -290,7 +290,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
 			}),
 			WithLockSource(ls),
 			WithVerifySessionTokenIssuer(false),
-			WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
+			WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
 		)
 
 		tok := sessiontest.Object()
@@ -339,7 +339,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
 					},
 				},
 			),
-			WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
+			WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
 		)
 
 		tok := sessiontest.Object()
@@ -417,7 +417,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
 					currentEpoch: curEpoch,
 				},
 			),
-			WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
+			WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
 		)
 
 		require.NoError(t, v.Validate(context.Background(), obj, false))
@@ -491,7 +491,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
 					currentEpoch: curEpoch,
 				},
 			),
-			WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
+			WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
 		)
 
 		require.NoError(t, v.Validate(context.Background(), obj, false))
@@ -567,7 +567,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
 					currentEpoch: curEpoch,
 				},
 			),
-			WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
+			WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
 		)
 
 		require.Error(t, v.Validate(context.Background(), obj, false))
diff --git a/pkg/core/object/sender_classifier.go b/pkg/core/object/sender_classifier.go
index 13d0ebfb1..ed438c0b9 100644
--- a/pkg/core/object/sender_classifier.go
+++ b/pkg/core/object/sender_classifier.go
@@ -2,6 +2,7 @@ package object
 
 import (
 	"bytes"
+	"context"
 	"crypto/sha256"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -64,7 +65,7 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ownerKeyInBytes []byte, idC
 	isInnerRingNode, err := c.isInnerRingKey(ownerKeyInBytes)
 	if err != nil {
 		// do not throw error, try best case matching
-		c.log.Debug(logs.V2CantCheckIfRequestFromInnerRing,
+		c.log.Debug(context.Background(), logs.V2CantCheckIfRequestFromInnerRing,
 			zap.String("error", err.Error()))
 	} else if isInnerRingNode {
 		return &ClassifyResult{
@@ -81,7 +82,7 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ownerKeyInBytes []byte, idC
 		// error might happen if request has `RoleOther` key and placement
 		// is not possible for previous epoch, so
 		// do not throw error, try best case matching
-		c.log.Debug(logs.V2CantCheckIfRequestFromContainerNode,
+		c.log.Debug(context.Background(), logs.V2CantCheckIfRequestFromContainerNode,
 			zap.String("error", err.Error()))
 	} else if isContainerNode {
 		return &ClassifyResult{
diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go
index d6b474c32..b8812819e 100644
--- a/pkg/innerring/initialization.go
+++ b/pkg/innerring/initialization.go
@@ -97,7 +97,7 @@ func (s *Server) initMainnet(ctx context.Context, cfg *viper.Viper, morphChain *
 	fromMainChainBlock, err := s.persistate.UInt32(persistateMainChainLastBlockKey)
 	if err != nil {
 		fromMainChainBlock = 0
-		s.log.Warn(logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.String("error", err.Error()))
+		s.log.Warn(ctx, logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.String("error", err.Error()))
 	}
 	mainnetChain.from = fromMainChainBlock
 
@@ -142,7 +142,7 @@ func (s *Server) initNotaryConfig() {
 		!s.withoutMainNet && s.mainnetClient.ProbeNotary(), // if mainnet disabled then notary flag must be disabled too
 	)
 
-	s.log.Info(logs.InnerringNotarySupport,
+	s.log.Info(context.Background(), logs.InnerringNotarySupport,
 		zap.Bool("sidechain_enabled", true),
 		zap.Bool("mainchain_enabled", !s.mainNotaryConfig.disabled),
 	)
@@ -153,7 +153,7 @@ func (s *Server) createAlphaSync(cfg *viper.Viper, frostfsCli *frostfsClient.Cli
 
 	if s.withoutMainNet || cfg.GetBool("governance.disable") {
 		alphaSync = func(event.Event) {
-			s.log.Debug(logs.InnerringAlphabetKeysSyncIsDisabled)
+			s.log.Debug(context.Background(), logs.InnerringAlphabetKeysSyncIsDisabled)
 		}
 	} else {
 		// create governance processor
@@ -307,7 +307,7 @@ func (s *Server) initFrostFSMainnetProcessor(cfg *viper.Viper) error {
 func (s *Server) initGRPCServer(cfg *viper.Viper, log *logger.Logger, audit *atomic.Bool) error {
 	controlSvcEndpoint := cfg.GetString("control.grpc.endpoint")
 	if controlSvcEndpoint == "" {
-		s.log.Info(logs.InnerringNoControlServerEndpointSpecified)
+		s.log.Info(context.Background(), logs.InnerringNoControlServerEndpointSpecified)
 		return nil
 	}
 
@@ -446,7 +446,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
 	fromSideChainBlock, err := s.persistate.UInt32(persistateSideChainLastBlockKey)
 	if err != nil {
 		fromSideChainBlock = 0
-		s.log.Warn(logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
+		s.log.Warn(ctx, logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
 	}
 
 	morphChain := &chainParams{
@@ -471,7 +471,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
 		return nil, err
 	}
 	if err := s.morphClient.SetGroupSignerScope(); err != nil {
-		morphChain.log.Info(logs.InnerringFailedToSetGroupSignerScope, zap.Error(err))
+		morphChain.log.Info(ctx, logs.InnerringFailedToSetGroupSignerScope, zap.Error(err))
 	}
 
 	return morphChain, nil
diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go
index 5fae302c4..63a4cb1cb 100644
--- a/pkg/innerring/innerring.go
+++ b/pkg/innerring/innerring.go
@@ -176,7 +176,7 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
 	err = s.voteForSidechainValidator(prm)
 	if err != nil {
 		// we don't stop inner ring execution on this error
-		s.log.Warn(logs.InnerringCantVoteForPreparedValidators,
+		s.log.Warn(ctx, logs.InnerringCantVoteForPreparedValidators,
 			zap.String("error", err.Error()))
 	}
 
@@ -218,13 +218,13 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
 
 func (s *Server) registerMorphNewBlockEventHandler() {
 	s.morphListener.RegisterBlockHandler(func(b *block.Block) {
-		s.log.Debug(logs.InnerringNewBlock,
+		s.log.Debug(context.Background(), logs.InnerringNewBlock,
 			zap.Uint32("index", b.Index),
 		)
 
 		err := s.persistate.SetUInt32(persistateSideChainLastBlockKey, b.Index)
 		if err != nil {
-			s.log.Warn(logs.InnerringCantUpdatePersistentState,
+			s.log.Warn(context.Background(), logs.InnerringCantUpdatePersistentState,
 				zap.String("chain", "side"),
 				zap.Uint32("block_index", b.Index))
 		}
@@ -238,7 +238,7 @@ func (s *Server) registerMainnetNewBlockEventHandler() {
 		s.mainnetListener.RegisterBlockHandler(func(b *block.Block) {
 			err := s.persistate.SetUInt32(persistateMainChainLastBlockKey, b.Index)
 			if err != nil {
-				s.log.Warn(logs.InnerringCantUpdatePersistentState,
+				s.log.Warn(context.Background(), logs.InnerringCantUpdatePersistentState,
 					zap.String("chain", "main"),
 					zap.Uint32("block_index", b.Index))
 			}
@@ -307,7 +307,7 @@ func (s *Server) Stop() {
 
 	for _, c := range s.closers {
 		if err := c(); err != nil {
-			s.log.Warn(logs.InnerringCloserError,
+			s.log.Warn(context.Background(), logs.InnerringCloserError,
 				zap.String("error", err.Error()),
 			)
 		}
@@ -438,7 +438,7 @@ func createListener(ctx context.Context, cli *client.Client, p *chainParams) (ev
 	}
 
 	listener, err := event.NewListener(event.ListenerParams{
-		Logger:     &logger.Logger{Logger: p.log.With(zap.String("chain", p.name))},
+		Logger:     p.log.With(zap.String("chain", p.name)),
 		Subscriber: sub,
 	})
 	if err != nil {
@@ -602,7 +602,7 @@ func (s *Server) initConfigFromBlockchain() error {
 		return err
 	}
 
-	s.log.Debug(logs.InnerringReadConfigFromBlockchain,
+	s.log.Debug(context.Background(), logs.InnerringReadConfigFromBlockchain,
 		zap.Bool("active", s.IsActive()),
 		zap.Bool("alphabet", s.IsAlphabet()),
 		zap.Uint64("epoch", epoch),
diff --git a/pkg/innerring/notary.go b/pkg/innerring/notary.go
index 5cdbb971c..902a4c30a 100644
--- a/pkg/innerring/notary.go
+++ b/pkg/innerring/notary.go
@@ -54,12 +54,12 @@ func (s *Server) notaryHandler(_ event.Event) {
 	if !s.mainNotaryConfig.disabled {
 		_, err := s.depositMainNotary()
 		if err != nil {
-			s.log.Error(logs.InnerringCantMakeNotaryDepositInMainChain, zap.Error(err))
+			s.log.Error(context.Background(), logs.InnerringCantMakeNotaryDepositInMainChain, zap.Error(err))
 		}
 	}
 
 	if _, err := s.depositSideNotary(); err != nil {
-		s.log.Error(logs.InnerringCantMakeNotaryDepositInSideChain, zap.Error(err))
+		s.log.Error(context.Background(), logs.InnerringCantMakeNotaryDepositInSideChain, zap.Error(err))
 	}
 }
 
@@ -81,11 +81,11 @@ func (s *Server) initNotary(ctx context.Context, deposit depositor, await awaite
 		// non-error deposit with an empty TX hash means
 		// that the deposit has already been made; no
 		// need to wait it.
-		s.log.Info(logs.InnerringNotaryDepositHasAlreadyBeenMade)
+		s.log.Info(ctx, logs.InnerringNotaryDepositHasAlreadyBeenMade)
 		return nil
 	}
 
-	s.log.Info(msg)
+	s.log.Info(ctx, msg)
 
 	return await(ctx, tx)
 }
diff --git a/pkg/innerring/processors/alphabet/handlers.go b/pkg/innerring/processors/alphabet/handlers.go
index 9de075f17..0cc2a5f39 100644
--- a/pkg/innerring/processors/alphabet/handlers.go
+++ b/pkg/innerring/processors/alphabet/handlers.go
@@ -1,6 +1,8 @@
 package alphabet
 
 import (
+	"context"
+
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
@@ -10,14 +12,14 @@ import (
 
 func (ap *Processor) HandleGasEmission(ev event.Event) {
 	_ = ev.(timers.NewAlphabetEmitTick)
-	ap.log.Info(logs.AlphabetTick, zap.String("type", "alphabet gas emit"))
+	ap.log.Info(context.Background(), logs.AlphabetTick, zap.String("type", "alphabet gas emit"))
 
 	// send event to the worker pool
 
 	err := processors.SubmitEvent(ap.pool, ap.metrics, "alphabet_emit_gas", ap.processEmit)
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		ap.log.Warn(logs.AlphabetAlphabetProcessorWorkerPoolDrained,
+		ap.log.Warn(context.Background(), logs.AlphabetAlphabetProcessorWorkerPoolDrained,
 			zap.Int("capacity", ap.pool.Cap()))
 	}
 }
diff --git a/pkg/innerring/processors/alphabet/process_emit.go b/pkg/innerring/processors/alphabet/process_emit.go
index 2317f3e98..142409631 100644
--- a/pkg/innerring/processors/alphabet/process_emit.go
+++ b/pkg/innerring/processors/alphabet/process_emit.go
@@ -1,6 +1,7 @@
 package alphabet
 
 import (
+	"context"
 	"crypto/elliptic"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -16,14 +17,14 @@ const emitMethod = "emit"
 func (ap *Processor) processEmit() bool {
 	index := ap.irList.AlphabetIndex()
 	if index < 0 {
-		ap.log.Info(logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent)
+		ap.log.Info(context.Background(), logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent)
 
 		return true
 	}
 
 	contract, ok := ap.alphabetContracts.GetByIndex(index)
 	if !ok {
-		ap.log.Debug(logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent,
+		ap.log.Debug(context.Background(), logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent,
 			zap.Int("index", index))
 
 		return false
@@ -32,20 +33,20 @@ func (ap *Processor) processEmit() bool {
 	// there is no signature collecting, so we don't need extra fee
 	_, err := ap.morphClient.Invoke(contract, 0, emitMethod)
 	if err != nil {
-		ap.log.Warn(logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error()))
+		ap.log.Warn(context.Background(), logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error()))
 
 		return false
 	}
 
 	if ap.storageEmission == 0 {
-		ap.log.Info(logs.AlphabetStorageNodeEmissionIsOff)
+		ap.log.Info(context.Background(), logs.AlphabetStorageNodeEmissionIsOff)
 
 		return true
 	}
 
 	networkMap, err := ap.netmapClient.NetMap()
 	if err != nil {
-		ap.log.Warn(logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes,
+		ap.log.Warn(context.Background(), logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes,
 			zap.String("error", err.Error()))
 
 		return false
@@ -58,7 +59,7 @@ func (ap *Processor) processEmit() bool {
 	ap.pwLock.RUnlock()
 	extraLen := len(pw)
 
-	ap.log.Debug(logs.AlphabetGasEmission,
+	ap.log.Debug(context.Background(), logs.AlphabetGasEmission,
 		zap.Int("network_map", nmLen),
 		zap.Int("extra_wallets", extraLen))
 
@@ -81,7 +82,7 @@ func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerN
 
 		key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256())
 		if err != nil {
-			ap.log.Warn(logs.AlphabetCantParseNodePublicKey,
+			ap.log.Warn(context.Background(), logs.AlphabetCantParseNodePublicKey,
 				zap.String("error", err.Error()))
 
 			continue
@@ -89,7 +90,7 @@ func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerN
 
 		err = ap.morphClient.TransferGas(key.GetScriptHash(), gasPerNode)
 		if err != nil {
-			ap.log.Warn(logs.AlphabetCantTransferGas,
+			ap.log.Warn(context.Background(), logs.AlphabetCantTransferGas,
 				zap.String("receiver", key.Address()),
 				zap.Int64("amount", int64(gasPerNode)),
 				zap.String("error", err.Error()),
@@ -106,7 +107,7 @@ func (ap *Processor) transferGasToExtraNodes(pw []util.Uint160, gasPerNode fixed
 			for i, addr := range pw {
 				receiversLog[i] = addr.StringLE()
 			}
-			ap.log.Warn(logs.AlphabetCantTransferGasToWallet,
+			ap.log.Warn(context.Background(), logs.AlphabetCantTransferGasToWallet,
 				zap.Strings("receivers", receiversLog),
 				zap.Int64("amount", int64(gasPerNode)),
 				zap.String("error", err.Error()),
diff --git a/pkg/innerring/processors/alphabet/processor.go b/pkg/innerring/processors/alphabet/processor.go
index ce6679969..8dbef1e20 100644
--- a/pkg/innerring/processors/alphabet/processor.go
+++ b/pkg/innerring/processors/alphabet/processor.go
@@ -1,6 +1,7 @@
 package alphabet
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"sync"
@@ -85,7 +86,7 @@ func New(p *Params) (*Processor, error) {
 		return nil, errors.New("ir/alphabet: global state is not set")
 	}
 
-	p.Log.Debug(logs.AlphabetAlphabetWorkerPool, zap.Int("size", p.PoolSize))
+	p.Log.Debug(context.Background(), logs.AlphabetAlphabetWorkerPool, zap.Int("size", p.PoolSize))
 
 	pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
 	if err != nil {
diff --git a/pkg/innerring/processors/balance/handlers.go b/pkg/innerring/processors/balance/handlers.go
index e39f3abbd..3792fc2af 100644
--- a/pkg/innerring/processors/balance/handlers.go
+++ b/pkg/innerring/processors/balance/handlers.go
@@ -1,6 +1,7 @@
 package balance
 
 import (
+	"context"
 	"encoding/hex"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -12,7 +13,7 @@ import (
 
 func (bp *Processor) handleLock(ev event.Event) {
 	lock := ev.(balanceEvent.Lock)
-	bp.log.Info(logs.Notification,
+	bp.log.Info(context.Background(), logs.Notification,
 		zap.String("type", "lock"),
 		zap.String("value", hex.EncodeToString(lock.ID())))
 
@@ -23,7 +24,7 @@ func (bp *Processor) handleLock(ev event.Event) {
 	})
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		bp.log.Warn(logs.BalanceBalanceWorkerPoolDrained,
+		bp.log.Warn(context.Background(), logs.BalanceBalanceWorkerPoolDrained,
 			zap.Int("capacity", bp.pool.Cap()))
 	}
 }
diff --git a/pkg/innerring/processors/balance/process_assets.go b/pkg/innerring/processors/balance/process_assets.go
index 1d94fa454..ac6a1e493 100644
--- a/pkg/innerring/processors/balance/process_assets.go
+++ b/pkg/innerring/processors/balance/process_assets.go
@@ -1,6 +1,8 @@
 package balance
 
 import (
+	"context"
+
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	frostfsContract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
 	balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance"
@@ -11,7 +13,7 @@ import (
 // back to the withdraw issuer.
 func (bp *Processor) processLock(lock *balanceEvent.Lock) bool {
 	if !bp.alphabetState.IsAlphabet() {
-		bp.log.Info(logs.BalanceNonAlphabetModeIgnoreBalanceLock)
+		bp.log.Info(context.Background(), logs.BalanceNonAlphabetModeIgnoreBalanceLock)
 		return true
 	}
 
@@ -25,7 +27,7 @@ func (bp *Processor) processLock(lock *balanceEvent.Lock) bool {
 
 	err := bp.frostfsClient.Cheque(prm)
 	if err != nil {
-		bp.log.Error(logs.BalanceCantSendLockAssetTx, zap.Error(err))
+		bp.log.Error(context.Background(), logs.BalanceCantSendLockAssetTx, zap.Error(err))
 		return false
 	}
 
diff --git a/pkg/innerring/processors/balance/processor.go b/pkg/innerring/processors/balance/processor.go
index 5cc849b5c..c4078461e 100644
--- a/pkg/innerring/processors/balance/processor.go
+++ b/pkg/innerring/processors/balance/processor.go
@@ -1,6 +1,7 @@
 package balance
 
 import (
+	"context"
 	"errors"
 	"fmt"
 
@@ -68,7 +69,7 @@ func New(p *Params) (*Processor, error) {
 		return nil, errors.New("ir/balance: balance precision converter is not set")
 	}
 
-	p.Log.Debug(logs.BalanceBalanceWorkerPool, zap.Int("size", p.PoolSize))
+	p.Log.Debug(context.Background(), logs.BalanceBalanceWorkerPool, zap.Int("size", p.PoolSize))
 
 	pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
 	if err != nil {
diff --git a/pkg/innerring/processors/container/handlers.go b/pkg/innerring/processors/container/handlers.go
index a54f3c772..b3d50d9d0 100644
--- a/pkg/innerring/processors/container/handlers.go
+++ b/pkg/innerring/processors/container/handlers.go
@@ -1,6 +1,7 @@
 package container
 
 import (
+	"context"
 	"crypto/sha256"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -15,7 +16,7 @@ func (cp *Processor) handlePut(ev event.Event) {
 	put := ev.(putEvent)
 
 	id := sha256.Sum256(put.Container())
-	cp.log.Info(logs.Notification,
+	cp.log.Info(context.Background(), logs.Notification,
 		zap.String("type", "container put"),
 		zap.String("id", base58.Encode(id[:])))
 
@@ -26,14 +27,14 @@ func (cp *Processor) handlePut(ev event.Event) {
 	})
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained,
+		cp.log.Warn(context.Background(), logs.ContainerContainerProcessorWorkerPoolDrained,
 			zap.Int("capacity", cp.pool.Cap()))
 	}
 }
 
 func (cp *Processor) handleDelete(ev event.Event) {
 	del := ev.(containerEvent.Delete)
-	cp.log.Info(logs.Notification,
+	cp.log.Info(context.Background(), logs.Notification,
 		zap.String("type", "container delete"),
 		zap.String("id", base58.Encode(del.ContainerID())))
 
@@ -44,7 +45,7 @@ func (cp *Processor) handleDelete(ev event.Event) {
 	})
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained,
+		cp.log.Warn(context.Background(), logs.ContainerContainerProcessorWorkerPoolDrained,
 			zap.Int("capacity", cp.pool.Cap()))
 	}
 }
diff --git a/pkg/innerring/processors/container/process_container.go b/pkg/innerring/processors/container/process_container.go
index d89b63e82..2b9c5995c 100644
--- a/pkg/innerring/processors/container/process_container.go
+++ b/pkg/innerring/processors/container/process_container.go
@@ -1,6 +1,7 @@
 package container
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"strings"
@@ -38,7 +39,7 @@ var errContainerAndOwnerNamespaceDontMatch = errors.New("container and owner nam
 // and sending approve tx back to the morph.
 func (cp *Processor) processContainerPut(put putEvent) bool {
 	if !cp.alphabetState.IsAlphabet() {
-		cp.log.Info(logs.ContainerNonAlphabetModeIgnoreContainerPut)
+		cp.log.Info(context.Background(), logs.ContainerNonAlphabetModeIgnoreContainerPut)
 		return true
 	}
 
@@ -48,7 +49,7 @@ func (cp *Processor) processContainerPut(put putEvent) bool {
 
 	err := cp.checkPutContainer(ctx)
 	if err != nil {
-		cp.log.Error(logs.ContainerPutContainerCheckFailed,
+		cp.log.Error(context.Background(), logs.ContainerPutContainerCheckFailed,
 			zap.String("error", err.Error()),
 		)
 
@@ -56,7 +57,7 @@ func (cp *Processor) processContainerPut(put putEvent) bool {
 	}
 
 	if err := cp.morphClient.NotarySignAndInvokeTX(ctx.e.NotaryRequest().MainTransaction); err != nil {
-		cp.log.Error(logs.ContainerCouldNotApprovePutContainer,
+		cp.log.Error(context.Background(), logs.ContainerCouldNotApprovePutContainer,
 			zap.String("error", err.Error()),
 		)
 		return false
@@ -105,13 +106,13 @@ func (cp *Processor) checkPutContainer(ctx *putContainerContext) error {
 // and sending approve tx back to morph.
 func (cp *Processor) processContainerDelete(e containerEvent.Delete) bool {
 	if !cp.alphabetState.IsAlphabet() {
-		cp.log.Info(logs.ContainerNonAlphabetModeIgnoreContainerDelete)
+		cp.log.Info(context.Background(), logs.ContainerNonAlphabetModeIgnoreContainerDelete)
 		return true
 	}
 
 	err := cp.checkDeleteContainer(e)
 	if err != nil {
-		cp.log.Error(logs.ContainerDeleteContainerCheckFailed,
+		cp.log.Error(context.Background(), logs.ContainerDeleteContainerCheckFailed,
 			zap.String("error", err.Error()),
 		)
 
@@ -119,7 +120,7 @@ func (cp *Processor) processContainerDelete(e containerEvent.Delete) bool {
 	}
 
 	if err := cp.morphClient.NotarySignAndInvokeTX(e.NotaryRequest().MainTransaction); err != nil {
-		cp.log.Error(logs.ContainerCouldNotApproveDeleteContainer,
+		cp.log.Error(context.Background(), logs.ContainerCouldNotApproveDeleteContainer,
 			zap.String("error", err.Error()),
 		)
 
diff --git a/pkg/innerring/processors/container/processor.go b/pkg/innerring/processors/container/processor.go
index a6fbdc707..7a50ca773 100644
--- a/pkg/innerring/processors/container/processor.go
+++ b/pkg/innerring/processors/container/processor.go
@@ -1,6 +1,7 @@
 package container
 
 import (
+	"context"
 	"errors"
 	"fmt"
 
@@ -97,7 +98,7 @@ func New(p *Params) (*Processor, error) {
 		return nil, errors.New("ir/container: FrostFSID client is not set")
 	}
 
-	p.Log.Debug(logs.ContainerContainerWorkerPool, zap.Int("size", p.PoolSize))
+	p.Log.Debug(context.Background(), logs.ContainerContainerWorkerPool, zap.Int("size", p.PoolSize))
 
 	pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
 	if err != nil {
diff --git a/pkg/innerring/processors/frostfs/handlers.go b/pkg/innerring/processors/frostfs/handlers.go
index c80f9fdc5..02dfbaf60 100644
--- a/pkg/innerring/processors/frostfs/handlers.go
+++ b/pkg/innerring/processors/frostfs/handlers.go
@@ -2,6 +2,7 @@ package frostfs
 
 import (
 	"bytes"
+	"context"
 	"encoding/hex"
 	"slices"
 
@@ -16,7 +17,7 @@ func (np *Processor) handleDeposit(ev event.Event) {
 	deposit := ev.(frostfsEvent.Deposit)
 	depositIDBin := bytes.Clone(deposit.ID())
 	slices.Reverse(depositIDBin)
-	np.log.Info(logs.Notification,
+	np.log.Info(context.Background(), logs.Notification,
 		zap.String("type", "deposit"),
 		zap.String("id", hex.EncodeToString(depositIDBin)))
 
@@ -27,7 +28,7 @@ func (np *Processor) handleDeposit(ev event.Event) {
 	})
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
+		np.log.Warn(context.Background(), logs.FrostFSFrostfsProcessorWorkerPoolDrained,
 			zap.Int("capacity", np.pool.Cap()))
 	}
 }
@@ -36,7 +37,7 @@ func (np *Processor) handleWithdraw(ev event.Event) {
 	withdraw := ev.(frostfsEvent.Withdraw)
 	withdrawBin := bytes.Clone(withdraw.ID())
 	slices.Reverse(withdrawBin)
-	np.log.Info(logs.Notification,
+	np.log.Info(context.Background(), logs.Notification,
 		zap.String("type", "withdraw"),
 		zap.String("id", hex.EncodeToString(withdrawBin)))
 
@@ -47,14 +48,14 @@ func (np *Processor) handleWithdraw(ev event.Event) {
 	})
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
+		np.log.Warn(context.Background(), logs.FrostFSFrostfsProcessorWorkerPoolDrained,
 			zap.Int("capacity", np.pool.Cap()))
 	}
 }
 
 func (np *Processor) handleCheque(ev event.Event) {
 	cheque := ev.(frostfsEvent.Cheque)
-	np.log.Info(logs.Notification,
+	np.log.Info(context.Background(), logs.Notification,
 		zap.String("type", "cheque"),
 		zap.String("id", hex.EncodeToString(cheque.ID())))
 
@@ -65,14 +66,14 @@ func (np *Processor) handleCheque(ev event.Event) {
 	})
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
+		np.log.Warn(context.Background(), logs.FrostFSFrostfsProcessorWorkerPoolDrained,
 			zap.Int("capacity", np.pool.Cap()))
 	}
 }
 
 func (np *Processor) handleConfig(ev event.Event) {
 	cfg := ev.(frostfsEvent.Config)
-	np.log.Info(logs.Notification,
+	np.log.Info(context.Background(), logs.Notification,
 		zap.String("type", "set config"),
 		zap.String("key", hex.EncodeToString(cfg.Key())),
 		zap.String("value", hex.EncodeToString(cfg.Value())))
@@ -84,7 +85,7 @@ func (np *Processor) handleConfig(ev event.Event) {
 	})
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
+		np.log.Warn(context.Background(), logs.FrostFSFrostfsProcessorWorkerPoolDrained,
 			zap.Int("capacity", np.pool.Cap()))
 	}
 }
diff --git a/pkg/innerring/processors/frostfs/process_assets.go b/pkg/innerring/processors/frostfs/process_assets.go
index c72aeceee..3bee6ed96 100644
--- a/pkg/innerring/processors/frostfs/process_assets.go
+++ b/pkg/innerring/processors/frostfs/process_assets.go
@@ -1,6 +1,8 @@
 package frostfs
 
 import (
+	"context"
+
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
 	frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
@@ -17,7 +19,7 @@ const (
 // gas in the sidechain.
 func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
 	if !np.alphabetState.IsAlphabet() {
-		np.log.Info(logs.FrostFSNonAlphabetModeIgnoreDeposit)
+		np.log.Info(context.Background(), logs.FrostFSNonAlphabetModeIgnoreDeposit)
 		return true
 	}
 
@@ -30,7 +32,7 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
 	// send transferX to a balance contract
 	err := np.balanceClient.Mint(prm)
 	if err != nil {
-		np.log.Error(logs.FrostFSCantTransferAssetsToBalanceContract, zap.Error(err))
+		np.log.Error(context.Background(), logs.FrostFSCantTransferAssetsToBalanceContract, zap.Error(err))
 	}
 
 	curEpoch := np.epochState.EpochCounter()
@@ -44,7 +46,7 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
 
 	val, ok := np.mintEmitCache.Get(receiver.String())
 	if ok && val+np.mintEmitThreshold >= curEpoch {
-		np.log.Warn(logs.FrostFSDoubleMintEmissionDeclined,
+		np.log.Warn(context.Background(), logs.FrostFSDoubleMintEmissionDeclined,
 			zap.Stringer("receiver", receiver),
 			zap.Uint64("last_emission", val),
 			zap.Uint64("current_epoch", curEpoch))
@@ -56,12 +58,12 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
 	// before gas transfer check if the balance is greater than the threshold
 	balance, err := np.morphClient.GasBalance()
 	if err != nil {
-		np.log.Error(logs.FrostFSCantGetGasBalanceOfTheNode, zap.Error(err))
+		np.log.Error(context.Background(), logs.FrostFSCantGetGasBalanceOfTheNode, zap.Error(err))
 		return false
 	}
 
 	if balance < np.gasBalanceThreshold {
-		np.log.Warn(logs.FrostFSGasBalanceThresholdHasBeenReached,
+		np.log.Warn(context.Background(), logs.FrostFSGasBalanceThresholdHasBeenReached,
 			zap.Int64("balance", balance),
 			zap.Int64("threshold", np.gasBalanceThreshold))
 
@@ -70,7 +72,7 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
 
 	err = np.morphClient.TransferGas(receiver, np.mintEmitValue)
 	if err != nil {
-		np.log.Error(logs.FrostFSCantTransferNativeGasToReceiver,
+		np.log.Error(context.Background(), logs.FrostFSCantTransferNativeGasToReceiver,
 			zap.String("error", err.Error()))
 
 		return false
@@ -84,14 +86,14 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
 // Process withdraw event by locking assets in the balance account.
 func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool {
 	if !np.alphabetState.IsAlphabet() {
-		np.log.Info(logs.FrostFSNonAlphabetModeIgnoreWithdraw)
+		np.log.Info(context.Background(), logs.FrostFSNonAlphabetModeIgnoreWithdraw)
 		return true
 	}
 
 	// create lock account
 	lock, err := util.Uint160DecodeBytesBE(withdraw.ID()[:util.Uint160Size])
 	if err != nil {
-		np.log.Error(logs.FrostFSCantCreateLockAccount, zap.Error(err))
+		np.log.Error(context.Background(), logs.FrostFSCantCreateLockAccount, zap.Error(err))
 		return false
 	}
 
@@ -107,7 +109,7 @@ func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool {
 
 	err = np.balanceClient.Lock(prm)
 	if err != nil {
-		np.log.Error(logs.FrostFSCantLockAssetsForWithdraw, zap.Error(err))
+		np.log.Error(context.Background(), logs.FrostFSCantLockAssetsForWithdraw, zap.Error(err))
 		return false
 	}
 
@@ -118,7 +120,7 @@ func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool {
 // the reserve account.
 func (np *Processor) processCheque(cheque frostfsEvent.Cheque) bool {
 	if !np.alphabetState.IsAlphabet() {
-		np.log.Info(logs.FrostFSNonAlphabetModeIgnoreCheque)
+		np.log.Info(context.Background(), logs.FrostFSNonAlphabetModeIgnoreCheque)
 		return true
 	}
 
@@ -130,7 +132,7 @@ func (np *Processor) processCheque(cheque frostfsEvent.Cheque) bool {
 
 	err := np.balanceClient.Burn(prm)
 	if err != nil {
-		np.log.Error(logs.FrostFSCantTransferAssetsToFedContract, zap.Error(err))
+		np.log.Error(context.Background(), logs.FrostFSCantTransferAssetsToFedContract, zap.Error(err))
 		return false
 	}
 
diff --git a/pkg/innerring/processors/frostfs/process_config.go b/pkg/innerring/processors/frostfs/process_config.go
index 2ae3e6ced..814dd40b4 100644
--- a/pkg/innerring/processors/frostfs/process_config.go
+++ b/pkg/innerring/processors/frostfs/process_config.go
@@ -1,6 +1,8 @@
 package frostfs
 
 import (
+	"context"
+
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
 	frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
@@ -11,7 +13,7 @@ import (
 // the sidechain.
 func (np *Processor) processConfig(config frostfsEvent.Config) bool {
 	if !np.alphabetState.IsAlphabet() {
-		np.log.Info(logs.FrostFSNonAlphabetModeIgnoreConfig)
+		np.log.Info(context.Background(), logs.FrostFSNonAlphabetModeIgnoreConfig)
 		return true
 	}
 
@@ -24,7 +26,7 @@ func (np *Processor) processConfig(config frostfsEvent.Config) bool {
 
 	err := np.netmapClient.SetConfig(prm)
 	if err != nil {
-		np.log.Error(logs.FrostFSCantRelaySetConfigEvent, zap.Error(err))
+		np.log.Error(context.Background(), logs.FrostFSCantRelaySetConfigEvent, zap.Error(err))
 		return false
 	}
 
diff --git a/pkg/innerring/processors/frostfs/processor.go b/pkg/innerring/processors/frostfs/processor.go
index 2019857ac..fdc31d82e 100644
--- a/pkg/innerring/processors/frostfs/processor.go
+++ b/pkg/innerring/processors/frostfs/processor.go
@@ -1,6 +1,7 @@
 package frostfs
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"sync"
@@ -110,7 +111,7 @@ func New(p *Params) (*Processor, error) {
 		return nil, errors.New("ir/frostfs: balance precision converter is not set")
 	}
 
-	p.Log.Debug(logs.FrostFSFrostfsWorkerPool, zap.Int("size", p.PoolSize))
+	p.Log.Debug(context.Background(), logs.FrostFSFrostfsWorkerPool, zap.Int("size", p.PoolSize))
 
 	pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
 	if err != nil {
diff --git a/pkg/innerring/processors/governance/handlers.go b/pkg/innerring/processors/governance/handlers.go
index fd7f539c3..dee8c13e2 100644
--- a/pkg/innerring/processors/governance/handlers.go
+++ b/pkg/innerring/processors/governance/handlers.go
@@ -1,6 +1,8 @@
 package governance
 
 import (
+	"context"
+
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
@@ -32,7 +34,7 @@ func (gp *Processor) HandleAlphabetSync(e event.Event) {
 		return
 	}
 
-	gp.log.Info(logs.GovernanceNewEvent, zap.String("type", typ))
+	gp.log.Info(context.Background(), logs.GovernanceNewEvent, zap.String("type", typ))
 
 	// send event to the worker pool
 
@@ -41,7 +43,7 @@ func (gp *Processor) HandleAlphabetSync(e event.Event) {
 	})
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		gp.log.Warn(logs.GovernanceGovernanceWorkerPoolDrained,
+		gp.log.Warn(context.Background(), logs.GovernanceGovernanceWorkerPoolDrained,
 			zap.Int("capacity", gp.pool.Cap()))
 	}
 }
diff --git a/pkg/innerring/processors/governance/process_update.go b/pkg/innerring/processors/governance/process_update.go
index 50ba58e77..faca22f67 100644
--- a/pkg/innerring/processors/governance/process_update.go
+++ b/pkg/innerring/processors/governance/process_update.go
@@ -1,6 +1,7 @@
 package governance
 
 import (
+	"context"
 	"encoding/binary"
 	"encoding/hex"
 	"sort"
@@ -20,37 +21,37 @@ const (
 
 func (gp *Processor) processAlphabetSync(txHash util.Uint256) bool {
 	if !gp.alphabetState.IsAlphabet() {
-		gp.log.Info(logs.GovernanceNonAlphabetModeIgnoreAlphabetSync)
+		gp.log.Info(context.Background(), logs.GovernanceNonAlphabetModeIgnoreAlphabetSync)
 		return true
 	}
 
 	mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList()
 	if err != nil {
-		gp.log.Error(logs.GovernanceCantFetchAlphabetListFromMainNet,
+		gp.log.Error(context.Background(), logs.GovernanceCantFetchAlphabetListFromMainNet,
 			zap.String("error", err.Error()))
 		return false
 	}
 
 	sidechainAlphabet, err := gp.morphClient.Committee()
 	if err != nil {
-		gp.log.Error(logs.GovernanceCantFetchAlphabetListFromSideChain,
+		gp.log.Error(context.Background(), logs.GovernanceCantFetchAlphabetListFromSideChain,
 			zap.String("error", err.Error()))
 		return false
 	}
 
 	newAlphabet, err := newAlphabetList(sidechainAlphabet, mainnetAlphabet)
 	if err != nil {
-		gp.log.Error(logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain,
+		gp.log.Error(context.Background(), logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain,
 			zap.String("error", err.Error()))
 		return false
 	}
 
 	if newAlphabet == nil {
-		gp.log.Info(logs.GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged)
+		gp.log.Info(context.Background(), logs.GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged)
 		return true
 	}
 
-	gp.log.Info(logs.GovernanceAlphabetListHasBeenChangedStartingUpdate,
+	gp.log.Info(context.Background(), logs.GovernanceAlphabetListHasBeenChangedStartingUpdate,
 		zap.String("side_chain_alphabet", prettyKeys(sidechainAlphabet)),
 		zap.String("new_alphabet", prettyKeys(newAlphabet)),
 	)
@@ -63,7 +64,7 @@ func (gp *Processor) processAlphabetSync(txHash util.Uint256) bool {
 	// 1. Vote to sidechain committee via alphabet contracts.
 	err = gp.voter.VoteForSidechainValidator(votePrm)
 	if err != nil {
-		gp.log.Error(logs.GovernanceCantVoteForSideChainCommittee,
+		gp.log.Error(context.Background(), logs.GovernanceCantVoteForSideChainCommittee,
 			zap.String("error", err.Error()))
 	}
 
@@ -76,7 +77,7 @@ func (gp *Processor) processAlphabetSync(txHash util.Uint256) bool {
 	// 4. Update FrostFS contract in the mainnet.
 	gp.updateFrostFSContractInMainnet(newAlphabet)
 
-	gp.log.Info(logs.GovernanceFinishedAlphabetListUpdate)
+	gp.log.Info(context.Background(), logs.GovernanceFinishedAlphabetListUpdate)
 
 	return true
 }
@@ -96,21 +97,21 @@ func prettyKeys(keys keys.PublicKeys) string {
 func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) {
 	innerRing, err := gp.irFetcher.InnerRingKeys()
 	if err != nil {
-		gp.log.Error(logs.GovernanceCantFetchInnerRingListFromSideChain,
+		gp.log.Error(context.Background(), logs.GovernanceCantFetchInnerRingListFromSideChain,
 			zap.String("error", err.Error()))
 		return
 	}
 
 	newInnerRing, err := updateInnerRing(innerRing, sidechainAlphabet, newAlphabet)
 	if err != nil {
-		gp.log.Error(logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys,
+		gp.log.Error(context.Background(), logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys,
 			zap.String("error", err.Error()))
 		return
 	}
 
 	sort.Sort(newInnerRing)
 
-	gp.log.Info(logs.GovernanceUpdateOfTheInnerRingList,
+	gp.log.Info(context.Background(), logs.GovernanceUpdateOfTheInnerRingList,
 		zap.String("before", prettyKeys(innerRing)),
 		zap.String("after", prettyKeys(newInnerRing)),
 	)
@@ -120,7 +121,7 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAl
 	updPrm.SetHash(txHash)
 
 	if err = gp.morphClient.UpdateNeoFSAlphabetList(updPrm); err != nil {
-		gp.log.Error(logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys,
+		gp.log.Error(context.Background(), logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys,
 			zap.String("error", err.Error()))
 	}
 }
@@ -133,7 +134,7 @@ func (gp *Processor) updateNotaryRoleInSidechain(newAlphabet keys.PublicKeys, tx
 
 	err := gp.morphClient.UpdateNotaryList(updPrm)
 	if err != nil {
-		gp.log.Error(logs.GovernanceCantUpdateListOfNotaryNodesInSideChain,
+		gp.log.Error(context.Background(), logs.GovernanceCantUpdateListOfNotaryNodesInSideChain,
 			zap.String("error", err.Error()))
 	}
 }
@@ -153,7 +154,7 @@ func (gp *Processor) updateFrostFSContractInMainnet(newAlphabet keys.PublicKeys)
 
 	err := gp.frostfsClient.AlphabetUpdate(prm)
 	if err != nil {
-		gp.log.Error(logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract,
+		gp.log.Error(context.Background(), logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract,
 			zap.String("error", err.Error()))
 	}
 }
diff --git a/pkg/innerring/processors/netmap/handlers.go b/pkg/innerring/processors/netmap/handlers.go
index c6053e281..478ab5eab 100644
--- a/pkg/innerring/processors/netmap/handlers.go
+++ b/pkg/innerring/processors/netmap/handlers.go
@@ -1,6 +1,7 @@
 package netmap
 
 import (
+	"context"
 	"encoding/hex"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -13,21 +14,21 @@ import (
 
 func (np *Processor) HandleNewEpochTick(ev event.Event) {
 	_ = ev.(timerEvent.NewEpochTick)
-	np.log.Info(logs.NetmapTick, zap.String("type", "epoch"))
+	np.log.Info(context.Background(), logs.NetmapTick, zap.String("type", "epoch"))
 
 	// send an event to the worker pool
 
 	err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch_tick", np.processNewEpochTick)
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
+		np.log.Warn(context.Background(), logs.NetmapNetmapWorkerPoolDrained,
 			zap.Int("capacity", np.pool.Cap()))
 	}
 }
 
 func (np *Processor) handleNewEpoch(ev event.Event) {
 	epochEvent := ev.(netmapEvent.NewEpoch)
-	np.log.Info(logs.Notification,
+	np.log.Info(context.Background(), logs.Notification,
 		zap.String("type", "new epoch"),
 		zap.Uint64("value", epochEvent.EpochNumber()))
 
@@ -38,7 +39,7 @@ func (np *Processor) handleNewEpoch(ev event.Event) {
 	})
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
+		np.log.Warn(context.Background(), logs.NetmapNetmapWorkerPoolDrained,
 			zap.Int("capacity", np.pool.Cap()))
 	}
 }
@@ -46,7 +47,7 @@ func (np *Processor) handleNewEpoch(ev event.Event) {
 func (np *Processor) handleAddPeer(ev event.Event) {
 	newPeer := ev.(netmapEvent.AddPeer)
 
-	np.log.Info(logs.Notification,
+	np.log.Info(context.Background(), logs.Notification,
 		zap.String("type", "add peer"),
 	)
 
@@ -57,14 +58,14 @@ func (np *Processor) handleAddPeer(ev event.Event) {
 	})
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
+		np.log.Warn(context.Background(), logs.NetmapNetmapWorkerPoolDrained,
 			zap.Int("capacity", np.pool.Cap()))
 	}
 }
 
 func (np *Processor) handleUpdateState(ev event.Event) {
 	updPeer := ev.(netmapEvent.UpdatePeer)
-	np.log.Info(logs.Notification,
+	np.log.Info(context.Background(), logs.Notification,
 		zap.String("type", "update peer state"),
 		zap.String("key", hex.EncodeToString(updPeer.PublicKey().Bytes())))
 
@@ -75,21 +76,21 @@ func (np *Processor) handleUpdateState(ev event.Event) {
 	})
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
+		np.log.Warn(context.Background(), logs.NetmapNetmapWorkerPoolDrained,
 			zap.Int("capacity", np.pool.Cap()))
 	}
 }
 
 func (np *Processor) handleCleanupTick(ev event.Event) {
 	if !np.netmapSnapshot.enabled {
-		np.log.Debug(logs.NetmapNetmapCleanUpRoutineIsDisabled518)
+		np.log.Debug(context.Background(), logs.NetmapNetmapCleanUpRoutineIsDisabled518)
 
 		return
 	}
 
 	cleanup := ev.(netmapCleanupTick)
 
-	np.log.Info(logs.NetmapTick, zap.String("type", "netmap cleaner"))
+	np.log.Info(context.Background(), logs.NetmapTick, zap.String("type", "netmap cleaner"))
 
 	// send event to the worker pool
 	err := processors.SubmitEvent(np.pool, np.metrics, "netmap_cleanup_tick", func() bool {
@@ -97,7 +98,7 @@ func (np *Processor) handleCleanupTick(ev event.Event) {
 	})
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
+		np.log.Warn(context.Background(), logs.NetmapNetmapWorkerPoolDrained,
 			zap.Int("capacity", np.pool.Cap()))
 	}
 }
diff --git a/pkg/innerring/processors/netmap/process_cleanup.go b/pkg/innerring/processors/netmap/process_cleanup.go
index 269e79c5e..9529d3a0c 100644
--- a/pkg/innerring/processors/netmap/process_cleanup.go
+++ b/pkg/innerring/processors/netmap/process_cleanup.go
@@ -1,6 +1,8 @@
 package netmap
 
 import (
+	"context"
+
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	v2netmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@@ -9,7 +11,7 @@ import (
 
 func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool {
 	if !np.alphabetState.IsAlphabet() {
-		np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick)
+		np.log.Info(context.Background(), logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick)
 
 		return true
 	}
@@ -17,13 +19,13 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool {
 	err := np.netmapSnapshot.forEachRemoveCandidate(ev.epoch, func(s string) error {
 		key, err := keys.NewPublicKeyFromString(s)
 		if err != nil {
-			np.log.Warn(logs.NetmapCantDecodePublicKeyOfNetmapNode,
+			np.log.Warn(context.Background(), logs.NetmapCantDecodePublicKeyOfNetmapNode,
 				zap.String("key", s))
 
 			return nil
 		}
 
-		np.log.Info(logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", s))
+		np.log.Info(context.Background(), logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", s))
 
 		// In notary environments we call UpdateStateIR method instead of UpdateState.
 		// It differs from UpdateState only by name, so we can do this in the same form.
@@ -39,13 +41,13 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool {
 			int64(v2netmap.Offline), key.Bytes(),
 		)
 		if err != nil {
-			np.log.Error(logs.NetmapCantInvokeNetmapUpdateState, zap.Error(err))
+			np.log.Error(context.Background(), logs.NetmapCantInvokeNetmapUpdateState, zap.Error(err))
 		}
 
 		return nil
 	})
 	if err != nil {
-		np.log.Warn(logs.NetmapCantIterateOnNetmapCleanerCache,
+		np.log.Warn(context.Background(), logs.NetmapCantIterateOnNetmapCleanerCache,
 			zap.String("error", err.Error()))
 		return false
 	}
diff --git a/pkg/innerring/processors/netmap/process_epoch.go b/pkg/innerring/processors/netmap/process_epoch.go
index 9522df26c..8ad295a74 100644
--- a/pkg/innerring/processors/netmap/process_epoch.go
+++ b/pkg/innerring/processors/netmap/process_epoch.go
@@ -1,6 +1,8 @@
 package netmap
 
 import (
+	"context"
+
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance"
 	netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
@@ -14,7 +16,7 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) bool {
 
 	epochDuration, err := np.netmapClient.EpochDuration()
 	if err != nil {
-		np.log.Warn(logs.NetmapCantGetEpochDuration,
+		np.log.Warn(context.Background(), logs.NetmapCantGetEpochDuration,
 			zap.String("error", err.Error()))
 	} else {
 		np.epochState.SetEpochDuration(epochDuration)
@@ -24,20 +26,20 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) bool {
 
 	h, err := np.netmapClient.MorphTxHeight(ev.TxHash())
 	if err != nil {
-		np.log.Warn(logs.NetmapCantGetTransactionHeight,
+		np.log.Warn(context.Background(), logs.NetmapCantGetTransactionHeight,
 			zap.String("hash", ev.TxHash().StringLE()),
 			zap.String("error", err.Error()))
 	}
 
 	if err := np.epochTimer.ResetEpochTimer(h); err != nil {
-		np.log.Warn(logs.NetmapCantResetEpochTimer,
+		np.log.Warn(context.Background(), logs.NetmapCantResetEpochTimer,
 			zap.String("error", err.Error()))
 	}
 
 	// get new netmap snapshot
 	networkMap, err := np.netmapClient.NetMap()
 	if err != nil {
-		np.log.Warn(logs.NetmapCantGetNetmapSnapshotToPerformCleanup,
+		np.log.Warn(context.Background(), logs.NetmapCantGetNetmapSnapshotToPerformCleanup,
 			zap.String("error", err.Error()))
 
 		return false
@@ -54,16 +56,16 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) bool {
 // Process new epoch tick by invoking new epoch method in network map contract.
 func (np *Processor) processNewEpochTick() bool {
 	if !np.alphabetState.IsAlphabet() {
-		np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewEpochTick)
+		np.log.Info(context.Background(), logs.NetmapNonAlphabetModeIgnoreNewEpochTick)
 		return true
 	}
 
 	nextEpoch := np.epochState.EpochCounter() + 1
-	np.log.Debug(logs.NetmapNextEpoch, zap.Uint64("value", nextEpoch))
+	np.log.Debug(context.Background(), logs.NetmapNextEpoch, zap.Uint64("value", nextEpoch))
 
 	err := np.netmapClient.NewEpoch(nextEpoch)
 	if err != nil {
-		np.log.Error(logs.NetmapCantInvokeNetmapNewEpoch, zap.Error(err))
+		np.log.Error(context.Background(), logs.NetmapCantInvokeNetmapNewEpoch, zap.Error(err))
 		return false
 	}
 
diff --git a/pkg/innerring/processors/netmap/process_peers.go b/pkg/innerring/processors/netmap/process_peers.go
index c8c7928a3..42d1b5ec6 100644
--- a/pkg/innerring/processors/netmap/process_peers.go
+++ b/pkg/innerring/processors/netmap/process_peers.go
@@ -1,6 +1,7 @@
 package netmap
 
 import (
+	"context"
 	"encoding/hex"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -14,7 +15,7 @@ import (
 // local epoch timer.
 func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
 	if !np.alphabetState.IsAlphabet() {
-		np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewPeerNotification)
+		np.log.Info(context.Background(), logs.NetmapNonAlphabetModeIgnoreNewPeerNotification)
 		return true
 	}
 
@@ -22,7 +23,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
 	tx := ev.NotaryRequest().MainTransaction
 	ok, err := np.netmapClient.MorphIsValidScript(tx.Script, tx.Signers)
 	if err != nil || !ok {
-		np.log.Warn(logs.NetmapNonhaltNotaryTransaction,
+		np.log.Warn(context.Background(), logs.NetmapNonhaltNotaryTransaction,
 			zap.String("method", "netmap.AddPeer"),
 			zap.String("hash", tx.Hash().StringLE()),
 			zap.Error(err))
@@ -33,14 +34,14 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
 	var nodeInfo netmap.NodeInfo
 	if err := nodeInfo.Unmarshal(ev.Node()); err != nil {
 		// it will be nice to have tx id at event structure to log it
-		np.log.Warn(logs.NetmapCantParseNetworkMapCandidate)
+		np.log.Warn(context.Background(), logs.NetmapCantParseNetworkMapCandidate)
 		return false
 	}
 
 	// validate and update node info
 	err = np.nodeValidator.VerifyAndUpdate(&nodeInfo)
 	if err != nil {
-		np.log.Warn(logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate,
+		np.log.Warn(context.Background(), logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate,
 			zap.String("error", err.Error()),
 		)
 
@@ -63,7 +64,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
 	// That is why we need to perform `addPeerIR` only in case when node is online,
 	// because in scope of this method, contract set state `ONLINE` for the node.
 	if updated && nodeInfo.Status().IsOnline() {
-		np.log.Info(logs.NetmapApprovingNetworkMapCandidate,
+		np.log.Info(context.Background(), logs.NetmapApprovingNetworkMapCandidate,
 			zap.String("key", keyString))
 
 		prm := netmapclient.AddPeerPrm{}
@@ -84,7 +85,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
 			nodeInfoBinary,
 		)
 		if err != nil {
-			np.log.Error(logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err))
+			np.log.Error(context.Background(), logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err))
 			return false
 		}
 	}
@@ -95,7 +96,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
 // Process update peer notification by sending approval tx to the smart contract.
 func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool {
 	if !np.alphabetState.IsAlphabet() {
-		np.log.Info(logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification)
+		np.log.Info(context.Background(), logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification)
 		return true
 	}
 
@@ -108,7 +109,7 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool {
 	if ev.Maintenance() {
 		err = np.nodeStateSettings.MaintenanceModeAllowed()
 		if err != nil {
-			np.log.Info(logs.NetmapPreventSwitchingNodeToMaintenanceState,
+			np.log.Info(context.Background(), logs.NetmapPreventSwitchingNodeToMaintenanceState,
 				zap.Error(err),
 			)
 
@@ -117,7 +118,7 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool {
 	}
 
 	if err = np.netmapClient.MorphNotarySignAndInvokeTX(ev.NotaryRequest().MainTransaction); err != nil {
-		np.log.Error(logs.NetmapCantInvokeNetmapUpdatePeer, zap.Error(err))
+		np.log.Error(context.Background(), logs.NetmapCantInvokeNetmapUpdatePeer, zap.Error(err))
 		return false
 	}
 
diff --git a/pkg/innerring/processors/netmap/processor.go b/pkg/innerring/processors/netmap/processor.go
index f5a91dee2..bbd60c1e1 100644
--- a/pkg/innerring/processors/netmap/processor.go
+++ b/pkg/innerring/processors/netmap/processor.go
@@ -1,6 +1,7 @@
 package netmap
 
 import (
+	"context"
 	"errors"
 	"fmt"
 
@@ -132,7 +133,7 @@ func New(p *Params) (*Processor, error) {
 		return nil, errors.New("ir/netmap: node state settings is not set")
 	}
 
-	p.Log.Debug(logs.NetmapNetmapWorkerPool, zap.Int("size", p.PoolSize))
+	p.Log.Debug(context.Background(), logs.NetmapNetmapWorkerPool, zap.Int("size", p.PoolSize))
 
 	pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
 	if err != nil {
diff --git a/pkg/innerring/state.go b/pkg/innerring/state.go
index d3071faad..250f41e5f 100644
--- a/pkg/innerring/state.go
+++ b/pkg/innerring/state.go
@@ -1,6 +1,7 @@
 package innerring
 
 import (
+	"context"
 	"fmt"
 	"sort"
 
@@ -61,7 +62,7 @@ func (s *Server) IsAlphabet() bool {
 func (s *Server) InnerRingIndex() int {
 	index, err := s.statusIndex.InnerRingIndex()
 	if err != nil {
-		s.log.Error(logs.InnerringCantGetInnerRingIndex, zap.String("error", err.Error()))
+		s.log.Error(context.Background(), logs.InnerringCantGetInnerRingIndex, zap.String("error", err.Error()))
 		return -1
 	}
 
@@ -73,7 +74,7 @@ func (s *Server) InnerRingIndex() int {
 func (s *Server) InnerRingSize() int {
 	size, err := s.statusIndex.InnerRingSize()
 	if err != nil {
-		s.log.Error(logs.InnerringCantGetInnerRingSize, zap.String("error", err.Error()))
+		s.log.Error(context.Background(), logs.InnerringCantGetInnerRingSize, zap.String("error", err.Error()))
 		return 0
 	}
 
@@ -85,7 +86,7 @@ func (s *Server) InnerRingSize() int {
 func (s *Server) AlphabetIndex() int {
 	index, err := s.statusIndex.AlphabetIndex()
 	if err != nil {
-		s.log.Error(logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error()))
+		s.log.Error(context.Background(), logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error()))
 		return -1
 	}
 
@@ -97,13 +98,13 @@ func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) erro
 
 	index := s.InnerRingIndex()
 	if s.contracts.alphabet.indexOutOfRange(index) {
-		s.log.Info(logs.InnerringIgnoreValidatorVoteNodeNotInAlphabetRange)
+		s.log.Info(context.Background(), logs.InnerringIgnoreValidatorVoteNodeNotInAlphabetRange)
 
 		return nil
 	}
 
 	if len(validators) == 0 {
-		s.log.Info(logs.InnerringIgnoreValidatorVoteEmptyValidatorsList)
+		s.log.Info(context.Background(), logs.InnerringIgnoreValidatorVoteEmptyValidatorsList)
 
 		return nil
 	}
@@ -128,7 +129,7 @@ func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) erro
 	s.contracts.alphabet.iterate(func(letter GlagoliticLetter, contract util.Uint160) {
 		_, err := s.morphClient.NotaryInvoke(contract, s.feeConfig.SideChainFee(), nonce, vubP, voteMethod, epoch, validators)
 		if err != nil {
-			s.log.Warn(logs.InnerringCantInvokeVoteMethodInAlphabetContract,
+			s.log.Warn(context.Background(), logs.InnerringCantInvokeVoteMethodInAlphabetContract,
 				zap.Int8("alphabet_index", int8(letter)),
 				zap.Uint64("epoch", epoch),
 				zap.String("error", err.Error()))
@@ -202,6 +203,6 @@ func (s *Server) notifySystemd(st control.HealthStatus) {
 		err = sdnotify.Status(fmt.Sprintf("%v", st))
 	}
 	if err != nil {
-		s.log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
+		s.log.Error(context.Background(), logs.FailedToReportStatusToSystemd, zap.Error(err))
 	}
 }
diff --git a/pkg/local_object_storage/blobovnicza/blobovnicza.go b/pkg/local_object_storage/blobovnicza/blobovnicza.go
index c787f9d5e..08ef8b86c 100644
--- a/pkg/local_object_storage/blobovnicza/blobovnicza.go
+++ b/pkg/local_object_storage/blobovnicza/blobovnicza.go
@@ -58,7 +58,7 @@ func defaultCfg(c *cfg) {
 		},
 		fullSizeLimit: 1 << 30, // 1GB
 		objSizeLimit:  1 << 20, // 1MB
-		log:           &logger.Logger{Logger: zap.L()},
+		log:           logger.NewLoggerWrapper(zap.L()),
 		metrics:       &NoopMetrics{},
 	}
 }
@@ -110,7 +110,7 @@ func WithFullSizeLimit(lim uint64) Option {
 // WithLogger returns an option to specify Blobovnicza's logger.
 func WithLogger(l *logger.Logger) Option {
 	return func(c *cfg) {
-		c.log = &logger.Logger{Logger: l.With(zap.String("component", "Blobovnicza"))}
+		c.log = l.With(zap.String("component", "Blobovnicza"))
 	}
 }
 
diff --git a/pkg/local_object_storage/blobovnicza/control.go b/pkg/local_object_storage/blobovnicza/control.go
index aeaa4e1d5..5d7135741 100644
--- a/pkg/local_object_storage/blobovnicza/control.go
+++ b/pkg/local_object_storage/blobovnicza/control.go
@@ -1,6 +1,7 @@
 package blobovnicza
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"path/filepath"
@@ -23,7 +24,7 @@ func (b *Blobovnicza) Open() error {
 		return nil
 	}
 
-	b.log.Debug(logs.BlobovniczaCreatingDirectoryForBoltDB,
+	b.log.Debug(context.Background(), logs.BlobovniczaCreatingDirectoryForBoltDB,
 		zap.String("path", b.path),
 		zap.Bool("ro", b.boltOptions.ReadOnly),
 	)
@@ -37,7 +38,7 @@ func (b *Blobovnicza) Open() error {
 		}
 	}
 
-	b.log.Debug(logs.BlobovniczaOpeningBoltDB,
+	b.log.Debug(context.Background(), logs.BlobovniczaOpeningBoltDB,
 		zap.String("path", b.path),
 		zap.Stringer("permissions", b.perm),
 	)
@@ -63,7 +64,7 @@ func (b *Blobovnicza) Init() error {
 		return errors.New("blobovnicza is not open")
 	}
 
-	b.log.Debug(logs.BlobovniczaInitializing,
+	b.log.Debug(context.Background(), logs.BlobovniczaInitializing,
 		zap.Uint64("object size limit", b.objSizeLimit),
 		zap.Uint64("storage size limit", b.fullSizeLimit),
 	)
@@ -71,7 +72,7 @@ func (b *Blobovnicza) Init() error {
 	size := b.dataSize.Load()
 	items := b.itemsCount.Load()
 	if size != 0 || items != 0 {
-		b.log.Debug(logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size), zap.Uint64("items", items))
+		b.log.Debug(context.Background(), logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size), zap.Uint64("items", items))
 		return nil
 	}
 
@@ -81,7 +82,7 @@ func (b *Blobovnicza) Init() error {
 				// create size range bucket
 
 				rangeStr := stringifyBounds(lower, upper)
-				b.log.Debug(logs.BlobovniczaCreatingBucketForSizeRange,
+				b.log.Debug(context.Background(), logs.BlobovniczaCreatingBucketForSizeRange,
 					zap.String("range", rangeStr))
 
 				_, err := tx.CreateBucketIfNotExists(key)
@@ -131,17 +132,17 @@ func (b *Blobovnicza) initializeCounters() error {
 		return fmt.Errorf("can't determine DB size: %w", err)
 	}
 	if (!sizeExists || !itemsCountExists) && !b.boltOptions.ReadOnly {
-		b.log.Debug(logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items))
+		b.log.Debug(context.Background(), logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items))
 		if err := b.boltDB.Update(func(tx *bbolt.Tx) error {
 			if err := saveDataSize(tx, size); err != nil {
 				return err
 			}
 			return saveItemsCount(tx, items)
 		}); err != nil {
-			b.log.Debug(logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items))
+			b.log.Debug(context.Background(), logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items))
 			return fmt.Errorf("can't save blobovnicza's size and items count: %w", err)
 		}
-		b.log.Debug(logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items))
+		b.log.Debug(context.Background(), logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items))
 	}
 
 	b.dataSize.Store(size)
@@ -162,7 +163,7 @@ func (b *Blobovnicza) Close() error {
 		return nil
 	}
 
-	b.log.Debug(logs.BlobovniczaClosingBoltDB,
+	b.log.Debug(context.Background(), logs.BlobovniczaClosingBoltDB,
 		zap.String("path", b.path),
 	)
 
diff --git a/pkg/local_object_storage/blobovnicza/delete.go b/pkg/local_object_storage/blobovnicza/delete.go
index 5d6787897..d821b2991 100644
--- a/pkg/local_object_storage/blobovnicza/delete.go
+++ b/pkg/local_object_storage/blobovnicza/delete.go
@@ -91,7 +91,7 @@ func (b *Blobovnicza) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, err
 	}
 
 	if err == nil && found {
-		b.log.Debug(logs.BlobovniczaObjectWasRemovedFromBucket,
+		b.log.Debug(ctx, logs.BlobovniczaObjectWasRemovedFromBucket,
 			zap.String("binary size", stringifyByteSize(dataSize)),
 			zap.String("range", stringifyBounds(sizeLowerBound, sizeUpperBound)),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control.go b/pkg/local_object_storage/blobstor/blobovniczatree/control.go
index 681cf876c..55c9d6630 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/control.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/control.go
@@ -24,10 +24,10 @@ func (b *Blobovniczas) Open(mode mode.ComponentMode) error {
 //
 // Should be called exactly once.
 func (b *Blobovniczas) Init() error {
-	b.log.Debug(logs.BlobovniczatreeInitializingBlobovniczas)
+	b.log.Debug(context.Background(), logs.BlobovniczatreeInitializingBlobovniczas)
 
 	if b.readOnly {
-		b.log.Debug(logs.BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization)
+		b.log.Debug(context.Background(), logs.BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization)
 		return nil
 	}
 
@@ -60,7 +60,7 @@ func (b *Blobovniczas) initializeDBs(ctx context.Context) error {
 				b.deleteProtectedObjects.Add(move.Address)
 			}
 
-			b.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p))
+			b.log.Debug(egCtx, logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p))
 			return nil
 		})
 		return false, nil
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
index 298de3ad6..dd5258042 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
@@ -80,7 +80,7 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
 		res, err = b.deleteObjectFromLevel(ctx, bPrm, p)
 		if err != nil {
 			if !client.IsErrObjectNotFound(err) {
-				b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromLevel,
+				b.log.Debug(ctx, logs.BlobovniczatreeCouldNotRemoveObjectFromLevel,
 					zap.String("level", p),
 					zap.String("error", err.Error()),
 					zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go
index a64b2bbb1..2149b17c0 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go
@@ -55,7 +55,7 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common
 		_, err := b.getObjectFromLevel(ctx, gPrm, p)
 		if err != nil {
 			if !client.IsErrObjectNotFound(err) {
-				b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
+				b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
 					zap.String("level", p),
 					zap.String("error", err.Error()),
 					zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get.go b/pkg/local_object_storage/blobstor/blobovniczatree/get.go
index 08cacda8a..e79480095 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/get.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/get.go
@@ -67,7 +67,7 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
 		res, err = b.getObjectFromLevel(ctx, bPrm, p)
 		if err != nil {
 			if !client.IsErrObjectNotFound(err) {
-				b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
+				b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
 					zap.String("level", p),
 					zap.String("error", err.Error()),
 					zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
index d237ae439..20f2be2ba 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
@@ -69,7 +69,7 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
 		if err != nil {
 			outOfBounds := isErrOutOfRange(err)
 			if !outOfBounds && !client.IsErrObjectNotFound(err) {
-				b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
+				b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
 					zap.String("level", p),
 					zap.String("error", err.Error()),
 					zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go
index f6acb46aa..7f0453410 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go
@@ -42,7 +42,7 @@ func (b *Blobovniczas) Iterate(ctx context.Context, prm common.IteratePrm) (comm
 			data, err := b.compression.Decompress(elem.ObjectData())
 			if err != nil {
 				if prm.IgnoreErrors {
-					b.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
+					b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
 						zap.Stringer("address", elem.Address()),
 						zap.String("err", err.Error()),
 						zap.String("storage_id", p),
@@ -76,7 +76,7 @@ func (b *Blobovniczas) iterateBlobovniczas(ctx context.Context, ignoreErrors boo
 		blz, err := shBlz.Open()
 		if err != nil {
 			if ignoreErrors {
-				b.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
+				b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
 					zap.String("err", err.Error()),
 					zap.String("storage_id", p),
 					zap.String("root_path", b.rootPath))
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go
index 4fdde15a9..235c9f65d 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go
@@ -1,6 +1,7 @@
 package blobovniczatree
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"os"
@@ -86,7 +87,7 @@ func (b *sharedDB) Close() {
 	defer b.cond.L.Unlock()
 
 	if b.refCount == 0 {
-		b.log.Error(logs.AttemtToCloseAlreadyClosedBlobovnicza, zap.String("id", b.path))
+		b.log.Error(context.Background(), logs.AttemtToCloseAlreadyClosedBlobovnicza, zap.String("id", b.path))
 		b.cond.Broadcast()
 		return
 	}
@@ -94,7 +95,7 @@ func (b *sharedDB) Close() {
 	if b.refCount == 1 {
 		b.refCount = 0
 		if err := b.blcza.Close(); err != nil {
-			b.log.Error(logs.BlobovniczatreeCouldNotCloseBlobovnicza,
+			b.log.Error(context.Background(), logs.BlobovniczatreeCouldNotCloseBlobovnicza,
 				zap.String("id", b.path),
 				zap.String("error", err.Error()),
 			)
@@ -122,7 +123,7 @@ func (b *sharedDB) CloseAndRemoveFile() error {
 	}
 
 	if err := b.blcza.Close(); err != nil {
-		b.log.Error(logs.BlobovniczatreeCouldNotCloseBlobovnicza,
+		b.log.Error(context.Background(), logs.BlobovniczatreeCouldNotCloseBlobovnicza,
 			zap.String("id", b.path),
 			zap.String("error", err.Error()),
 		)
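
Close and CloseAndRemoveFile receive no context parameter, so the patch passes context.Background() to the logger explicitly. A small sketch of that fallback, assuming a ctx-aware Error method; ctxLogger, zapCtxLogger and closeDB are illustrative names only.

package main

import (
	"context"

	"go.uber.org/zap"
)

// ctxLogger is just the slice of the logger API this sketch needs.
type ctxLogger interface {
	Error(ctx context.Context, msg string, fields ...zap.Field)
}

type zapCtxLogger struct{ z *zap.Logger }

func (l zapCtxLogger) Error(_ context.Context, msg string, fields ...zap.Field) {
	l.z.Error(msg, fields...)
}

// closeDB mirrors the Close path above: no caller context exists, so
// context.Background() is passed explicitly rather than left implicit.
func closeDB(log ctxLogger, path string, closeFn func() error) {
	if err := closeFn(); err != nil {
		log.Error(context.Background(), "could not close blobovnicza",
			zap.String("id", path), zap.Error(err))
	}
}

func main() {
	closeDB(zapCtxLogger{zap.NewExample()}, "/blobovnicza/0/1.db",
		func() error { return nil })
}
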
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/option.go b/pkg/local_object_storage/blobstor/blobovniczatree/option.go
index 008be9543..b56251772 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/option.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/option.go
@@ -47,7 +47,7 @@ const (
 
 func initConfig(c *cfg) {
 	*c = cfg{
-		log:                    &logger.Logger{Logger: zap.L()},
+		log:                    logger.NewLoggerWrapper(zap.L()),
 		perm:                   defaultPerm,
 		openedCacheSize:        defaultOpenedCacheSize,
 		openedCacheTTL:         defaultOpenedCacheTTL,
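
The default config now builds its logger through a constructor instead of a struct literal. A rough sketch of that default under stand-in names (Logger, NewLoggerWrapper and cfg here are not the real types):

package main

import "go.uber.org/zap"

type Logger struct{ z *zap.Logger }

func NewLoggerWrapper(z *zap.Logger) *Logger { return &Logger{z: z} }

type cfg struct {
	log             *Logger
	openedCacheSize int
}

// initConfig mirrors the default above: the constructor hides the wrapper's
// layout, so this line keeps compiling if the wrapper gains new fields.
func initConfig(c *cfg) {
	*c = cfg{
		log:             NewLoggerWrapper(zap.L()), // global zap logger, a no-op until replaced
		openedCacheSize: 50,
	}
}

func main() {
	var c cfg
	initConfig(&c)
	c.log.z.Info("silent unless zap.ReplaceGlobals was called")
}
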
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/put.go b/pkg/local_object_storage/blobstor/blobovniczatree/put.go
index 76c4953e4..844b43151 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/put.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/put.go
@@ -82,7 +82,7 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error)
 		if !isLogical(err) {
 			i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
 		} else {
-			i.B.log.Debug(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza,
+			i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza,
 				zap.String("error", err.Error()),
 				zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 		}
@@ -91,7 +91,7 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error)
 	}
 
 	if active == nil {
-		i.B.log.Debug(logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath),
+		i.B.log.Debug(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 		return false, nil
 	}
@@ -104,7 +104,7 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error)
 		if !isLogical(err) {
 			i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
 		} else {
-			i.B.log.Debug(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza,
+			i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza,
 				zap.String("path", active.SystemPath()),
 				zap.String("error", err.Error()),
 				zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
index 202d38cd7..fee67a0a8 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
@@ -49,25 +49,25 @@ func (b *Blobovniczas) Rebuild(ctx context.Context, prm common.RebuildPrm) (comm
 
 	var res common.RebuildRes
 
-	b.log.Debug(logs.BlobovniczaTreeCompletingPreviousRebuild)
+	b.log.Debug(ctx, logs.BlobovniczaTreeCompletingPreviousRebuild)
 	completedPreviosMoves, err := b.completeIncompletedMove(ctx, prm.MetaStorage)
 	res.ObjectsMoved += completedPreviosMoves
 	if err != nil {
-		b.log.Warn(logs.BlobovniczaTreeCompletedPreviousRebuildFailed, zap.Error(err))
+		b.log.Warn(ctx, logs.BlobovniczaTreeCompletedPreviousRebuildFailed, zap.Error(err))
 		success = false
 		return res, err
 	}
-	b.log.Debug(logs.BlobovniczaTreeCompletedPreviousRebuildSuccess)
+	b.log.Debug(ctx, logs.BlobovniczaTreeCompletedPreviousRebuildSuccess)
 
-	b.log.Debug(logs.BlobovniczaTreeCollectingDBToRebuild)
+	b.log.Debug(ctx, logs.BlobovniczaTreeCollectingDBToRebuild)
 	dbsToMigrate, err := b.getDBsToRebuild(ctx, prm.FillPercent)
 	if err != nil {
-		b.log.Warn(logs.BlobovniczaTreeCollectingDBToRebuildFailed, zap.Error(err))
+		b.log.Warn(ctx, logs.BlobovniczaTreeCollectingDBToRebuildFailed, zap.Error(err))
 		success = false
 		return res, err
 	}
 
-	b.log.Info(logs.BlobovniczaTreeCollectingDBToRebuildSuccess, zap.Int("blobovniczas_to_rebuild", len(dbsToMigrate)))
+	b.log.Info(ctx, logs.BlobovniczaTreeCollectingDBToRebuildSuccess, zap.Int("blobovniczas_to_rebuild", len(dbsToMigrate)))
 	res, err = b.migrateDBs(ctx, dbsToMigrate, prm, res)
 	if err != nil {
 		success = false
@@ -78,14 +78,14 @@ func (b *Blobovniczas) Rebuild(ctx context.Context, prm common.RebuildPrm) (comm
 func (b *Blobovniczas) migrateDBs(ctx context.Context, dbs []string, prm common.RebuildPrm, res common.RebuildRes) (common.RebuildRes, error) {
 	var completedDBCount uint32
 	for _, db := range dbs {
-		b.log.Debug(logs.BlobovniczaTreeRebuildingBlobovnicza, zap.String("path", db))
+		b.log.Debug(ctx, logs.BlobovniczaTreeRebuildingBlobovnicza, zap.String("path", db))
 		movedObjects, err := b.rebuildDB(ctx, db, prm.MetaStorage, prm.WorkerLimiter)
 		res.ObjectsMoved += movedObjects
 		if err != nil {
-			b.log.Warn(logs.BlobovniczaTreeRebuildingBlobovniczaFailed, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects), zap.Error(err))
+			b.log.Warn(ctx, logs.BlobovniczaTreeRebuildingBlobovniczaFailed, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects), zap.Error(err))
 			return res, err
 		}
-		b.log.Debug(logs.BlobovniczaTreeRebuildingBlobovniczaSuccess, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects))
+		b.log.Debug(ctx, logs.BlobovniczaTreeRebuildingBlobovniczaSuccess, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects))
 		res.FilesRemoved++
 		completedDBCount++
 		b.metrics.SetRebuildPercent((100 * completedDBCount) / uint32(len(dbs)))
@@ -233,7 +233,7 @@ func (b *Blobovniczas) addRebuildTempFile(path string) (func(), error) {
 	}
 	return func() {
 		if err := os.Remove(sysPath); err != nil {
-			b.log.Warn(logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
+			b.log.Warn(context.Background(), logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
 		}
 	}, nil
 }
@@ -389,7 +389,7 @@ func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore co
 	})
 	for _, tmp := range rebuildTempFilesToRemove {
 		if err := os.Remove(filepath.Join(b.rootPath, tmp)); err != nil {
-			b.log.Warn(logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
+			b.log.Warn(ctx, logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
 		}
 	}
 	return count, err
@@ -413,14 +413,14 @@ func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blob
 		if client.IsErrObjectNotFound(err) {
 			existsInSource = false
 		} else {
-			b.log.Warn(logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err))
+			b.log.Warn(ctx, logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err))
 			return err
 		}
 	}
 
 	if !existsInSource { // object was deleted by Rebuild, need to delete move info
 		if err = source.DropMoveInfo(ctx, move.Address); err != nil {
-			b.log.Warn(logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err))
+			b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err))
 			return err
 		}
 		b.deleteProtectedObjects.Delete(move.Address)
@@ -429,7 +429,7 @@ func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blob
 
 	existsInTarget, err := target.Exists(ctx, move.Address)
 	if err != nil {
-		b.log.Warn(logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err))
+		b.log.Warn(ctx, logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err))
 		return err
 	}
 
@@ -439,25 +439,25 @@ func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blob
 		putPrm.SetMarshaledObject(gRes.Object())
 		_, err = target.Put(ctx, putPrm)
 		if err != nil {
-			b.log.Warn(logs.BlobovniczatreeCouldNotPutObjectToTargetDB, zap.String("path", targetDB.SystemPath()), zap.Error(err))
+			b.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutObjectToTargetDB, zap.String("path", targetDB.SystemPath()), zap.Error(err))
 			return err
 		}
 	}
 
 	if err = metaStore.UpdateStorageID(ctx, move.Address, move.TargetStorageID); err != nil {
-		b.log.Warn(logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", move.Address))
+		b.log.Warn(ctx, logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", move.Address))
 		return err
 	}
 
 	var deletePrm blobovnicza.DeletePrm
 	deletePrm.SetAddress(move.Address)
 	if _, err = source.Delete(ctx, deletePrm); err != nil {
-		b.log.Warn(logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", sourcePath), zap.Error(err))
+		b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", sourcePath), zap.Error(err))
 		return err
 	}
 
 	if err = source.DropMoveInfo(ctx, move.Address); err != nil {
-		b.log.Warn(logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err))
+		b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err))
 		return err
 	}
 
@@ -482,13 +482,13 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool,
 		if !isLogical(err) {
 			i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
 		} else {
-			i.B.log.Warn(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, zap.Error(err))
+			i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, zap.Error(err))
 		}
 		return false, nil
 	}
 
 	if target == nil {
-		i.B.log.Warn(logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath))
+		i.B.log.Warn(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath))
 		return false, nil
 	}
 	defer target.Close()
@@ -505,7 +505,7 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool,
 		if !isLogical(err) {
 			i.B.reportError(logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, err)
 		} else {
-			i.B.log.Warn(logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, zap.String("path", i.SourceSysPath), zap.Error(err))
+			i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, zap.String("path", i.SourceSysPath), zap.Error(err))
 		}
 		return true, nil
 	}
@@ -521,13 +521,13 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool,
 		if !isLogical(err) {
 			i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
 		} else {
-			i.B.log.Warn(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", target.SystemPath()), zap.Error(err))
+			i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", target.SystemPath()), zap.Error(err))
 		}
 		return true, nil
 	}
 
 	if err = i.MetaStore.UpdateStorageID(ctx, i.Address, targetStorageID.Bytes()); err != nil {
-		i.B.log.Warn(logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", i.Address))
+		i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", i.Address))
 		return true, nil
 	}
 
@@ -537,7 +537,7 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool,
 		if !isLogical(err) {
 			i.B.reportError(logs.BlobovniczatreeCouldNotDeleteFromSource, err)
 		} else {
-			i.B.log.Warn(logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", i.SourceSysPath), zap.Error(err))
+			i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", i.SourceSysPath), zap.Error(err))
 		}
 		return true, nil
 	}
@@ -546,7 +546,7 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool,
 		if !isLogical(err) {
 			i.B.reportError(logs.BlobovniczatreeCouldNotDropMoveInfo, err)
 		} else {
-			i.B.log.Warn(logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", i.SourceSysPath), zap.Error(err))
+			i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", i.SourceSysPath), zap.Error(err))
 		}
 		return true, nil
 	}
diff --git a/pkg/local_object_storage/blobstor/blobstor.go b/pkg/local_object_storage/blobstor/blobstor.go
index 6f579a8ca..41c6cf161 100644
--- a/pkg/local_object_storage/blobstor/blobstor.go
+++ b/pkg/local_object_storage/blobstor/blobstor.go
@@ -47,7 +47,7 @@ type cfg struct {
 }
 
 func initConfig(c *cfg) {
-	c.log = &logger.Logger{Logger: zap.L()}
+	c.log = logger.NewLoggerWrapper(zap.L())
 	c.metrics = &noopMetrics{}
 }
 
@@ -90,7 +90,7 @@ func WithStorages(st []SubStorage) Option {
 // WithLogger returns option to specify BlobStor's logger.
 func WithLogger(l *logger.Logger) Option {
 	return func(c *cfg) {
-		c.log = &logger.Logger{Logger: l.With(zap.String("component", "BlobStor"))}
+		c.log = l.With(zap.String("component", "BlobStor"))
 	}
 }
 
diff --git a/pkg/local_object_storage/blobstor/control.go b/pkg/local_object_storage/blobstor/control.go
index 9b414a9be..43436b4eb 100644
--- a/pkg/local_object_storage/blobstor/control.go
+++ b/pkg/local_object_storage/blobstor/control.go
@@ -12,7 +12,7 @@ import (
 
 // Open opens BlobStor.
 func (b *BlobStor) Open(ctx context.Context, mode mode.Mode) error {
-	b.log.Debug(logs.BlobstorOpening)
+	b.log.Debug(ctx, logs.BlobstorOpening)
 
 	b.modeMtx.Lock()
 	defer b.modeMtx.Unlock()
@@ -51,7 +51,7 @@ var ErrInitBlobovniczas = errors.New("failure on blobovnicza initialization stag
 //
 // Returns wrapped ErrInitBlobovniczas on blobovnicza tree's initialization failure.

 func (b *BlobStor) Init() error {
-	b.log.Debug(logs.BlobstorInitializing)
+	b.log.Debug(context.Background(), logs.BlobstorInitializing)
 
 	if err := b.compression.Init(); err != nil {
 		return err
@@ -68,13 +68,13 @@ func (b *BlobStor) Init() error {
 
 // Close releases all internal resources of BlobStor.
 func (b *BlobStor) Close() error {
-	b.log.Debug(logs.BlobstorClosing)
+	b.log.Debug(context.Background(), logs.BlobstorClosing)
 
 	var firstErr error
 	for i := range b.storage {
 		err := b.storage[i].Storage.Close()
 		if err != nil {
-			b.log.Info(logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error()))
+			b.log.Info(context.Background(), logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error()))
 			if firstErr == nil {
 				firstErr = err
 			}
diff --git a/pkg/local_object_storage/blobstor/delete.go b/pkg/local_object_storage/blobstor/delete.go
index c91508e6d..86d8f15e3 100644
--- a/pkg/local_object_storage/blobstor/delete.go
+++ b/pkg/local_object_storage/blobstor/delete.go
@@ -39,7 +39,7 @@ func (b *BlobStor) Delete(ctx context.Context, prm common.DeletePrm) (common.Del
 			if err == nil || !client.IsErrObjectNotFound(err) {
 				if err == nil {
 					success = true
-					logOp(b.log, deleteOp, prm.Address, b.storage[i].Storage.Type(), prm.StorageID)
+					logOp(ctx, b.log, deleteOp, prm.Address, b.storage[i].Storage.Type(), prm.StorageID)
 				}
 				return res, err
 			}
@@ -58,7 +58,7 @@ func (b *BlobStor) Delete(ctx context.Context, prm common.DeletePrm) (common.Del
 	res, err := st.Delete(ctx, prm)
 	if err == nil {
 		success = true
-		logOp(b.log, deleteOp, prm.Address, st.Type(), prm.StorageID)
+		logOp(ctx, b.log, deleteOp, prm.Address, st.Type(), prm.StorageID)
 	}
 
 	return res, err
diff --git a/pkg/local_object_storage/blobstor/exists.go b/pkg/local_object_storage/blobstor/exists.go
index 43feec7c9..556f53e12 100644
--- a/pkg/local_object_storage/blobstor/exists.go
+++ b/pkg/local_object_storage/blobstor/exists.go
@@ -73,7 +73,7 @@ func (b *BlobStor) Exists(ctx context.Context, prm common.ExistsPrm) (common.Exi
 	}
 
 	for _, err := range errors[:len(errors)-1] {
-		b.log.Warn(logs.BlobstorErrorOccurredDuringObjectExistenceChecking,
+		b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringObjectExistenceChecking,
 			zap.Stringer("address", prm.Address),
 			zap.String("error", err.Error()),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree.go b/pkg/local_object_storage/blobstor/fstree/fstree.go
index 057796db2..7f52762a7 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree.go
@@ -87,7 +87,7 @@ func New(opts ...Option) *FSTree {
 		DirNameLen:  DirNameLen,
 		metrics:     &noopMetrics{},
 		fileCounter: &noopCounter{},
-		log:         &logger.Logger{Logger: zap.L()},
+		log:         logger.NewLoggerWrapper(zap.L()),
 	}
 	for i := range opts {
 		opts[i](f)
@@ -152,7 +152,7 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr
 	des, err := os.ReadDir(dirPath)
 	if err != nil {
 		if prm.IgnoreErrors {
-			t.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
+			t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
 				zap.String("err", err.Error()),
 				zap.String("directory_path", dirPath))
 			return nil
@@ -200,7 +200,7 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr
 		}
 		if err != nil {
 			if prm.IgnoreErrors {
-				t.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
+				t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
 					zap.Stringer("address", addr),
 					zap.String("err", err.Error()),
 					zap.String("path", path))
diff --git a/pkg/local_object_storage/blobstor/fstree/option.go b/pkg/local_object_storage/blobstor/fstree/option.go
index 4d1f8fc22..7155ddcbb 100644
--- a/pkg/local_object_storage/blobstor/fstree/option.go
+++ b/pkg/local_object_storage/blobstor/fstree/option.go
@@ -53,6 +53,6 @@ func WithFileCounter(c FileCounter) Option {
 
 func WithLogger(l *logger.Logger) Option {
 	return func(f *FSTree) {
-		f.log = &logger.Logger{Logger: l.With(zap.String("component", "FSTree"))}
+		f.log = l.With(zap.String("component", "FSTree"))
 	}
 }
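
The WithLogger options now store the result of l.With(...) directly, which implies the wrapper's With returns another *logger.Logger. A sketch under that assumption; fsTreeSketch and the other names are illustrative only.

package main

import (
	"context"

	"go.uber.org/zap"
)

type Logger struct{ z *zap.Logger }

func NewLoggerWrapper(z *zap.Logger) *Logger { return &Logger{z: z} }

// With returns a child wrapper carrying extra fields, mirroring what the
// new option code appears to rely on.
func (l *Logger) With(fields ...zap.Field) *Logger { return &Logger{z: l.z.With(fields...)} }

func (l *Logger) Warn(_ context.Context, msg string, fields ...zap.Field) {
	l.z.Warn(msg, fields...)
}

type fsTreeSketch struct{ log *Logger }

type Option func(*fsTreeSketch)

// WithLogger tags every record with the component name once, instead of
// rebuilding the wrapper at each call site.
func WithLogger(l *Logger) Option {
	return func(f *fsTreeSketch) { f.log = l.With(zap.String("component", "FSTree")) }
}

func main() {
	f := &fsTreeSketch{log: NewLoggerWrapper(zap.NewExample())}
	WithLogger(NewLoggerWrapper(zap.NewExample()))(f)
	f.log.Warn(context.Background(), "error occurred during the iteration", zap.String("path", "/srv/fstree"))
}
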
diff --git a/pkg/local_object_storage/blobstor/iterate.go b/pkg/local_object_storage/blobstor/iterate.go
index f213d7547..1ba835a95 100644
--- a/pkg/local_object_storage/blobstor/iterate.go
+++ b/pkg/local_object_storage/blobstor/iterate.go
@@ -42,7 +42,7 @@ func (b *BlobStor) Iterate(ctx context.Context, prm common.IteratePrm) (common.I
 		_, err := b.storage[i].Storage.Iterate(ctx, prm)
 		if err != nil {
 			if prm.IgnoreErrors {
-				b.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
+				b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
 					zap.String("storage_path", b.storage[i].Storage.Path()),
 					zap.String("storage_type", b.storage[i].Storage.Type()),
 					zap.String("err", err.Error()))
diff --git a/pkg/local_object_storage/blobstor/logger.go b/pkg/local_object_storage/blobstor/logger.go
index 7e057a0e3..070b1eac9 100644
--- a/pkg/local_object_storage/blobstor/logger.go
+++ b/pkg/local_object_storage/blobstor/logger.go
@@ -1,6 +1,8 @@
 package blobstor
 
 import (
+	"context"
+
 	storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -11,8 +13,8 @@ const (
 	putOp    = "PUT"
 )
 
-func logOp(l *logger.Logger, op string, addr oid.Address, typ string, sID []byte) {
-	storagelog.Write(l,
+func logOp(ctx context.Context, l *logger.Logger, op string, addr oid.Address, typ string, sID []byte) {
+	storagelog.Write(ctx, l,
 		storagelog.AddressField(addr),
 		storagelog.OpField(op),
 		storagelog.StorageTypeField(typ),
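
Because the logger now wants a context, helpers in the call chain such as logOp and storagelog.Write simply gain ctx as their first parameter and forward it. A compact sketch of that threading; the types and message text here are placeholders, not the real storagelog package.

package main

import (
	"context"

	"go.uber.org/zap"
)

type Logger struct{ z *zap.Logger }

func (l *Logger) Debug(_ context.Context, msg string, fields ...zap.Field) {
	l.z.Debug(msg, fields...)
}

// writeStorageLog plays the role of storagelog.Write: it only forwards ctx.
func writeStorageLog(ctx context.Context, l *Logger, fields ...zap.Field) {
	l.Debug(ctx, "local object storage operation", fields...)
}

// logOp plays the role of the blobstor helper: ctx is added as the first
// parameter and passed straight through.
func logOp(ctx context.Context, l *Logger, op, addr, typ string) {
	writeStorageLog(ctx, l,
		zap.String("address", addr),
		zap.String("op", op),
		zap.String("type", typ))
}

func main() {
	logOp(context.Background(), &Logger{z: zap.NewExample()}, "DELETE", "cnr/obj", "fstree")
}
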
diff --git a/pkg/local_object_storage/blobstor/put.go b/pkg/local_object_storage/blobstor/put.go
index 1adae303d..342da28bf 100644
--- a/pkg/local_object_storage/blobstor/put.go
+++ b/pkg/local_object_storage/blobstor/put.go
@@ -63,7 +63,7 @@ func (b *BlobStor) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, e
 			res, err := b.storage[i].Storage.Put(ctx, prm)
 			if err == nil {
 				success = true
-				logOp(b.log, putOp, prm.Address, b.storage[i].Storage.Type(), res.StorageID)
+				logOp(ctx, b.log, putOp, prm.Address, b.storage[i].Storage.Type(), res.StorageID)
 			}
 			return res, err
 		}
diff --git a/pkg/local_object_storage/blobstor/rebuild.go b/pkg/local_object_storage/blobstor/rebuild.go
index 7b2786ba2..2a6b94789 100644
--- a/pkg/local_object_storage/blobstor/rebuild.go
+++ b/pkg/local_object_storage/blobstor/rebuild.go
@@ -30,7 +30,7 @@ func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, limiter Con
 		summary.FilesRemoved += res.FilesRemoved
 		summary.ObjectsMoved += res.ObjectsMoved
 		if err != nil {
-			b.log.Error(logs.BlobstorRebuildFailedToRebuildStorages,
+			b.log.Error(ctx, logs.BlobstorRebuildFailedToRebuildStorages,
 				zap.String("failed_storage_path", storage.Storage.Path()),
 				zap.String("failed_storage_type", storage.Storage.Type()),
 				zap.Error(err))
@@ -38,7 +38,7 @@ func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, limiter Con
 			break
 		}
 	}
-	b.log.Info(logs.BlobstorRebuildRebuildStoragesCompleted,
+	b.log.Info(ctx, logs.BlobstorRebuildRebuildStoragesCompleted,
 		zap.Bool("success", rErr == nil),
 		zap.Uint64("total_files_removed", summary.FilesRemoved),
 		zap.Uint64("total_objects_moved", summary.ObjectsMoved))
diff --git a/pkg/local_object_storage/engine/control.go b/pkg/local_object_storage/engine/control.go
index 80fb3f9ed..98ec73ae9 100644
--- a/pkg/local_object_storage/engine/control.go
+++ b/pkg/local_object_storage/engine/control.go
@@ -49,7 +49,7 @@ func (e *StorageEngine) open(ctx context.Context) error {
 
 	for res := range errCh {
 		if res.err != nil {
-			e.log.Error(logs.EngineCouldNotOpenShardClosingAndSkipping,
+			e.log.Error(ctx, logs.EngineCouldNotOpenShardClosingAndSkipping,
 				zap.String("id", res.id),
 				zap.Error(res.err))
 
@@ -58,7 +58,7 @@ func (e *StorageEngine) open(ctx context.Context) error {
 
 			err := sh.Close()
 			if err != nil {
-				e.log.Error(logs.EngineCouldNotClosePartiallyInitializedShard,
+				e.log.Error(ctx, logs.EngineCouldNotClosePartiallyInitializedShard,
 					zap.String("id", res.id),
 					zap.Error(res.err))
 			}
@@ -101,7 +101,7 @@ func (e *StorageEngine) Init(ctx context.Context) error {
 	for res := range errCh {
 		if res.err != nil {
 			if errors.Is(res.err, blobstor.ErrInitBlobovniczas) {
-				e.log.Error(logs.EngineCouldNotInitializeShardClosingAndSkipping,
+				e.log.Error(ctx, logs.EngineCouldNotInitializeShardClosingAndSkipping,
 					zap.String("id", res.id),
 					zap.Error(res.err))
 
@@ -110,7 +110,7 @@ func (e *StorageEngine) Init(ctx context.Context) error {
 
 				err := sh.Close()
 				if err != nil {
-					e.log.Error(logs.EngineCouldNotClosePartiallyInitializedShard,
+					e.log.Error(ctx, logs.EngineCouldNotClosePartiallyInitializedShard,
 						zap.String("id", res.id),
 						zap.Error(res.err))
 				}
@@ -165,7 +165,7 @@ func (e *StorageEngine) close(releasePools bool) error {
 
 	for id, sh := range e.shards {
 		if err := sh.Close(); err != nil {
-			e.log.Debug(logs.EngineCouldNotCloseShard,
+			e.log.Debug(context.Background(), logs.EngineCouldNotCloseShard,
 				zap.String("id", id),
 				zap.String("error", err.Error()),
 			)
@@ -311,7 +311,7 @@ loop:
 	for _, p := range shardsToReload {
 		err := p.sh.Reload(ctx, p.opts...)
 		if err != nil {
-			e.log.Error(logs.EngineCouldNotReloadAShard,
+			e.log.Error(ctx, logs.EngineCouldNotReloadAShard,
 				zap.Stringer("shard id", p.sh.ID()),
 				zap.Error(err))
 		}
@@ -340,7 +340,7 @@ loop:
 			return fmt.Errorf("could not add %s shard: %w", idStr, err)
 		}
 
-		e.log.Info(logs.EngineAddedNewShard, zap.String("id", idStr))
+		e.log.Info(ctx, logs.EngineAddedNewShard, zap.String("id", idStr))
 	}
 
 	return nil
diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go
index 9ca3a7cee..2e957eb04 100644
--- a/pkg/local_object_storage/engine/delete.go
+++ b/pkg/local_object_storage/engine/delete.go
@@ -152,7 +152,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo
 	e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
 		res, err := sh.Select(ctx, selectPrm)
 		if err != nil {
-			e.log.Warn(logs.EngineErrorDuringSearchingForObjectChildren,
+			e.log.Warn(ctx, logs.EngineErrorDuringSearchingForObjectChildren,
 				zap.Stringer("addr", addr),
 				zap.String("error", err.Error()),
 				zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
@@ -164,7 +164,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo
 
 			_, err = sh.Inhume(ctx, inhumePrm)
 			if err != nil {
-				e.log.Debug(logs.EngineCouldNotInhumeObjectInShard,
+				e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard,
 					zap.Stringer("addr", addr),
 					zap.String("err", err.Error()),
 					zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
@@ -194,7 +194,7 @@ func (e *StorageEngine) deleteChunks(
 		inhumePrm.MarkAsGarbage(addr)
 		_, err = sh.Inhume(ctx, inhumePrm)
 		if err != nil {
-			e.log.Debug(logs.EngineCouldNotInhumeObjectInShard,
+			e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard,
 				zap.Stringer("addr", addr),
 				zap.String("err", err.Error()),
 				zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go
index 3183d6137..8963ec099 100644
--- a/pkg/local_object_storage/engine/engine.go
+++ b/pkg/local_object_storage/engine/engine.go
@@ -99,20 +99,20 @@ func (e *StorageEngine) moveToDegraded(sh *shard.Shard, errCount uint32, isMeta
 	if isMeta {
 		err := sh.SetMode(mode.DegradedReadOnly)
 		if err == nil {
-			log.Info(logs.EngineShardIsMovedInDegradedModeDueToErrorThreshold)
+			log.Info(context.Background(), logs.EngineShardIsMovedInDegradedModeDueToErrorThreshold)
 			return
 		}
-		log.Error(logs.EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly,
+		log.Error(context.Background(), logs.EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly,
 			zap.Error(err))
 	}
 
 	err := sh.SetMode(mode.ReadOnly)
 	if err != nil {
-		log.Error(logs.EngineFailedToMoveShardInReadonlyMode, zap.Error(err))
+		log.Error(context.Background(), logs.EngineFailedToMoveShardInReadonlyMode, zap.Error(err))
 		return
 	}
 
-	log.Info(logs.EngineShardIsMovedInReadonlyModeDueToErrorThreshold)
+	log.Info(context.Background(), logs.EngineShardIsMovedInReadonlyModeDueToErrorThreshold)
 }
 
 // reportShardErrorByID increases shard error counter and logs an error.
@@ -137,7 +137,7 @@ func (e *StorageEngine) reportShardError(
 	fields ...zap.Field,
 ) {
 	if isLogical(err) {
-		e.log.Warn(msg,
+		e.log.Warn(context.Background(), msg,
 			zap.Stringer("shard_id", sh.ID()),
 			zap.String("error", err.Error()))
 		return
@@ -147,7 +147,7 @@ func (e *StorageEngine) reportShardError(
 	e.metrics.IncErrorCounter(sh.ID().String())
 
 	sid := sh.ID()
-	e.log.Warn(msg, append([]zap.Field{
+	e.log.Warn(context.Background(), msg, append([]zap.Field{
 		zap.Stringer("shard_id", sid),
 		zap.Uint32("error count", errCount),
 		zap.String("error", err.Error()),
@@ -168,7 +168,7 @@ func (e *StorageEngine) reportShardError(
 	default:
 		// For background workers we can have a lot of such errors,
 		// thus logging is done with DEBUG level.
-		e.log.Debug(logs.EngineModeChangeIsInProgressIgnoringSetmodeRequest,
+		e.log.Debug(context.Background(), logs.EngineModeChangeIsInProgressIgnoringSetmodeRequest,
 			zap.Stringer("shard_id", sid),
 			zap.Uint32("error_count", errCount))
 	}
@@ -197,7 +197,7 @@ type cfg struct {
 
 func defaultCfg() *cfg {
 	res := &cfg{
-		log:           &logger.Logger{Logger: zap.L()},
+		log:           logger.NewLoggerWrapper(zap.L()),
 		shardPoolSize: 20,
 		metrics:       noopMetrics{},
 	}
@@ -269,8 +269,8 @@ type containerSource struct {
 
 func (s *containerSource) IsContainerAvailable(ctx context.Context, id cid.ID) (bool, error) {
 	select {
-	case <-ctx.Done():
-		return false, ctx.Err()
+	case <-ctx.Done():
+		return false, ctx.Err()
 	default:
 	}
 
diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go
index 940e30323..b88c249b1 100644
--- a/pkg/local_object_storage/engine/evacuate.go
+++ b/pkg/local_object_storage/engine/evacuate.go
@@ -297,12 +297,12 @@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p
 		e.evacuateLimiter.Complete(err)
 	}()
 
-	e.log.Info(logs.EngineStartedShardsEvacuation, zap.Strings("shard_ids", shardIDs), evacuationOperationLogField,
+	e.log.Info(ctx, logs.EngineStartedShardsEvacuation, zap.Strings("shard_ids", shardIDs), evacuationOperationLogField,
 		zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope))
 
 	err = e.getTotals(ctx, prm, shardsToEvacuate, res)
 	if err != nil {
-		e.log.Error(logs.EngineShardsEvacuationFailedToCount, zap.Strings("shard_ids", shardIDs), zap.Error(err), evacuationOperationLogField,
+		e.log.Error(ctx, logs.EngineShardsEvacuationFailedToCount, zap.Strings("shard_ids", shardIDs), zap.Error(err), evacuationOperationLogField,
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope))
 		return err
 	}
@@ -336,12 +336,12 @@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p
 		err = errors.Join(err, fmt.Errorf("object error: %w", errObject))
 	}
 	if err != nil {
-		e.log.Error(logs.EngineFinishedWithErrorShardsEvacuation, zap.Error(err), zap.Strings("shard_ids", shardIDs), evacuationOperationLogField,
+		e.log.Error(ctx, logs.EngineFinishedWithErrorShardsEvacuation, zap.Error(err), zap.Strings("shard_ids", shardIDs), evacuationOperationLogField,
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope))
 		return err
 	}
 
-	e.log.Info(logs.EngineFinishedSuccessfullyShardsEvacuation,
+	e.log.Info(ctx, logs.EngineFinishedSuccessfullyShardsEvacuation,
 		zap.Strings("shard_ids", shardIDs),
 		evacuationOperationLogField,
 		zap.Uint64("total_objects", res.ObjectsTotal()),
@@ -494,7 +494,7 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context
 	err := sh.IterateOverContainers(ctx, cntPrm)
 	if err != nil {
 		cancel(err)
-		e.log.Error(logs.EngineShardsEvacuationFailedToListObjects, zap.String("shard_id", shardID), zap.Error(err), evacuationOperationLogField,
+		e.log.Error(ctx, logs.EngineShardsEvacuationFailedToListObjects, zap.String("shard_id", shardID), zap.Error(err), evacuationOperationLogField,
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 	}
 	return err
@@ -551,7 +551,7 @@ func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, tree
 			return err
 		}
 		if success {
-			e.log.Debug(logs.EngineShardsEvacuationTreeEvacuatedLocal,
+			e.log.Debug(ctx, logs.EngineShardsEvacuationTreeEvacuatedLocal,
 				zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID),
 				zap.String("from_shard_id", sh.ID().String()), zap.String("to_shard_id", shardID),
 				evacuationOperationLogField, zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
@@ -561,26 +561,26 @@ func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, tree
 
 		moved, nodePK, err := e.evacuateTreeToOtherNode(ctx, sh, contTree, prm)
 		if err != nil {
-			e.log.Error(logs.EngineShardsEvacuationFailedToMoveTree,
+			e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveTree,
 				zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID),
 				zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField,
 				zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 			return err
 		}
 		if moved {
-			e.log.Debug(logs.EngineShardsEvacuationTreeEvacuatedRemote,
+			e.log.Debug(ctx, logs.EngineShardsEvacuationTreeEvacuatedRemote,
 				zap.String("cid", contTree.CID.EncodeToString()), zap.String("treeID", contTree.TreeID),
 				zap.String("from_shardID", sh.ID().String()), zap.String("to_node", nodePK),
 				evacuationOperationLogField, zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 			res.trEvacuated.Add(1)
 		} else if prm.IgnoreErrors {
 			res.trFailed.Add(1)
-			e.log.Warn(logs.EngineShardsEvacuationFailedToMoveTree,
+			e.log.Warn(ctx, logs.EngineShardsEvacuationFailedToMoveTree,
 				zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID),
 				zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField,
 				zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 		} else {
-			e.log.Error(logs.EngineShardsEvacuationFailedToMoveTree,
+			e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveTree,
 				zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID),
 				zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField,
 				zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
@@ -770,7 +770,7 @@ func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objI
 			res.objFailed.Add(1)
 			return nil
 		}
-		e.log.Error(logs.EngineShardsEvacuationFailedToReadObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
+		e.log.Error(ctx, logs.EngineShardsEvacuationFailedToReadObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 		return err
 	}
@@ -792,7 +792,7 @@ func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objI
 
 	moved, err := prm.ObjectsHandler(ctx, addr, getRes.Object())
 	if err != nil {
-		e.log.Error(logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
+		e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 		return err
 	}
@@ -800,7 +800,7 @@ func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objI
 		res.objEvacuated.Add(1)
 	} else if prm.IgnoreErrors {
 		res.objFailed.Add(1)
-		e.log.Warn(logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
+		e.log.Warn(ctx, logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 	} else {
 		return fmt.Errorf("object %s was not replicated", addr)
@@ -835,7 +835,7 @@ func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Add
 		switch e.putToShard(ctx, shards[j].hashedShard, shards[j].pool, addr, object, container.IsIndexedContainer(cnr)).status {
 		case putToShardSuccess:
 			res.objEvacuated.Add(1)
-			e.log.Debug(logs.EngineObjectIsMovedToAnotherShard,
+			e.log.Debug(ctx, logs.EngineObjectIsMovedToAnotherShard,
 				zap.Stringer("from", sh.ID()),
 				zap.Stringer("to", shards[j].ID()),
 				zap.Stringer("addr", addr),
diff --git a/pkg/local_object_storage/engine/get.go b/pkg/local_object_storage/engine/get.go
index a1fe8a010..d6827e6c3 100644
--- a/pkg/local_object_storage/engine/get.go
+++ b/pkg/local_object_storage/engine/get.go
@@ -104,7 +104,7 @@ func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) {
 			return GetRes{}, it.OutError
 		}
 		if it.ShardWithMeta.Shard != nil && it.MetaError != nil {
-			e.log.Warn(logs.ShardMetaInfoPresentButObjectNotFound,
+			e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound,
 				zap.Stringer("shard_id", it.ShardWithMeta.ID()),
 				zap.String("error", it.MetaError.Error()),
 				zap.Stringer("address", prm.addr),
diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go
index b8959b534..1dc64c174 100644
--- a/pkg/local_object_storage/engine/inhume.go
+++ b/pkg/local_object_storage/engine/inhume.go
@@ -90,7 +90,7 @@ func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, e
 		if !prm.forceRemoval {
 			locked, err := e.IsLocked(ctx, prm.addrs[i])
 			if err != nil {
-				e.log.Warn(logs.EngineRemovingAnObjectWithoutFullLockingCheck,
+				e.log.Warn(ctx, logs.EngineRemovingAnObjectWithoutFullLockingCheck,
 					zap.Error(err),
 					zap.Stringer("addr", prm.addrs[i]),
 					zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
@@ -264,7 +264,7 @@ func (e *StorageEngine) processExpiredLocks(ctx context.Context, epoch uint64, l
 
 		select {
 		case <-ctx.Done():
-			e.log.Info(logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(ctx.Err()))
+			e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(ctx.Err()))
 			return true
 		default:
 			return false
@@ -278,7 +278,7 @@ func (e *StorageEngine) processDeletedLocks(ctx context.Context, lockers []oid.A
 
 		select {
 		case <-ctx.Done():
-			e.log.Info(logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(ctx.Err()))
+			e.log.Info(ctx, logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(ctx.Err()))
 			return true
 		default:
 			return false
@@ -305,7 +305,7 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid
 	e.iterateOverUnsortedShards(func(sh hashedShard) bool {
 		select {
 		case <-ctx.Done():
-			e.log.Info(logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err()))
+			e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err()))
 			failed = true
 			return true
 		default:
@@ -316,7 +316,7 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid
 			prm.SetContainerID(id)
 			s, err := sh.ContainerSize(prm)
 			if err != nil {
-				e.log.Warn(logs.EngineFailedToGetContainerSize, zap.Stringer("container_id", id), zap.Error(err))
+				e.log.Warn(ctx, logs.EngineFailedToGetContainerSize, zap.Stringer("container_id", id), zap.Error(err))
 				failed = true
 				return true
 			}
@@ -338,7 +338,7 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid
 	e.iterateOverUnsortedShards(func(sh hashedShard) bool {
 		select {
 		case <-ctx.Done():
-			e.log.Info(logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err()))
+			e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err()))
 			failed = true
 			return true
 		default:
@@ -346,7 +346,7 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid
 
 		for id := range idMap {
 			if err := sh.DeleteContainerSize(ctx, id); err != nil {
-				e.log.Warn(logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err))
+				e.log.Warn(ctx, logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err))
 				failed = true
 				return true
 			}
@@ -383,7 +383,7 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci
 	e.iterateOverUnsortedShards(func(sh hashedShard) bool {
 		select {
 		case <-ctx.Done():
-			e.log.Info(logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err()))
+			e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err()))
 			failed = true
 			return true
 		default:
@@ -394,7 +394,7 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci
 			prm.ContainerID = id
 			s, err := sh.ContainerCount(ctx, prm)
 			if err != nil {
-				e.log.Warn(logs.EngineFailedToGetContainerCounters, zap.Stringer("container_id", id), zap.Error(err))
+				e.log.Warn(ctx, logs.EngineFailedToGetContainerCounters, zap.Stringer("container_id", id), zap.Error(err))
 				failed = true
 				return true
 			}
@@ -416,7 +416,7 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci
 	e.iterateOverUnsortedShards(func(sh hashedShard) bool {
 		select {
 		case <-ctx.Done():
-			e.log.Info(logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err()))
+			e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err()))
 			failed = true
 			return true
 		default:
@@ -424,7 +424,7 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci
 
 		for id := range idMap {
 			if err := sh.DeleteContainerCount(ctx, id); err != nil {
-				e.log.Warn(logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err))
+				e.log.Warn(ctx, logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err))
 				failed = true
 				return true
 			}
@@ -449,7 +449,7 @@ func (e *StorageEngine) selectNonExistentIDs(ctx context.Context, ids []cid.ID)
 	for _, id := range ids {
 		isAvailable, err := cs.IsContainerAvailable(ctx, id)
 		if err != nil {
-			e.log.Warn(logs.EngineFailedToCheckContainerAvailability, zap.Stringer("container_id", id), zap.Error(err))
+			e.log.Warn(ctx, logs.EngineFailedToCheckContainerAvailability, zap.Stringer("container_id", id), zap.Error(err))
 			return nil, err
 		}
 		if isAvailable {
diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go
index 56d3ef490..635f0e302 100644
--- a/pkg/local_object_storage/engine/put.go
+++ b/pkg/local_object_storage/engine/put.go
@@ -141,7 +141,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti
 				// expired => do nothing with it
 				res.status = putToShardExists
 			} else {
-				e.log.Warn(logs.EngineCouldNotCheckObjectExistence,
+				e.log.Warn(ctx, logs.EngineCouldNotCheckObjectExistence,
 					zap.Stringer("shard_id", sh.ID()),
 					zap.String("error", err.Error()),
 					zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
@@ -163,14 +163,14 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti
 		if err != nil {
 			if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) ||
 				errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) {
-				e.log.Warn(logs.EngineCouldNotPutObjectToShard,
+				e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard,
 					zap.Stringer("shard_id", sh.ID()),
 					zap.String("error", err.Error()),
 					zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 				return
 			}
 			if client.IsErrObjectAlreadyRemoved(err) {
-				e.log.Warn(logs.EngineCouldNotPutObjectToShard,
+				e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard,
 					zap.Stringer("shard_id", sh.ID()),
 					zap.String("error", err.Error()),
 					zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
@@ -185,7 +185,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti
 
 		res.status = putToShardSuccess
 	}); err != nil {
-		e.log.Warn(logs.EngineCouldNotPutObjectToShard, zap.Error(err))
+		e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, zap.Error(err))
 		close(exitCh)
 	}
 
diff --git a/pkg/local_object_storage/engine/range.go b/pkg/local_object_storage/engine/range.go
index fde6052ae..c5c94eef7 100644
--- a/pkg/local_object_storage/engine/range.go
+++ b/pkg/local_object_storage/engine/range.go
@@ -116,7 +116,7 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error
 			return RngRes{}, it.OutError
 		}
 		if it.ShardWithMeta.Shard != nil && it.MetaError != nil {
-			e.log.Warn(logs.ShardMetaInfoPresentButObjectNotFound,
+			e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound,
 				zap.Stringer("shard_id", it.ShardWithMeta.ID()),
 				zap.String("error", it.MetaError.Error()),
 				zap.Stringer("address", prm.addr),
diff --git a/pkg/local_object_storage/engine/remove_copies.go b/pkg/local_object_storage/engine/remove_copies.go
index 5e1ced56a..8ab3c5217 100644
--- a/pkg/local_object_storage/engine/remove_copies.go
+++ b/pkg/local_object_storage/engine/remove_copies.go
@@ -43,7 +43,7 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat
 		prm.Concurrency = defaultRemoveDuplicatesConcurrency
 	}
 
-	e.log.Info(logs.EngineStartingRemovalOfLocallyredundantCopies,
+	e.log.Info(ctx, logs.EngineStartingRemovalOfLocallyredundantCopies,
 		zap.Int("concurrency", prm.Concurrency))
 
 	// The mutex must be taken for the whole duration to avoid target shard being removed
@@ -55,7 +55,7 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat
 	// This is not currently the case, because `FreeSpace` metric used by weight sorting is always 0.
 	// However we could change weights in future and easily forget this function.
 	for _, sh := range e.shards {
-		e.log.Debug(logs.EngineStartedDuplicatesRemovalRoutine, zap.Stringer("shard_id", sh.ID()))
+		e.log.Debug(ctx, logs.EngineStartedDuplicatesRemovalRoutine, zap.Stringer("shard_id", sh.ID()))
 		ch := make(chan oid.Address)
 
 		errG, ctx := errgroup.WithContext(ctx)
@@ -93,12 +93,12 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat
 			})
 		}
 		if err := errG.Wait(); err != nil {
-			e.log.Error(logs.EngineFinishedRemovalOfLocallyredundantCopies, zap.Error(err))
+			e.log.Error(ctx, logs.EngineFinishedRemovalOfLocallyredundantCopies, zap.Error(err))
 			return err
 		}
 	}
 
-	e.log.Info(logs.EngineFinishedRemovalOfLocallyredundantCopies)
+	e.log.Info(ctx, logs.EngineFinishedRemovalOfLocallyredundantCopies)
 	return nil
 }
 
diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go
index aab2c423c..e172706e3 100644
--- a/pkg/local_object_storage/engine/shards.go
+++ b/pkg/local_object_storage/engine/shards.go
@@ -140,7 +140,7 @@ func (e *StorageEngine) createShard(_ context.Context, opts []shard.Option) (*sh
 	)...)
 
 	if err := sh.UpdateID(); err != nil {
-		e.log.Warn(logs.FailedToUpdateShardID, zap.Stringer("shard_id", sh.ID()), zap.String("metabase_path", sh.DumpInfo().MetaBaseInfo.Path), zap.Error(err))
+		e.log.Warn(context.Background(), logs.FailedToUpdateShardID, zap.Stringer("shard_id", sh.ID()), zap.String("metabase_path", sh.DumpInfo().MetaBaseInfo.Path), zap.Error(err))
 	}
 
 	return sh, nil
@@ -228,7 +228,7 @@ func (e *StorageEngine) removeShards(ids ...string) {
 			delete(e.shardPools, id)
 		}
 
-		e.log.Info(logs.EngineShardHasBeenRemoved,
+		e.log.Info(context.Background(), logs.EngineShardHasBeenRemoved,
 			zap.String("id", id))
 	}
 	e.mtx.Unlock()
@@ -236,14 +236,14 @@ func (e *StorageEngine) removeShards(ids ...string) {
 	for _, sh := range ss {
 		err := sh.SetMode(mode.Disabled)
 		if err != nil {
-			e.log.Error(logs.EngineCouldNotChangeShardModeToDisabled,
+			e.log.Error(context.Background(), logs.EngineCouldNotChangeShardModeToDisabled,
 				zap.Stringer("id", sh.ID()),
 				zap.Error(err),
 			)
 		}
 		err = sh.Close()
 		if err != nil {
-			e.log.Error(logs.EngineCouldNotCloseRemovedShard,
+			e.log.Error(context.Background(), logs.EngineCouldNotCloseRemovedShard,
 				zap.Stringer("id", sh.ID()),
 				zap.Error(err),
 			)
@@ -340,7 +340,7 @@ func (e *StorageEngine) HandleNewEpoch(ctx context.Context, epoch uint64) {
 			return
 		case sh.NotificationChannel() <- ev:
 		default:
-			e.log.Debug(logs.ShardEventProcessingInProgress,
+			e.log.Debug(ctx, logs.ShardEventProcessingInProgress,
 				zap.Uint64("epoch", epoch), zap.Stringer("shard", sh.ID()))
 		}
 	}
@@ -369,7 +369,7 @@ func (e *StorageEngine) closeShards(deletedShards []hashedShard) error {
 		eg.Go(func() error {
 			err := sh.SetMode(mode.Disabled)
 			if err != nil {
-				e.log.Error(logs.EngineCouldNotChangeShardModeToDisabled,
+				e.log.Error(context.Background(), logs.EngineCouldNotChangeShardModeToDisabled,
 					zap.Stringer("id", sh.ID()),
 					zap.Error(err),
 				)
@@ -380,7 +380,7 @@ func (e *StorageEngine) closeShards(deletedShards []hashedShard) error {
 
 			err = sh.Close()
 			if err != nil {
-				e.log.Error(logs.EngineCouldNotCloseRemovedShard,
+				e.log.Error(context.Background(), logs.EngineCouldNotCloseRemovedShard,
 					zap.Stringer("id", sh.ID()),
 					zap.Error(err),
 				)
@@ -432,7 +432,7 @@ func (e *StorageEngine) deleteShards(ids []*shard.ID) ([]hashedShard, error) {
 			delete(e.shardPools, idStr)
 		}
 
-		e.log.Info(logs.EngineShardHasBeenRemoved,
+		e.log.Info(context.Background(), logs.EngineShardHasBeenRemoved,
 			zap.String("id", idStr))
 	}
 
diff --git a/pkg/local_object_storage/internal/log/log.go b/pkg/local_object_storage/internal/log/log.go
index 23740868d..6b101fa60 100644
--- a/pkg/local_object_storage/internal/log/log.go
+++ b/pkg/local_object_storage/internal/log/log.go
@@ -1,14 +1,16 @@
 package storagelog
 
 import (
+	"context"
+
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
 	"go.uber.org/zap"
 )
 
 // Write writes message about storage engine's operation to logger.
-func Write(logger *logger.Logger, fields ...zap.Field) {
-	logger.Debug(logs.StorageOperation, fields...)
+func Write(ctx context.Context, logger *logger.Logger, fields ...zap.Field) {
+	logger.Debug(ctx, logs.StorageOperation, fields...)
 }
 
 // AddressField returns logger's field for object address.
diff --git a/pkg/local_object_storage/metabase/control.go b/pkg/local_object_storage/metabase/control.go
index d6546d922..68e065a0a 100644
--- a/pkg/local_object_storage/metabase/control.go
+++ b/pkg/local_object_storage/metabase/control.go
@@ -57,7 +57,7 @@ func (db *DB) openDB(mode mode.Mode) error {
 		return fmt.Errorf("can't create dir %s for metabase: %w", db.info.Path, err)
 	}
 
-	db.log.Debug(logs.MetabaseCreatedDirectoryForMetabase, zap.String("path", db.info.Path))
+	db.log.Debug(context.Background(), logs.MetabaseCreatedDirectoryForMetabase, zap.String("path", db.info.Path))
 
 	if db.boltOptions == nil {
 		opts := *bbolt.DefaultOptions
@@ -78,9 +78,9 @@ func (db *DB) openBolt() error {
 	db.boltDB.MaxBatchDelay = db.boltBatchDelay
 	db.boltDB.MaxBatchSize = db.boltBatchSize
 
-	db.log.Debug(logs.MetabaseOpenedBoltDBInstanceForMetabase)
+	db.log.Debug(context.Background(), logs.MetabaseOpenedBoltDBInstanceForMetabase)
 
-	db.log.Debug(logs.MetabaseCheckingMetabaseVersion)
+	db.log.Debug(context.Background(), logs.MetabaseCheckingMetabaseVersion)
 	return db.boltDB.View(func(tx *bbolt.Tx) error {
 		// The safest way to check if the metabase is fresh is to check if it has no buckets.
 		// However, shard info can be present. So here we check that the number of buckets is
diff --git a/pkg/local_object_storage/metabase/db.go b/pkg/local_object_storage/metabase/db.go
index 2cd990814..4474aa229 100644
--- a/pkg/local_object_storage/metabase/db.go
+++ b/pkg/local_object_storage/metabase/db.go
@@ -70,7 +70,7 @@ func defaultCfg() *cfg {
 		},
 		boltBatchDelay: bbolt.DefaultMaxBatchDelay,
 		boltBatchSize:  bbolt.DefaultMaxBatchSize,
-		log:            &logger.Logger{Logger: zap.L()},
+		log:            logger.NewLoggerWrapper(zap.L()),
 		metrics:        &noopMetrics{},
 	}
 }
diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go
index b5ac22017..62ab1056d 100644
--- a/pkg/local_object_storage/metabase/delete.go
+++ b/pkg/local_object_storage/metabase/delete.go
@@ -117,7 +117,7 @@ func (db *DB) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
 	if err == nil {
 		deleted = true
 		for i := range prm.addrs {
-			storagelog.Write(db.log,
+			storagelog.Write(ctx, db.log,
 				storagelog.AddressField(prm.addrs[i]),
 				storagelog.OpField("metabase DELETE"))
 		}
diff --git a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go
index 12f27d330..8d1e18729 100644
--- a/pkg/local_object_storage/metabase/inhume.go
+++ b/pkg/local_object_storage/metabase/inhume.go
@@ -205,7 +205,7 @@ func (db *DB) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
 	success = err == nil
 	if success {
 		for _, addr := range prm.target {
-			storagelog.Write(db.log,
+			storagelog.Write(ctx, db.log,
 				storagelog.AddressField(addr),
 				storagelog.OpField("metabase INHUME"))
 		}
diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go
index 2d94e7ae1..d7675869f 100644
--- a/pkg/local_object_storage/metabase/put.go
+++ b/pkg/local_object_storage/metabase/put.go
@@ -101,7 +101,7 @@ func (db *DB) Put(ctx context.Context, prm PutPrm) (res PutRes, err error) {
 	})
 	if err == nil {
 		success = true
-		storagelog.Write(db.log,
+		storagelog.Write(ctx, db.log,
 			storagelog.AddressField(objectCore.AddressOf(prm.obj)),
 			storagelog.OpField("metabase PUT"))
 	}
diff --git a/pkg/local_object_storage/metabase/upgrade_test.go b/pkg/local_object_storage/metabase/upgrade_test.go
index aeb14aeb6..e2eee86b0 100644
--- a/pkg/local_object_storage/metabase/upgrade_test.go
+++ b/pkg/local_object_storage/metabase/upgrade_test.go
@@ -113,7 +113,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
 		})
 	}
 	require.NoError(t, eg.Wait())
-	db.log.Info("simple objects generated")
+	db.log.Info(ctx, "simple objects generated")
 	eg, ctx = errgroup.WithContext(context.Background())
 	eg.SetLimit(generateWorkersCount)
 	// complex objects
@@ -137,7 +137,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
 		})
 	}
 	require.NoError(t, eg.Wait())
-	db.log.Info("complex objects generated")
+	db.log.Info(ctx, "complex objects generated")
 	eg, ctx = errgroup.WithContext(context.Background())
 	eg.SetLimit(generateWorkersCount)
 	// simple objects deleted by gc marks
@@ -159,7 +159,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
 		})
 	}
 	require.NoError(t, eg.Wait())
-	db.log.Info("simple objects deleted by gc marks generated")
+	db.log.Info(ctx, "simple objects deleted by gc marks generated")
 	eg, ctx = errgroup.WithContext(context.Background())
 	eg.SetLimit(10000)
 	// simple objects deleted by tombstones
@@ -189,7 +189,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
 		})
 	}
 	require.NoError(t, eg.Wait())
-	db.log.Info("simple objects deleted by tombstones generated")
+	db.log.Info(ctx, "simple objects deleted by tombstones generated")
 	eg, ctx = errgroup.WithContext(context.Background())
 	eg.SetLimit(generateWorkersCount)
 	// simple objects locked by locks
@@ -216,7 +216,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
 		})
 	}
 	require.NoError(t, eg.Wait())
-	db.log.Info("simple objects locked by locks generated")
+	db.log.Info(ctx, "simple objects locked by locks generated")
 	require.NoError(t, db.boltDB.Sync())
 	require.NoError(t, db.Close())
 }
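
In the test hunks above, the ctx returned by errgroup.WithContext is the same one passed to the context-aware Info calls, so generator workers and their log entries share a single cancellation scope. A compact, generic illustration of that pattern (names and counts are placeholders, not the test's actual helpers):

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	eg, ctx := errgroup.WithContext(context.Background())
	eg.SetLimit(4) // bound the number of concurrent generator workers
	for i := 0; i < 8; i++ {
		eg.Go(func() error {
			// a real generator would create an object here; returning ctx.Err()
			// stops the worker early if a sibling worker already failed
			return ctx.Err()
		})
	}
	if err := eg.Wait(); err != nil {
		fmt.Println("generation failed:", err)
		return
	}
	// the patched test passes the same ctx to db.log.Info at this point
	fmt.Println("objects generated")
}
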
diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go
index 62800dbd0..eb3aa61c0 100644
--- a/pkg/local_object_storage/shard/control.go
+++ b/pkg/local_object_storage/shard/control.go
@@ -21,7 +21,7 @@ import (
 )
 
 func (s *Shard) handleMetabaseFailure(stage string, err error) error {
-	s.log.Error(logs.ShardMetabaseFailureSwitchingMode,
+	s.log.Error(context.Background(), logs.ShardMetabaseFailureSwitchingMode,
 		zap.String("stage", stage),
 		zap.Stringer("mode", mode.ReadOnly),
 		zap.Error(err))
@@ -31,7 +31,7 @@ func (s *Shard) handleMetabaseFailure(stage string, err error) error {
 		return nil
 	}
 
-	s.log.Error(logs.ShardCantMoveShardToReadonlySwitchMode,
+	s.log.Error(context.Background(), logs.ShardCantMoveShardToReadonlySwitchMode,
 		zap.String("stage", stage),
 		zap.Stringer("mode", mode.DegradedReadOnly),
 		zap.Error(err))
@@ -211,7 +211,7 @@ func (s *Shard) refillMetabase(ctx context.Context) error {
 	withCount := true
 	totalObjects, err := s.blobStor.ObjectsCount(ctx)
 	if err != nil {
-		s.log.Warn(logs.EngineRefillFailedToGetObjectsCount, zap.Error(err))
+		s.log.Warn(ctx, logs.EngineRefillFailedToGetObjectsCount, zap.Error(err))
 		withCount = false
 	}
 
@@ -270,7 +270,7 @@ func (s *Shard) refillMetabase(ctx context.Context) error {
 func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address, descriptor []byte) error {
 	obj := objectSDK.New()
 	if err := obj.Unmarshal(data); err != nil {
-		s.log.Warn(logs.ShardCouldNotUnmarshalObject,
+		s.log.Warn(ctx, logs.ShardCouldNotUnmarshalObject,
 			zap.Stringer("address", addr),
 			zap.String("err", err.Error()))
 		return nil
@@ -285,7 +285,7 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address,
 			return err
 		}
 		if info.Removed {
-			s.log.Debug(logs.ShardSkipObjectFromResyncContainerDeleted, zap.Stringer("address", addr))
+			s.log.Debug(ctx, logs.ShardSkipObjectFromResyncContainerDeleted, zap.Stringer("address", addr))
 			return nil
 		}
 		isIndexedContainer = info.Indexed
@@ -386,7 +386,7 @@ func (s *Shard) Close() error {
 	for _, component := range components {
 		if err := component.Close(); err != nil {
 			lastErr = err
-			s.log.Error(logs.ShardCouldNotCloseShardComponent, zap.Error(err))
+			s.log.Error(context.Background(), logs.ShardCouldNotCloseShardComponent, zap.Error(err))
 		}
 	}
 
@@ -424,7 +424,7 @@ func (s *Shard) Reload(ctx context.Context, opts ...Option) error {
 	ok, err := s.metaBase.Reload(c.metaOpts...)
 	if err != nil {
 		if errors.Is(err, meta.ErrDegradedMode) {
-			s.log.Error(logs.ShardCantOpenMetabaseMoveToADegradedMode, zap.Error(err))
+			s.log.Error(ctx, logs.ShardCantOpenMetabaseMoveToADegradedMode, zap.Error(err))
 			_ = s.setMode(mode.DegradedReadOnly)
 		}
 		return err
@@ -440,7 +440,7 @@ func (s *Shard) Reload(ctx context.Context, opts ...Option) error {
 			err = s.metaBase.Init()
 		}
 		if err != nil {
-			s.log.Error(logs.ShardCantInitializeMetabaseMoveToADegradedreadonlyMode, zap.Error(err))
+			s.log.Error(ctx, logs.ShardCantInitializeMetabaseMoveToADegradedreadonlyMode, zap.Error(err))
 			_ = s.setMode(mode.DegradedReadOnly)
 			return err
 		}
diff --git a/pkg/local_object_storage/shard/delete.go b/pkg/local_object_storage/shard/delete.go
index c898fdf41..f62cecd56 100644
--- a/pkg/local_object_storage/shard/delete.go
+++ b/pkg/local_object_storage/shard/delete.go
@@ -95,7 +95,7 @@ func (s *Shard) validateWritecacheDoesntContainObject(ctx context.Context, addr
 	}
 	_, err := s.writeCache.Head(ctx, addr)
 	if err == nil {
-		s.log.Warn(logs.ObjectRemovalFailureExistsInWritecache, zap.Stringer("object_address", addr))
+		s.log.Warn(ctx, logs.ObjectRemovalFailureExistsInWritecache, zap.Stringer("object_address", addr))
 		return fmt.Errorf("object %s must be flushed from writecache", addr)
 	}
 	if client.IsErrObjectNotFound(err) {
@@ -110,7 +110,7 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error
 
 	res, err := s.metaBase.StorageID(ctx, sPrm)
 	if err != nil {
-		s.log.Debug(logs.StorageIDRetrievalFailure,
+		s.log.Debug(ctx, logs.StorageIDRetrievalFailure,
 			zap.Stringer("object", addr),
 			zap.String("error", err.Error()),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
@@ -130,7 +130,7 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error
 
 	_, err = s.blobStor.Delete(ctx, delPrm)
 	if err != nil && !client.IsErrObjectNotFound(err) {
-		s.log.Debug(logs.ObjectRemovalFailureBlobStor,
+		s.log.Debug(ctx, logs.ObjectRemovalFailureBlobStor,
 			zap.Stringer("object_address", addr),
 			zap.String("error", err.Error()),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go
index d605746e8..6fabf7103 100644
--- a/pkg/local_object_storage/shard/gc.go
+++ b/pkg/local_object_storage/shard/gc.go
@@ -131,7 +131,7 @@ type gcCfg struct {
 func defaultGCCfg() gcCfg {
 	return gcCfg{
 		removerInterval: 10 * time.Second,
-		log:             &logger.Logger{Logger: zap.L()},
+		log:             logger.NewLoggerWrapper(zap.L()),
 		workerPoolInit: func(int) util.WorkerPool {
 			return nil
 		},
@@ -161,14 +161,14 @@ func (gc *gc) listenEvents(ctx context.Context) {
 	for {
 		select {
 		case <-gc.stopChannel:
-			gc.log.Warn(logs.ShardStopEventListenerByClosedStopChannel)
+			gc.log.Warn(ctx, logs.ShardStopEventListenerByClosedStopChannel)
 			return
 		case <-ctx.Done():
-			gc.log.Warn(logs.ShardStopEventListenerByContext)
+			gc.log.Warn(ctx, logs.ShardStopEventListenerByContext)
 			return
 		case event, ok := <-gc.eventChan:
 			if !ok {
-				gc.log.Warn(logs.ShardStopEventListenerByClosedEventChannel)
+				gc.log.Warn(ctx, logs.ShardStopEventListenerByClosedEventChannel)
 				return
 			}
 
@@ -204,7 +204,7 @@ func (gc *gc) handleEvent(ctx context.Context, event Event) {
 			h(runCtx, event)
 		})
 		if err != nil {
-			gc.log.Warn(logs.ShardCouldNotSubmitGCJobToWorkerPool,
+			gc.log.Warn(ctx, logs.ShardCouldNotSubmitGCJobToWorkerPool,
 				zap.String("error", err.Error()),
 			)
 
@@ -222,7 +222,7 @@ func (gc *gc) releaseResources() {
 	// because it is possible that we close it earlier than we stop writing.
 	// It is ok to keep it opened.
 
-	gc.log.Debug(logs.ShardGCIsStopped)
+	gc.log.Debug(context.Background(), logs.ShardGCIsStopped)
 }
 
 func (gc *gc) tickRemover(ctx context.Context) {
@@ -263,7 +263,7 @@ func (gc *gc) stop() {
 		close(gc.stopChannel)
 	})
 
-	gc.log.Info(logs.ShardWaitingForGCWorkersToStop)
+	gc.log.Info(context.Background(), logs.ShardWaitingForGCWorkersToStop)
 	gc.wg.Wait()
 }
 
@@ -286,8 +286,8 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) {
 		return
 	}
 
-	s.log.Debug(logs.ShardGCRemoveGarbageStarted)
-	defer s.log.Debug(logs.ShardGCRemoveGarbageCompleted)
+	s.log.Debug(ctx, logs.ShardGCRemoveGarbageStarted)
+	defer s.log.Debug(ctx, logs.ShardGCRemoveGarbageCompleted)
 
 	buf := make([]oid.Address, 0, s.rmBatchSize)
 
@@ -312,7 +312,7 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) {
 	// (no more than s.rmBatchSize objects)
 	err := s.metaBase.IterateOverGarbage(ctx, iterPrm)
 	if err != nil {
-		s.log.Warn(logs.ShardIteratorOverMetabaseGraveyardFailed,
+		s.log.Warn(ctx, logs.ShardIteratorOverMetabaseGraveyardFailed,
 			zap.String("error", err.Error()),
 		)
 
@@ -333,7 +333,7 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) {
 	result.success = true
 
 	if err != nil {
-		s.log.Warn(logs.ShardCouldNotDeleteTheObjects,
+		s.log.Warn(ctx, logs.ShardCouldNotDeleteTheObjects,
 			zap.String("error", err.Error()),
 		)
 		result.success = false
@@ -356,8 +356,8 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) {
 		s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeRegular)
 	}()
 
-	s.log.Debug(logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", e.(newEpoch).epoch))
-	defer s.log.Debug(logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", e.(newEpoch).epoch))
+	s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", e.(newEpoch).epoch))
+	defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", e.(newEpoch).epoch))
 
 	workersCount, batchSize := s.getExpiredObjectsParameters()
 
@@ -396,7 +396,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) {
 	})
 
 	if err = errGroup.Wait(); err != nil {
-		s.log.Warn(logs.ShardIteratorOverExpiredObjectsFailed, zap.String("error", err.Error()))
+		s.log.Warn(ctx, logs.ShardIteratorOverExpiredObjectsFailed, zap.String("error", err.Error()))
 	}
 }
 
@@ -416,7 +416,7 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address)
 
 	expired, err := s.getExpiredWithLinked(ctx, expired)
 	if err != nil {
-		s.log.Warn(logs.ShardGCFailedToGetExpiredWithLinked, zap.Error(err))
+		s.log.Warn(ctx, logs.ShardGCFailedToGetExpiredWithLinked, zap.Error(err))
 		return
 	}
 
@@ -428,7 +428,7 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address)
 	// inhume the collected objects
 	res, err := s.metaBase.Inhume(ctx, inhumePrm)
 	if err != nil {
-		s.log.Warn(logs.ShardCouldNotInhumeTheObjects,
+		s.log.Warn(ctx, logs.ShardCouldNotInhumeTheObjects,
 			zap.String("error", err.Error()),
 		)
 
@@ -473,8 +473,8 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
 	epoch := e.(newEpoch).epoch
 	log := s.log.With(zap.Uint64("epoch", epoch))
 
-	log.Debug(logs.ShardStartedExpiredTombstonesHandling)
-	defer log.Debug(logs.ShardFinishedExpiredTombstonesHandling)
+	log.Debug(ctx, logs.ShardStartedExpiredTombstonesHandling)
+	defer log.Debug(ctx, logs.ShardFinishedExpiredTombstonesHandling)
 
 	const tssDeleteBatch = 50
 	tss := make([]meta.TombstonedObject, 0, tssDeleteBatch)
@@ -492,12 +492,12 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
 	})
 
 	for {
-		log.Debug(logs.ShardIteratingTombstones)
+		log.Debug(ctx, logs.ShardIteratingTombstones)
 
 		s.m.RLock()
 
 		if s.info.Mode.NoMetabase() {
-			s.log.Debug(logs.ShardShardIsInADegradedModeSkipCollectingExpiredTombstones)
+			s.log.Debug(ctx, logs.ShardShardIsInADegradedModeSkipCollectingExpiredTombstones)
 			s.m.RUnlock()
 
 			return
@@ -505,7 +505,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
 
 		err = s.metaBase.IterateOverGraveyard(ctx, iterPrm)
 		if err != nil {
-			log.Error(logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
+			log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
 			s.m.RUnlock()
 
 			return
@@ -524,7 +524,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
 			}
 		}
 
-		log.Debug(logs.ShardHandlingExpiredTombstonesBatch, zap.Int("number", len(tssExp)))
+		log.Debug(ctx, logs.ShardHandlingExpiredTombstonesBatch, zap.Int("number", len(tssExp)))
 		if len(tssExp) > 0 {
 			s.expiredTombstonesCallback(ctx, tssExp)
 		}
@@ -543,8 +543,8 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) {
 		s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeLock)
 	}()
 
-	s.log.Debug(logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", e.(newEpoch).epoch))
-	defer s.log.Debug(logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", e.(newEpoch).epoch))
+	s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", e.(newEpoch).epoch))
+	defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", e.(newEpoch).epoch))
 
 	workersCount, batchSize := s.getExpiredObjectsParameters()
 
@@ -584,7 +584,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) {
 	})
 
 	if err = errGroup.Wait(); err != nil {
-		s.log.Warn(logs.ShardIteratorOverExpiredLocksFailed, zap.String("error", err.Error()))
+		s.log.Warn(ctx, logs.ShardIteratorOverExpiredLocksFailed, zap.String("error", err.Error()))
 	}
 }
 
@@ -645,7 +645,7 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.Tombston
 	// inhume tombstones
 	res, err := s.metaBase.Inhume(ctx, pInhume)
 	if err != nil {
-		s.log.Warn(logs.ShardCouldNotMarkTombstonesAsGarbage,
+		s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage,
 			zap.String("error", err.Error()),
 		)
 
@@ -668,7 +668,7 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.Tombston
 	// from graveyard
 	err = s.metaBase.DropGraves(ctx, tss)
 	if err != nil {
-		s.log.Warn(logs.ShardCouldNotDropExpiredGraveRecords, zap.Error(err))
+		s.log.Warn(ctx, logs.ShardCouldNotDropExpiredGraveRecords, zap.Error(err))
 	}
 }
 
@@ -680,7 +680,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
 	}
 	unlocked, err := s.metaBase.FreeLockedBy(lockers)
 	if err != nil {
-		s.log.Warn(logs.ShardFailureToUnlockObjects,
+		s.log.Warn(ctx, logs.ShardFailureToUnlockObjects,
 			zap.String("error", err.Error()),
 		)
 
@@ -693,7 +693,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
 
 	res, err := s.metaBase.Inhume(ctx, pInhume)
 	if err != nil {
-		s.log.Warn(logs.ShardFailureToMarkLockersAsGarbage,
+		s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage,
 			zap.String("error", err.Error()),
 		)
 
@@ -718,7 +718,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
 func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unlocked []oid.Address) {
 	expiredUnlocked, err := s.selectExpired(ctx, epoch, unlocked)
 	if err != nil {
-		s.log.Warn(logs.ShardFailureToGetExpiredUnlockedObjects, zap.Error(err))
+		s.log.Warn(ctx, logs.ShardFailureToGetExpiredUnlockedObjects, zap.Error(err))
 		return
 	}
 
@@ -737,7 +737,7 @@ func (s *Shard) HandleDeletedLocks(lockers []oid.Address) {
 
 	_, err := s.metaBase.FreeLockedBy(lockers)
 	if err != nil {
-		s.log.Warn(logs.ShardFailureToUnlockObjects,
+		s.log.Warn(context.Background(), logs.ShardFailureToUnlockObjects,
 			zap.String("error", err.Error()),
 		)
 
@@ -756,8 +756,8 @@ func (s *Shard) collectExpiredMetrics(ctx context.Context, e Event) {
 
 	epoch := e.(newEpoch).epoch
 
-	s.log.Debug(logs.ShardGCCollectingExpiredMetricsStarted, zap.Uint64("epoch", epoch))
-	defer s.log.Debug(logs.ShardGCCollectingExpiredMetricsCompleted, zap.Uint64("epoch", epoch))
+	s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsStarted, zap.Uint64("epoch", epoch))
+	defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsCompleted, zap.Uint64("epoch", epoch))
 
 	s.collectExpiredContainerSizeMetrics(ctx, epoch)
 	s.collectExpiredContainerCountMetrics(ctx, epoch)
@@ -766,7 +766,7 @@ func (s *Shard) collectExpiredMetrics(ctx context.Context, e Event) {
 func (s *Shard) collectExpiredContainerSizeMetrics(ctx context.Context, epoch uint64) {
 	ids, err := s.metaBase.ZeroSizeContainers(ctx)
 	if err != nil {
-		s.log.Warn(logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err))
+		s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err))
 		return
 	}
 	if len(ids) == 0 {
@@ -778,7 +778,7 @@ func (s *Shard) collectExpiredContainerSizeMetrics(ctx context.Context, epoch ui
 func (s *Shard) collectExpiredContainerCountMetrics(ctx context.Context, epoch uint64) {
 	ids, err := s.metaBase.ZeroCountContainers(ctx)
 	if err != nil {
-		s.log.Warn(logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err))
+		s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err))
 		return
 	}
 	if len(ids) == 0 {
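
The gc.go hunks above follow one calling convention: epoch- and event-driven paths thread the caller's ctx into every log call, while teardown paths that have no caller context (releaseResources, stop, HandleDeletedLocks) fall back to context.Background(). A small self-contained sketch of that convention, with a placeholder logger interface standing in for the node's logger wrapper:

package main

import (
	"context"
	"sync"
)

// ctxLogger only mirrors the signature shape used throughout the patch;
// it is not the repository's logger type.
type ctxLogger interface {
	Debug(ctx context.Context, msg string)
	Info(ctx context.Context, msg string)
}

type exampleGC struct {
	log ctxLogger
	wg  sync.WaitGroup
}

// Event-driven work has a caller context, so it is passed straight through.
func (gc *exampleGC) tick(ctx context.Context) {
	gc.log.Debug(ctx, "gc cycle started")
}

// Shutdown has no caller context, so logging uses context.Background().
func (gc *exampleGC) stop() {
	gc.log.Info(context.Background(), "waiting for gc workers to stop")
	gc.wg.Wait()
}

func main() {}
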
diff --git a/pkg/local_object_storage/shard/get.go b/pkg/local_object_storage/shard/get.go
index d1c393613..7a31a705e 100644
--- a/pkg/local_object_storage/shard/get.go
+++ b/pkg/local_object_storage/shard/get.go
@@ -144,7 +144,7 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta
 			return nil, false, logicerr.Wrap(new(apistatus.ObjectNotFound))
 		}
 	} else {
-		s.log.Warn(logs.ShardFetchingObjectWithoutMeta, zap.Stringer("addr", addr))
+		s.log.Warn(ctx, logs.ShardFetchingObjectWithoutMeta, zap.Stringer("addr", addr))
 	}
 
 	if s.hasWriteCache() {
@@ -153,12 +153,12 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta
 			return res, false, err
 		}
 		if client.IsErrObjectNotFound(err) {
-			s.log.Debug(logs.ShardObjectIsMissingInWritecache,
+			s.log.Debug(ctx, logs.ShardObjectIsMissingInWritecache,
 				zap.Stringer("addr", addr),
 				zap.Bool("skip_meta", skipMeta),
 				zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 		} else {
-			s.log.Error(logs.ShardFailedToFetchObjectFromWritecache,
+			s.log.Error(ctx, logs.ShardFailedToFetchObjectFromWritecache,
 				zap.Error(err),
 				zap.Stringer("addr", addr),
 				zap.Bool("skip_meta", skipMeta),
diff --git a/pkg/local_object_storage/shard/id.go b/pkg/local_object_storage/shard/id.go
index a72313498..e27dc0733 100644
--- a/pkg/local_object_storage/shard/id.go
+++ b/pkg/local_object_storage/shard/id.go
@@ -5,7 +5,6 @@ import (
 	"fmt"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
 	"github.com/mr-tron/base58"
 	"go.uber.org/zap"
 )
@@ -50,7 +49,7 @@ func (s *Shard) UpdateID() (err error) {
 		s.writeCache.GetMetrics().SetShardID(shardID)
 	}
 
-	s.log = &logger.Logger{Logger: s.log.With(zap.Stringer("shard_id", s.info.ID))}
+	s.log = s.log.With(zap.Stringer("shard_id", s.info.ID))
 	s.metaBase.SetLogger(s.log)
 	s.blobStor.SetLogger(s.log)
 	if s.hasWriteCache() {
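
The shard/id.go hunk above drops the manual &logger.Logger{Logger: ...} wrapping in favour of the wrapper's own With method, and other hunks construct the wrapper via logger.NewLoggerWrapper. A hedged sketch of what such a wrapper could look like follows; the field layout and the way ctx is handled inside Info are assumptions for illustration, not the actual pkg/util/logger code.

package logger

import (
	"context"

	"go.uber.org/zap"
)

// Logger is a thin wrapper around zap so call sites never build the struct
// literal by hand.
type Logger struct {
	z *zap.Logger
}

// NewLoggerWrapper wraps an existing zap logger.
func NewLoggerWrapper(z *zap.Logger) *Logger {
	return &Logger{z: z}
}

// With returns a copy of the wrapper with extra fields attached, mirroring
// zap's own With while keeping the wrapper type at the call site.
func (l *Logger) With(fields ...zap.Field) *Logger {
	return &Logger{z: l.z.With(fields...)}
}

// Info accepts ctx so request-scoped values (for example a trace ID) could be
// appended by the wrapper; this sketch simply forwards to zap.
func (l *Logger) Info(ctx context.Context, msg string, fields ...zap.Field) {
	l.z.Info(msg, fields...)
}
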
diff --git a/pkg/local_object_storage/shard/inhume.go b/pkg/local_object_storage/shard/inhume.go
index 746177c3a..984c54fbc 100644
--- a/pkg/local_object_storage/shard/inhume.go
+++ b/pkg/local_object_storage/shard/inhume.go
@@ -109,7 +109,7 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
 			return InhumeRes{}, ErrLockObjectRemoval
 		}
 
-		s.log.Debug(logs.ShardCouldNotMarkObjectToDeleteInMetabase,
+		s.log.Debug(ctx, logs.ShardCouldNotMarkObjectToDeleteInMetabase,
 			zap.String("error", err.Error()),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
 		)
diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go
index 8d09974b8..7b267d2e4 100644
--- a/pkg/local_object_storage/shard/list.go
+++ b/pkg/local_object_storage/shard/list.go
@@ -122,7 +122,7 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) {
 
 		sRes, err := s.metaBase.Select(ctx, sPrm) // consider making List in metabase
 		if err != nil {
-			s.log.Debug(logs.ShardCantSelectAllObjects,
+			s.log.Debug(ctx, logs.ShardCantSelectAllObjects,
 				zap.Stringer("cid", lst[i]),
 				zap.String("error", err.Error()),
 				zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
diff --git a/pkg/local_object_storage/shard/lock_test.go b/pkg/local_object_storage/shard/lock_test.go
index 9ce95feb1..595afb60e 100644
--- a/pkg/local_object_storage/shard/lock_test.go
+++ b/pkg/local_object_storage/shard/lock_test.go
@@ -30,7 +30,7 @@ func TestShard_Lock(t *testing.T) {
 	rootPath := t.TempDir()
 	opts := []Option{
 		WithID(NewIDFromBytes([]byte{})),
-		WithLogger(&logger.Logger{Logger: zap.NewNop()}),
+		WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
 		WithBlobStorOptions(
 			blobstor.WithStorages([]blobstor.SubStorage{
 				{
diff --git a/pkg/local_object_storage/shard/mode.go b/pkg/local_object_storage/shard/mode.go
index d90a5f4b6..98b4c37b2 100644
--- a/pkg/local_object_storage/shard/mode.go
+++ b/pkg/local_object_storage/shard/mode.go
@@ -1,6 +1,8 @@
 package shard
 
 import (
+	"context"
+
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
@@ -26,7 +28,7 @@ func (s *Shard) SetMode(m mode.Mode) error {
 }
 
 func (s *Shard) setMode(m mode.Mode) error {
-	s.log.Info(logs.ShardSettingShardMode,
+	s.log.Info(context.Background(), logs.ShardSettingShardMode,
 		zap.Stringer("old_mode", s.info.Mode),
 		zap.Stringer("new_mode", m))
 
@@ -67,7 +69,7 @@ func (s *Shard) setMode(m mode.Mode) error {
 	s.info.Mode = m
 	s.metricsWriter.SetMode(s.info.Mode)
 
-	s.log.Info(logs.ShardShardModeSetSuccessfully,
+	s.log.Info(context.Background(), logs.ShardShardModeSetSuccessfully,
 		zap.Stringer("mode", s.info.Mode))
 	return nil
 }
diff --git a/pkg/local_object_storage/shard/put.go b/pkg/local_object_storage/shard/put.go
index 24cc75154..50125a88d 100644
--- a/pkg/local_object_storage/shard/put.go
+++ b/pkg/local_object_storage/shard/put.go
@@ -75,7 +75,7 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
 	}
 	if err != nil || !tryCache {
 		if err != nil {
-			s.log.Debug(logs.ShardCantPutObjectToTheWritecacheTryingBlobstor,
+			s.log.Debug(ctx, logs.ShardCantPutObjectToTheWritecacheTryingBlobstor,
 				zap.String("err", err.Error()))
 		}
 
diff --git a/pkg/local_object_storage/shard/rebuild.go b/pkg/local_object_storage/shard/rebuild.go
index 0d83caa0c..124b72a5c 100644
--- a/pkg/local_object_storage/shard/rebuild.go
+++ b/pkg/local_object_storage/shard/rebuild.go
@@ -102,11 +102,11 @@ func runRebuild(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *lo
 		return
 	default:
 	}
-	log.Info(logs.BlobstoreRebuildStarted)
+	log.Info(ctx, logs.BlobstoreRebuildStarted)
 	if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, limiter, fillPercent); err != nil {
-		log.Warn(logs.FailedToRebuildBlobstore, zap.Error(err))
+		log.Warn(ctx, logs.FailedToRebuildBlobstore, zap.Error(err))
 	} else {
-		log.Info(logs.BlobstoreRebuildCompletedSuccessfully)
+		log.Info(ctx, logs.BlobstoreRebuildCompletedSuccessfully)
 	}
 }
 
@@ -138,7 +138,7 @@ func (r *rebuilder) Stop(log *logger.Logger) {
 	r.wg.Wait()
 	r.cancel = nil
 	r.done = nil
-	log.Info(logs.BlobstoreRebuildStopped)
+	log.Info(context.Background(), logs.BlobstoreRebuildStopped)
 }
 
 var errMBIsNotAvailable = errors.New("metabase is not available")
diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go
index 413bfd2f7..3a06fe8a7 100644
--- a/pkg/local_object_storage/shard/shard.go
+++ b/pkg/local_object_storage/shard/shard.go
@@ -103,7 +103,7 @@ type cfg struct {
 func defaultCfg() *cfg {
 	return &cfg{
 		rmBatchSize:                 100,
-		log:                         &logger.Logger{Logger: zap.L()},
+		log:                         logger.NewLoggerWrapper(zap.L()),
 		gcCfg:                       defaultGCCfg(),
 		reportErrorFunc:             func(string, string, error) {},
 		zeroSizeContainersCallback:  func(context.Context, []cid.ID) {},
@@ -401,7 +401,7 @@ func (s *Shard) updateMetrics(ctx context.Context) {
 
 	cc, err := s.metaBase.ObjectCounters()
 	if err != nil {
-		s.log.Warn(logs.ShardMetaObjectCounterRead,
+		s.log.Warn(ctx, logs.ShardMetaObjectCounterRead,
 			zap.Error(err),
 		)
 
@@ -414,7 +414,7 @@ func (s *Shard) updateMetrics(ctx context.Context) {
 
 	cnrList, err := s.metaBase.Containers(ctx)
 	if err != nil {
-		s.log.Warn(logs.ShardMetaCantReadContainerList, zap.Error(err))
+		s.log.Warn(ctx, logs.ShardMetaCantReadContainerList, zap.Error(err))
 		return
 	}
 
@@ -423,7 +423,7 @@ func (s *Shard) updateMetrics(ctx context.Context) {
 	for i := range cnrList {
 		size, err := s.metaBase.ContainerSize(cnrList[i])
 		if err != nil {
-			s.log.Warn(logs.ShardMetaCantReadContainerSize,
+			s.log.Warn(ctx, logs.ShardMetaCantReadContainerSize,
 				zap.String("cid", cnrList[i].EncodeToString()),
 				zap.Error(err))
 			continue
@@ -436,7 +436,7 @@ func (s *Shard) updateMetrics(ctx context.Context) {
 
 	contCount, err := s.metaBase.ContainerCounters(ctx)
 	if err != nil {
-		s.log.Warn(logs.FailedToGetContainerCounters, zap.Error(err))
+		s.log.Warn(ctx, logs.FailedToGetContainerCounters, zap.Error(err))
 		return
 	}
 	for contID, count := range contCount.Counts {
diff --git a/pkg/local_object_storage/shard/writecache.go b/pkg/local_object_storage/shard/writecache.go
index a6de07f03..f655e477a 100644
--- a/pkg/local_object_storage/shard/writecache.go
+++ b/pkg/local_object_storage/shard/writecache.go
@@ -124,12 +124,12 @@ func (s *Shard) SealWriteCache(ctx context.Context, p SealWriteCachePrm) error {
 			close(started)
 			defer cleanup()
 
-			s.log.Info(logs.StartedWritecacheSealAsync)
+			s.log.Info(ctx, logs.StartedWritecacheSealAsync)
 			if err := s.writeCache.Seal(ctx, prm); err != nil {
-				s.log.Warn(logs.FailedToSealWritecacheAsync, zap.Error(err))
+				s.log.Warn(ctx, logs.FailedToSealWritecacheAsync, zap.Error(err))
 				return
 			}
-			s.log.Info(logs.WritecacheSealCompletedAsync)
+			s.log.Info(ctx, logs.WritecacheSealCompletedAsync)
 		}()
 		select {
 		case <-ctx.Done():
diff --git a/pkg/local_object_storage/writecache/cache.go b/pkg/local_object_storage/writecache/cache.go
index b97fc5856..098872e08 100644
--- a/pkg/local_object_storage/writecache/cache.go
+++ b/pkg/local_object_storage/writecache/cache.go
@@ -55,7 +55,7 @@ func New(opts ...Option) Cache {
 		counter: fstree.NewSimpleCounter(),
 
 		options: options{
-			log:            &logger.Logger{Logger: zap.NewNop()},
+			log:            logger.NewLoggerWrapper(zap.NewNop()),
 			maxObjectSize:  defaultMaxObjectSize,
 			workersCount:   defaultFlushWorkersCount,
 			maxCacheSize:   defaultMaxCacheSize,
diff --git a/pkg/local_object_storage/writecache/delete.go b/pkg/local_object_storage/writecache/delete.go
index dda284439..94a0a40db 100644
--- a/pkg/local_object_storage/writecache/delete.go
+++ b/pkg/local_object_storage/writecache/delete.go
@@ -46,7 +46,7 @@ func (c *cache) Delete(ctx context.Context, addr oid.Address) error {
 	storageType = StorageTypeFSTree
 	_, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr})
 	if err == nil {
-		storagelog.Write(c.log,
+		storagelog.Write(ctx, c.log,
 			storagelog.AddressField(addr.EncodeToString()),
 			storagelog.StorageTypeField(wcStorageType),
 			storagelog.OpField("fstree DELETE"),
diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go
index bfa6aacb0..123eb4abc 100644
--- a/pkg/local_object_storage/writecache/flush.go
+++ b/pkg/local_object_storage/writecache/flush.go
@@ -80,7 +80,7 @@ func (c *cache) pushToFlushQueue(ctx context.Context, fl *flushLimiter) {
 				}
 			})
 			if err != nil {
-				c.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration, zap.Error(err))
+				c.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, zap.Error(err))
 			}
 
 			c.modeMtx.RUnlock()
@@ -130,7 +130,7 @@ func (c *cache) reportFlushError(msg string, addr string, err error) {
 	if c.reportError != nil {
 		c.reportError(msg, err)
 	} else {
-		c.log.Error(msg,
+		c.log.Error(context.Background(), msg,
 			zap.String("address", addr),
 			zap.Error(err))
 	}
diff --git a/pkg/local_object_storage/writecache/flush_test.go b/pkg/local_object_storage/writecache/flush_test.go
index 59a4e4895..26f47e82e 100644
--- a/pkg/local_object_storage/writecache/flush_test.go
+++ b/pkg/local_object_storage/writecache/flush_test.go
@@ -40,7 +40,7 @@ func TestFlush(t *testing.T) {
 		cnt := &atomic.Uint32{}
 		return WithReportErrorFunc(func(msg string, err error) {
 			cnt.Add(1)
-			testlogger.Warn(msg, zap.Uint32("error_count", cnt.Load()), zap.Error(err))
+			testlogger.Warn(context.Background(), msg, zap.Uint32("error_count", cnt.Load()), zap.Error(err))
 		}), cnt
 	}
 
diff --git a/pkg/local_object_storage/writecache/mode.go b/pkg/local_object_storage/writecache/mode.go
index d12dd603b..26658e9b8 100644
--- a/pkg/local_object_storage/writecache/mode.go
+++ b/pkg/local_object_storage/writecache/mode.go
@@ -60,7 +60,7 @@ func (c *cache) setMode(ctx context.Context, m mode.Mode, prm setModePrm) error
 	// flushCh is populated by `flush` with `modeMtx` taken, thus waiting until it is empty
 	// guarantees that there are no in-fly operations.
 	for len(c.flushCh) != 0 {
-		c.log.Info(logs.WritecacheWaitingForChannelsToFlush)
+		c.log.Info(ctx, logs.WritecacheWaitingForChannelsToFlush)
 		time.Sleep(time.Second)
 	}
 
@@ -110,7 +110,7 @@ func (c *cache) closeStorage(ctx context.Context, shrink bool) error {
 			return fmt.Errorf("failed to remove write-cache files: %w", err)
 		}
 	} else {
-		c.log.Info(logs.WritecacheShrinkSkippedNotEmpty)
+		c.log.Info(ctx, logs.WritecacheShrinkSkippedNotEmpty)
 	}
 	return nil
 }
diff --git a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/options.go
index 66ac7805c..25c1694a8 100644
--- a/pkg/local_object_storage/writecache/options.go
+++ b/pkg/local_object_storage/writecache/options.go
@@ -41,7 +41,7 @@ type options struct {
 // WithLogger sets logger.
 func WithLogger(log *logger.Logger) Option {
 	return func(o *options) {
-		o.log = &logger.Logger{Logger: log.With(zap.String("component", "WriteCache"))}
+		o.log = log.With(zap.String("component", "WriteCache"))
 	}
 }
 
diff --git a/pkg/local_object_storage/writecache/put.go b/pkg/local_object_storage/writecache/put.go
index c53067bea..7da5c4d3a 100644
--- a/pkg/local_object_storage/writecache/put.go
+++ b/pkg/local_object_storage/writecache/put.go
@@ -68,7 +68,7 @@ func (c *cache) putBig(ctx context.Context, prm common.PutPrm) error {
 		return err
 	}
 
-	storagelog.Write(c.log,
+	storagelog.Write(ctx, c.log,
 		storagelog.AddressField(prm.Address.EncodeToString()),
 		storagelog.StorageTypeField(wcStorageType),
 		storagelog.OpField("fstree PUT"),
diff --git a/pkg/local_object_storage/writecache/storage.go b/pkg/local_object_storage/writecache/storage.go
index 2e52e5b20..a0e236cb7 100644
--- a/pkg/local_object_storage/writecache/storage.go
+++ b/pkg/local_object_storage/writecache/storage.go
@@ -43,9 +43,9 @@ func (c *cache) openStore(mod mode.ComponentMode) error {
 func (c *cache) deleteFromDisk(ctx context.Context, addr oid.Address, size uint64) {
 	_, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr, Size: size})
 	if err != nil && !client.IsErrObjectNotFound(err) {
-		c.log.Error(logs.WritecacheCantRemoveObjectFromWritecache, zap.Error(err))
+		c.log.Error(ctx, logs.WritecacheCantRemoveObjectFromWritecache, zap.Error(err))
 	} else if err == nil {
-		storagelog.Write(c.log,
+		storagelog.Write(ctx, c.log,
 			storagelog.AddressField(addr.EncodeToString()),
 			storagelog.StorageTypeField(wcStorageType),
 			storagelog.OpField("fstree DELETE"),
diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go
index 933f1039f..12c0e0842 100644
--- a/pkg/morph/client/client.go
+++ b/pkg/morph/client/client.go
@@ -199,7 +199,7 @@ func (c *Client) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string,
 		return 0, fmt.Errorf("could not invoke %s: %w", method, err)
 	}
 
-	c.logger.Debug(logs.ClientNeoClientInvoke,
+	c.logger.Debug(context.Background(), logs.ClientNeoClientInvoke,
 		zap.String("method", method),
 		zap.Uint32("vub", vub),
 		zap.Stringer("tx_hash", txHash.Reverse()))
@@ -328,7 +328,7 @@ func (c *Client) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error
 		return err
 	}
 
-	c.logger.Debug(logs.ClientNativeGasTransferInvoke,
+	c.logger.Debug(context.Background(), logs.ClientNativeGasTransferInvoke,
 		zap.String("to", receiver.StringLE()),
 		zap.Stringer("tx_hash", txHash.Reverse()),
 		zap.Uint32("vub", vub))
@@ -362,7 +362,7 @@ func (c *Client) BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8
 		return err
 	}
 
-	c.logger.Debug(logs.ClientBatchGasTransferInvoke,
+	c.logger.Debug(context.Background(), logs.ClientBatchGasTransferInvoke,
 		zap.Strings("to", receiversLog),
 		zap.Stringer("tx_hash", txHash.Reverse()),
 		zap.Uint32("vub", vub))
@@ -389,7 +389,7 @@ func (c *Client) Wait(ctx context.Context, n uint32) error {
 
 	height, err = c.rpcActor.GetBlockCount()
 	if err != nil {
-		c.logger.Error(logs.ClientCantGetBlockchainHeight,
+		c.logger.Error(context.Background(), logs.ClientCantGetBlockchainHeight,
 			zap.String("error", err.Error()))
 		return nil
 	}
@@ -403,7 +403,7 @@ func (c *Client) Wait(ctx context.Context, n uint32) error {
 
 		newHeight, err = c.rpcActor.GetBlockCount()
 		if err != nil {
-			c.logger.Error(logs.ClientCantGetBlockchainHeight243,
+			c.logger.Error(context.Background(), logs.ClientCantGetBlockchainHeight243,
 				zap.String("error", err.Error()))
 			return nil
 		}
diff --git a/pkg/morph/client/constructor.go b/pkg/morph/client/constructor.go
index 08d16deb4..d061747bb 100644
--- a/pkg/morph/client/constructor.go
+++ b/pkg/morph/client/constructor.go
@@ -61,7 +61,7 @@ var ErrNoHealthyEndpoint = errors.New("no healthy endpoint")
 func defaultConfig() *cfg {
 	return &cfg{
 		dialTimeout:  defaultDialTimeout,
-		logger:       &logger.Logger{Logger: zap.L()},
+		logger:       logger.NewLoggerWrapper(zap.L()),
 		metrics:      morphmetrics.NoopRegister{},
 		waitInterval: defaultWaitInterval,
 		signer: &transaction.Signer{
@@ -130,10 +130,10 @@ func New(ctx context.Context, key *keys.PrivateKey, opts ...Option) (*Client, er
 	for cli.endpoints.curr, endpoint = range cli.endpoints.list {
 		cli.client, act, err = cli.newCli(ctx, endpoint)
 		if err != nil {
-			cli.logger.Warn(logs.FrostFSIRCouldntCreateRPCClientForEndpoint,
+			cli.logger.Warn(ctx, logs.FrostFSIRCouldntCreateRPCClientForEndpoint,
 				zap.Error(err), zap.String("endpoint", endpoint.Address))
 		} else {
-			cli.logger.Info(logs.FrostFSIRCreatedRPCClientForEndpoint,
+			cli.logger.Info(ctx, logs.FrostFSIRCreatedRPCClientForEndpoint,
 				zap.String("endpoint", endpoint.Address))
 			if cli.endpoints.curr > 0 && cli.cfg.switchInterval != 0 {
 				cli.switchIsActive.Store(true)
diff --git a/pkg/morph/client/multi.go b/pkg/morph/client/multi.go
index 10ed21582..708d3b39f 100644
--- a/pkg/morph/client/multi.go
+++ b/pkg/morph/client/multi.go
@@ -42,7 +42,7 @@ func (c *Client) SwitchRPC(ctx context.Context) bool {
 		newEndpoint := c.endpoints.list[c.endpoints.curr]
 		cli, act, err := c.newCli(ctx, newEndpoint)
 		if err != nil {
-			c.logger.Warn(logs.ClientCouldNotEstablishConnectionToTheSwitchedRPCNode,
+			c.logger.Warn(ctx, logs.ClientCouldNotEstablishConnectionToTheSwitchedRPCNode,
 				zap.String("endpoint", newEndpoint.Address),
 				zap.Error(err),
 			)
@@ -52,7 +52,7 @@ func (c *Client) SwitchRPC(ctx context.Context) bool {
 
 		c.cache.invalidate()
 
-		c.logger.Info(logs.ClientConnectionToTheNewRPCNodeHasBeenEstablished,
+		c.logger.Info(ctx, logs.ClientConnectionToTheNewRPCNodeHasBeenEstablished,
 			zap.String("endpoint", newEndpoint.Address))
 
 		c.client = cli
@@ -122,7 +122,7 @@ mainLoop:
 
 				cli, act, err := c.newCli(ctx, e)
 				if err != nil {
-					c.logger.Warn(logs.ClientCouldNotCreateClientToTheHigherPriorityNode,
+					c.logger.Warn(ctx, logs.ClientCouldNotCreateClientToTheHigherPriorityNode,
 						zap.String("endpoint", tryE),
 						zap.Error(err),
 					)
@@ -147,7 +147,7 @@ mainLoop:
 
 				c.switchLock.Unlock()
 
-				c.logger.Info(logs.ClientSwitchedToTheHigherPriorityRPC,
+				c.logger.Info(ctx, logs.ClientSwitchedToTheHigherPriorityRPC,
 					zap.String("endpoint", tryE))
 
 				return
diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go
index 2a500b31b..58c417fb1 100644
--- a/pkg/morph/client/notary.go
+++ b/pkg/morph/client/notary.go
@@ -1,6 +1,7 @@
 package client
 
 import (
+	"context"
 	"crypto/elliptic"
 	"encoding/binary"
 	"errors"
@@ -201,7 +202,7 @@ func (c *Client) depositNotary(amount fixedn.Fixed8, till int64) (util.Uint256,
 
 		// Transaction is already in mempool waiting to be processed.
 		// This is an expected situation if we restart the service.
-		c.logger.Info(logs.ClientNotaryDepositHasAlreadyBeenMade,
+		c.logger.Info(context.Background(), logs.ClientNotaryDepositHasAlreadyBeenMade,
 			zap.Int64("amount", int64(amount)),
 			zap.Int64("expire_at", till),
 			zap.Uint32("vub", vub),
@@ -209,7 +210,7 @@ func (c *Client) depositNotary(amount fixedn.Fixed8, till int64) (util.Uint256,
 		return util.Uint256{}, 0, nil
 	}
 
-	c.logger.Info(logs.ClientNotaryDepositInvoke,
+	c.logger.Info(context.Background(), logs.ClientNotaryDepositInvoke,
 		zap.Int64("amount", int64(amount)),
 		zap.Int64("expire_at", till),
 		zap.Uint32("vub", vub),
@@ -429,7 +430,7 @@ func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error {
 		return err
 	}
 
-	c.logger.Debug(logs.ClientNotaryRequestWithPreparedMainTXInvoked,
+	c.logger.Debug(context.Background(), logs.ClientNotaryRequestWithPreparedMainTXInvoked,
 		zap.String("tx_hash", mainH.StringLE()),
 		zap.Uint32("valid_until_block", untilActual),
 		zap.String("fallback_hash", fbH.StringLE()))
@@ -485,7 +486,7 @@ func (c *Client) notaryInvoke(committee, invokedByAlpha bool, contract util.Uint
 		return 0, err
 	}
 
-	c.logger.Debug(logs.ClientNotaryRequestInvoked,
+	c.logger.Debug(context.Background(), logs.ClientNotaryRequestInvoked,
 		zap.String("method", method),
 		zap.Uint32("valid_until_block", untilActual),
 		zap.String("tx_hash", mainH.StringLE()),
diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go
index dd3c7d216..03bba8ab9 100644
--- a/pkg/morph/event/listener.go
+++ b/pkg/morph/event/listener.go
@@ -144,7 +144,7 @@ func (l *listener) Listen(ctx context.Context) {
 		l.wg.Add(1)
 		defer l.wg.Done()
 		if err := l.listen(ctx, nil); err != nil {
-			l.log.Error(logs.EventCouldNotStartListenToEvents,
+			l.log.Error(ctx, logs.EventCouldNotStartListenToEvents,
 				zap.String("error", err.Error()),
 			)
 		}
@@ -162,7 +162,7 @@ func (l *listener) ListenWithError(ctx context.Context, intError chan<- error) {
 		l.wg.Add(1)
 		defer l.wg.Done()
 		if err := l.listen(ctx, intError); err != nil {
-			l.log.Error(logs.EventCouldNotStartListenToEvents,
+			l.log.Error(ctx, logs.EventCouldNotStartListenToEvents,
 				zap.String("error", err.Error()),
 			)
 			l.sendError(ctx, intError, err)
@@ -234,7 +234,7 @@ func (l *listener) sendError(ctx context.Context, intErr chan<- error, err error
 	// in the same routine when shutting down node.
 	select {
 	case <-ctx.Done():
-		l.log.Info(logs.EventStopEventListenerByContext,
+		l.log.Info(ctx, logs.EventStopEventListenerByContext,
 			zap.String("reason", ctx.Err().Error()),
 		)
 		return false
@@ -251,43 +251,43 @@ loop:
 		select {
 		case err := <-subErrCh:
 			if !l.sendError(ctx, intErr, err) {
-				l.log.Error(logs.EventStopEventListenerByError, zap.Error(err))
+				l.log.Error(ctx, logs.EventStopEventListenerByError, zap.Error(err))
 			}
 			break loop
 		case <-ctx.Done():
-			l.log.Info(logs.EventStopEventListenerByContext,
+			l.log.Info(ctx, logs.EventStopEventListenerByContext,
 				zap.String("reason", ctx.Err().Error()),
 			)
 			break loop
 		case notifyEvent, ok := <-chs.NotificationsCh:
 			if !ok {
-				l.log.Warn(logs.EventStopEventListenerByNotificationChannel)
+				l.log.Warn(ctx, logs.EventStopEventListenerByNotificationChannel)
 				l.sendError(ctx, intErr, errNotificationSubscrConnectionTerminated)
 				break loop
 			} else if notifyEvent == nil {
-				l.log.Warn(logs.EventNilNotificationEventWasCaught)
+				l.log.Warn(ctx, logs.EventNilNotificationEventWasCaught)
 				continue loop
 			}
 
 			l.handleNotifyEvent(notifyEvent)
 		case notaryEvent, ok := <-chs.NotaryRequestsCh:
 			if !ok {
-				l.log.Warn(logs.EventStopEventListenerByNotaryChannel)
+				l.log.Warn(ctx, logs.EventStopEventListenerByNotaryChannel)
 				l.sendError(ctx, intErr, errNotarySubscrConnectionTerminated)
 				break loop
 			} else if notaryEvent == nil {
-				l.log.Warn(logs.EventNilNotaryEventWasCaught)
+				l.log.Warn(ctx, logs.EventNilNotaryEventWasCaught)
 				continue loop
 			}
 
 			l.handleNotaryEvent(notaryEvent)
 		case b, ok := <-chs.BlockCh:
 			if !ok {
-				l.log.Warn(logs.EventStopEventListenerByBlockChannel)
+				l.log.Warn(ctx, logs.EventStopEventListenerByBlockChannel)
 				l.sendError(ctx, intErr, errBlockNotificationChannelClosed)
 				break loop
 			} else if b == nil {
-				l.log.Warn(logs.EventNilBlockWasCaught)
+				l.log.Warn(ctx, logs.EventNilBlockWasCaught)
 				continue loop
 			}
 
@@ -302,7 +302,7 @@ func (l *listener) handleBlockEvent(b *block.Block) {
 			l.blockHandlers[i](b)
 		}
 	}); err != nil {
-		l.log.Warn(logs.EventListenerWorkerPoolDrained,
+		l.log.Warn(context.Background(), logs.EventListenerWorkerPoolDrained,
 			zap.Int("capacity", l.pool.Cap()))
 	}
 }
@@ -311,7 +311,7 @@ func (l *listener) handleNotaryEvent(notaryEvent *result.NotaryRequestEvent) {
 	if err := l.pool.Submit(func() {
 		l.parseAndHandleNotary(notaryEvent)
 	}); err != nil {
-		l.log.Warn(logs.EventListenerWorkerPoolDrained,
+		l.log.Warn(context.Background(), logs.EventListenerWorkerPoolDrained,
 			zap.Int("capacity", l.pool.Cap()))
 	}
 }
@@ -320,7 +320,7 @@ func (l *listener) handleNotifyEvent(notifyEvent *state.ContainedNotificationEve
 	if err := l.pool.Submit(func() {
 		l.parseAndHandleNotification(notifyEvent)
 	}); err != nil {
-		l.log.Warn(logs.EventListenerWorkerPoolDrained,
+		l.log.Warn(context.Background(), logs.EventListenerWorkerPoolDrained,
 			zap.Int("capacity", l.pool.Cap()))
 	}
 }
@@ -347,7 +347,7 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi
 	l.mtx.RUnlock()
 
 	if !ok {
-		log.Debug(logs.EventEventParserNotSet)
+		log.Debug(context.Background(), logs.EventEventParserNotSet)
 
 		return
 	}
@@ -355,7 +355,7 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi
 	// parse the notification event
 	event, err := parser(notifyEvent)
 	if err != nil {
-		log.Warn(logs.EventCouldNotParseNotificationEvent,
+		log.Warn(context.Background(), logs.EventCouldNotParseNotificationEvent,
 			zap.String("error", err.Error()),
 		)
 
@@ -368,7 +368,7 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi
 	l.mtx.RUnlock()
 
 	if len(handlers) == 0 {
-		log.Info(logs.EventNotificationHandlersForParsedNotificationEventWereNotRegistered,
+		log.Info(context.Background(), logs.EventNotificationHandlersForParsedNotificationEventWereNotRegistered,
 			zap.Any("event", event),
 		)
 
@@ -388,13 +388,13 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) {
 		switch {
 		case errors.Is(err, ErrTXAlreadyHandled):
 		case errors.As(err, &expErr):
-			l.log.Warn(logs.EventSkipExpiredMainTXNotaryEvent,
+			l.log.Warn(context.Background(), logs.EventSkipExpiredMainTXNotaryEvent,
 				zap.String("error", err.Error()),
 				zap.Uint32("current_block_height", expErr.CurrentBlockHeight),
 				zap.Uint32("fallback_tx_not_valid_before_height", expErr.FallbackTXNotValidBeforeHeight),
 			)
 		default:
-			l.log.Warn(logs.EventCouldNotPrepareAndValidateNotaryEvent,
+			l.log.Warn(context.Background(), logs.EventCouldNotPrepareAndValidateNotaryEvent,
 				zap.String("error", err.Error()),
 			)
 		}
@@ -418,7 +418,7 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) {
 	l.mtx.RUnlock()
 
 	if !ok {
-		log.Debug(logs.EventNotaryParserNotSet)
+		log.Debug(context.Background(), logs.EventNotaryParserNotSet)
 
 		return
 	}
@@ -426,7 +426,7 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) {
 	// parse the notary event
 	event, err := parser(notaryEvent)
 	if err != nil {
-		log.Warn(logs.EventCouldNotParseNotaryEvent,
+		log.Warn(context.Background(), logs.EventCouldNotParseNotaryEvent,
 			zap.String("error", err.Error()),
 		)
 
@@ -439,7 +439,7 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) {
 	l.mtx.RUnlock()
 
 	if !ok {
-		log.Info(logs.EventNotaryHandlersForParsedNotificationEventWereNotRegistered,
+		log.Info(context.Background(), logs.EventNotaryHandlersForParsedNotificationEventWereNotRegistered,
 			zap.Any("event", event),
 		)
 
@@ -461,7 +461,7 @@ func (l *listener) SetNotificationParser(pi NotificationParserInfo) {
 
 	parser := pi.parser()
 	if parser == nil {
-		log.Info(logs.EventIgnoreNilEventParser)
+		log.Info(context.Background(), logs.EventIgnoreNilEventParser)
 		return
 	}
 
@@ -470,7 +470,7 @@ func (l *listener) SetNotificationParser(pi NotificationParserInfo) {
 
 	// check if the listener was started
 	if l.started {
-		log.Warn(logs.EventListenerHasBeenAlreadyStartedIgnoreParser)
+		log.Warn(context.Background(), logs.EventListenerHasBeenAlreadyStartedIgnoreParser)
 		return
 	}
 
@@ -479,7 +479,7 @@ func (l *listener) SetNotificationParser(pi NotificationParserInfo) {
 		l.notificationParsers[pi.scriptHashWithType] = pi.parser()
 	}
 
-	log.Debug(logs.EventRegisteredNewEventParser)
+	log.Debug(context.Background(), logs.EventRegisteredNewEventParser)
 }
 
 // RegisterNotificationHandler registers the handler for particular notification event of contract.
@@ -494,7 +494,7 @@ func (l *listener) RegisterNotificationHandler(hi NotificationHandlerInfo) {
 
 	handler := hi.Handler()
 	if handler == nil {
-		log.Warn(logs.EventIgnoreNilEventHandler)
+		log.Warn(context.Background(), logs.EventIgnoreNilEventHandler)
 		return
 	}
 
@@ -504,7 +504,7 @@ func (l *listener) RegisterNotificationHandler(hi NotificationHandlerInfo) {
 	l.mtx.RUnlock()
 
 	if !ok {
-		log.Warn(logs.EventIgnoreHandlerOfEventWoParser)
+		log.Warn(context.Background(), logs.EventIgnoreHandlerOfEventWoParser)
 		return
 	}
 
@@ -516,7 +516,7 @@ func (l *listener) RegisterNotificationHandler(hi NotificationHandlerInfo) {
 	)
 	l.mtx.Unlock()
 
-	log.Debug(logs.EventRegisteredNewEventHandler)
+	log.Debug(context.Background(), logs.EventRegisteredNewEventHandler)
 }
 
 // EnableNotarySupport enables notary request listening. Passed hash is
@@ -557,7 +557,7 @@ func (l *listener) SetNotaryParser(pi NotaryParserInfo) {
 
 	parser := pi.parser()
 	if parser == nil {
-		log.Info(logs.EventIgnoreNilNotaryEventParser)
+		log.Info(context.Background(), logs.EventIgnoreNilNotaryEventParser)
 		return
 	}
 
@@ -566,7 +566,7 @@ func (l *listener) SetNotaryParser(pi NotaryParserInfo) {
 
 	// check if the listener was started
 	if l.started {
-		log.Warn(logs.EventListenerHasBeenAlreadyStartedIgnoreNotaryParser)
+		log.Warn(context.Background(), logs.EventListenerHasBeenAlreadyStartedIgnoreNotaryParser)
 		return
 	}
 
@@ -575,7 +575,7 @@ func (l *listener) SetNotaryParser(pi NotaryParserInfo) {
 		l.notaryParsers[pi.notaryRequestTypes] = pi.parser()
 	}
 
-	log.Info(logs.EventRegisteredNewEventParser)
+	log.Info(context.Background(), logs.EventRegisteredNewEventParser)
 }
 
 // RegisterNotaryHandler registers the handler for particular notification notary request event.
@@ -595,7 +595,7 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) {
 
 	handler := hi.Handler()
 	if handler == nil {
-		log.Warn(logs.EventIgnoreNilNotaryEventHandler)
+		log.Warn(context.Background(), logs.EventIgnoreNilNotaryEventHandler)
 		return
 	}
 
@@ -605,7 +605,7 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) {
 	l.mtx.RUnlock()
 
 	if !ok {
-		log.Warn(logs.EventIgnoreHandlerOfNotaryEventWoParser)
+		log.Warn(context.Background(), logs.EventIgnoreHandlerOfNotaryEventWoParser)
 		return
 	}
 
@@ -614,7 +614,7 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) {
 	l.notaryHandlers[hi.notaryRequestTypes] = hi.Handler()
 	l.mtx.Unlock()
 
-	log.Info(logs.EventRegisteredNewEventHandler)
+	log.Info(context.Background(), logs.EventRegisteredNewEventHandler)
 }
 
 // Stop closes subscription channel with remote neo node.
@@ -628,7 +628,7 @@ func (l *listener) Stop() {
 
 func (l *listener) RegisterBlockHandler(handler BlockHandler) {
 	if handler == nil {
-		l.log.Warn(logs.EventIgnoreNilBlockHandler)
+		l.log.Warn(context.Background(), logs.EventIgnoreNilBlockHandler)
 		return
 	}
 
diff --git a/pkg/morph/event/utils.go b/pkg/morph/event/utils.go
index f3b6443fb..31bbf4432 100644
--- a/pkg/morph/event/utils.go
+++ b/pkg/morph/event/utils.go
@@ -1,6 +1,7 @@
 package event
 
 import (
+	"context"
 	"errors"
 	"fmt"
 
@@ -89,7 +90,7 @@ func WorkerPoolHandler(w util2.WorkerPool, h Handler, log *logger.Logger) Handle
 			h(e)
 		})
 		if err != nil {
-			log.Warn(logs.EventCouldNotSubmitHandlerToWorkerPool,
+			log.Warn(context.Background(), logs.EventCouldNotSubmitHandlerToWorkerPool,
 				zap.String("error", err.Error()),
 			)
 		}
diff --git a/pkg/morph/subscriber/subscriber.go b/pkg/morph/subscriber/subscriber.go
index ee5466a7d..3a2da6757 100644
--- a/pkg/morph/subscriber/subscriber.go
+++ b/pkg/morph/subscriber/subscriber.go
@@ -245,9 +245,9 @@ routeloop:
 }
 
 func (s *subscriber) switchEndpoint(ctx context.Context, finishCh chan<- bool) bool {
-	s.log.Info(logs.RPConnectionLost)
+	s.log.Info(ctx, logs.RPConnectionLost)
 	if !s.client.SwitchRPC(ctx) {
-		s.log.Error(logs.RPCNodeSwitchFailure)
+		s.log.Error(ctx, logs.RPCNodeSwitchFailure)
 		return false
 	}
 
@@ -304,7 +304,7 @@ func (s *subscriber) restoreSubscriptions(notifCh chan<- *state.ContainedNotific
 	if s.subscribedToNewBlocks {
 		_, err = s.client.ReceiveBlocks(blCh)
 		if err != nil {
-			s.log.Error(logs.ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch, zap.Error(err))
+			s.log.Error(context.Background(), logs.ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch, zap.Error(err))
 			return false
 		}
 	}
@@ -313,7 +313,7 @@ func (s *subscriber) restoreSubscriptions(notifCh chan<- *state.ContainedNotific
 	for contract := range s.subscribedEvents {
 		_, err = s.client.ReceiveExecutionNotifications(contract, notifCh)
 		if err != nil {
-			s.log.Error(logs.ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch, zap.Error(err))
+			s.log.Error(context.Background(), logs.ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch, zap.Error(err))
 			return false
 		}
 	}
@@ -322,7 +322,7 @@ func (s *subscriber) restoreSubscriptions(notifCh chan<- *state.ContainedNotific
 	for signer := range s.subscribedNotaryEvents {
 		_, err = s.client.ReceiveNotaryRequests(signer, notaryCh)
 		if err != nil {
-			s.log.Error(logs.ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch, zap.Error(err))
+			s.log.Error(context.Background(), logs.ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch, zap.Error(err))
 			return false
 		}
 	}
diff --git a/pkg/services/apemanager/executor.go b/pkg/services/apemanager/executor.go
index 86f9cb893..cc792e23d 100644
--- a/pkg/services/apemanager/executor.go
+++ b/pkg/services/apemanager/executor.go
@@ -53,7 +53,7 @@ func New(cnrSrc containercore.Source, contractStorage ape_contract.ProxyAdaptedC
 	}
 
 	if s.log == nil {
-		s.log = &logger.Logger{Logger: zap.NewNop()}
+		s.log = logger.NewLoggerWrapper(zap.NewNop())
 	}
 
 	return s
diff --git a/pkg/services/object/acl/v2/service.go b/pkg/services/object/acl/v2/service.go
index 56748b08c..93ad3dc46 100644
--- a/pkg/services/object/acl/v2/service.go
+++ b/pkg/services/object/acl/v2/service.go
@@ -64,7 +64,7 @@ func New(next object.ServiceServer,
 	opts ...Option,
 ) Service {
 	cfg := &cfg{
-		log:        &logger.Logger{Logger: zap.L()},
+		log:        logger.NewLoggerWrapper(zap.L()),
 		next:       next,
 		nm:         nm,
 		irFetcher:  irf,
diff --git a/pkg/services/object/common/writer/common.go b/pkg/services/object/common/writer/common.go
index 6689557ee..8b92d34ed 100644
--- a/pkg/services/object/common/writer/common.go
+++ b/pkg/services/object/common/writer/common.go
@@ -59,7 +59,7 @@ func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context,
 	if n.Traversal.submitPrimaryPlacementFinish() {
 		err := n.ForEachNode(ctx, f)
 		if err != nil {
-			n.cfg.Logger.Error(logs.PutAdditionalContainerBroadcastFailure, zap.Error(err))
+			n.cfg.Logger.Error(ctx, logs.PutAdditionalContainerBroadcastFailure, zap.Error(err))
 			// we don't fail primary operation because of broadcast failure
 		}
 	}
diff --git a/pkg/services/object/common/writer/ec.go b/pkg/services/object/common/writer/ec.go
index 571bae7bb..64115b86b 100644
--- a/pkg/services/object/common/writer/ec.go
+++ b/pkg/services/object/common/writer/ec.go
@@ -162,7 +162,7 @@ func (e *ECWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index
 			if err == nil {
 				return nil
 			}
-			e.Config.Logger.Logger.Warn(logs.ECFailedToSendToContainerNode, zap.Stringers("address_group", info.AddressGroup()))
+			e.Config.Logger.Warn(ctx, logs.ECFailedToSendToContainerNode, zap.Stringers("address_group", info.AddressGroup()))
 			lastErr = err
 		}
 	}
@@ -275,7 +275,7 @@ func (e *ECWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx
 	if err == nil {
 		return nil
 	}
-	e.Config.Logger.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
+	e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
 		zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx),
 		zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err))
 
@@ -299,7 +299,7 @@ func (e *ECWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx
 		if err == nil {
 			return nil
 		}
-		e.Config.Logger.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
+		e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
 			zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx),
 			zap.String("node", hex.EncodeToString(node.PublicKey())),
 			zap.Error(err))
@@ -323,7 +323,7 @@ func (e *ECWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx
 		if err == nil {
 			return nil
 		}
-		e.Config.Logger.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
+		e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
 			zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx),
 			zap.String("node", hex.EncodeToString(node.PublicKey())),
 			zap.Error(err))
diff --git a/pkg/services/object/delete/delete.go b/pkg/services/object/delete/delete.go
index 88454625d..8aaff670c 100644
--- a/pkg/services/object/delete/delete.go
+++ b/pkg/services/object/delete/delete.go
@@ -33,13 +33,13 @@ func (s *Service) Delete(ctx context.Context, prm Prm) error {
 }
 
 func (exec *execCtx) execute(ctx context.Context) error {
-	exec.log.Debug(logs.ServingRequest)
+	exec.log.Debug(ctx, logs.ServingRequest)
 
 	if err := exec.executeLocal(ctx); err != nil {
-		exec.log.Debug(logs.OperationFinishedWithError, zap.String("error", err.Error()))
+		exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.String("error", err.Error()))
 		return err
 	}
 
-	exec.log.Debug(logs.OperationFinishedSuccessfully)
+	exec.log.Debug(ctx, logs.OperationFinishedSuccessfully)
 	return nil
 }
diff --git a/pkg/services/object/delete/exec.go b/pkg/services/object/delete/exec.go
index ec771320e..36a17bde2 100644
--- a/pkg/services/object/delete/exec.go
+++ b/pkg/services/object/delete/exec.go
@@ -34,13 +34,13 @@ type execCtx struct {
 }
 
 func (exec *execCtx) setLogger(l *logger.Logger) {
-	exec.log = &logger.Logger{Logger: l.With(
+	exec.log = l.With(
 		zap.String("request", "DELETE"),
 		zap.Stringer("address", exec.address()),
 		zap.Bool("local", exec.isLocal()),
 		zap.Bool("with session", exec.prm.common.SessionToken() != nil),
 		zap.Bool("with bearer", exec.prm.common.BearerToken() != nil),
-	)}
+	)
 }
 
 func (exec *execCtx) isLocal() bool {
@@ -83,16 +83,16 @@ func (exec *execCtx) formExtendedInfo(ctx context.Context) error {
 		exec.splitInfo = errSplitInfo.SplitInfo()
 		exec.tombstone.SetSplitID(exec.splitInfo.SplitID())
 
-		exec.log.Debug(logs.DeleteSplitInfoSuccessfullyFormedCollectingMembers)
+		exec.log.Debug(ctx, logs.DeleteSplitInfoSuccessfullyFormedCollectingMembers)
 
 		if err := exec.collectMembers(ctx); err != nil {
 			return err
 		}
 
-		exec.log.Debug(logs.DeleteMembersSuccessfullyCollected)
+		exec.log.Debug(ctx, logs.DeleteMembersSuccessfullyCollected)
 		return nil
 	case errors.As(err, &errECInfo):
-		exec.log.Debug(logs.DeleteECObjectReceived)
+		exec.log.Debug(ctx, logs.DeleteECObjectReceived)
 		return nil
 	}
 
@@ -108,7 +108,7 @@ func (exec *execCtx) formExtendedInfo(ctx context.Context) error {
 
 func (exec *execCtx) collectMembers(ctx context.Context) error {
 	if exec.splitInfo == nil {
-		exec.log.Debug(logs.DeleteNoSplitInfoObjectIsPHY)
+		exec.log.Debug(ctx, logs.DeleteNoSplitInfoObjectIsPHY)
 		return nil
 	}
 
@@ -131,7 +131,7 @@ func (exec *execCtx) collectMembers(ctx context.Context) error {
 func (exec *execCtx) collectChain(ctx context.Context) error {
 	var chain []oid.ID
 
-	exec.log.Debug(logs.DeleteAssemblingChain)
+	exec.log.Debug(ctx, logs.DeleteAssemblingChain)
 
 	for prev, withPrev := exec.splitInfo.LastPart(); withPrev; {
 		chain = append(chain, prev)
@@ -152,7 +152,7 @@ func (exec *execCtx) collectChain(ctx context.Context) error {
 }
 
 func (exec *execCtx) collectChildren(ctx context.Context) error {
-	exec.log.Debug(logs.DeleteCollectingChildren)
+	exec.log.Debug(ctx, logs.DeleteCollectingChildren)
 
 	children, err := exec.svc.header.children(ctx, exec)
 	if err != nil {
@@ -165,7 +165,7 @@ func (exec *execCtx) collectChildren(ctx context.Context) error {
 }
 
 func (exec *execCtx) supplementBySplitID(ctx context.Context) error {
-	exec.log.Debug(logs.DeleteSupplementBySplitID)
+	exec.log.Debug(ctx, logs.DeleteSupplementBySplitID)
 
 	chain, err := exec.svc.searcher.splitMembers(ctx, exec)
 	if err != nil {
diff --git a/pkg/services/object/delete/local.go b/pkg/services/object/delete/local.go
index 2c3c47f49..01b2d9b3f 100644
--- a/pkg/services/object/delete/local.go
+++ b/pkg/services/object/delete/local.go
@@ -10,13 +10,13 @@ import (
 )
 
 func (exec *execCtx) executeLocal(ctx context.Context) error {
-	exec.log.Debug(logs.DeleteFormingTombstoneStructure)
+	exec.log.Debug(ctx, logs.DeleteFormingTombstoneStructure)
 
 	if err := exec.formTombstone(ctx); err != nil {
 		return err
 	}
 
-	exec.log.Debug(logs.DeleteTombstoneStructureSuccessfullyFormedSaving)
+	exec.log.Debug(ctx, logs.DeleteTombstoneStructureSuccessfullyFormedSaving)
 
 	return exec.saveTombstone(ctx)
 }
@@ -33,7 +33,7 @@ func (exec *execCtx) formTombstone(ctx context.Context) error {
 	)
 	exec.addMembers([]oid.ID{exec.address().Object()})
 
-	exec.log.Debug(logs.DeleteFormingSplitInfo)
+	exec.log.Debug(ctx, logs.DeleteFormingSplitInfo)
 
 	if err := exec.formExtendedInfo(ctx); err != nil {
 		return fmt.Errorf("form extended info: %w", err)
diff --git a/pkg/services/object/delete/service.go b/pkg/services/object/delete/service.go
index e4f7a8c50..867d3f4ef 100644
--- a/pkg/services/object/delete/service.go
+++ b/pkg/services/object/delete/service.go
@@ -72,7 +72,7 @@ func New(gs *getsvc.Service,
 	opts ...Option,
 ) *Service {
 	c := &cfg{
-		log:        &logger.Logger{Logger: zap.L()},
+		log:        logger.NewLoggerWrapper(zap.L()),
 		header:     &headSvcWrapper{s: gs},
 		searcher:   &searchSvcWrapper{s: ss},
 		placer:     &putSvcWrapper{s: ps},
@@ -92,6 +92,6 @@ func New(gs *getsvc.Service,
 // WithLogger returns option to specify Delete service's logger.
 func WithLogger(l *logger.Logger) Option {
 	return func(c *cfg) {
-		c.log = &logger.Logger{Logger: l.With(zap.String("component", "objectSDK.Delete service"))}
+		c.log = l.With(zap.String("component", "objectSDK.Delete service"))
 	}
 }
diff --git a/pkg/services/object/get/assemble.go b/pkg/services/object/get/assemble.go
index 9f17f1e4c..e164627d2 100644
--- a/pkg/services/object/get/assemble.go
+++ b/pkg/services/object/get/assemble.go
@@ -13,7 +13,7 @@ import (
 
 func (r *request) assemble(ctx context.Context) {
 	if !r.canAssembleComplexObject() {
-		r.log.Debug(logs.GetCanNotAssembleTheObject)
+		r.log.Debug(ctx, logs.GetCanNotAssembleTheObject)
 		return
 	}
 
@@ -35,23 +35,23 @@ func (r *request) assemble(ctx context.Context) {
 	// `execCtx` so it should be disabled there.
 	r.disableForwarding()
 
-	r.log.Debug(logs.GetTryingToAssembleTheObject)
+	r.log.Debug(ctx, logs.GetTryingToAssembleTheObject)
 
 	r.prm.common = r.prm.common.WithLocalOnly(false)
 	assembler := newAssembler(r.address(), r.splitInfo(), r.ctxRange(), r, r.headOnly())
 
-	r.log.Debug(logs.GetAssemblingSplittedObject,
+	r.log.Debug(ctx, logs.GetAssemblingSplittedObject,
 		zap.Uint64("range_offset", r.ctxRange().GetOffset()),
 		zap.Uint64("range_length", r.ctxRange().GetLength()),
 	)
-	defer r.log.Debug(logs.GetAssemblingSplittedObjectCompleted,
+	defer r.log.Debug(ctx, logs.GetAssemblingSplittedObjectCompleted,
 		zap.Uint64("range_offset", r.ctxRange().GetOffset()),
 		zap.Uint64("range_length", r.ctxRange().GetLength()),
 	)
 
 	obj, err := assembler.Assemble(ctx, r.prm.objWriter)
 	if err != nil {
-		r.log.Warn(logs.GetFailedToAssembleSplittedObject,
+		r.log.Warn(ctx, logs.GetFailedToAssembleSplittedObject,
 			zap.Error(err),
 			zap.Uint64("range_offset", r.ctxRange().GetOffset()),
 			zap.Uint64("range_length", r.ctxRange().GetLength()),
diff --git a/pkg/services/object/get/assembleec.go b/pkg/services/object/get/assembleec.go
index 03f913bbf..8ab423c87 100644
--- a/pkg/services/object/get/assembleec.go
+++ b/pkg/services/object/get/assembleec.go
@@ -12,7 +12,7 @@ import (
 
 func (r *request) assembleEC(ctx context.Context) {
 	if r.isRaw() {
-		r.log.Debug(logs.GetCanNotAssembleTheObject)
+		r.log.Debug(ctx, logs.GetCanNotAssembleTheObject)
 		return
 	}
 
@@ -34,7 +34,7 @@ func (r *request) assembleEC(ctx context.Context) {
 	// `execCtx` so it should be disabled there.
 	r.disableForwarding()
 
-	r.log.Debug(logs.GetTryingToAssembleTheECObject)
+	r.log.Debug(ctx, logs.GetTryingToAssembleTheECObject)
 
 	// initialize epoch number
 	ok := r.initEpoch()
@@ -45,18 +45,18 @@ func (r *request) assembleEC(ctx context.Context) {
 	r.prm.common = r.prm.common.WithLocalOnly(false)
 	assembler := newAssemblerEC(r.address(), r.infoEC, r.ctxRange(), r, r.localStorage, r.log, r.headOnly(), r.traverserGenerator, r.curProcEpoch)
 
-	r.log.Debug(logs.GetAssemblingECObject,
+	r.log.Debug(ctx, logs.GetAssemblingECObject,
 		zap.Uint64("range_offset", r.ctxRange().GetOffset()),
 		zap.Uint64("range_length", r.ctxRange().GetLength()),
 	)
-	defer r.log.Debug(logs.GetAssemblingECObjectCompleted,
+	defer r.log.Debug(ctx, logs.GetAssemblingECObjectCompleted,
 		zap.Uint64("range_offset", r.ctxRange().GetOffset()),
 		zap.Uint64("range_length", r.ctxRange().GetLength()),
 	)
 
 	obj, err := assembler.Assemble(ctx, r.prm.objWriter)
 	if err != nil && !errors.As(err, new(*objectSDK.ECInfoError)) {
-		r.log.Warn(logs.GetFailedToAssembleECObject,
+		r.log.Warn(ctx, logs.GetFailedToAssembleECObject,
 			zap.Error(err),
 			zap.Uint64("range_offset", r.ctxRange().GetOffset()),
 			zap.Uint64("range_length", r.ctxRange().GetLength()),
diff --git a/pkg/services/object/get/assemblerec.go b/pkg/services/object/get/assemblerec.go
index 44d9af3a2..b0895e13e 100644
--- a/pkg/services/object/get/assemblerec.go
+++ b/pkg/services/object/get/assemblerec.go
@@ -155,7 +155,7 @@ func (a *assemblerec) retrieveParts(ctx context.Context, trav *placement.Travers
 
 	parts, err := a.processECNodesRequests(ctx, remoteNodes, dataCount, parityCount)
 	if err != nil {
-		a.log.Debug(logs.GetUnableToGetAllPartsECObject, zap.Error(err))
+		a.log.Debug(ctx, logs.GetUnableToGetAllPartsECObject, zap.Error(err))
 	}
 	return parts
 }
@@ -229,7 +229,7 @@ func (a *assemblerec) tryGetChunkFromLocalStorage(ctx context.Context, ch object
 	var objID oid.ID
 	err := objID.ReadFromV2(ch.ID)
 	if err != nil {
-		a.log.Error(logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err)))
+		a.log.Error(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err)))
 		return nil
 	}
 	var addr oid.Address
@@ -239,13 +239,13 @@ func (a *assemblerec) tryGetChunkFromLocalStorage(ctx context.Context, ch object
 	if a.head {
 		object, err = a.localStorage.Head(ctx, addr, false)
 		if err != nil {
-			a.log.Warn(logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err))
+			a.log.Warn(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err))
 			return nil
 		}
 	} else {
 		object, err = a.localStorage.Get(ctx, addr)
 		if err != nil {
-			a.log.Warn(logs.GetUnableToGetPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err))
+			a.log.Warn(ctx, logs.GetUnableToGetPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err))
 			return nil
 		}
 	}
@@ -259,11 +259,11 @@ func (a *assemblerec) tryGetChunkListFromNode(ctx context.Context, node client.N
 	var errECInfo *objectSDK.ECInfoError
 	_, err := a.remoteStorage.headObjectFromNode(ctx, a.addr, node, true)
 	if err == nil {
-		a.log.Error(logs.GetUnexpectedECObject, zap.String("node", hex.EncodeToString(node.PublicKey())))
+		a.log.Error(ctx, logs.GetUnexpectedECObject, zap.String("node", hex.EncodeToString(node.PublicKey())))
 		return nil
 	}
 	if !errors.As(err, &errECInfo) {
-		a.log.Warn(logs.GetUnableToHeadPartsECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err))
+		a.log.Warn(ctx, logs.GetUnableToHeadPartsECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err))
 		return nil
 	}
 	result := make([]objectSDK.ECChunk, 0, len(errECInfo.ECInfo().Chunks))
@@ -277,7 +277,7 @@ func (a *assemblerec) tryGetChunkFromRemoteStorage(ctx context.Context, node cli
 	var objID oid.ID
 	err := objID.ReadFromV2(ch.ID)
 	if err != nil {
-		a.log.Error(logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err)))
+		a.log.Error(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err)))
 		return nil
 	}
 	var addr oid.Address
@@ -287,13 +287,13 @@ func (a *assemblerec) tryGetChunkFromRemoteStorage(ctx context.Context, node cli
 	if a.head {
 		object, err = a.remoteStorage.headObjectFromNode(ctx, addr, node, false)
 		if err != nil {
-			a.log.Warn(logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err))
+			a.log.Warn(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err))
 			return nil
 		}
 	} else {
 		object, err = a.remoteStorage.getObjectFromNode(ctx, addr, node)
 		if err != nil {
-			a.log.Warn(logs.GetUnableToGetPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err))
+			a.log.Warn(ctx, logs.GetUnableToGetPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err))
 			return nil
 		}
 	}
diff --git a/pkg/services/object/get/container.go b/pkg/services/object/get/container.go
index 034768c81..2b84c5b32 100644
--- a/pkg/services/object/get/container.go
+++ b/pkg/services/object/get/container.go
@@ -10,13 +10,13 @@ import (
 
 func (r *request) executeOnContainer(ctx context.Context) {
 	if r.isLocal() {
-		r.log.Debug(logs.GetReturnResultDirectly)
+		r.log.Debug(ctx, logs.GetReturnResultDirectly)
 		return
 	}
 
 	lookupDepth := r.netmapLookupDepth()
 
-	r.log.Debug(logs.TryingToExecuteInContainer,
+	r.log.Debug(ctx, logs.TryingToExecuteInContainer,
 		zap.Uint64("netmap lookup depth", lookupDepth),
 	)
 
@@ -46,7 +46,7 @@ func (r *request) executeOnContainer(ctx context.Context) {
 }
 
 func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool {
-	r.log.Debug(logs.ProcessEpoch,
+	r.log.Debug(ctx, logs.ProcessEpoch,
 		zap.Uint64("number", r.curProcEpoch),
 	)
 
@@ -67,7 +67,7 @@ func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool
 	for {
 		addrs := traverser.Next()
 		if len(addrs) == 0 {
-			r.log.Debug(logs.NoMoreNodesAbortPlacementIteration)
+			r.log.Debug(ctx, logs.NoMoreNodesAbortPlacementIteration)
 
 			return false
 		}
@@ -75,7 +75,7 @@ func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool
 		for i := range addrs {
 			select {
 			case <-ctx.Done():
-				r.log.Debug(logs.InterruptPlacementIterationByContext,
+				r.log.Debug(ctx, logs.InterruptPlacementIterationByContext,
 					zap.Error(ctx.Err()),
 				)
 
@@ -91,7 +91,7 @@ func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool
 			client.NodeInfoFromNetmapElement(&info, addrs[i])
 
 			if r.processNode(ctx, info) {
-				r.log.Debug(logs.GetCompletingTheOperation)
+				r.log.Debug(ctx, logs.GetCompletingTheOperation)
 				return true
 			}
 		}
diff --git a/pkg/services/object/get/get.go b/pkg/services/object/get/get.go
index 03b7f8bf2..557e9a028 100644
--- a/pkg/services/object/get/get.go
+++ b/pkg/services/object/get/get.go
@@ -91,7 +91,7 @@ func (s *Service) get(ctx context.Context, prm RequestParameters) error {
 }
 
 func (exec *request) execute(ctx context.Context) {
-	exec.log.Debug(logs.ServingRequest)
+	exec.log.Debug(ctx, logs.ServingRequest)
 
 	// perform local operation
 	exec.executeLocal(ctx)
@@ -103,23 +103,23 @@ func (exec *request) analyzeStatus(ctx context.Context, execCnr bool) {
 	// analyze local result
 	switch exec.status {
 	case statusOK:
-		exec.log.Debug(logs.OperationFinishedSuccessfully)
+		exec.log.Debug(ctx, logs.OperationFinishedSuccessfully)
 	case statusINHUMED:
-		exec.log.Debug(logs.GetRequestedObjectWasMarkedAsRemoved)
+		exec.log.Debug(ctx, logs.GetRequestedObjectWasMarkedAsRemoved)
 	case statusVIRTUAL:
-		exec.log.Debug(logs.GetRequestedObjectIsVirtual)
+		exec.log.Debug(ctx, logs.GetRequestedObjectIsVirtual)
 		exec.assemble(ctx)
 	case statusOutOfRange:
-		exec.log.Debug(logs.GetRequestedRangeIsOutOfObjectBounds)
+		exec.log.Debug(ctx, logs.GetRequestedRangeIsOutOfObjectBounds)
 	case statusEC:
-		exec.log.Debug(logs.GetRequestedObjectIsEC)
+		exec.log.Debug(ctx, logs.GetRequestedObjectIsEC)
 		if exec.isRaw() && execCnr {
 			exec.executeOnContainer(ctx)
 			exec.analyzeStatus(ctx, false)
 		}
 		exec.assembleEC(ctx)
 	default:
-		exec.log.Debug(logs.OperationFinishedWithError,
+		exec.log.Debug(ctx, logs.OperationFinishedWithError,
 			zap.Error(exec.err),
 		)
 		var errAccessDenied *apistatus.ObjectAccessDenied
diff --git a/pkg/services/object/get/local.go b/pkg/services/object/get/local.go
index 1cd5e549c..cfabb082f 100644
--- a/pkg/services/object/get/local.go
+++ b/pkg/services/object/get/local.go
@@ -31,7 +31,7 @@ func (r *request) executeLocal(ctx context.Context) {
 		r.status = statusUndefined
 		r.err = err
 
-		r.log.Debug(logs.GetLocalGetFailed, zap.Error(err))
+		r.log.Debug(ctx, logs.GetLocalGetFailed, zap.Error(err))
 	case err == nil:
 		r.status = statusOK
 		r.err = nil
diff --git a/pkg/services/object/get/remote.go b/pkg/services/object/get/remote.go
index f2639f8e6..b6a83fd0c 100644
--- a/pkg/services/object/get/remote.go
+++ b/pkg/services/object/get/remote.go
@@ -18,7 +18,7 @@ func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool {
 	ctx, span := tracing.StartSpanFromContext(ctx, "getService.processNode")
 	defer span.End()
 
-	r.log.Debug(logs.ProcessingNode, zap.String("node_key", hex.EncodeToString(info.PublicKey())))
+	r.log.Debug(ctx, logs.ProcessingNode, zap.String("node_key", hex.EncodeToString(info.PublicKey())))
 
 	rs, ok := r.getRemoteStorage(info)
 	if !ok {
@@ -35,7 +35,7 @@ func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool {
 
 	switch {
 	default:
-		r.log.Debug(logs.GetRemoteCallFailed, zap.Error(err))
+		r.log.Debug(ctx, logs.GetRemoteCallFailed, zap.Error(err))
 		if r.status != statusEC {
 			// for raw requests, continue to collect other parts
 			r.status = statusUndefined
diff --git a/pkg/services/object/get/request.go b/pkg/services/object/get/request.go
index 1a7a43a35..bba767d2d 100644
--- a/pkg/services/object/get/request.go
+++ b/pkg/services/object/get/request.go
@@ -47,14 +47,14 @@ func (r *request) setLogger(l *logger.Logger) {
 		req = "GET_RANGE"
 	}
 
-	r.log = &logger.Logger{Logger: l.With(
+	r.log = l.With(
 		zap.String("request", req),
 		zap.Stringer("address", r.address()),
 		zap.Bool("raw", r.isRaw()),
 		zap.Bool("local", r.isLocal()),
 		zap.Bool("with session", r.prm.common.SessionToken() != nil),
 		zap.Bool("with bearer", r.prm.common.BearerToken() != nil),
-	)}
+	)
 }
 
 func (r *request) isLocal() bool {
@@ -129,7 +129,7 @@ func (r *request) initEpoch() bool {
 		r.status = statusUndefined
 		r.err = err
 
-		r.log.Debug(logs.CouldNotGetCurrentEpochNumber, zap.Error(err))
+		r.log.Debug(context.Background(), logs.CouldNotGetCurrentEpochNumber, zap.Error(err))
 
 		return false
 	case err == nil:
@@ -148,7 +148,7 @@ func (r *request) generateTraverser(addr oid.Address) (*placement.Traverser, boo
 		r.status = statusUndefined
 		r.err = err
 
-		r.log.Debug(logs.GetCouldNotGenerateContainerTraverser, zap.Error(err))
+		r.log.Debug(context.Background(), logs.GetCouldNotGenerateContainerTraverser, zap.Error(err))
 
 		return nil, false
 	case err == nil:
@@ -162,7 +162,7 @@ func (r *request) getRemoteStorage(info clientcore.NodeInfo) (remoteStorage, boo
 		r.status = statusUndefined
 		r.err = err
 
-		r.log.Debug(logs.GetCouldNotConstructRemoteNodeClient)
+		r.log.Debug(context.Background(), logs.GetCouldNotConstructRemoteNodeClient)
 
 		return nil, false
 	}
@@ -185,7 +185,7 @@ func (r *request) writeCollectedHeader(ctx context.Context) bool {
 		r.status = statusUndefined
 		r.err = err
 
-		r.log.Debug(logs.GetCouldNotWriteHeader, zap.Error(err))
+		r.log.Debug(ctx, logs.GetCouldNotWriteHeader, zap.Error(err))
 	case err == nil:
 		r.status = statusOK
 		r.err = nil
@@ -206,7 +206,7 @@ func (r *request) writeObjectPayload(ctx context.Context, obj *objectSDK.Object)
 		r.status = statusUndefined
 		r.err = err
 
-		r.log.Debug(logs.GetCouldNotWritePayloadChunk, zap.Error(err))
+		r.log.Debug(ctx, logs.GetCouldNotWritePayloadChunk, zap.Error(err))
 	case err == nil:
 		r.status = statusOK
 		r.err = nil
diff --git a/pkg/services/object/get/service.go b/pkg/services/object/get/service.go
index 3413abeb7..9ec10b5f2 100644
--- a/pkg/services/object/get/service.go
+++ b/pkg/services/object/get/service.go
@@ -34,7 +34,7 @@ func New(
 	result := &Service{
 		keyStore:    ks,
 		epochSource: es,
-		log:         &logger.Logger{Logger: zap.L()},
+		log:         logger.NewLoggerWrapper(zap.L()),
 		localStorage: &engineLocalStorage{
 			engine: e,
 		},
@@ -53,6 +53,6 @@ func New(
 // WithLogger returns option to specify Get service's logger.
 func WithLogger(l *logger.Logger) Option {
 	return func(s *Service) {
-		s.log = &logger.Logger{Logger: l.With(zap.String("component", "Object.Get service"))}
+		s.log = l.With(zap.String("component", "Object.Get service"))
 	}
 }
diff --git a/pkg/services/object/get/v2/get_range_hash.go b/pkg/services/object/get/v2/get_range_hash.go
index e8e82ddd9..7d26a38c3 100644
--- a/pkg/services/object/get/v2/get_range_hash.go
+++ b/pkg/services/object/get/v2/get_range_hash.go
@@ -125,14 +125,14 @@ func (s *Service) forwardGetRangeHashRequest(ctx context.Context, req *objectV2.
 
 		var addrGr network.AddressGroup
 		if err := addrGr.FromIterator(network.NodeEndpointsIterator(node)); err != nil {
-			s.log.Warn(logs.GetSvcV2FailedToParseNodeEndpoints, zap.String("node_public_key", hex.EncodeToString(node.PublicKey())))
+			s.log.Warn(ctx, logs.GetSvcV2FailedToParseNodeEndpoints, zap.String("node_public_key", hex.EncodeToString(node.PublicKey())))
 			continue
 		}
 
 		var extAddr network.AddressGroup
 		if len(node.ExternalAddresses()) > 0 {
 			if err := extAddr.FromStringSlice(node.ExternalAddresses()); err != nil {
-				s.log.Warn(logs.GetSvcV2FailedToParseNodeExternalAddresses, zap.String("node_public_key", hex.EncodeToString(node.PublicKey())))
+				s.log.Warn(ctx, logs.GetSvcV2FailedToParseNodeExternalAddresses, zap.String("node_public_key", hex.EncodeToString(node.PublicKey())))
 				continue
 			}
 		}
@@ -150,12 +150,12 @@ func (s *Service) forwardGetRangeHashRequest(ctx context.Context, req *objectV2.
 		if firstErr == nil {
 			firstErr = err
 		}
-		s.log.Debug(logs.GetSvcV2FailedToGetRangeHashFromNode,
+		s.log.Debug(ctx, logs.GetSvcV2FailedToGetRangeHashFromNode,
 			zap.String("node_public_key", hex.EncodeToString(node.PublicKey())),
 			zap.Stringer("address", params.address),
 			zap.Error(err))
 	}
-	s.log.Debug(logs.GetSvcV2FailedToGetRangeHashFromAllOfContainerNodes, zap.Stringer("address", params.address), zap.Error(firstErr))
+	s.log.Debug(ctx, logs.GetSvcV2FailedToGetRangeHashFromAllOfContainerNodes, zap.Stringer("address", params.address), zap.Error(firstErr))
 	if firstErr != nil {
 		return nil, firstErr
 	}
diff --git a/pkg/services/object/get/v2/service.go b/pkg/services/object/get/v2/service.go
index 24b2f0099..fc483b74b 100644
--- a/pkg/services/object/get/v2/service.go
+++ b/pkg/services/object/get/v2/service.go
@@ -60,7 +60,7 @@ func NewService(svc *getsvc.Service,
 		netmapSource:  netmapSource,
 		announcedKeys: announcedKeys,
 		contSource:    contSource,
-		log:           &logger.Logger{Logger: zap.L()},
+		log:           logger.NewLoggerWrapper(zap.L()),
 	}
 
 	for i := range opts {
@@ -145,6 +145,6 @@ func (s *Service) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV
 
 func WithLogger(l *logger.Logger) Option {
 	return func(c *cfg) {
-		c.log = &logger.Logger{Logger: l.With(zap.String("component", "Object.Get V2 service"))}
+		c.log = l.With(zap.String("component", "Object.Get V2 service"))
 	}
 }
diff --git a/pkg/services/object/put/service.go b/pkg/services/object/put/service.go
index 8cf4f0d62..5cc0a5722 100644
--- a/pkg/services/object/put/service.go
+++ b/pkg/services/object/put/service.go
@@ -29,7 +29,7 @@ func NewService(ks *objutil.KeyStorage,
 	c := &objectwriter.Config{
 		RemotePool:        util.NewPseudoWorkerPool(),
 		LocalPool:         util.NewPseudoWorkerPool(),
-		Logger:            &logger.Logger{Logger: zap.L()},
+		Logger:            logger.NewLoggerWrapper(zap.L()),
 		KeyStorage:        ks,
 		ClientConstructor: cc,
 		MaxSizeSrc:        ms,
diff --git a/pkg/services/object/put/single.go b/pkg/services/object/put/single.go
index 3a0b3901f..36b0bd54c 100644
--- a/pkg/services/object/put/single.go
+++ b/pkg/services/object/put/single.go
@@ -317,7 +317,7 @@ func (s *Service) redirectPutSingleRequest(ctx context.Context,
 			if err != nil {
 				objID, _ := obj.ID()
 				cnrID, _ := obj.ContainerID()
-				s.Config.Logger.Warn(logs.PutSingleRedirectFailure,
+				s.Config.Logger.Warn(ctx, logs.PutSingleRedirectFailure,
 					zap.Error(err),
 					zap.Stringer("address", addr),
 					zap.Stringer("object_id", objID),
diff --git a/pkg/services/object/search/container.go b/pkg/services/object/search/container.go
index 39259b0ca..999a3cc9e 100644
--- a/pkg/services/object/search/container.go
+++ b/pkg/services/object/search/container.go
@@ -15,7 +15,7 @@ import (
 func (exec *execCtx) executeOnContainer(ctx context.Context) error {
 	lookupDepth := exec.netmapLookupDepth()
 
-	exec.log.Debug(logs.TryingToExecuteInContainer,
+	exec.log.Debug(ctx, logs.TryingToExecuteInContainer,
 		zap.Uint64("netmap lookup depth", lookupDepth),
 	)
 
@@ -44,7 +44,7 @@ func (exec *execCtx) executeOnContainer(ctx context.Context) error {
 }
 
 func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
-	exec.log.Debug(logs.ProcessEpoch,
+	exec.log.Debug(ctx, logs.ProcessEpoch,
 		zap.Uint64("number", exec.curProcEpoch),
 	)
 
@@ -59,7 +59,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
 	for {
 		addrs := traverser.Next()
 		if len(addrs) == 0 {
-			exec.log.Debug(logs.NoMoreNodesAbortPlacementIteration)
+			exec.log.Debug(ctx, logs.NoMoreNodesAbortPlacementIteration)
 			break
 		}
 
@@ -72,7 +72,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
 				defer wg.Done()
 				select {
 				case <-ctx.Done():
-					exec.log.Debug(logs.InterruptPlacementIterationByContext,
+					exec.log.Debug(ctx, logs.InterruptPlacementIterationByContext,
 						zap.String("error", ctx.Err().Error()))
 					return
 				default:
@@ -82,17 +82,17 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
 
 				client.NodeInfoFromNetmapElement(&info, addrs[i])
 
-				exec.log.Debug(logs.ProcessingNode, zap.String("key", hex.EncodeToString(addrs[i].PublicKey())))
+				exec.log.Debug(ctx, logs.ProcessingNode, zap.String("key", hex.EncodeToString(addrs[i].PublicKey())))
 
 				c, err := exec.svc.clientConstructor.get(info)
 				if err != nil {
-					exec.log.Debug(logs.SearchCouldNotConstructRemoteNodeClient, zap.String("error", err.Error()))
+					exec.log.Debug(ctx, logs.SearchCouldNotConstructRemoteNodeClient, zap.String("error", err.Error()))
 					return
 				}
 
 				ids, err := c.searchObjects(ctx, exec, info)
 				if err != nil {
-					exec.log.Debug(logs.SearchRemoteOperationFailed,
+					exec.log.Debug(ctx, logs.SearchRemoteOperationFailed,
 						zap.String("error", err.Error()))
 
 					return
@@ -102,7 +102,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
 				err = exec.writeIDList(ids)
 				mtx.Unlock()
 				if err != nil {
-					exec.log.Debug(logs.SearchCouldNotWriteObjectIdentifiers, zap.String("error", err.Error()))
+					exec.log.Debug(ctx, logs.SearchCouldNotWriteObjectIdentifiers, zap.String("error", err.Error()))
 					return
 				}
 			}(i)
diff --git a/pkg/services/object/search/exec.go b/pkg/services/object/search/exec.go
index 4a2c04ecd..eb9635f14 100644
--- a/pkg/services/object/search/exec.go
+++ b/pkg/services/object/search/exec.go
@@ -19,13 +19,13 @@ type execCtx struct {
 }
 
 func (exec *execCtx) setLogger(l *logger.Logger) {
-	exec.log = &logger.Logger{Logger: l.With(
+	exec.log = l.With(
 		zap.String("request", "SEARCH"),
 		zap.Stringer("container", exec.containerID()),
 		zap.Bool("local", exec.isLocal()),
 		zap.Bool("with session", exec.prm.common.SessionToken() != nil),
 		zap.Bool("with bearer", exec.prm.common.BearerToken() != nil),
-	)}
+	)
 }
 
 func (exec *execCtx) isLocal() bool {
diff --git a/pkg/services/object/search/local.go b/pkg/services/object/search/local.go
index cfaed13b8..bc59d0394 100644
--- a/pkg/services/object/search/local.go
+++ b/pkg/services/object/search/local.go
@@ -11,7 +11,7 @@ import (
 func (exec *execCtx) executeLocal(ctx context.Context) error {
 	ids, err := exec.svc.localStorage.search(ctx, exec)
 	if err != nil {
-		exec.log.Debug(logs.SearchLocalOperationFailed, zap.String("error", err.Error()))
+		exec.log.Debug(ctx, logs.SearchLocalOperationFailed, zap.String("error", err.Error()))
 		return err
 	}
 
diff --git a/pkg/services/object/search/search.go b/pkg/services/object/search/search.go
index 4a5c414d5..bb5c720ff 100644
--- a/pkg/services/object/search/search.go
+++ b/pkg/services/object/search/search.go
@@ -20,13 +20,13 @@ func (s *Service) Search(ctx context.Context, prm Prm) error {
 }
 
 func (exec *execCtx) execute(ctx context.Context) error {
-	exec.log.Debug(logs.ServingRequest)
+	exec.log.Debug(ctx, logs.ServingRequest)
 
 	err := exec.executeLocal(ctx)
 	exec.logResult(err)
 
 	if exec.isLocal() {
-		exec.log.Debug(logs.SearchReturnResultDirectly)
+		exec.log.Debug(ctx, logs.SearchReturnResultDirectly)
 		return err
 	}
 
@@ -38,8 +38,8 @@ func (exec *execCtx) execute(ctx context.Context) error {
 func (exec *execCtx) logResult(err error) {
 	switch {
 	default:
-		exec.log.Debug(logs.OperationFinishedWithError, zap.String("error", err.Error()))
+		exec.log.Debug(context.Background(), logs.OperationFinishedWithError, zap.String("error", err.Error()))
 	case err == nil:
-		exec.log.Debug(logs.OperationFinishedSuccessfully)
+		exec.log.Debug(context.Background(), logs.OperationFinishedSuccessfully)
 	}
 }
diff --git a/pkg/services/object/search/service.go b/pkg/services/object/search/service.go
index 7700f78d8..77d25357a 100644
--- a/pkg/services/object/search/service.go
+++ b/pkg/services/object/search/service.go
@@ -69,7 +69,7 @@ func New(e *engine.StorageEngine,
 	opts ...Option,
 ) *Service {
 	c := &cfg{
-		log: &logger.Logger{Logger: zap.L()},
+		log: logger.NewLoggerWrapper(zap.L()),
 		clientConstructor: &clientConstructorWrapper{
 			constructor: cc,
 		},
@@ -94,6 +94,6 @@ func New(e *engine.StorageEngine,
 // WithLogger returns option to specify Get service's logger.
 func WithLogger(l *logger.Logger) Option {
 	return func(c *cfg) {
-		c.log = &logger.Logger{Logger: l.With(zap.String("component", "Object.Search service"))}
+		c.log = l.With(zap.String("component", "Object.Search service"))
 	}
 }
diff --git a/pkg/services/object/util/log.go b/pkg/services/object/util/log.go
index 92beedaa7..5075344a4 100644
--- a/pkg/services/object/util/log.go
+++ b/pkg/services/object/util/log.go
@@ -1,6 +1,8 @@
 package util
 
 import (
+	"context"
+
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
@@ -9,7 +11,7 @@ import (
 
 // LogServiceError writes error message of object service to provided logger.
 func LogServiceError(l *logger.Logger, req string, node network.AddressGroup, err error) {
-	l.Error(logs.UtilObjectServiceError,
+	l.Error(context.Background(), logs.UtilObjectServiceError,
 		zap.String("node", network.StringifyGroup(node)),
 		zap.String("request", req),
 		zap.String("error", err.Error()),
@@ -18,7 +20,7 @@ func LogServiceError(l *logger.Logger, req string, node network.AddressGroup, er
 
 // LogWorkerPoolError writes debug error message of object worker pool to provided logger.
 func LogWorkerPoolError(l *logger.Logger, req string, err error) {
-	l.Error(logs.UtilCouldNotPushTaskToWorkerPool,
+	l.Error(context.Background(), logs.UtilCouldNotPushTaskToWorkerPool,
 		zap.String("request", req),
 		zap.String("error", err.Error()),
 	)
diff --git a/pkg/services/object_manager/tombstone/checker.go b/pkg/services/object_manager/tombstone/checker.go
index 7476dbd48..6a9706b9e 100644
--- a/pkg/services/object_manager/tombstone/checker.go
+++ b/pkg/services/object_manager/tombstone/checker.go
@@ -57,7 +57,7 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr
 
 	ts, err := g.tsSource.Tombstone(ctx, a, epoch)
 	if err != nil {
-		log.Warn(
+		log.Warn(ctx,
 			logs.TombstoneCouldNotGetTheTombstoneTheSource,
 			zap.Error(err),
 		)
@@ -77,7 +77,7 @@ func (g *ExpirationChecker) handleTS(addr string, ts *objectSDK.Object, reqEpoch
 		if atr.Key() == objectV2.SysAttributeExpEpoch {
 			epoch, err := strconv.ParseUint(atr.Value(), 10, 64)
 			if err != nil {
-				g.log.Warn(
+				g.log.Warn(context.Background(),
 					logs.TombstoneExpirationParseFailure,
 					zap.Error(err),
 				)
diff --git a/pkg/services/object_manager/tombstone/constructor.go b/pkg/services/object_manager/tombstone/constructor.go
index 9d33e8179..67ddf316f 100644
--- a/pkg/services/object_manager/tombstone/constructor.go
+++ b/pkg/services/object_manager/tombstone/constructor.go
@@ -23,7 +23,7 @@ type Option func(*cfg)
 
 func defaultCfg() *cfg {
 	return &cfg{
-		log:       &logger.Logger{Logger: zap.NewNop()},
+		log:       logger.NewLoggerWrapper(zap.NewNop()),
 		cacheSize: defaultLRUCacheSize,
 	}
 }
diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go
index dbc9ea53c..c82680a1e 100644
--- a/pkg/services/policer/check.go
+++ b/pkg/services/policer/check.go
@@ -86,7 +86,7 @@ func (p *Policer) processRepContainerObject(ctx context.Context, objInfo objectc
 	}
 
 	if !c.needLocalCopy && c.removeLocalCopy {
-		p.log.Info(logs.PolicerRedundantLocalObjectCopyDetected,
+		p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected,
 			zap.Stringer("object", objInfo.Address),
 		)
 
@@ -151,7 +151,7 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe
 				} else if client.IsErrNodeUnderMaintenance(err) {
 					shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies)
 				} else {
-					p.log.Error(logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance,
+					p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance,
 						zap.Stringer("object", addr),
 						zap.String("error", err.Error()),
 					)
@@ -178,7 +178,7 @@ func (p *Policer) handleMaintenance(node netmap.NodeInfo, checkedNodes nodeCache
 	shortage--
 	uncheckedCopies++
 
-	p.log.Debug(logs.PolicerConsiderNodeUnderMaintenanceAsOK,
+	p.log.Debug(context.Background(), logs.PolicerConsiderNodeUnderMaintenanceAsOK,
 		zap.String("node", netmap.StringifyPublicKey(node)),
 	)
 	return shortage, uncheckedCopies
@@ -189,7 +189,7 @@ func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address
 ) {
 	switch {
 	case shortage > 0:
-		p.log.Debug(logs.PolicerShortageOfObjectCopiesDetected,
+		p.log.Debug(ctx, logs.PolicerShortageOfObjectCopiesDetected,
 			zap.Stringer("object", addr),
 			zap.Uint32("shortage", shortage),
 		)
@@ -205,7 +205,7 @@ func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address
 	case uncheckedCopies > 0:
 		// If we have more copies than needed, but some of them are from the maintenance nodes,
 		// save the local copy.
-		p.log.Debug(logs.PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance,
+		p.log.Debug(ctx, logs.PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance,
 			zap.Int("count", uncheckedCopies))
 
 	case uncheckedCopies == 0:
diff --git a/pkg/services/policer/ec.go b/pkg/services/policer/ec.go
index 6d2c153c9..cb583f1d3 100644
--- a/pkg/services/policer/ec.go
+++ b/pkg/services/policer/ec.go
@@ -59,7 +59,7 @@ func (p *Policer) processECContainerRepObject(ctx context.Context, objInfo objec
 	p.processRepNodes(ctx, c, objInfo, nn[0], uint32(len(nn[0])), checkedNodes)
 
 	if !c.needLocalCopy && c.removeLocalCopy {
-		p.log.Info(logs.PolicerRedundantLocalObjectCopyDetected,
+		p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected,
 			zap.Stringer("object", objInfo.Address),
 		)
 
@@ -91,7 +91,7 @@ func (p *Policer) processECContainerECObject(ctx context.Context, objInfo object
 	p.adjustECPlacement(ctx, objInfo, nn[0], cnr)
 
 	if res.removeLocal {
-		p.log.Info(logs.PolicerRedundantLocalObjectCopyDetected, zap.Stringer("object", objInfo.Address))
+		p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected, zap.Stringer("object", objInfo.Address))
 		p.cbRedundantCopy(ctx, objInfo.Address)
 	}
 	return nil
@@ -109,7 +109,7 @@ func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, n
 	}
 	if requiredNode.Status().IsMaintenance() {
 		// consider maintenance mode has object, but do not drop local copy
-		p.log.Debug(logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
+		p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
 		return ecChunkProcessResult{}
 	}
 
@@ -120,7 +120,7 @@ func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, n
 	if err == nil {
 		removeLocalChunk = true
 	} else if client.IsErrObjectNotFound(err) {
-		p.log.Debug(logs.PolicerShortageOfObjectCopiesDetected, zap.Stringer("object", objInfo.Address), zap.Uint32("shortage", 1))
+		p.log.Debug(ctx, logs.PolicerShortageOfObjectCopiesDetected, zap.Stringer("object", objInfo.Address), zap.Uint32("shortage", 1))
 		task := replicator.Task{
 			NumCopies: 1,
 			Addr:      objInfo.Address,
@@ -129,9 +129,9 @@ func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, n
 		p.replicator.HandleReplicationTask(ctx, task, newNodeCache())
 	} else if client.IsErrNodeUnderMaintenance(err) {
 		// consider maintenance mode has object, but do not drop local copy
-		p.log.Debug(logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
+		p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
 	} else {
-		p.log.Error(logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, zap.Stringer("object", objInfo.Address), zap.String("error", err.Error()))
+		p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, zap.Stringer("object", objInfo.Address), zap.String("error", err.Error()))
 	}
 
 	return ecChunkProcessResult{
@@ -146,13 +146,13 @@ func (p *Policer) pullRequiredECChunks(ctx context.Context, objInfo objectcore.I
 
 	requiredChunkIndexes := p.collectRequiredECChunks(nodes, objInfo)
 	if len(requiredChunkIndexes) == 0 {
-		p.log.Info(logs.PolicerNodeIsNotECObjectNode, zap.Stringer("object", objInfo.ECInfo.ParentID))
+		p.log.Info(ctx, logs.PolicerNodeIsNotECObjectNode, zap.Stringer("object", objInfo.ECInfo.ParentID))
 		return true
 	}
 
 	err := p.resolveLocalECChunks(ctx, parentAddress, requiredChunkIndexes)
 	if err != nil {
-		p.log.Error(logs.PolicerFailedToGetLocalECChunks, zap.Error(err), zap.Stringer("object", parentAddress))
+		p.log.Error(ctx, logs.PolicerFailedToGetLocalECChunks, zap.Error(err), zap.Stringer("object", parentAddress))
 		return false
 	}
 	if len(requiredChunkIndexes) == 0 {
@@ -224,11 +224,11 @@ func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.A
 
 				var chunkID oid.ID
 				if err := chunkID.ReadFromV2(ch.ID); err != nil {
-					p.log.Error(logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress))
+					p.log.Error(ctx, logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress))
 					return false
 				}
 				if existed, ok := indexToObjectID[ch.Index]; ok && existed != chunkID {
-					p.log.Error(logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", existed),
+					p.log.Error(ctx, logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", existed),
 						zap.Stringer("second", chunkID), zap.Stringer("object", parentAddress), zap.Uint32("index", ch.Index))
 					return false
 				}
@@ -239,7 +239,7 @@ func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.A
 
 	for index, candidates := range required {
 		if len(candidates) == 0 {
-			p.log.Error(logs.PolicerMissingECChunk, zap.Stringer("object", parentAddress), zap.Uint32("index", index))
+			p.log.Error(ctx, logs.PolicerMissingECChunk, zap.Stringer("object", parentAddress), zap.Uint32("index", index))
 			return false
 		}
 	}
@@ -271,18 +271,18 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info
 				resolved[ch.Index] = append(resolved[ch.Index], n)
 				var ecInfoChunkID oid.ID
 				if err := ecInfoChunkID.ReadFromV2(ch.ID); err != nil {
-					p.log.Error(logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress))
+					p.log.Error(ctx, logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress))
 					return
 				}
 				if chunkID, exist := chunkIDs[ch.Index]; exist && chunkID != ecInfoChunkID {
-					p.log.Error(logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", chunkID),
+					p.log.Error(ctx, logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", chunkID),
 						zap.Stringer("second", ecInfoChunkID), zap.Stringer("object", parentAddress), zap.Uint32("index", ch.Index))
 					return
 				}
 				chunkIDs[ch.Index] = ecInfoChunkID
 			}
 		} else if !p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < objInfo.ECInfo.Total {
-			p.log.Warn(logs.PolicerCouldNotGetObjectFromNodeMoving, zap.String("node", hex.EncodeToString(n.PublicKey())), zap.Stringer("object", parentAddress), zap.Error(err))
+			p.log.Warn(ctx, logs.PolicerCouldNotGetObjectFromNodeMoving, zap.String("node", hex.EncodeToString(n.PublicKey())), zap.Stringer("object", parentAddress), zap.Error(err))
 			p.replicator.HandleReplicationTask(ctx, replicator.Task{
 				NumCopies: 1,
 				Addr:      objInfo.Address,
@@ -299,7 +299,7 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info
 		for i := range resolved {
 			found = append(found, i)
 		}
-		p.log.Error(logs.PolicerCouldNotRestoreObjectNotEnoughChunks, zap.Stringer("object", parentAddress), zap.Uint32s("found_chunks", found))
+		p.log.Error(ctx, logs.PolicerCouldNotRestoreObjectNotEnoughChunks, zap.Stringer("object", parentAddress), zap.Uint32s("found_chunks", found))
 		return
 	}
 	p.restoreECObject(ctx, objInfo, parentAddress, nodes, resolved, chunkIDs, cnr)
@@ -310,7 +310,7 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info,
 ) {
 	c, err := erasurecode.NewConstructor(int(cnr.PlacementPolicy().ReplicaDescriptor(0).GetECDataCount()), int(cnr.PlacementPolicy().ReplicaDescriptor(0).GetECParityCount()))
 	if err != nil {
-		p.log.Error(logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
+		p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
 		return
 	}
 	parts := p.collectExistedChunks(ctx, objInfo, existedChunks, parentAddress, chunkIDs)
@@ -319,7 +319,7 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info,
 	}
 	key, err := p.keyStorage.GetKey(nil)
 	if err != nil {
-		p.log.Error(logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
+		p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
 		return
 	}
 	required := make([]bool, len(parts))
@@ -329,7 +329,7 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info,
 		}
 	}
 	if err := c.ReconstructParts(parts, required, key); err != nil {
-		p.log.Error(logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
+		p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
 		return
 	}
 	for idx, part := range parts {
@@ -377,7 +377,7 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I
 				if err == nil {
 					break
 				}
-				p.log.Warn(logs.PolicerCouldNotGetChunk, zap.Stringer("object", parentAddress), zap.Stringer("chunkID", objID), zap.Error(err), zap.String("node", hex.EncodeToString(node.PublicKey())))
+				p.log.Warn(ctx, logs.PolicerCouldNotGetChunk, zap.Stringer("object", parentAddress), zap.Stringer("chunkID", objID), zap.Error(err), zap.String("node", hex.EncodeToString(node.PublicKey())))
 			}
 			if obj != nil {
 				parts[idx] = obj
@@ -386,7 +386,7 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I
 		})
 	}
 	if err := errGroup.Wait(); err != nil {
-		p.log.Error(logs.PolicerCouldNotGetChunks, zap.Stringer("object", parentAddress), zap.Error(err))
+		p.log.Error(ctx, logs.PolicerCouldNotGetChunks, zap.Stringer("object", parentAddress), zap.Error(err))
 		return nil
 	}
 	return parts
diff --git a/pkg/services/policer/option.go b/pkg/services/policer/option.go
index 336f7a0ab..5d59604c2 100644
--- a/pkg/services/policer/option.go
+++ b/pkg/services/policer/option.go
@@ -91,7 +91,7 @@ type cfg struct {
 
 func defaultCfg() *cfg {
 	return &cfg{
-		log:           &logger.Logger{Logger: zap.L()},
+		log:           logger.NewLoggerWrapper(zap.L()),
 		batchSize:     10,
 		cacheSize:     1024, // 1024 * address size = 1024 * 64 = 64 MiB
 		sleepDuration: 1 * time.Second,
diff --git a/pkg/services/policer/policer.go b/pkg/services/policer/policer.go
index 363c0b922..4e8bacfec 100644
--- a/pkg/services/policer/policer.go
+++ b/pkg/services/policer/policer.go
@@ -4,7 +4,6 @@ import (
 	"sync"
 	"time"
 
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	lru "github.com/hashicorp/golang-lru/v2"
 	"go.uber.org/zap"
@@ -55,7 +54,7 @@ func New(opts ...Option) *Policer {
 		opts[i](c)
 	}
 
-	c.log = &logger.Logger{Logger: c.log.With(zap.String("component", "Object Policer"))}
+	c.log = c.log.With(zap.String("component", "Object Policer"))
 
 	cache, err := lru.New[oid.Address, time.Time](int(c.cacheSize))
 	if err != nil {
diff --git a/pkg/services/policer/process.go b/pkg/services/policer/process.go
index a5ebb0010..80a87ade9 100644
--- a/pkg/services/policer/process.go
+++ b/pkg/services/policer/process.go
@@ -14,7 +14,7 @@ import (
 
 func (p *Policer) Run(ctx context.Context) {
 	p.shardPolicyWorker(ctx)
-	p.log.Info(logs.PolicerRoutineStopped)
+	p.log.Info(ctx, logs.PolicerRoutineStopped)
 }
 
 func (p *Policer) shardPolicyWorker(ctx context.Context) {
@@ -33,7 +33,7 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) {
 				time.Sleep(p.sleepDuration) // finished whole cycle, sleep a bit
 				continue
 			}
-			p.log.Warn(logs.PolicerFailureAtObjectSelectForReplication, zap.Error(err))
+			p.log.Warn(ctx, logs.PolicerFailureAtObjectSelectForReplication, zap.Error(err))
 		}
 
 		skipMap := newSkipMap()
@@ -59,7 +59,7 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) {
 					if p.objsInWork.add(addr.Address) {
 						err := p.processObject(ctx, addr)
 						if err != nil && !skipMap.addSeenError(addr.Address.Container(), err) {
-							p.log.Error(logs.PolicerUnableToProcessObj,
+							p.log.Error(ctx, logs.PolicerUnableToProcessObj,
 								zap.Stringer("object", addr.Address),
 								zap.String("error", err.Error()))
 						}
@@ -69,7 +69,7 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) {
 					}
 				})
 				if err != nil {
-					p.log.Warn(logs.PolicerPoolSubmission, zap.Error(err))
+					p.log.Warn(ctx, logs.PolicerPoolSubmission, zap.Error(err))
 				}
 			}
 		}
diff --git a/pkg/services/replicator/process.go b/pkg/services/replicator/process.go
index 7e5c6e093..2120312f6 100644
--- a/pkg/services/replicator/process.go
+++ b/pkg/services/replicator/process.go
@@ -27,7 +27,7 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T
 	p.metrics.IncInFlightRequest()
 	defer p.metrics.DecInFlightRequest()
 	defer func() {
-		p.log.Debug(logs.ReplicatorFinishWork,
+		p.log.Debug(ctx, logs.ReplicatorFinishWork,
 			zap.Uint32("amount of unfinished replicas", task.NumCopies),
 		)
 	}()
@@ -43,7 +43,7 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T
 		var err error
 		task.Obj, err = engine.Get(ctx, p.localStorage, task.Addr)
 		if err != nil {
-			p.log.Error(logs.ReplicatorCouldNotGetObjectFromLocalStorage,
+			p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromLocalStorage,
 				zap.Stringer("object", task.Addr),
 				zap.Error(err),
 				zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
@@ -75,11 +75,11 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T
 		cancel()
 
 		if err != nil {
-			log.Error(logs.ReplicatorCouldNotReplicateObject,
+			log.Error(ctx, logs.ReplicatorCouldNotReplicateObject,
 				zap.String("error", err.Error()),
 			)
 		} else {
-			log.Debug(logs.ReplicatorObjectSuccessfullyReplicated)
+			log.Debug(ctx, logs.ReplicatorObjectSuccessfullyReplicated)
 
 			task.NumCopies--
 
diff --git a/pkg/services/replicator/pull.go b/pkg/services/replicator/pull.go
index 7e7090237..5ce929342 100644
--- a/pkg/services/replicator/pull.go
+++ b/pkg/services/replicator/pull.go
@@ -22,7 +22,7 @@ func (p *Replicator) HandlePullTask(ctx context.Context, task Task) {
 	p.metrics.IncInFlightRequest()
 	defer p.metrics.DecInFlightRequest()
 	defer func() {
-		p.log.Debug(logs.ReplicatorFinishWork, zap.String("type", "pull"))
+		p.log.Debug(ctx, logs.ReplicatorFinishWork, zap.String("type", "pull"))
 	}()
 
 	ctx, span := tracing.StartSpanFromContext(ctx, "Replicator.HandlePullTask",
@@ -48,7 +48,7 @@ func (p *Replicator) HandlePullTask(ctx context.Context, task Task) {
 			endpoints = append(endpoints, s)
 			return false
 		})
-		p.log.Error(logs.ReplicatorCouldNotGetObjectFromRemoteStorage,
+		p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromRemoteStorage,
 			zap.Stringer("object", task.Addr),
 			zap.Error(err),
 			zap.Strings("endpoints", endpoints),
@@ -56,7 +56,7 @@ func (p *Replicator) HandlePullTask(ctx context.Context, task Task) {
 	}
 
 	if obj == nil {
-		p.log.Error(logs.ReplicatorCouldNotGetObjectFromRemoteStorage,
+		p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromRemoteStorage,
 			zap.Stringer("object", task.Addr),
 			zap.Error(errFailedToGetObjectFromAnyNode),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
@@ -65,7 +65,7 @@ func (p *Replicator) HandlePullTask(ctx context.Context, task Task) {
 
 	err := engine.Put(ctx, p.localStorage, obj, containerCore.IsIndexedContainer(task.Container))
 	if err != nil {
-		p.log.Error(logs.ReplicatorCouldNotPutObjectToLocalStorage,
+		p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage,
 			zap.Stringer("object", task.Addr),
 			zap.Error(err),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
diff --git a/pkg/services/replicator/put.go b/pkg/services/replicator/put.go
index 537833516..489f66ae5 100644
--- a/pkg/services/replicator/put.go
+++ b/pkg/services/replicator/put.go
@@ -20,7 +20,7 @@ func (p *Replicator) HandleLocalPutTask(ctx context.Context, task Task) {
 	p.metrics.IncInFlightRequest()
 	defer p.metrics.DecInFlightRequest()
 	defer func() {
-		p.log.Debug(logs.ReplicatorFinishWork, zap.String("type", "pull"))
+		p.log.Debug(ctx, logs.ReplicatorFinishWork, zap.String("type", "pull"))
 	}()
 
 	ctx, span := tracing.StartSpanFromContext(ctx, "Replicator.HandleLocalPutTask",
@@ -31,7 +31,7 @@ func (p *Replicator) HandleLocalPutTask(ctx context.Context, task Task) {
 	defer span.End()
 
 	if task.Obj == nil {
-		p.log.Error(logs.ReplicatorCouldNotPutObjectToLocalStorage,
+		p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage,
 			zap.Stringer("object", task.Addr),
 			zap.Error(errObjectNotDefined),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
@@ -40,7 +40,7 @@ func (p *Replicator) HandleLocalPutTask(ctx context.Context, task Task) {
 
 	err := engine.Put(ctx, p.localStorage, task.Obj, containerCore.IsIndexedContainer(task.Container))
 	if err != nil {
-		p.log.Error(logs.ReplicatorCouldNotPutObjectToLocalStorage,
+		p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage,
 			zap.Stringer("object", task.Addr),
 			zap.Error(err),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
diff --git a/pkg/services/replicator/replicator.go b/pkg/services/replicator/replicator.go
index f2f86daf0..6910fa5af 100644
--- a/pkg/services/replicator/replicator.go
+++ b/pkg/services/replicator/replicator.go
@@ -45,7 +45,7 @@ func New(opts ...Option) *Replicator {
 		opts[i](c)
 	}
 
-	c.log = &logger.Logger{Logger: c.log.With(zap.String("component", "Object Replicator"))}
+	c.log = c.log.With(zap.String("component", "Object Replicator"))
 
 	return &Replicator{
 		cfg: c,
diff --git a/pkg/services/session/executor.go b/pkg/services/session/executor.go
index e914119b4..12b221613 100644
--- a/pkg/services/session/executor.go
+++ b/pkg/services/session/executor.go
@@ -33,7 +33,7 @@ func NewExecutionService(exec ServiceExecutor, respSvc *response.Service, l *log
 }
 
 func (s *executorSvc) Create(ctx context.Context, req *session.CreateRequest) (*session.CreateResponse, error) {
-	s.log.Debug(logs.ServingRequest,
+	s.log.Debug(ctx, logs.ServingRequest,
 		zap.String("component", "SessionService"),
 		zap.String("request", "Create"),
 	)
diff --git a/pkg/services/session/storage/persistent/options.go b/pkg/services/session/storage/persistent/options.go
index 411734ea1..60db97f90 100644
--- a/pkg/services/session/storage/persistent/options.go
+++ b/pkg/services/session/storage/persistent/options.go
@@ -19,7 +19,7 @@ type Option func(*cfg)
 
 func defaultCfg() *cfg {
 	return &cfg{
-		l:       &logger.Logger{Logger: zap.L()},
+		l:       logger.NewLoggerWrapper(zap.L()),
 		timeout: 100 * time.Millisecond,
 	}
 }
diff --git a/pkg/services/session/storage/persistent/storage.go b/pkg/services/session/storage/persistent/storage.go
index 71711e371..d312ea0ea 100644
--- a/pkg/services/session/storage/persistent/storage.go
+++ b/pkg/services/session/storage/persistent/storage.go
@@ -1,6 +1,7 @@
 package persistent
 
 import (
+	"context"
 	"crypto/aes"
 	"crypto/cipher"
 	"encoding/hex"
@@ -105,7 +106,7 @@ func (s *TokenStore) Get(ownerID user.ID, tokenID []byte) (t *storage.PrivateTok
 		return err
 	})
 	if err != nil {
-		s.l.Error(logs.PersistentCouldNotGetSessionFromPersistentStorage,
+		s.l.Error(context.Background(), logs.PersistentCouldNotGetSessionFromPersistentStorage,
 			zap.Error(err),
 			zap.Stringer("ownerID", ownerID),
 			zap.String("tokenID", hex.EncodeToString(tokenID)),
@@ -130,7 +131,7 @@ func (s *TokenStore) RemoveOld(epoch uint64) {
 				if epochFromToken(v) <= epoch {
 					err = c.Delete()
 					if err != nil {
-						s.l.Error(logs.PersistentCouldNotDeleteSToken,
+						s.l.Error(context.Background(), logs.PersistentCouldNotDeleteSToken,
 							zap.String("token_id", hex.EncodeToString(k)),
 						)
 					}
@@ -141,7 +142,7 @@ func (s *TokenStore) RemoveOld(epoch uint64) {
 		})
 	})
 	if err != nil {
-		s.l.Error(logs.PersistentCouldNotCleanUpExpiredTokens,
+		s.l.Error(context.Background(), logs.PersistentCouldNotCleanUpExpiredTokens,
 			zap.Uint64("epoch", epoch),
 		)
 	}
diff --git a/pkg/services/tree/redirect.go b/pkg/services/tree/redirect.go
index 5bde3ae38..416a0fafe 100644
--- a/pkg/services/tree/redirect.go
+++ b/pkg/services/tree/redirect.go
@@ -54,7 +54,7 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo
 				return false
 			}
 
-			s.log.Debug(logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint),
+			s.log.Debug(ctx, logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint),
 				zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 
 			called = true
diff --git a/pkg/services/tree/replicator.go b/pkg/services/tree/replicator.go
index 84e376cf7..0c5bde078 100644
--- a/pkg/services/tree/replicator.go
+++ b/pkg/services/tree/replicator.go
@@ -58,7 +58,7 @@ func (s *Service) localReplicationWorker(ctx context.Context) {
 
 			err := s.forest.TreeApply(ctx, op.cid, op.treeID, &op.Move, false)
 			if err != nil {
-				s.log.Error(logs.TreeFailedToApplyReplicatedOperation,
+				s.log.Error(ctx, logs.TreeFailedToApplyReplicatedOperation,
 					zap.String("err", err.Error()))
 			}
 			span.End()
@@ -116,11 +116,11 @@ func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req
 
 	if lastErr != nil {
 		if errors.Is(lastErr, errRecentlyFailed) {
-			s.log.Debug(logs.TreeDoNotSendUpdateToTheNode,
+			s.log.Debug(ctx, logs.TreeDoNotSendUpdateToTheNode,
 				zap.String("last_error", lastErr.Error()),
 				zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 		} else {
-			s.log.Warn(logs.TreeFailedToSentUpdateToTheNode,
+			s.log.Warn(ctx, logs.TreeFailedToSentUpdateToTheNode,
 				zap.String("last_error", lastErr.Error()),
 				zap.String("address", lastAddr),
 				zap.String("key", hex.EncodeToString(n.PublicKey())),
@@ -154,7 +154,7 @@ func (s *Service) replicateLoop(ctx context.Context) {
 			start := time.Now()
 			err := s.replicate(op)
 			if err != nil {
-				s.log.Error(logs.TreeErrorDuringReplication,
+				s.log.Error(ctx, logs.TreeErrorDuringReplication,
 					zap.String("err", err.Error()),
 					zap.Stringer("cid", op.cid),
 					zap.String("treeID", op.treeID))
diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go
index b63338d25..2df3c08e6 100644
--- a/pkg/services/tree/service.go
+++ b/pkg/services/tree/service.go
@@ -63,7 +63,7 @@ func New(opts ...Option) *Service {
 	}
 
 	if s.log == nil {
-		s.log = &logger.Logger{Logger: zap.NewNop()}
+		s.log = logger.NewLoggerWrapper(zap.NewNop())
 	}
 
 	s.cache.init(s.key, s.ds)
diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go
index 2c6deeb78..e2249c9fb 100644
--- a/pkg/services/tree/sync.go
+++ b/pkg/services/tree/sync.go
@@ -92,7 +92,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
 	for _, tid := range treesToSync {
 		h, err := s.forest.TreeLastSyncHeight(ctx, cid, tid)
 		if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
-			s.log.Warn(logs.TreeCouldNotGetLastSynchronizedHeightForATree,
+			s.log.Warn(ctx, logs.TreeCouldNotGetLastSynchronizedHeightForATree,
 				zap.Stringer("cid", cid),
 				zap.String("tree", tid))
 			continue
@@ -100,7 +100,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
 		newHeight := s.synchronizeTree(ctx, cid, h, tid, nodes)
 		if h < newHeight {
 			if err := s.forest.TreeUpdateLastSyncHeight(ctx, cid, tid, newHeight); err != nil {
-				s.log.Warn(logs.TreeCouldNotUpdateLastSynchronizedHeightForATree,
+				s.log.Warn(ctx, logs.TreeCouldNotUpdateLastSynchronizedHeightForATree,
 					zap.Stringer("cid", cid),
 					zap.String("tree", tid))
 			}
@@ -251,7 +251,7 @@ func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string,
 func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
 	treeID string, nodes []netmapSDK.NodeInfo,
 ) uint64 {
-	s.log.Debug(logs.TreeSynchronizeTree, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.Uint64("from", from))
+	s.log.Debug(ctx, logs.TreeSynchronizeTree, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.Uint64("from", from))
 
 	errGroup, egCtx := errgroup.WithContext(ctx)
 	const workersCount = 1024
@@ -282,20 +282,20 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
 			n.IterateNetworkEndpoints(func(addr string) bool {
 				var a network.Address
 				if err := a.FromString(addr); err != nil {
-					s.log.Warn(logs.TreeFailedToParseAddressForTreeSynchronization, zap.Error(err), zap.String("address", addr))
+					s.log.Warn(ctx, logs.TreeFailedToParseAddressForTreeSynchronization, zap.Error(err), zap.String("address", addr))
 					return false
 				}
 
 				cc, err := s.createConnection(a)
 				if err != nil {
-					s.log.Warn(logs.TreeFailedToConnectForTreeSynchronization, zap.Error(err), zap.String("address", addr))
+					s.log.Warn(ctx, logs.TreeFailedToConnectForTreeSynchronization, zap.Error(err), zap.String("address", addr))
 					return false
 				}
 				defer cc.Close()
 
 				err = s.startStream(egCtx, cid, treeID, from, cc, nodeOperationStreams[i])
 				if err != nil {
-					s.log.Warn(logs.TreeFailedToRunTreeSynchronizationForSpecificNode, zap.Error(err), zap.String("address", addr))
+					s.log.Warn(ctx, logs.TreeFailedToRunTreeSynchronizationForSpecificNode, zap.Error(err), zap.String("address", addr))
 				}
 				nodeSynced = err == nil
 				return true
@@ -309,7 +309,7 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
 	}
 	if err := errGroup.Wait(); err != nil {
 		allNodesSynced.Store(false)
-		s.log.Warn(logs.TreeFailedToRunTreeSynchronizationOverAllNodes, zap.Error(err))
+		s.log.Warn(ctx, logs.TreeFailedToRunTreeSynchronizationOverAllNodes, zap.Error(err))
 	}
 
 	newHeight := minStreamedLastHeight
@@ -376,13 +376,13 @@ func (s *Service) syncLoop(ctx context.Context) {
 			return
 		case <-s.syncChan:
 			ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.sync")
-			s.log.Info(logs.TreeSyncingTrees)
+			s.log.Info(ctx, logs.TreeSyncingTrees)
 
 			start := time.Now()
 
 			cnrs, err := s.cfg.cnrSource.List()
 			if err != nil {
-				s.log.Error(logs.TreeCouldNotFetchContainers, zap.Error(err))
+				s.log.Error(ctx, logs.TreeCouldNotFetchContainers, zap.Error(err))
 				s.metrics.AddSyncDuration(time.Since(start), false)
 				span.End()
 				break
@@ -394,7 +394,7 @@ func (s *Service) syncLoop(ctx context.Context) {
 
 			s.removeContainers(ctx, newMap)
 
-			s.log.Info(logs.TreeTreesHaveBeenSynchronized)
+			s.log.Info(ctx, logs.TreeTreesHaveBeenSynchronized)
 
 			s.metrics.AddSyncDuration(time.Since(start), true)
 			span.End()
@@ -414,19 +414,19 @@ func (s *Service) syncContainers(ctx context.Context, cnrs []cid.ID) {
 
 		err := s.syncPool.Submit(func() {
 			defer wg.Done()
-			s.log.Debug(logs.TreeSyncingContainerTrees, zap.Stringer("cid", cnr))
+			s.log.Debug(ctx, logs.TreeSyncingContainerTrees, zap.Stringer("cid", cnr))
 
 			err := s.synchronizeAllTrees(ctx, cnr)
 			if err != nil {
-				s.log.Error(logs.TreeCouldNotSyncTrees, zap.Stringer("cid", cnr), zap.Error(err))
+				s.log.Error(ctx, logs.TreeCouldNotSyncTrees, zap.Stringer("cid", cnr), zap.Error(err))
 				return
 			}
 
-			s.log.Debug(logs.TreeContainerTreesHaveBeenSynced, zap.Stringer("cid", cnr))
+			s.log.Debug(ctx, logs.TreeContainerTreesHaveBeenSynced, zap.Stringer("cid", cnr))
 		})
 		if err != nil {
 			wg.Done()
-			s.log.Error(logs.TreeCouldNotQueryTreesForSynchronization,
+			s.log.Error(ctx, logs.TreeCouldNotQueryTreesForSynchronization,
 				zap.Stringer("cid", cnr),
 				zap.Error(err))
 			if errors.Is(err, ants.ErrPoolClosed) {
@@ -452,7 +452,7 @@ func (s *Service) removeContainers(ctx context.Context, newContainers map[cid.ID
 
 		existed, err := containerCore.WasRemoved(s.cnrSource, cnr)
 		if err != nil {
-			s.log.Error(logs.TreeCouldNotCheckIfContainerExisted,
+			s.log.Error(ctx, logs.TreeCouldNotCheckIfContainerExisted,
 				zap.Stringer("cid", cnr),
 				zap.Error(err))
 		} else if existed {
@@ -464,11 +464,11 @@ func (s *Service) removeContainers(ctx context.Context, newContainers map[cid.ID
 	}
 
 	for _, cnr := range removed {
-		s.log.Debug(logs.TreeRemovingRedundantTrees, zap.Stringer("cid", cnr))
+		s.log.Debug(ctx, logs.TreeRemovingRedundantTrees, zap.Stringer("cid", cnr))
 
 		err := s.DropTree(ctx, cnr, "")
 		if err != nil {
-			s.log.Error(logs.TreeCouldNotRemoveRedundantTree,
+			s.log.Error(ctx, logs.TreeCouldNotRemoveRedundantTree,
 				zap.Stringer("cid", cnr),
 				zap.Error(err))
 		}
@@ -482,7 +482,7 @@ func (s *Service) containersToSync(cnrs []cid.ID) (map[cid.ID]struct{}, []cid.ID
 	for _, cnr := range cnrs {
 		_, pos, err := s.getContainerNodes(cnr)
 		if err != nil {
-			s.log.Error(logs.TreeCouldNotCalculateContainerNodes,
+			s.log.Error(context.Background(), logs.TreeCouldNotCalculateContainerNodes,
 				zap.Stringer("cid", cnr),
 				zap.Error(err))
 			continue
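
The syncContainers hunk hands each container to a worker pool and must release the WaitGroup itself whenever Submit fails before the task ever runs. A hedged sketch of that submission pattern, assuming github.com/panjf2000/ants/v2 (suggested by the ants.ErrPoolClosed check above); container IDs are plain strings here for brevity, and the break on a closed pool is only an illustrative choice.

package main

import (
	"errors"
	"fmt"
	"sync"

	"github.com/panjf2000/ants/v2"
	"go.uber.org/zap"
)

func syncContainers(log *zap.Logger, pool *ants.Pool, cnrs []string) {
	var wg sync.WaitGroup
	for _, cnr := range cnrs {
		wg.Add(1)
		cnr := cnr
		err := pool.Submit(func() {
			defer wg.Done()
			log.Debug("syncing container trees", zap.String("cid", cnr))
			// synchronizeAllTrees would run here in the real service.
		})
		if err != nil {
			wg.Done() // Submit never ran the task, so release the counter manually.
			log.Error("could not queue trees for synchronization",
				zap.String("cid", cnr), zap.Error(err))
			if errors.Is(err, ants.ErrPoolClosed) {
				break
			}
		}
	}
	wg.Wait()
}

func main() {
	z, _ := zap.NewDevelopment()
	pool, err := ants.NewPool(4)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer pool.Release()
	syncContainers(z, pool, []string{"cnr-1", "cnr-2"})
}
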
diff --git a/pkg/util/logger/logger.go b/pkg/util/logger/logger.go
index d4ac2ab02..b3a1b9b94 100644
--- a/pkg/util/logger/logger.go
+++ b/pkg/util/logger/logger.go
@@ -184,6 +184,10 @@ func (l *Logger) WithOptions(options ...zap.Option) {
 	l.z = l.z.WithOptions(options...)
 }
 
+func (l *Logger) With(fields ...zap.Field) *Logger {
+	return &Logger{z: l.z.With(fields...)}
+}
+
 func NewLoggerWrapper(z *zap.Logger) *Logger {
 	return &Logger{
 		z: z,