diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index 3be33a2a1..cc106cf95 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -126,10 +126,10 @@ type shardCfg struct {
 	subStorages []subStorageCfg
 
 	gcCfg struct {
-		removerBatchSize             int
-		removerSleepInterval         time.Duration
-		expiredCollectorBatchSize    int
-		expiredCollectorWorkersCount int
+		removerBatchSize            int
+		removerSleepInterval        time.Duration
+		expiredCollectorBatchSize   int
+		expiredCollectorWorkerCount int
 	}
 
 	writecacheCfg struct {
@@ -256,7 +256,7 @@ func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg,
 		wc.maxBatchDelay = writeCacheCfg.BoltDB().MaxBatchDelay()
 		wc.maxObjSize = writeCacheCfg.MaxObjectSize()
 		wc.smallObjectSize = writeCacheCfg.SmallObjectSize()
-		wc.flushWorkerCount = writeCacheCfg.WorkersNumber()
+		wc.flushWorkerCount = writeCacheCfg.WorkerCount()
 		wc.sizeLimit = writeCacheCfg.SizeLimit()
 		wc.noSync = writeCacheCfg.NoSync()
 		wc.gcInterval = writeCacheCfg.GCInterval()
@@ -328,7 +328,7 @@ func (a *applicationConfiguration) setGCConfig(newConfig *shardCfg, oldConfig *s
 	newConfig.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize()
 	newConfig.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval()
 	newConfig.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize()
-	newConfig.gcCfg.expiredCollectorWorkersCount = gcCfg.ExpiredCollectorWorkersCount()
+	newConfig.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount()
 }
 
 // internals contains application-specific internals that are created
@@ -888,7 +888,7 @@ func (c *cfg) getShardOpts(shCfg shardCfg) shardOptsWithID {
 		shard.WithRemoverBatchSize(shCfg.gcCfg.removerBatchSize),
 		shard.WithGCRemoverSleepInterval(shCfg.gcCfg.removerSleepInterval),
 		shard.WithExpiredCollectorBatchSize(shCfg.gcCfg.expiredCollectorBatchSize),
-		shard.WithExpiredCollectorWorkersCount(shCfg.gcCfg.expiredCollectorWorkersCount),
+		shard.WithExpiredCollectorWorkerCount(shCfg.gcCfg.expiredCollectorWorkerCount),
 		shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
 			pool, err := ants.NewPool(sz)
 			fatalOnErr(err)
diff --git a/cmd/frostfs-node/config/engine/config_test.go b/cmd/frostfs-node/config/engine/config_test.go
index 6b7c268ce..665f70bcb 100644
--- a/cmd/frostfs-node/config/engine/config_test.go
+++ b/cmd/frostfs-node/config/engine/config_test.go
@@ -74,7 +74,7 @@ func TestEngineSection(t *testing.T) {
 				require.Equal(t, "tmp/0/cache", wc.Path())
 				require.EqualValues(t, 16384, wc.SmallObjectSize())
 				require.EqualValues(t, 134217728, wc.MaxObjectSize())
-				require.EqualValues(t, 30, wc.WorkersNumber())
+				require.EqualValues(t, 30, wc.WorkerCount())
 				require.EqualValues(t, 3221225472, wc.SizeLimit())
 
 				require.Equal(t, "tmp/0/meta", meta.Path())
@@ -108,7 +108,7 @@ func TestEngineSection(t *testing.T) {
 				require.EqualValues(t, 150, gc.RemoverBatchSize())
 				require.Equal(t, 2*time.Minute, gc.RemoverSleepInterval())
 				require.Equal(t, 1500, gc.ExpiredCollectorBatchSize())
-				require.Equal(t, 15, gc.ExpiredCollectorWorkersCount())
+				require.Equal(t, 15, gc.ExpiredCollectorWorkerCount())
 
 				require.Equal(t, false, sc.RefillMetabase())
 				require.Equal(t, mode.ReadOnly, sc.Mode())
@@ -125,7 +125,7 @@ func TestEngineSection(t *testing.T) {
 				require.Equal(t, "tmp/1/cache", wc.Path())
 				require.EqualValues(t, 16384, wc.SmallObjectSize())
 				require.EqualValues(t, 134217728, wc.MaxObjectSize())
-				require.EqualValues(t, 30, wc.WorkersNumber())
+				require.EqualValues(t, 30, wc.WorkerCount())
 				require.EqualValues(t, 4294967296, wc.SizeLimit())
 
 				require.Equal(t, "tmp/1/meta", meta.Path())
@@ -157,7 +157,7 @@ func TestEngineSection(t *testing.T) {
 				require.EqualValues(t, 200, gc.RemoverBatchSize())
 				require.Equal(t, 5*time.Minute, gc.RemoverSleepInterval())
 				require.Equal(t, gcconfig.ExpiredCollectorBatchSizeDefault, gc.ExpiredCollectorBatchSize())
-				require.Equal(t, gcconfig.ExpiredCollectorWorkersCountDefault, gc.ExpiredCollectorWorkersCount())
+				require.Equal(t, gcconfig.ExpiredCollectorWorkersCountDefault, gc.ExpiredCollectorWorkerCount())
 
 				require.Equal(t, true, sc.RefillMetabase())
 				require.Equal(t, mode.ReadWrite, sc.Mode())
diff --git a/cmd/frostfs-node/config/engine/shard/gc/config.go b/cmd/frostfs-node/config/engine/shard/gc/config.go
index 0500697c8..8cb90d3ff 100644
--- a/cmd/frostfs-node/config/engine/shard/gc/config.go
+++ b/cmd/frostfs-node/config/engine/shard/gc/config.go
@@ -63,14 +63,14 @@ func (x *Config) RemoverSleepInterval() time.Duration {
 	return RemoverSleepIntervalDefault
 }
 
-// ExpiredCollectorWorkersCount returns the value of "expired_collector_workers_count"
+// ExpiredCollectorWorkerCount returns the value of "expired_collector_worker_count"
 // config parameter.
 //
 // Returns ExpiredCollectorWorkersCountDefault if the value is not a positive number.
-func (x *Config) ExpiredCollectorWorkersCount() int {
+func (x *Config) ExpiredCollectorWorkerCount() int {
 	s := config.IntSafe(
 		(*config.Config)(x),
-		"expired_collector_workers_count",
+		"expired_collector_worker_count",
 	)
 
 	if s > 0 {
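
Both renamed getters keep the existing read-or-default behaviour: `config.IntSafe` yields zero for a missing or non-numeric key, so any non-positive value falls back to the default constant, which keeps its old name (`ExpiredCollectorWorkersCountDefault`) in this change. A minimal sketch of that pattern, using a hypothetical helper name and assuming it sits in the same package as the getter above:

```go
// positiveIntOrDefault is a hypothetical helper (not part of this change)
// spelling out the read-or-default pattern behind ExpiredCollectorWorkerCount:
// a missing or non-positive "expired_collector_worker_count" falls back to
// the compile-time default.
func positiveIntOrDefault(c *config.Config, key string, def int) int {
	if v := config.IntSafe(c, key); v > 0 {
		return int(v)
	}
	return def
}
```
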
diff --git a/cmd/frostfs-node/config/engine/shard/writecache/config.go b/cmd/frostfs-node/config/engine/shard/writecache/config.go
index 504fe3ca2..d0fd4bf75 100644
--- a/cmd/frostfs-node/config/engine/shard/writecache/config.go
+++ b/cmd/frostfs-node/config/engine/shard/writecache/config.go
@@ -106,13 +106,13 @@ func (x *Config) MaxObjectSize() uint64 {
 	return MaxSizeDefault
 }
 
-// WorkersNumber returns the value of "workers_number" config parameter.
+// WorkerCount returns the value of "flush_worker_count" config parameter.
 //
 // Returns WorkersNumberDefault if the value is not a positive number.
-func (x *Config) WorkersNumber() int {
+func (x *Config) WorkerCount() int {
 	c := config.IntSafe(
 		(*config.Config)(x),
-		"workers_number",
+		"flush_worker_count",
 	)
 
 	if c > 0 {
diff --git a/cmd/frostfs-node/config/object/config.go b/cmd/frostfs-node/config/object/config.go
index f7a33b5e0..876dc3ef1 100644
--- a/cmd/frostfs-node/config/object/config.go
+++ b/cmd/frostfs-node/config/object/config.go
@@ -28,11 +28,11 @@ func Put(c *config.Config) PutConfig {
 	}
 }
 
-// PoolSizeRemote returns the value of "pool_size_remote" config parameter.
+// PoolSizeRemote returns the value of "remote_pool_size" config parameter.
 //
 // Returns PutPoolSizeDefault if the value is not a positive number.
 func (g PutConfig) PoolSizeRemote() int {
-	v := config.Int(g.cfg, "pool_size_remote")
+	v := config.Int(g.cfg, "remote_pool_size")
 	if v > 0 {
 		return int(v)
 	}
@@ -40,11 +40,11 @@ func (g PutConfig) PoolSizeRemote() int {
 	return PutPoolSizeDefault
 }
 
-// PoolSizeLocal returns the value of "pool_size_local" config parameter.
+// PoolSizeLocal returns the value of "local_pool_size" config parameter.
 //
 // Returns PutPoolSizeDefault if the value is not a positive number.
 func (g PutConfig) PoolSizeLocal() int {
-	v := config.Int(g.cfg, "pool_size_local")
+	v := config.Int(g.cfg, "local_pool_size")
 	if v > 0 {
 		return int(v)
 	}
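
Since only the key names change here and the lookup still returns `PutPoolSizeDefault` for anything non-positive, a configuration that keeps the old `pool_size_remote`/`pool_size_local` keys is silently ignored and drops to the default. Purely as an illustration (not part of this diff), a startup check along these lines could surface such stale keys, assuming the object config's `*config.Config` and the `go.uber.org/zap` logger used elsewhere in the node:

```go
// warnOnStalePutKeys is a hypothetical helper: it only illustrates how the
// pre-rename keys could be detected after this change, now that their values
// are no longer read by PoolSizeRemote/PoolSizeLocal.
func warnOnStalePutKeys(c *config.Config, log *zap.Logger) {
	for _, oldKey := range []string{"pool_size_remote", "pool_size_local"} {
		if config.IntSafe(c, oldKey) > 0 {
			log.Warn("deprecated object.put key is set and ignored",
				zap.String("key", oldKey))
		}
	}
}
```
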
diff --git a/config/example/node.env b/config/example/node.env
index dda740cf1..f4dc218a3 100644
--- a/config/example/node.env
+++ b/config/example/node.env
@@ -84,8 +84,8 @@ FROSTFS_REPLICATOR_PUT_TIMEOUT=15s
 FROSTFS_REPLICATOR_POOL_SIZE=10
 
 # Object service section
-FROSTFS_OBJECT_PUT_POOL_SIZE_REMOTE=100
-FROSTFS_OBJECT_PUT_POOL_SIZE_LOCAL=200
+FROSTFS_OBJECT_PUT_REMOTE_POOL_SIZE=100
+FROSTFS_OBJECT_PUT_LOCAL_POOL_SIZE=200
 FROSTFS_OBJECT_PUT_SKIP_SESSION_TOKEN_ISSUER_VERIFICATION=true
 FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME=10
 
@@ -103,7 +103,7 @@ FROSTFS_STORAGE_SHARD_0_WRITECACHE_NO_SYNC=true
 FROSTFS_STORAGE_SHARD_0_WRITECACHE_PATH=tmp/0/cache
 FROSTFS_STORAGE_SHARD_0_WRITECACHE_SMALL_OBJECT_SIZE=16384
 FROSTFS_STORAGE_SHARD_0_WRITECACHE_MAX_OBJECT_SIZE=134217728
-FROSTFS_STORAGE_SHARD_0_WRITECACHE_WORKERS_NUMBER=30
+FROSTFS_STORAGE_SHARD_0_WRITECACHE_FLUSH_WORKER_COUNT=30
 FROSTFS_STORAGE_SHARD_0_WRITECACHE_CAPACITY=3221225472
 ### Metabase config
 FROSTFS_STORAGE_SHARD_0_METABASE_PATH=tmp/0/meta
@@ -142,7 +142,7 @@ FROSTFS_STORAGE_SHARD_0_GC_REMOVER_SLEEP_INTERVAL=2m
 #### Limit of objects to be marked expired by the garbage collector
 FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_BATCH_SIZE=1500
 #### Limit of concurrent workers collecting expired objects by the garbage collector
-FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_WORKERS_COUNT=15
+FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_WORKER_COUNT=15
 
 ## 1 shard
 ### Flag to refill Metabase from BlobStor
@@ -154,7 +154,7 @@ FROSTFS_STORAGE_SHARD_1_WRITECACHE_ENABLED=true
 FROSTFS_STORAGE_SHARD_1_WRITECACHE_PATH=tmp/1/cache
 FROSTFS_STORAGE_SHARD_1_WRITECACHE_SMALL_OBJECT_SIZE=16384
 FROSTFS_STORAGE_SHARD_1_WRITECACHE_MAX_OBJECT_SIZE=134217728
-FROSTFS_STORAGE_SHARD_1_WRITECACHE_WORKERS_NUMBER=30
+FROSTFS_STORAGE_SHARD_1_WRITECACHE_FLUSH_WORKER_COUNT=30
 FROSTFS_STORAGE_SHARD_1_WRITECACHE_CAPACITY=4294967296
 ### Metabase config
 FROSTFS_STORAGE_SHARD_1_METABASE_PATH=tmp/1/meta
diff --git a/config/example/node.json b/config/example/node.json
index 1038d5e5c..c2e3d0a5d 100644
--- a/config/example/node.json
+++ b/config/example/node.json
@@ -129,8 +129,8 @@
       "tombstone_lifetime": 10
     },
     "put": {
-      "pool_size_remote": 100,
-      "pool_size_local": 200,
+      "remote_pool_size": 100,
+      "local_pool_size": 200,
       "skip_session_token_issuer_verification": true
     }
   },
@@ -147,7 +147,7 @@
           "path": "tmp/0/cache",
           "small_object_size": 16384,
           "max_object_size": 134217728,
-          "workers_number": 30,
+          "flush_worker_count": 30,
           "capacity": 3221225472
         },
         "metabase": {
@@ -190,7 +190,7 @@
           "remover_batch_size": 150,
           "remover_sleep_interval": "2m",
           "expired_collector_batch_size": 1500,
-          "expired_collector_workers_count": 15
+          "expired_collector_worker_count": 15
         }
       },
       "1": {
@@ -203,7 +203,7 @@
           "memcache_capacity": 2147483648,
           "small_object_size": 16384,
           "max_object_size": 134217728,
-          "workers_number": 30,
+          "flush_worker_count": 30,
           "capacity": 4294967296
         },
         "metabase": {
diff --git a/config/example/node.yaml b/config/example/node.yaml
index 8b2046e95..d04e122b6 100644
--- a/config/example/node.yaml
+++ b/config/example/node.yaml
@@ -108,8 +108,8 @@ object:
   delete:
     tombstone_lifetime: 10 # tombstone "local" lifetime in epochs
   put:
-    pool_size_remote: 100  # number of async workers for remote PUT operations
-    pool_size_local: 200  # number of async workers for local PUT operations
+    remote_pool_size: 100  # number of async workers for remote PUT operations
+    local_pool_size: 200  # number of async workers for local PUT operations
     skip_session_token_issuer_verification: true # session token issuer verification will be skipped if true
 
 storage:
@@ -126,7 +126,7 @@ storage:
         type: bbolt
         small_object_size: 16k  # size threshold for "small" objects which are cached in key-value DB, not in FS, bytes
         max_object_size: 134217728  # size threshold for "big" objects which bypass write-cache and go to the storage directly, bytes
-        workers_number: 30  # number of write-cache flusher threads
+        flush_worker_count: 30  # number of write-cache flusher threads
 
       metabase:
         perm: 0644  # permissions for metabase files(directories: +x for current user and group)
@@ -196,7 +196,7 @@ storage:
         remover_batch_size: 150  # number of objects to be removed by the garbage collector
         remover_sleep_interval: 2m  # frequency of the garbage collector invocation
         expired_collector_batch_size: 1500 # number of objects to be marked expired by the garbage collector
-        expired_collector_workers_count: 15 # number of concurrent workers collecting expired objects by the garbage collector
+        expired_collector_worker_count: 15 # number of concurrent workers collecting expired objects by the garbage collector
 
     1:
       writecache:
diff --git a/config/mainnet/config.yml b/config/mainnet/config.yml
index 7db476e55..d86ea451f 100644
--- a/config/mainnet/config.yml
+++ b/config/mainnet/config.yml
@@ -50,8 +50,8 @@ prometheus:
 
 object:
   put:
-    pool_size_remote: 100
-    pool_size_local: 100
+    remote_pool_size: 100
+    local_pool_size: 100
 
 morph:
   rpc_endpoint:
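
The env, JSON, and YAML examples above express the same keys; the env variant follows the prefix-plus-path convention visible in `config/example/node.env`, so each renamed key maps mechanically to its `FROSTFS_…` variable. A standalone sketch of that mapping (illustrative only; the real binding lives in the node's config loader):

```go
package main

import (
	"fmt"
	"strings"
)

// envName sketches how a dotted config path becomes the environment
// variable used in config/example/node.env: add the FROSTFS_ prefix,
// replace dots with underscores, upper-case the result.
func envName(path string) string {
	return "FROSTFS_" + strings.ToUpper(strings.ReplaceAll(path, ".", "_"))
}

func main() {
	fmt.Println(envName("object.put.remote_pool_size"))
	// FROSTFS_OBJECT_PUT_REMOTE_POOL_SIZE
	fmt.Println(envName("storage.shard.0.writecache.flush_worker_count"))
	// FROSTFS_STORAGE_SHARD_0_WRITECACHE_FLUSH_WORKER_COUNT
}
```
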
diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md
index 5e9f3caf7..71eabc042 100644
--- a/docs/storage-node-configuration.md
+++ b/docs/storage-node-configuration.md
@@ -243,7 +243,7 @@ gc:
   remover_batch_size: 200
   remover_sleep_interval: 5m
   expired_collector_batch_size: 500
-  expired_collector_workers_count: 5
+  expired_collector_worker_count: 5
 ```
 
 | Parameter                         | Type       | Default value | Description                                              |
@@ -251,7 +251,7 @@ gc:
 | `remover_batch_size`              | `int`      | `100`         | Amount of objects to grab in a single batch.             |
 | `remover_sleep_interval`          | `duration` | `1m`          | Time to sleep between iterations.                        |
 | `expired_collector_batch_size`    | `int`      | `500`         | Max amount of expired objects to grab in a single batch. |
-| `expired_collector_workers_count` | `int`      | `5`           | Max amount of concurrent expired objects workers.        |
+| `expired_collector_worker_count`  | `int`      | `5`           | Max amount of concurrent expired objects workers.        |
 
 ### `metabase` subsection
 
@@ -280,7 +280,7 @@ writecache:
   capacity: 4294967296
   small_object_size: 16384
   max_object_size: 134217728
-  workers_number: 30
+  flush_worker_count: 30
 ```
 
 | Parameter            | Type       | Default value | Description                                                                                                          |
@@ -290,7 +290,7 @@ writecache:
 | `capacity`           | `size`     | unrestricted  | Approximate maximum size of the writecache. If the writecache is full, objects are written to the blobstor directly. |
 | `small_object_size`  | `size`     | `32K`         | Maximum object size for "small" objects. This objects are stored in a key-value database instead of a file-system.   |
 | `max_object_size`    | `size`     | `64M`         | Maximum object size allowed to be stored in the writecache.                                                          |
-| `workers_number`     | `int`      | `20`          | Amount of background workers that move data from the writecache to the blobstor.                                     |
+| `flush_worker_count` | `int`      | `20`          | Amount of background workers that move data from the writecache to the blobstor.                                     |
 | `max_batch_size`     | `int`      | `1000`        | Maximum amount of small object `PUT` operations to perform in a single transaction.                                  |
 | `max_batch_delay`    | `duration` | `10ms`        | Maximum delay before a batch starts.                                                                                 |
 
@@ -415,7 +415,7 @@ replicator:
 | Parameter     | Type       | Default value                          | Description                                 |
 |---------------|------------|----------------------------------------|---------------------------------------------|
 | `put_timeout` | `duration` | `5s`                                   | Timeout for performing the `PUT` operation. |
-| `pool_size`   | `int`      | Equal to `object.put.pool_size_remote` | Maximum amount of concurrent replications.  |
+| `pool_size`   | `int`      | Equal to `object.put.remote_pool_size` | Maximum amount of concurrent replications.  |
 
 # `object` section
 Contains object-service related parameters.
@@ -423,14 +423,14 @@ Contains object-service related parameters.
 ```yaml
 object:
   put:
-    pool_size_remote: 100
+    remote_pool_size: 100
 ```
 
 | Parameter                   | Type  | Default value | Description                                                                                    |
 |-----------------------------|-------|---------------|------------------------------------------------------------------------------------------------|
 | `delete.tombstone_lifetime` | `int` | `5`           | Tombstone lifetime for removed objects in epochs.                                              |
-| `put.pool_size_remote`      | `int` | `10`          | Max pool size for performing remote `PUT` operations. Used by Policer and Replicator services. |
-| `put.pool_size_local`       | `int` | `10`          | Max pool size for performing local `PUT` operations. Used by Policer and Replicator services.  |
+| `put.remote_pool_size`      | `int` | `10`          | Max pool size for performing remote `PUT` operations. Used by Policer and Replicator services. |
+| `put.local_pool_size`       | `int` | `10`          | Max pool size for performing local `PUT` operations. Used by Policer and Replicator services.  |
 
 # `runtime` section
 Contains runtime parameters.
diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go
index 346903c5c..e16f89457 100644
--- a/pkg/local_object_storage/shard/gc.go
+++ b/pkg/local_object_storage/shard/gc.go
@@ -115,8 +115,8 @@ type gcCfg struct {
 
 	workerPoolInit func(int) util.WorkerPool
 
-	expiredCollectorWorkersCount int
-	expiredCollectorBatchSize    int
+	expiredCollectorWorkerCount int
+	expiredCollectorBatchSize   int
 
 	metrics GCMectrics
 
@@ -313,16 +313,16 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) {
 	return
 }
 
-func (s *Shard) getExpiredObjectsParameters() (workersCount, batchSize int) {
-	workersCount = minExpiredWorkers
+func (s *Shard) getExpiredObjectsParameters() (workerCount, batchSize int) {
+	workerCount = minExpiredWorkers
 	batchSize = minExpiredBatchSize
 
 	if s.gc.gcCfg.expiredCollectorBatchSize > batchSize {
 		batchSize = s.gc.gcCfg.expiredCollectorBatchSize
 	}
 
-	if s.gc.gcCfg.expiredCollectorWorkersCount > workersCount {
-		workersCount = s.gc.gcCfg.expiredCollectorWorkersCount
+	if s.gc.gcCfg.expiredCollectorWorkerCount > workerCount {
+		workerCount = s.gc.gcCfg.expiredCollectorWorkerCount
 	}
 	return
 }
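
`getExpiredObjectsParameters` keeps its clamping behaviour after the rename: the configured values are only used when they exceed the package minimums. The same lower-bounding written out as a tiny sketch (the `atLeast` helper is illustrative, not a new function in the tree):

```go
// atLeast returns the configured value, but never less than the minimum —
// the same effect as the two comparisons in getExpiredObjectsParameters.
func atLeast(configured, minimum int) int {
	if configured > minimum {
		return configured
	}
	return minimum
}

// workerCount = atLeast(s.gc.gcCfg.expiredCollectorWorkerCount, minExpiredWorkers)
// batchSize   = atLeast(s.gc.gcCfg.expiredCollectorBatchSize, minExpiredBatchSize)
```
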
diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go
index 07b774022..aa87e0cfc 100644
--- a/pkg/local_object_storage/shard/shard.go
+++ b/pkg/local_object_storage/shard/shard.go
@@ -365,11 +365,11 @@ func WithExpiredCollectorBatchSize(size int) Option {
 	}
 }
 
-// WithExpiredCollectorWorkersCount returns option to set concurrent
+// WithExpiredCollectorWorkerCount returns option to set concurrent
 // workers count of expired object collection operation.
-func WithExpiredCollectorWorkersCount(count int) Option {
+func WithExpiredCollectorWorkerCount(count int) Option {
 	return func(c *cfg) {
-		c.gcCfg.expiredCollectorWorkersCount = count
+		c.gcCfg.expiredCollectorWorkerCount = count
 	}
 }