diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index 3e1bfb4f2..004c8f128 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -33,6 +33,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
 	internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/chainbase"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
 	frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
@@ -277,6 +278,9 @@ func (a *applicationConfiguration) updateShardConfig(c *config.Config, oldConfig
 	a.setMetabaseConfig(&newConfig, oldConfig)
 
 	a.setGCConfig(&newConfig, oldConfig)
+	if err := a.setLimits(&newConfig, oldConfig); err != nil {
+		return err
+	}
 
 	a.EngineCfg.shards = append(a.EngineCfg.shards, newConfig)
 
@@ -370,6 +374,14 @@ func (a *applicationConfiguration) setGCConfig(newConfig *shardCfg, oldConfig *s
 	newConfig.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount()
 }
 
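+// setLimits validates the shard "limits" subsection; a validation error aborts the shard config update.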
+func (a *applicationConfiguration) setLimits(newConfig *shardCfg, oldConfig *shardconfig.Config) error {
+	return qos.ValidateConfig(oldConfig.Limits())
+}
+
 // internals contains application-specific internals that are created
 // on application startup and are shared b/w the components during
 // the application life cycle.
diff --git a/cmd/frostfs-node/config/engine/config_test.go b/cmd/frostfs-node/config/engine/config_test.go
index ef6380a62..b912b5d7d 100644
--- a/cmd/frostfs-node/config/engine/config_test.go
+++ b/cmd/frostfs-node/config/engine/config_test.go
@@ -11,6 +11,7 @@ import (
 	blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza"
 	fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
 	gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc"
+	limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
 	piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama"
 	writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache"
 	configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
@@ -76,6 +77,7 @@ func TestEngineSection(t *testing.T) {
 			ss := blob.Storages()
 			pl := sc.Pilorama()
 			gc := sc.GC()
+			limits := sc.Limits()
 
 			switch num {
 			case 0:
@@ -134,6 +136,75 @@ func TestEngineSection(t *testing.T) {
 				require.Equal(t, false, sc.RefillMetabase())
 				require.Equal(t, mode.ReadOnly, sc.Mode())
 				require.Equal(t, 100, sc.RefillMetabaseWorkersCount())
+
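+				// Shard 0 sets explicit limits; expect the values from the example configs.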
+				readLimits := limits.Read()
+				writeLimits := limits.Write()
+				require.Equal(t, 30*time.Second, readLimits.IdleTimeout)
+				require.Equal(t, int64(10_000), readLimits.MaxRunningOps)
+				require.Equal(t, int64(1_000), readLimits.MaxWaitingOps)
+				require.Equal(t, 45*time.Second, writeLimits.IdleTimeout)
+				require.Equal(t, int64(1_000), writeLimits.MaxRunningOps)
+				require.Equal(t, int64(100), writeLimits.MaxWaitingOps)
+				require.ElementsMatch(t, readLimits.Tags,
+					[]limitsconfig.IOTagConfig{
+						{
+							Tag:         "internal",
+							Weight:      toPtr(20),
+							ReservedOps: toPtr(1000),
+							LimitOps:    toPtr(0),
+						},
+						{
+							Tag:         "client",
+							Weight:      toPtr(70),
+							ReservedOps: toPtr(10000),
+						},
+						{
+							Tag:         "background",
+							Weight:      toPtr(5),
+							LimitOps:    toPtr(10000),
+							ReservedOps: toPtr(0),
+						},
+						{
+							Tag:      "writecache",
+							Weight:   toPtr(5),
+							LimitOps: toPtr(25000),
+						},
+						{
+							Tag:      "policer",
+							Weight:   toPtr(5),
+							LimitOps: toPtr(25000),
+						},
+					})
+				require.ElementsMatch(t, writeLimits.Tags,
+					[]limitsconfig.IOTagConfig{
+						{
+							Tag:         "internal",
+							Weight:      toPtr(200),
+							ReservedOps: toPtr(100),
+							LimitOps:    toPtr(0),
+						},
+						{
+							Tag:         "client",
+							Weight:      toPtr(700),
+							ReservedOps: toPtr(1000),
+						},
+						{
+							Tag:         "background",
+							Weight:      toPtr(50),
+							LimitOps:    toPtr(1000),
+							ReservedOps: toPtr(0),
+						},
+						{
+							Tag:      "writecache",
+							Weight:   toPtr(50),
+							LimitOps: toPtr(2500),
+						},
+						{
+							Tag:      "policer",
+							Weight:   toPtr(50),
+							LimitOps: toPtr(2500),
+						},
+					})
 			case 1:
 				require.Equal(t, "tmp/1/blob/pilorama.db", pl.Path())
 				require.Equal(t, fs.FileMode(0o644), pl.Perm())
@@ -188,6 +259,17 @@ func TestEngineSection(t *testing.T) {
 				require.Equal(t, true, sc.RefillMetabase())
 				require.Equal(t, mode.ReadWrite, sc.Mode())
 				require.Equal(t, shardconfig.RefillMetabaseWorkersCountDefault, sc.RefillMetabaseWorkersCount())
+
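+				// Shard 1 omits the "limits" section; expect defaults and empty tag lists.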
+				readLimits := limits.Read()
+				writeLimits := limits.Write()
+				require.Equal(t, limitsconfig.DefaultIdleTimeout, readLimits.IdleTimeout)
+				require.Equal(t, limitsconfig.NoLimit, readLimits.MaxRunningOps)
+				require.Equal(t, limitsconfig.NoLimit, readLimits.MaxWaitingOps)
+				require.Equal(t, limitsconfig.DefaultIdleTimeout, writeLimits.IdleTimeout)
+				require.Equal(t, limitsconfig.NoLimit, writeLimits.MaxRunningOps)
+				require.Equal(t, limitsconfig.NoLimit, writeLimits.MaxWaitingOps)
+				require.Equal(t, 0, len(readLimits.Tags))
+				require.Equal(t, 0, len(writeLimits.Tags))
 			}
 			return nil
 		})
@@ -201,3 +283,7 @@ func TestEngineSection(t *testing.T) {
 		configtest.ForEnvFileType(t, path, fileConfigTest)
 	})
 }
+
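+// toPtr returns a pointer to its argument, for building expected IOTagConfig values.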
+func toPtr(v float64) *float64 {
+	return &v
+}
diff --git a/cmd/frostfs-node/config/engine/shard/config.go b/cmd/frostfs-node/config/engine/shard/config.go
index 0620c9f63..e50d56b95 100644
--- a/cmd/frostfs-node/config/engine/shard/config.go
+++ b/cmd/frostfs-node/config/engine/shard/config.go
@@ -4,6 +4,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
 	blobstorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor"
 	gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc"
+	limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
 	metabaseconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/metabase"
 	piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama"
 	writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache"
@@ -125,6 +126,14 @@ func (x *Config) GC() *gcconfig.Config {
 	)
 }
 
+// Limits returns "limits" subsection as a limitsconfig.Config.
+func (x *Config) Limits() *limitsconfig.Config {
+	return limitsconfig.From(
+		(*config.Config)(x).
+			Sub("limits"),
+	)
+}
+
 // RefillMetabase returns the value of "resync_metabase" config parameter.
 //
 // Returns false if the value is not a valid bool.
diff --git a/cmd/frostfs-node/config/engine/shard/limits/config.go b/cmd/frostfs-node/config/engine/shard/limits/config.go
new file mode 100644
index 000000000..b9b5c4382
--- /dev/null
+++ b/cmd/frostfs-node/config/engine/shard/limits/config.go
@@ -0,0 +1,130 @@
+package limits
+
+import (
+	"math"
+	"strconv"
+	"time"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+	"github.com/spf13/cast"
+)
+
+const (
+	NoLimit            int64 = math.MaxInt64
+	DefaultIdleTimeout       = 5 * time.Minute
+)
+
+// From wraps config section into Config.
+func From(c *config.Config) *Config {
+	return (*Config)(c)
+}
+
+// Config is a wrapper over the config section
+// which provides access to Shard's limits configurations.
+type Config config.Config
+
+// Read returns the value of "read" limits config section.
+func (x *Config) Read() OpConfig {
+	return x.parse("read")
+}
+
+// Write returns the value of "write" limits config section.
+func (x *Config) Write() OpConfig {
+	return x.parse("write")
+}
+
+func (x *Config) parse(sub string) OpConfig {
+	c := (*config.Config)(x).Sub(sub)
+	var result OpConfig
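+	// Missing or non-positive values fall back to the defaults below.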
+
+	if s := config.Int(c, "max_waiting_ops"); s > 0 {
+		result.MaxWaitingOps = s
+	} else {
+		result.MaxWaitingOps = NoLimit
+	}
+
+	if s := config.Int(c, "max_running_ops"); s > 0 {
+		result.MaxRunningOps = s
+	} else {
+		result.MaxRunningOps = NoLimit
+	}
+
+	if s := config.DurationSafe(c, "idle_timeout"); s > 0 {
+		result.IdleTimeout = s
+	} else {
+		result.IdleTimeout = DefaultIdleTimeout
+	}
+
+	result.Tags = tags(c)
+
+	return result
+}
+
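+// OpConfig groups the parsed limits for one operation kind (read or write).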
+type OpConfig struct {
+	// MaxWaitingOps returns the value of "max_waiting_ops" config parameter.
+	//
+	// Equals NoLimit if the value is not a positive number.
+	MaxWaitingOps int64
+	// MaxRunningOps returns the value of "max_running_ops" config parameter.
+	//
+	// Equals NoLimit if the value is not a positive number.
+	MaxRunningOps int64
+	// IdleTimeout returns the value of "idle_timeout" config parameter.
+	//
+	// Equals DefaultIdleTimeout if the value is not a valid duration.
+	IdleTimeout time.Duration
+	// Tags returns the value of "tags" config parameter.
+	//
+	// Equals nil if the value is not a valid tags config slice.
+	Tags []IOTagConfig
+}
+
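+// IOTagConfig is a single per-tag limits entry; nil pointers mean the parameter was not set.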
+type IOTagConfig struct {
+	Tag         string
+	Weight      *float64
+	LimitOps    *float64
+	ReservedOps *float64
+}
+
+func tags(c *config.Config) []IOTagConfig {
+	c = c.Sub("tags")
+	var result []IOTagConfig
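+	// Tags are declared as an indexed list ("0.tag", "1.tag", ...); parsing stops at the first index without a tag name.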
+	for i := 0; ; i++ {
+		tag := config.String(c, strconv.Itoa(i)+".tag")
+		if tag == "" {
+			return result
+		}
+
+		var tagConfig IOTagConfig
+		tagConfig.Tag = tag
+
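+		// weight, limit_ops and reserved_ops are optional; only explicitly set values are stored.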
+		v := c.Value(strconv.Itoa(i) + ".weight")
+		if v != nil {
+			w, err := cast.ToFloat64E(v)
+			panicOnErr(err)
+			tagConfig.Weight = &w
+		}
+
+		v = c.Value(strconv.Itoa(i) + ".limit_ops")
+		if v != nil {
+			l, err := cast.ToFloat64E(v)
+			panicOnErr(err)
+			tagConfig.LimitOps = &l
+		}
+
+		v = c.Value(strconv.Itoa(i) + ".reserved_ops")
+		if v != nil {
+			r, err := cast.ToFloat64E(v)
+			panicOnErr(err)
+			tagConfig.ReservedOps = &r
+		}
+
+		result = append(result, tagConfig)
+	}
+}
+
+func panicOnErr(err error) {
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/config/example/node.env b/config/example/node.env
index 2ebef181a..9bd645344 100644
--- a/config/example/node.env
+++ b/config/example/node.env
@@ -157,6 +157,47 @@ FROSTFS_STORAGE_SHARD_0_GC_REMOVER_SLEEP_INTERVAL=2m
 FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_BATCH_SIZE=1500
 #### Limit of concurrent workers collecting expired objects by the garbage collector
 FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_WORKER_COUNT=15
+#### Limits config
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_MAX_RUNNING_OPS=10000
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_MAX_WAITING_OPS=1000
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_MAX_RUNNING_OPS=1000
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_MAX_WAITING_OPS=100
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_IDLE_TIMEOUT=45s
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_IDLE_TIMEOUT=30s
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_TAG=internal
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_WEIGHT=20
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_LIMIT_OPS=0
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_RESERVED_OPS=1000
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_TAG=client
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_WEIGHT=70
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_RESERVED_OPS=10000
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_TAG=background
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_WEIGHT=5
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_LIMIT_OPS=10000
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_RESERVED_OPS=0
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_TAG=writecache
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_WEIGHT=5
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_LIMIT_OPS=25000
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_TAG=policer
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_WEIGHT=5
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_LIMIT_OPS=25000
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_TAG=internal
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_WEIGHT=200
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_LIMIT_OPS=0
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_RESERVED_OPS=100
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_TAG=client
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_WEIGHT=700
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_RESERVED_OPS=1000
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_TAG=background
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_WEIGHT=50
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_LIMIT_OPS=1000
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_RESERVED_OPS=0
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_TAG=writecache
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_WEIGHT=50
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_LIMIT_OPS=2500
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_TAG=policer
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_WEIGHT=50
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_LIMIT_OPS=2500
 
 ## 1 shard
 ### Flag to refill Metabase from BlobStor
diff --git a/config/example/node.json b/config/example/node.json
index 0ed72effc..6b799b318 100644
--- a/config/example/node.json
+++ b/config/example/node.json
@@ -221,6 +221,76 @@
           "remover_sleep_interval": "2m",
           "expired_collector_batch_size": 1500,
           "expired_collector_worker_count": 15
+        },
+        "limits": {
+          "read": {
+            "max_running_ops": 10000,
+            "max_waiting_ops": 1000,
+            "idle_timeout": "30s",
+            "tags": [
+              {
+                "tag": "internal",
+                "weight": 20,
+                "limit_ops": 0,
+                "reserved_ops": 1000
+              },
+              {
+                "tag": "client",
+                "weight": 70,
+                "reserved_ops": 10000
+              },
+              {
+                "tag": "background",
+                "weight": 5,
+                "limit_ops": 10000,
+                "reserved_ops": 0
+              },
+              {
+                "tag": "writecache",
+                "weight": 5,
+                "limit_ops": 25000
+              },
+              {
+                "tag": "policer",
+                "weight": 5,
+                "limit_ops": 25000
+              }
+            ]
+          },
+          "write": {
+            "max_running_ops": 1000,
+            "max_waiting_ops": 100,
+            "idle_timeout": "45s",
+            "tags": [
+              {
+                "tag": "internal",
+                "weight": 200,
+                "limit_ops": 0,
+                "reserved_ops": 100
+              },
+              {
+                "tag": "client",
+                "weight": 700,
+                "reserved_ops": 1000
+              },
+              {
+                "tag": "background",
+                "weight": 50,
+                "limit_ops": 1000,
+                "reserved_ops": 0
+              },
+              {
+                "tag": "writecache",
+                "weight": 50,
+                "limit_ops": 2500
+              },
+              {
+                "tag": "policer",
+                "weight": 50,
+                "limit_ops": 2500
+              }
+            ]
+          }
         }
       },
       "1": {
diff --git a/config/example/node.yaml b/config/example/node.yaml
index 6b810653e..2552a419c 100644
--- a/config/example/node.yaml
+++ b/config/example/node.yaml
@@ -227,6 +227,52 @@ storage:
         expired_collector_batch_size: 1500 # number of objects to be marked expired by the garbage collector
         expired_collector_worker_count: 15 # number of concurrent workers collecting expired objects by the garbage collector
 
+      limits:
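+        # shard limits: concurrency caps and per-tag scheduling settings for read and write operations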
+        read:
+          max_running_ops: 10000
+          max_waiting_ops: 1000
+          idle_timeout: 30s
+          tags:
+          - tag: internal
+            weight: 20
+            limit_ops: 0
+            reserved_ops: 1000
+          - tag: client
+            weight: 70
+            reserved_ops: 10000
+          - tag: background
+            weight: 5
+            limit_ops: 10000
+            reserved_ops: 0
+          - tag: writecache
+            weight: 5
+            limit_ops: 25000
+          - tag: policer
+            weight: 5
+            limit_ops: 25000
+        write:
+          max_running_ops: 1000
+          max_waiting_ops: 100
+          idle_timeout: 45s
+          tags:
+          - tag: internal
+            weight: 200
+            limit_ops: 0
+            reserved_ops: 100
+          - tag: client
+            weight: 700
+            reserved_ops: 1000
+          - tag: background
+            weight: 50
+            limit_ops: 1000
+            reserved_ops: 0
+          - tag: writecache
+            weight: 50
+            limit_ops: 2500
+          - tag: policer
+            weight: 50
+            limit_ops: 2500
+
     1:
       writecache:
         path: tmp/1/cache  # write-cache root directory
diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md
index 1eb5437ba..271cc6532 100644
--- a/docs/storage-node-configuration.md
+++ b/docs/storage-node-configuration.md
@@ -195,6 +195,7 @@ The following table describes configuration for each shard.
 | `blobstor`                                       | [Blobstor config](#blobstor-subsection)     |               | Blobstor configuration.                                                                                                                                                                                           |
 | `small_object_size`                              | `size`                                      | `1M`          | Maximum size of an object stored in blobovnicza tree.                                                                                                                                                             |
 | `gc`                                             | [GC config](#gc-subsection)                 |               | GC configuration.                                                                                                                                                                                                 |
+| `limits`                                         | [Shard limits config](#limits-subsection)   |               | Shard limits configuration.                                                                                                                                                                                       |
 
 ### `blobstor` subsection
 
@@ -301,6 +302,64 @@ writecache:
 | `flush_worker_count`        | `int`      | `20`          | Amount of background workers that move data from the writecache to the blobstor.                                              |
 | `max_flushing_objects_size` | `size`     | `512M`        | Max total size of background flushing objects.                                                                                |
 
+### `limits` subsection
+
+```yaml
+limits:
+  read:
+    max_running_ops: 10000
+    max_waiting_ops: 1000
+    idle_timeout: 30s
+    tags:
+      - tag: internal
+        weight: 20
+        limit_ops: 0
+        reserved_ops: 1000
+      - tag: client
+        weight: 70
+        reserved_ops: 10000
+      - tag: background
+        weight: 5
+        limit_ops: 10000
+        reserved_ops: 0
+      - tag: writecache
+        weight: 5
+        limit_ops: 25000
+      - tag: policer
+        weight: 5
+        limit_ops: 25000
+  write:
+    max_running_ops: 1000
+    max_waiting_ops: 100
+    idle_timeout: 45s
+    tags:
+      - tag: internal
+        weight: 200
+        limit_ops: 0
+        reserved_ops: 100
+      - tag: client
+        weight: 700
+        reserved_ops: 1000
+      - tag: background
+        weight: 50
+        limit_ops: 1000
+        reserved_ops: 0
+      - tag: writecache
+        weight: 50
+        limit_ops: 2500
+      - tag: policer
+        weight: 50
+        limit_ops: 2500
+```
+
+The `read` and `write` subsections share the same parameters:
+
+| Parameter          | Type       | Default value  | Description                                                                                            |
+| ------------------ | ---------- | -------------- | ------------------------------------------------------------------------------------------------------ |
+| `max_running_ops`  | `int`      | 0 (no limit)   | The maximum number of concurrently running operations.                                                 |
+| `max_waiting_ops`  | `int`      | 0 (no limit)   | The maximum number of operations waiting for execution.                                                 |
+| `idle_timeout`     | `duration` | `5m`           | Idle timeout; must be a positive duration.                                                             |
+| `tags`             | `[]tag`    | empty          | Array of per-tag settings.                                                                             |
+| `tag.tag`          | `string`   | empty          | Tag name. Allowed values: `client`, `internal`, `background`, `writecache`, `policer`.                 |
+| `tag.weight`       | `float`    | 0 (no weight)  | Weight for queries with the specified tag. Weights must be specified either for all tags or for none.  |
+| `tag.limit_ops`    | `float`    | 0 (no limit)   | Operations per second rate limit for queries with the specified tag.                                   |
+| `tag.reserved_ops` | `float`    | 0 (no reserve) | Reserved operations per second rate for queries with the specified tag.                                |
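+
+In env-based configuration, each parameter maps to an uppercased, underscore-joined variable: for example, shard 0's `read.tags.0.weight` becomes `FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_WEIGHT` (see `config/example/node.env`).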
 
 # `node` section
 
diff --git a/internal/qos/validate.go b/internal/qos/validate.go
new file mode 100644
index 000000000..afced345b
--- /dev/null
+++ b/internal/qos/validate.go
@@ -0,0 +1,92 @@
+package qos
+
+import (
+	"errors"
+	"fmt"
+	"math"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
+)
+
+var errWeightsMustBeSpecified = errors.New("invalid weights: weights must be specified either for all tags or for none")
+
+type tagConfig struct {
+	Shares, Limit, Reserved *float64
+}
+
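+// ValidateConfig checks that the shard limits config contains only known tags and sane values.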
+func ValidateConfig(c *limits.Config) error {
+	if err := validateOpConfig(c.Read()); err != nil {
+		return fmt.Errorf("limits 'read' section validation error: %w", err)
+	}
+	if err := validateOpConfig(c.Write()); err != nil {
+		return fmt.Errorf("limits 'write' section validation error: %w", err)
+	}
+	return nil
+}
+
+func validateOpConfig(c limits.OpConfig) error {
+	if c.MaxRunningOps <= 0 {
+		return fmt.Errorf("invalid 'max_running_ops = %d': must be greater than zero", c.MaxRunningOps)
+	}
+	if c.MaxWaitingOps <= 0 {
+		return fmt.Errorf("invalid 'max_waiting_ops = %d': must be greater than zero", c.MaxWaitingOps)
+	}
+	if c.IdleTimeout <= 0 {
+		return fmt.Errorf("invalid 'idle_timeout = %s': must be greater than zero", c.IdleTimeout.String())
+	}
+	if err := validateTags(c.Tags); err != nil {
+		return fmt.Errorf("'tags' config section validation error: %w", err)
+	}
+	return nil
+}
+
+func validateTags(configTags []limits.IOTagConfig) error {
+	tags := map[IOTag]tagConfig{
+		IOTagClient:     {},
+		IOTagInternal:   {},
+		IOTagBackground: {},
+		IOTagWritecache: {},
+		IOTagPolicer:    {},
+	}
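+	// Seed a zero-value entry for every configurable tag so the checks below also cover tags omitted from the config.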
+	for _, t := range configTags {
+		tag, err := FromRawString(t.Tag)
+		if err != nil {
+			return fmt.Errorf("invalid tag %s: %w", t.Tag, err)
+		}
+		if _, ok := tags[tag]; !ok {
+			return fmt.Errorf("tag %s is not configurable", t.Tag)
+		}
+		tags[tag] = tagConfig{
+			Shares:   t.Weight,
+			Limit:    t.LimitOps,
+			Reserved: t.ReservedOps,
+		}
+	}
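+	// Weights must be set for all tags or for none: compare each tag's weight presence against the first one seen.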
+	idx := 0
+	var shares float64
+	for t, v := range tags {
+		if idx == 0 {
+			idx++
+			shares = float64Value(v.Shares)
+		} else if (shares != 0 && float64Value(v.Shares) == 0) || (shares == 0 && float64Value(v.Shares) != 0) {
+			return errWeightsMustBeSpecified
+		}
+		if float64Value(v.Shares) < 0 || math.IsNaN(float64Value(v.Shares)) {
+			return fmt.Errorf("invalid weight for tag %s: must be a non-negative value", t.String())
+		}
+		if float64Value(v.Limit) < 0 || math.IsNaN(float64Value(v.Limit)) {
+			return fmt.Errorf("invalid limit_ops for tag %s: must be a non-negative value", t.String())
+		}
+		if float64Value(v.Reserved) < 0 || math.IsNaN(float64Value(v.Reserved)) {
+			return fmt.Errorf("invalid reserved_ops for tag %s: must be a non-negative value", t.String())
+		}
+	}
+	return nil
+}
+
+func float64Value(f *float64) float64 {
+	if f == nil {
+		return 0.0
+	}
+	return *f
+}