diff --git a/internal/qos/limiter.go b/internal/qos/limiter.go
index 62e8f6cf7..661518fb5 100644
--- a/internal/qos/limiter.go
+++ b/internal/qos/limiter.go
@@ -7,7 +7,6 @@ import (
 	"time"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
 	"git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling"
 	"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
 	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -26,6 +25,11 @@ type Limiter interface {
 	Close()
 }
 
// scheduler abstracts the admission-control backend used by mClockLimiter.
// It is satisfied by both *scheduling.MClock and the lightweight *semaphore,
// letting NewLimiter pick the cheapest implementation per operation kind.
type scheduler interface {
	RequestArrival(ctx context.Context, tag string) (scheduling.ReleaseFunc, error)
	Close()
}
+
 func NewLimiter(c *limits.Config) (Limiter, error) {
 	if err := validateConfig(c); err != nil {
 		return nil, err
@@ -34,15 +38,11 @@ func NewLimiter(c *limits.Config) (Limiter, error) {
 	if isNoop(read, write) {
 		return noopLimiterInstance, nil
 	}
-	readScheduler, err := scheduling.NewMClock(
-		uint64(read.MaxRunningOps), uint64(read.MaxWaitingOps),
-		converToSchedulingTags(read.Tags), read.IdleTimeout)
+	readScheduler, err := createScheduler(c.Read())
 	if err != nil {
 		return nil, fmt.Errorf("failed to create read scheduler: %w", err)
 	}
-	writeScheduler, err := scheduling.NewMClock(
-		uint64(write.MaxRunningOps), uint64(write.MaxWaitingOps),
-		converToSchedulingTags(write.Tags), write.IdleTimeout)
+	writeScheduler, err := createScheduler(c.Write())
 	if err != nil {
 		return nil, fmt.Errorf("failed to create write scheduler: %w", err)
 	}
@@ -52,6 +52,15 @@ func NewLimiter(c *limits.Config) (Limiter, error) {
 	}, nil
 }
 
+func createScheduler(config limits.OpConfig) (scheduler, error) {
+	if len(config.Tags) == 0 && config.MaxWaitingOps == limits.NoLimit {
+		return &semaphore{limit: int64(config.MaxRunningOps)}, nil
+	}
+	return scheduling.NewMClock(
+		uint64(config.MaxRunningOps), uint64(config.MaxWaitingOps),
+		converToSchedulingTags(config.Tags), config.IdleTimeout)
+}
+
 func converToSchedulingTags(limits []limits.IOTagConfig) map[string]scheduling.TagInfo {
 	result := make(map[string]scheduling.TagInfo)
 	for _, tag := range []IOTag{IOTagClient, IOTagBackground, IOTagInternal, IOTagPolicer, IOTagWritecache} {
@@ -100,27 +109,19 @@ func (n *noopLimiter) Close() {}
var _ Limiter = (*mClockLimiter)(nil)

// mClockLimiter is the non-noop Limiter implementation: it gates read and
// write operations through two independently configured schedulers
// (mClock or semaphore, chosen by createScheduler at construction time).
type mClockLimiter struct {
	readScheduler  scheduler
	writeScheduler scheduler
}
 
// ReadRequest admits a read operation through the read scheduler.
// The returned ReleaseFunc must be called once the operation finishes.
func (n *mClockLimiter) ReadRequest(ctx context.Context) (ReleaseFunc, error) {
	return requestArrival(ctx, n.readScheduler)
}
 
// WriteRequest admits a write operation through the write scheduler.
// The returned ReleaseFunc must be called once the operation finishes.
func (n *mClockLimiter) WriteRequest(ctx context.Context) (ReleaseFunc, error) {
	return requestArrival(ctx, n.writeScheduler)
}
+
+func requestArrival(ctx context.Context, s scheduler) (ReleaseFunc, error) {
 	tag, ok := tagging.IOTagFromContext(ctx)
 	if !ok {
 		tag = IOTagClient.String()
@@ -128,9 +129,10 @@ func (n *mClockLimiter) WriteRequest(ctx context.Context) (ReleaseFunc, error) {
 	if tag == IOTagCritical.String() {
 		return releaseStub, nil
 	}
-	rel, err := n.writeScheduler.RequestArrival(ctx, tag)
+	rel, err := s.RequestArrival(ctx, tag)
 	if err != nil {
-		if errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) {
+		if errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) ||
+			errors.Is(err, errSemaphoreLimitExceeded) {
 			return nil, &apistatus.ResourceExhausted{}
 		}
 		return nil, err
diff --git a/internal/qos/semaphore.go b/internal/qos/semaphore.go
new file mode 100644
index 000000000..0b26f9988
--- /dev/null
+++ b/internal/qos/semaphore.go
@@ -0,0 +1,39 @@
+package qos
+
+import (
+	"context"
+	"errors"
+	"sync/atomic"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling"
+)
+
var (
	// Compile-time check that *semaphore satisfies the scheduler interface.
	_ scheduler = (*semaphore)(nil)

	// errSemaphoreLimitExceeded is returned by RequestArrival when the number
	// of in-flight requests already equals the configured limit; requestArrival
	// maps it to apistatus.ResourceExhausted.
	errSemaphoreLimitExceeded = errors.New("semaphore limit exceeded")
)

// semaphore is a lightweight, tag-agnostic admission counter used instead of
// a full mClock scheduler when only MaxRunningOps is configured.
type semaphore struct {
	count atomic.Int64 // requests currently admitted
	limit int64        // maximum concurrently admitted requests
}

// Close is a no-op; the semaphore holds no background resources.
func (s *semaphore) Close() {}
+
+func (s *semaphore) RequestArrival(ctx context.Context, _ string) (scheduling.ReleaseFunc, error) {
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	default:
+	}
+
+	v := s.count.Add(1)
+	if v > s.limit {
+		s.count.Add(-1)
+		return nil, errSemaphoreLimitExceeded
+	}
+
+	return func() {
+		s.count.Add(-1)
+	}, nil
+}