[#9999] mclock: Schedule by limit requests as soon as possible

Let's assume that for some tag a `limit = 1000 RPS` is defined and each
request takes 10 ms to complete. At some point in time 1000 requests
are accepted. Then the first request will be scheduled at `now()`, the
second at `now() + 1 ms`, the third at `now() + 2 ms`, and so on. The
total processing duration of the 1000 requests will be 1 second + 10 ms.
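
As a quick check of that arithmetic (an illustrative snippet, not the
library's code):

```go
package main

import "fmt"

func main() {
	// Illustrative arithmetic only: with limit = 1000 RPS, request i becomes
	// eligible at now + i/limit seconds, so the last of 1000 requests starts
	// roughly 999 ms after the first and finishes 10 ms later.
	const (
		limit     = 1000.0 // requests per second
		execution = 0.010  // seconds each request runs for
	)
	lastStart := 999.0 / limit
	fmt.Printf("total: %.3f s\n", lastStart+execution) // prints 1.009
}
```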

After this fix the scheduler looks one second ahead and schedules every
request whose limit timestamp falls within that window. So for the
situation above the total processing duration of the 1000 requests will
be 10 ms in an ideal world.
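
Conceptually, the change relaxes the eligibility check from "the limit
timestamp has already passed" to "the limit timestamp falls within the next
second". A minimal sketch of the two conditions (illustrative helpers, not
the scheduler's actual code):

```go
package sketch

// eligibleOld admits a request only after its limit timestamp has passed
// (the behaviour before this fix).
func eligibleOld(ts, now float64) bool { return ts <= now }

// eligibleNew also admits requests whose limit timestamp falls within the
// next second of virtual time (the behaviour after this fix).
func eligibleNew(ts, now float64) bool { return ts < now+1.0 }
```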

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
Dmitrii Stepanov 2025-03-24 15:33:50 +03:00
parent 32079ad7c2
commit 41690c21e5
Signed by: dstepanov-yadro
GPG key ID: 237AF1A763293BC0
2 changed files with 46 additions and 1 deletion

@@ -306,7 +306,7 @@ func (q *MClock) setNextScheduleTimer(now float64) {
 }
 
 func (q *MClock) scheduleByLimitAndWeight(now float64) {
-	for q.limitQueue.Len() > 0 && q.limitQueue.items[0].ts() <= now {
+	for q.limitQueue.Len() > 0 && q.limitQueue.items[0].ts() < now+1.0 {
 		ready := heap.Pop(q.limitQueue).(*limitMQueueItem)
 		heap.Push(q.readyQueue, &readyMQueueItem{r: ready.r})
 	}
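
To relate the one-line change back to the example in the commit message,
here is a toy calculation (a hypothetical helper, not part of the package)
over 1000 limit timestamps spaced 1 ms apart:

```go
package sketch

// readyCounts reports how many queued limit timestamps satisfy the old
// (`<= now`) and the new (`< now+1.0`) loop conditions at time now.
func readyCounts(timestamps []float64, now float64) (old, fixed int) {
	for _, ts := range timestamps {
		if ts <= now {
			old++
		}
		if ts < now+1.0 {
			fixed++
		}
	}
	return old, fixed
}

// For timestamps now, now+0.001, ..., now+0.999 this yields old = 1 and
// fixed = 1000: the whole batch moves to the ready queue in a single pass.
```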

@@ -515,3 +515,48 @@ func TestMClockLowLimit(t *testing.T) {
 	})
 	require.NoError(t, eg.Wait())
 }
+
+func TestMClockLimitTotalTime(t *testing.T) {
+	t.Parallel()
+	limit := 10.0 // 10 RPS -> 1 request per 100 ms
+	q, err := NewMClock(100, 100, map[string]TagInfo{
+		"class1": {Share: 50, LimitIOPS: &limit},
+	}, 5*time.Second)
+	require.NoError(t, err)
+	defer q.Close()
+
+	// 10 requests, each request runs for 500 ms,
+	// but they should be scheduled as soon as possible,
+	// so total duration must be less than 1 second
+	eg, ctx := errgroup.WithContext(context.Background())
+	startedAt := time.Now()
+	for range 10 {
+		eg.Go(func() error {
+			release, err := q.RequestArrival(ctx, "class1")
+			require.NoError(t, err)
+			time.Sleep(500 * time.Millisecond)
+			release()
+			return nil
+		})
+	}
+	require.NoError(t, eg.Wait())
+	require.True(t, time.Since(startedAt) <= 1*time.Second)
+
+	// 11 requests, limit = 10 RPS, so 10 requests should be
+	// scheduled as soon as possible, but last request should be
+	// scheduled at now + 1.0 s
+	eg, ctx = errgroup.WithContext(context.Background())
+	startedAt = time.Now()
+	for range 11 {
+		eg.Go(func() error {
+			release, err := q.RequestArrival(ctx, "class1")
+			require.NoError(t, err)
+			time.Sleep(500 * time.Millisecond)
+			release()
+			return nil
+		})
+	}
+	require.NoError(t, eg.Wait())
+	require.True(t, time.Since(startedAt) >= 1500*time.Millisecond)
+	require.True(t, time.Since(startedAt) <= 1600*time.Millisecond) // 100 ms offset to complete all requests
+}
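
For reference, the timing expectations behind these assertions, written out
as an illustrative sketch (assumed constant names; it takes the 100 slots
passed to NewMClock to mean concurrency is not the bottleneck, so only the
10 RPS limit matters):

```go
package sketch

import "time"

// Expected wall-clock durations for the two phases of the test above.
const (
	runFor    = 500 * time.Millisecond // each request sleeps this long
	lookAhead = 1 * time.Second        // look-ahead window introduced by this fix
)

// Phase 1: all 10 requests fall inside the look-ahead window, start together
// and finish after roughly runFor, well under the 1 s bound.
const phase1 = runFor

// Phase 2: the 11th request starts about 1 s after the first, so the phase
// lasts around lookAhead + runFor = 1.5 s; the test allows 100 ms of slack.
const phase2 = lookAhead + runFor
```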