Fix signalling Wait in regulator.enter
Under some interleavings, regulator.exit may not send a signal to a blocked regulator.enter. Assume we are in the critical section of regulator.exit with r.available equal to 0, and there are three more goroutines: one is also executing regulator.exit and is waiting for the lock, while the other two are in regulator.enter, waiting for the signal. We send the signal, and once the lock is released two goroutines contend for it:

1. the Wait woken in regulator.enter
2. the Lock in the second regulator.exit

If the Lock in regulator.exit wins, it sees r.available equal to 1, so it skips the Signal, and the second Wait is never woken.

Signed-off-by: Oleg Bulatov <obulatov@redhat.com>
parent 1e2f10eb65
commit 258345ba0d
2 changed files with 68 additions and 5 deletions
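The hunk below shows only exit. For context, here is a minimal sketch of the enter side it pairs with, reconstructed from the identifiers that appear in this commit (r.L, r.Wait, r.Signal, r.available); the upstream declarations themselves are not part of this diff, so treat the exact shape of the type as an assumption.

```go
package base

import "sync"

// Sketch only: a regulator as implied by this commit. The real upstream
// definition is not shown in the hunks below.
type regulator struct {
	*sync.Cond        // provides L, Wait, Signal
	available uint64  // free concurrency slots
}

// enter blocks until a slot is free. Wait atomically releases r.L and parks
// the goroutine, so a goroutine sleeping here can only resume after some
// exit() calls Signal; a lost Signal therefore means waiting forever.
func (r *regulator) enter() {
	r.L.Lock()
	for r.available == 0 {
		r.Wait()
	}
	r.available--
	r.L.Unlock()
}
```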
```diff
@@ -38,11 +38,7 @@ func (r *regulator) enter() {
 
 func (r *regulator) exit() {
 	r.L.Lock()
-	// We only need to signal to a waiting FS operation if we're already at the
-	// limit of threads used
-	if r.available == 0 {
-		r.Signal()
-	}
+	r.Signal()
 	r.available++
 	r.L.Unlock()
 }
```
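Read back as plain Go (using the sketched type above), the patched exit signals unconditionally. sync.Cond.Signal is a no-op when no goroutine is waiting, so the removed fast-path check saved essentially nothing, while keeping it cost correctness: every exit must wake one waiter to cover the case where another exit wins the lock first.

```go
// exit releases a slot. Signalling unconditionally is safe: with no waiters
// sync.Cond.Signal does nothing, and with waiters each exit now wakes exactly
// one of them, so no Wait can be stranded by the interleaving described in
// the commit message.
func (r *regulator) exit() {
	r.L.Lock()
	r.Signal()
	r.available++
	r.L.Unlock()
}
```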
registry/storage/driver/base/regulator_test.go (new file, 67 lines):
```go
package base

import (
	"sync"
	"testing"
	"time"
)

func TestRegulatorEnterExit(t *testing.T) {
	const limit = 500

	r := NewRegulator(nil, limit).(*regulator)

	for try := 0; try < 50; try++ {
		run := make(chan struct{})

		var firstGroupReady sync.WaitGroup
		var firstGroupDone sync.WaitGroup
		firstGroupReady.Add(limit)
		firstGroupDone.Add(limit)
		for i := 0; i < limit; i++ {
			go func() {
				r.enter()
				firstGroupReady.Done()
				<-run
				r.exit()
				firstGroupDone.Done()
			}()
		}
		firstGroupReady.Wait()

		// now that we've exhausted the limit, run a few more goroutines
		var secondGroupReady sync.WaitGroup
		var secondGroupDone sync.WaitGroup
		for i := 0; i < 50; i++ {
			secondGroupReady.Add(1)
			secondGroupDone.Add(1)
			go func() {
				secondGroupReady.Done()
				r.enter()
				r.exit()
				secondGroupDone.Done()
			}()
		}
		secondGroupReady.Wait()

		// allow the first group to return resources
		close(run)

		done := make(chan struct{})
		go func() {
			secondGroupDone.Wait()
			close(done)
		}()
		select {
		case <-done:
		case <-time.After(5 * time.Second):
			t.Fatal("some r.enter() are still locked")
		}

		firstGroupDone.Wait()

		if r.available != limit {
			t.Fatalf("r.available: got %d, want %d", r.available, limit)
		}
	}
}
```
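The test hammers the regulator 50 times with limit+50 goroutines per round and fails if any enter is still parked after five seconds. To exercise it locally, something like the following should work (path taken from the file header above):

```
go test -race -run TestRegulatorEnterExit ./registry/storage/driver/base/
```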