From 258345ba0d6d530babbeec91d7ae17dcdee7206f Mon Sep 17 00:00:00 2001
From: Oleg Bulatov
Date: Fri, 2 Jun 2017 13:17:42 +0200
Subject: [PATCH] Fix signalling Wait in regulator.enter

In some conditions, regulator.exit may not send a signal to a blocked
regulator.enter.

Suppose we are in the critical section of regulator.exit and r.available
is equal to 0, and there are three more goroutines: one also executes
regulator.exit and is waiting for the lock, while the other two run
regulator.enter and are waiting for the signal.

We send the signal, and after releasing the lock there is contention
for it between:

  1. Wait from regulator.enter
  2. Lock from regulator.exit

If Lock from regulator.exit wins, we will not send another signal to
unlock the second Wait.

Signed-off-by: Oleg Bulatov
---
 registry/storage/driver/base/regulator.go      |  6 +-
 .../storage/driver/base/regulator_test.go      | 67 +++++++++++++++++++
 2 files changed, 68 insertions(+), 5 deletions(-)
 create mode 100644 registry/storage/driver/base/regulator_test.go

diff --git a/registry/storage/driver/base/regulator.go b/registry/storage/driver/base/regulator.go
index 185160a4b..1e929f836 100644
--- a/registry/storage/driver/base/regulator.go
+++ b/registry/storage/driver/base/regulator.go
@@ -38,11 +38,7 @@ func (r *regulator) enter() {
 
 func (r *regulator) exit() {
 	r.L.Lock()
-	// We only need to signal to a waiting FS operation if we're already at the
-	// limit of threads used
-	if r.available == 0 {
-		r.Signal()
-	}
+	r.Signal()
 	r.available++
 	r.L.Unlock()
 }
diff --git a/registry/storage/driver/base/regulator_test.go b/registry/storage/driver/base/regulator_test.go
new file mode 100644
index 000000000..e4c0ad586
--- /dev/null
+++ b/registry/storage/driver/base/regulator_test.go
@@ -0,0 +1,67 @@
+package base
+
+import (
+	"sync"
+	"testing"
+	"time"
+)
+
+func TestRegulatorEnterExit(t *testing.T) {
+	const limit = 500
+
+	r := NewRegulator(nil, limit).(*regulator)
+
+	for try := 0; try < 50; try++ {
+		run := make(chan struct{})
+
+		var firstGroupReady sync.WaitGroup
+		var firstGroupDone sync.WaitGroup
+		firstGroupReady.Add(limit)
+		firstGroupDone.Add(limit)
+		for i := 0; i < limit; i++ {
+			go func() {
+				r.enter()
+				firstGroupReady.Done()
+				<-run
+				r.exit()
+				firstGroupDone.Done()
+			}()
+		}
+		firstGroupReady.Wait()
+
+		// now that we've exhausted the limit, start a few more goroutines
+		var secondGroupReady sync.WaitGroup
+		var secondGroupDone sync.WaitGroup
+		for i := 0; i < 50; i++ {
+			secondGroupReady.Add(1)
+			secondGroupDone.Add(1)
+			go func() {
+				secondGroupReady.Done()
+				r.enter()
+				r.exit()
+				secondGroupDone.Done()
+			}()
+		}
+		secondGroupReady.Wait()
+
+		// allow the first group to return its grants
+		close(run)
+
+		done := make(chan struct{})
+		go func() {
+			secondGroupDone.Wait()
+			close(done)
+		}()
+		select {
+		case <-done:
+		case <-time.After(5 * time.Second):
+			t.Fatal("some r.enter() calls are still blocked")
+		}
+
+		firstGroupDone.Wait()
+
+		if r.available != limit {
+			t.Fatalf("r.available: got %d, want %d", r.available, limit)
+		}
+	}
+}
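
For readers who want to experiment with the scenario outside the registry tree, below is a minimal, standalone sketch of the cond-based counting semaphore that the regulator implements, using the patched unconditional Signal in exit. It is an illustration, not the driver code: it assumes enter blocks in a for-loop around r.Wait() while r.available == 0 (as the hunk context suggests), and it omits the embedded storagedriver.StorageDriver that the real regulator wraps.

// regulator_sketch.go - a simplified, self-contained version of the
// enter/exit logic touched by this patch. Names and structure beyond what
// the diff shows (newRegulator, the enter loop) are assumptions.
package main

import (
	"fmt"
	"sync"
)

type regulator struct {
	*sync.Cond
	available uint64 // remaining grants; enter blocks while this is 0
}

func newRegulator(limit uint64) *regulator {
	return &regulator{
		Cond:      sync.NewCond(&sync.Mutex{}),
		available: limit,
	}
}

// enter consumes one grant, blocking on the condition variable while none
// are available. Wait atomically releases r.L and reacquires it on wakeup.
func (r *regulator) enter() {
	r.L.Lock()
	for r.available == 0 {
		r.Wait()
	}
	r.available--
	r.L.Unlock()
}

// exit returns a grant. Signalling unconditionally (the patched behaviour)
// is what guarantees every blocked enter is eventually woken: with the old
// `if r.available == 0` guard, a second exit running after the first has
// already bumped available to 1 skips the Signal, and one waiter can stay
// parked even though a grant is free.
func (r *regulator) exit() {
	r.L.Lock()
	r.Signal()
	r.available++
	r.L.Unlock()
}

func main() {
	const limit = 2
	r := newRegulator(limit)

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			r.enter()
			fmt.Printf("goroutine %d holds a grant\n", id)
			r.exit()
		}(i)
	}
	wg.Wait()
}

Signal on a sync.Cond with no goroutines parked in Wait is a no-op, so dropping the available == 0 guard costs almost nothing and ensures that every exit wakes at most one waiter, closing the window described in the commit message.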