Merge pull request #3774 from greatroar/archiver-pool

archiver: Remove cleanup goroutine from BufferPool
This commit is contained in:
MichaelEischer 2022-06-04 18:50:24 +02:00 committed by GitHub
commit 60ca6b1418
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
2 changed files with 15 additions and 50 deletions

View file

@@ -1,52 +1,44 @@
 package archiver
 
-import (
-	"context"
-	"sync"
-)
-
 // Buffer is a reusable buffer. After the buffer has been used, Release should
 // be called so the underlying slice is put back into the pool.
 type Buffer struct {
 	Data []byte
-	Put  func(*Buffer)
+	pool *BufferPool
 }
 
 // Release puts the buffer back into the pool it came from.
 func (b *Buffer) Release() {
-	if b.Put != nil {
-		b.Put(b)
+	pool := b.pool
+	if pool == nil || cap(b.Data) > pool.defaultSize {
+		return
+	}
+
+	select {
+	case pool.ch <- b:
+	default:
 	}
 }
 
 // BufferPool implements a limited set of reusable buffers.
 type BufferPool struct {
 	ch          chan *Buffer
-	chM         sync.Mutex
 	defaultSize int
-	clearOnce   sync.Once
}
 
-// NewBufferPool initializes a new buffer pool. When the context is cancelled,
-// all buffers are released. The pool stores at most max items. New buffers are
-// created with defaultSize, buffers that are larger are released and not put
-// back.
-func NewBufferPool(ctx context.Context, max int, defaultSize int) *BufferPool {
+// NewBufferPool initializes a new buffer pool. The pool stores at most max
+// items. New buffers are created with defaultSize. Buffers that have grown
+// larger are not put back.
+func NewBufferPool(max int, defaultSize int) *BufferPool {
 	b := &BufferPool{
 		ch:          make(chan *Buffer, max),
 		defaultSize: defaultSize,
 	}
-
-	go func() {
-		<-ctx.Done()
-		b.clear()
-	}()
-
 	return b
 }
 
 // Get returns a new buffer, either from the pool or newly allocated.
 func (pool *BufferPool) Get() *Buffer {
-	pool.chM.Lock()
-	defer pool.chM.Unlock()
 	select {
 	case buf := <-pool.ch:
 		return buf
@@ -54,36 +46,9 @@ func (pool *BufferPool) Get() *Buffer {
 	}
 
 	b := &Buffer{
-		Put:  pool.Put,
 		Data: make([]byte, pool.defaultSize),
+		pool: pool,
 	}
 
 	return b
 }
-
-// Put returns a buffer to the pool for reuse.
-func (pool *BufferPool) Put(b *Buffer) {
-	if cap(b.Data) > pool.defaultSize {
-		return
-	}
-
-	pool.chM.Lock()
-	defer pool.chM.Unlock()
-
-	select {
-	case pool.ch <- b:
-	default:
-	}
-}
-
-// clear empties the buffer so that all items can be garbage collected.
-func (pool *BufferPool) clear() {
-	pool.clearOnce.Do(func() {
-		ch := pool.ch
-		pool.chM.Lock()
-		pool.ch = nil
-		pool.chM.Unlock()
-		close(ch)
-		for range ch {
-		}
-	})
-}

View file

@@ -76,7 +76,7 @@ func NewFileSaver(ctx context.Context, t *tomb.Tomb, save SaveBlobFn, pol chunke
 	s := &FileSaver{
 		saveBlob:     save,
-		saveFilePool: NewBufferPool(ctx, int(poolSize), chunker.MaxSize),
+		saveFilePool: NewBufferPool(int(poolSize), chunker.MaxSize),
 		pol:          pol,
 		ch:           ch,