From 0db1d11b2e50d59fa6ce1c304db8f7a4152a30f2 Mon Sep 17 00:00:00 2001 From: greatroar <61184462+greatroar@users.noreply.github.com> Date: Sun, 29 May 2022 17:07:37 +0200 Subject: [PATCH] archiver: Remove cleanup goroutine from BufferPool This isn't doing anything. Channels should get cleaned up by the GC when the last reference to them disappears, just like all other data structures. Also inlined BufferPool.Put in Buffer.Release, its only caller. --- internal/archiver/buffer.go | 63 ++++++++------------------------- internal/archiver/file_saver.go | 2 +- 2 files changed, 15 insertions(+), 50 deletions(-) diff --git a/internal/archiver/buffer.go b/internal/archiver/buffer.go index ef7131322..39bda2668 100644 --- a/internal/archiver/buffer.go +++ b/internal/archiver/buffer.go @@ -1,52 +1,44 @@ package archiver -import ( - "context" - "sync" -) - // Buffer is a reusable buffer. After the buffer has been used, Release should // be called so the underlying slice is put back into the pool. type Buffer struct { Data []byte - Put func(*Buffer) + pool *BufferPool } // Release puts the buffer back into the pool it came from. func (b *Buffer) Release() { - if b.Put != nil { - b.Put(b) + pool := b.pool + if pool == nil || cap(b.Data) > pool.defaultSize { + return + } + + select { + case pool.ch <- b: + default: } } // BufferPool implements a limited set of reusable buffers. type BufferPool struct { ch chan *Buffer - chM sync.Mutex defaultSize int - clearOnce sync.Once } -// NewBufferPool initializes a new buffer pool. When the context is cancelled, -// all buffers are released. The pool stores at most max items. New buffers are -// created with defaultSize, buffers that are larger are released and not put -// back. -func NewBufferPool(ctx context.Context, max int, defaultSize int) *BufferPool { +// NewBufferPool initializes a new buffer pool. The pool stores at most max +// items. New buffers are created with defaultSize. Buffers that have grown +// larger are not put back. +func NewBufferPool(max int, defaultSize int) *BufferPool { b := &BufferPool{ ch: make(chan *Buffer, max), defaultSize: defaultSize, } - go func() { - <-ctx.Done() - b.clear() - }() return b } // Get returns a new buffer, either from the pool or newly allocated. func (pool *BufferPool) Get() *Buffer { - pool.chM.Lock() - defer pool.chM.Unlock() select { case buf := <-pool.ch: return buf @@ -54,36 +46,9 @@ func (pool *BufferPool) Get() *Buffer { } b := &Buffer{ - Put: pool.Put, Data: make([]byte, pool.defaultSize), + pool: pool, } return b } - -// Put returns a buffer to the pool for reuse. -func (pool *BufferPool) Put(b *Buffer) { - if cap(b.Data) > pool.defaultSize { - return - } - - pool.chM.Lock() - defer pool.chM.Unlock() - select { - case pool.ch <- b: - default: - } -} - -// clear empties the buffer so that all items can be garbage collected. -func (pool *BufferPool) clear() { - pool.clearOnce.Do(func() { - ch := pool.ch - pool.chM.Lock() - pool.ch = nil - pool.chM.Unlock() - close(ch) - for range ch { - } - }) -} diff --git a/internal/archiver/file_saver.go b/internal/archiver/file_saver.go index 24cc5e116..2c43aefa1 100644 --- a/internal/archiver/file_saver.go +++ b/internal/archiver/file_saver.go @@ -76,7 +76,7 @@ func NewFileSaver(ctx context.Context, t *tomb.Tomb, save SaveBlobFn, pol chunke s := &FileSaver{ saveBlob: save, - saveFilePool: NewBufferPool(ctx, int(poolSize), chunker.MaxSize), + saveFilePool: NewBufferPool(int(poolSize), chunker.MaxSize), pol: pol, ch: ch,
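
Note (not part of the patch): the sketch below illustrates how a caller interacts with the simplified pool API after this change. The helper name poolRoundTrip and the size arguments are hypothetical; NewBufferPool, Get and Release are the functions modified above.

    package archiver

    // poolRoundTrip is a hypothetical illustration of the post-patch API:
    // NewBufferPool no longer takes a context, and Buffer carries an
    // unexported *BufferPool instead of a Put callback.
    func poolRoundTrip() {
    	pool := NewBufferPool(4, 1<<20) // at most 4 pooled buffers, 1 MiB default size

    	buf := pool.Get()
    	// ... fill buf.Data with chunk contents ...

    	// Release puts the buffer back into pool.ch if there is room and the
    	// buffer has not grown past defaultSize; otherwise the buffer is simply
    	// dropped and reclaimed by the GC. Because an unreferenced channel is
    	// collected like any other value, no cleanup goroutine is needed to
    	// drain the pool when it goes out of scope.
    	buf.Release()
    }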