Do not use the write-cache as a read cache: always remove objects from the WC, not only when an object has not been used for some time (the LRU cache is dropped). Use object size in bytes as the metric of used space, instead of an approximate (and too inaccurate) maximum number of stored objects.

Signed-off-by: Pavel Karpy <p.karpy@yadro.com>
134 lines · 2.5 KiB · Go
package writecache

import (
	"context"
	"errors"
	"fmt"
	"sync"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"go.etcd.io/bbolt"
	"go.uber.org/atomic"
)

// sizeIfAdd returns the estimated write-cache size in bytes if an object
// of delta size were added to it.
func (c *Cache) sizeIfAdd(delta uint64) uint64 {
	return delta + c.objCounters.fstreeSize.Load() + c.objCounters.dbSize.Load()
}

// counters tracks the used space, in bytes, of the database and the
// FSTree parts of the write-cache.
type counters struct {
	dbSize, fstreeSize atomic.Uint64
}

func (x *counters) incDB(delta int) {
	x.dbSize.Add(uint64(delta))
}

func (x *counters) decDB(delta int) {
	x.dbSize.Sub(uint64(delta))
}

func (x *counters) incFS(delta int) {
	x.fstreeSize.Add(uint64(delta))
}

func (x *counters) decFS(delta int) {
	x.fstreeSize.Sub(uint64(delta))
}

// initCounters calculates the initial sizes of both write-cache storages
// concurrently and stores the results in c.objCounters.
func (c *Cache) initCounters(ctx context.Context) error {
	var wg sync.WaitGroup
	var dbErr error
	var fsErr error

	wg.Add(1)
	go func() {
		dbErr = c.initDBSizeCounter(ctx)
		wg.Done()
	}()

	wg.Add(1)
	go func() {
		fsErr = c.initFSSizeCounter(ctx)
		wg.Done()
	}()

	wg.Wait()

	switch {
	case dbErr != nil:
		return fmt.Errorf("database counter initialization: %w", dbErr)
	case fsErr != nil:
		return fmt.Errorf("FSTree counter initialization: %w", fsErr)
	default:
		return nil
	}
}

// stopIter is a sentinel error used to interrupt storage iteration early,
// e.g. on write-cache shutdown.
var stopIter = errors.New("stop")

// initDBSizeCounter sums the sizes of all values in the default bucket
// and stores the total as the initial database size counter.
func (c *Cache) initDBSizeCounter(ctx context.Context) error {
	var inDB int
	err := c.db.View(func(tx *bbolt.Tx) error {
		b := tx.Bucket(defaultBucket)
		if b == nil {
			return nil
		}

		return b.ForEach(func(_, v []byte) error {
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-c.workersChan:
				return stopIter
			default:
			}

			inDB += len(v)
			return nil
		})
	})
	if err != nil && !errors.Is(err, stopIter) {
		return fmt.Errorf("could not read write-cache DB counter: %w", err)
	}

	c.objCounters.dbSize.Store(uint64(inDB))

	return nil
}

// initFSSizeCounter sums the sizes of all objects stored in the FSTree
// and stores the total as the initial FSTree size counter.
func (c *Cache) initFSSizeCounter(ctx context.Context) error {
	var inFSTree int

	var prm common.IteratePrm
	prm.LazyHandler = func(address oid.Address, f func() ([]byte, error)) error {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-c.workersChan:
			return stopIter
		default:
		}

		data, err := f()
		if err != nil {
			return err
		}

		// write-cache is a temporary storage on a fast disk,
		// so it is not expected to be configured with any
		// compressor ever
		inFSTree += len(data)

		return nil
	}

	_, err := c.fsTree.Iterate(prm)
	if err != nil && !errors.Is(err, stopIter) {
		return fmt.Errorf("could not read write-cache FSTree counter: %w", err)
	}

	c.objCounters.fstreeSize.Store(uint64(inFSTree))

	return nil
}
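For context, below is a minimal sketch of how these byte counters could gate new writes on the put path. It is not part of the file above: maxCacheSize, errOutOfSpace, and putIfFits are hypothetical names invented for illustration, and the actual put logic in frostfs-node may differ.

// A minimal usage sketch, NOT part of the file above: maxCacheSize,
// errOutOfSpace, and putIfFits are hypothetical names used only to
// illustrate how the byte counters could gate new writes.
package writecache

import "errors"

// errOutOfSpace is a hypothetical sentinel for a full write-cache.
var errOutOfSpace = errors.New("write-cache is full")

// putIfFits admits an object only if the projected cache size after the
// write stays within maxCacheSize bytes. The check is racy between
// concurrent writers; a real implementation would have to handle that.
func (c *Cache) putIfFits(data []byte, maxCacheSize uint64, store func([]byte) error) error {
	if c.sizeIfAdd(uint64(len(data))) > maxCacheSize {
		return errOutOfSpace
	}
	if err := store(data); err != nil {
		return err
	}
	// Account for the stored bytes; an object written to the DB part
	// would be counted via incDB instead.
	c.objCounters.incFS(len(data))
	return nil
}

Checking the projected size before the write keeps the cache strictly byte-bounded, which is exactly the metric the commit switches to: bytes of used space rather than an approximate count of stored objects.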