frostfs-node/pkg/local_object_storage/writecache/state.go
Leonard Lyubich a1696a81b6 [#776] writecache: Limit size of used disk space
There is a need to limit the disk space used by the write-cache. It is almost
impossible to calculate that value exactly, so it is proposed to estimate the
size of the cache by the number of objects stored in it.

Track the numbers of objects saved in the DB and in the FSTree separately. To
do this, the `ObjectCounters` interface is defined. It is generalized to a
store of numbers that can be made persistent (new option `WithObjectCounters`).
By default, the DB counter is calculated as the number of keys in the default
bucket, and the FS counter is set equal to the DB one, since it is currently
hard to read the actual value from the `FSTree` instance. Each PUT/DELETE
operation on the DB or FS increases/decreases the corresponding counter.
Before each PUT, an overflow check is performed with the following formula for
evaluating the occupied space: `NumDB * MaxDBSize + NumFS * MaxFSSize` (see
the sketch below). If the next PUT could overflow the write-cache, the object
is written directly to the main storage.
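
In code, the pre-PUT check might look like the following minimal sketch. It is
hypothetical, not the commit's actual PUT logic: it assumes the package's
`cache` type carries a `maxCacheSize` limit field and builds on the
`estimateCacheSize`/`incSizeDB`/`incSizeFS` helpers defined in the file below.

// fitsInCache is a hypothetical helper sketching the overflow check:
// it estimates the occupied space after one more object is cached and
// reports whether the write-cache limit would still be respected.
func (c *cache) fitsInCache(toFS bool) bool {
	size := c.estimateCacheSize() // NumDB*smallObjectSize + NumFS*maxObjectSize
	if toFS {
		size = c.incSizeFS(size) // a big object would go to the FSTree
	} else {
		size = c.incSizeDB(size) // a small object would go to the DB
	}
	return size <= c.maxCacheSize // maxCacheSize is an assumed limit field
}

If such a check fails, the object bypasses the cache and goes to the main
storage.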

By default, the maximum write-cache size is set to 1 GB.
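
A configuration sketch, assuming the package exposes a `New` constructor and a
`WithMaxCacheSize` option in the same options pattern that `WithObjectCounters`
extends (both names here are assumptions for illustration):

// construct a write-cache with a lower limit than the 1 GB default
wc := writecache.New(
	writecache.WithMaxCacheSize(512 << 20), // 512 MiB
)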

Signed-off-by: Leonard Lyubich <leonard@nspcc.ru>
2021-09-15 18:07:36 +03:00

package writecache

import (
	"fmt"

	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/fstree"
	"go.etcd.io/bbolt"
	"go.uber.org/atomic"
)
// ObjectCounters is an interface of the storage of cached object amounts.
type ObjectCounters interface {
	// IncDB increments the number of objects saved in the DB.
	IncDB()
	// DecDB decrements the number of objects saved in the DB.
	DecDB()
	// DB returns the number of objects saved in the DB.
	DB() uint64

	// IncFS increments the number of objects saved in the FSTree.
	IncFS()
	// DecFS decrements the number of objects saved in the FSTree.
	DecFS()
	// FS returns the number of objects saved in the FSTree.
	FS() uint64

	// Read reads the numbers of objects saved in the write-cache.
	// It is called on the write-cache initialization step.
	Read() error
	// FlushAndClose flushes the values and closes the storage.
	// It is called on write-cache shutdown.
	FlushAndClose()
}
// estimateCacheSize returns an upper-bound estimate of the occupied space:
// each object in the DB is counted at the small-object size limit, and each
// object in the FSTree at the maximum object size.
func (c *cache) estimateCacheSize() uint64 {
	return c.objCounters.DB()*c.smallObjectSize + c.objCounters.FS()*c.maxObjectSize
}

// incSizeDB returns the estimated cache size after one more object is put into the DB.
func (c *cache) incSizeDB(sz uint64) uint64 {
	return sz + c.smallObjectSize
}

// incSizeFS returns the estimated cache size after one more object is put into the FSTree.
func (c *cache) incSizeFS(sz uint64) uint64 {
	return sz + c.maxObjectSize
}
// counters is the default ObjectCounters implementation: it keeps both
// numbers in memory and restores the DB one from the underlying storage
// on Read.
type counters struct {
	cDB, cFS atomic.Uint64

	db *bbolt.DB

	fs *fstree.FSTree
}
func (x *counters) IncDB() {
	x.cDB.Inc()
}

func (x *counters) DecDB() {
	x.cDB.Dec()
}

func (x *counters) DB() uint64 {
	return x.cDB.Load()
}

func (x *counters) IncFS() {
	x.cFS.Inc()
}

func (x *counters) DecFS() {
	x.cFS.Dec()
}

func (x *counters) FS() uint64 {
	return x.cFS.Load()
}
func (x *counters) Read() error {
	var inDB uint64

	err := x.db.View(func(tx *bbolt.Tx) error {
		b := tx.Bucket(defaultBucket)
		if b != nil {
			inDB = uint64(b.Stats().KeyN)
		}

		return nil
	})
	if err != nil {
		return fmt.Errorf("could not read write-cache DB counter: %w", err)
	}

	x.cDB.Store(inDB)

	// FIXME: calculate the actual value in FSTree (new method?).
	//  For now we can think that db/fs = 50/50.
	x.cFS.Store(inDB)

	return nil
}
func (x *counters) FlushAndClose() {
	// values aren't stored
}
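
For reference, a hypothetical in-package test sketch (not part of the commit)
showing how Read restores the DB counter from the bucket's key count and
mirrors it into the FS counter; it would live in a _test.go file importing
"path/filepath", "testing", "go.etcd.io/bbolt" and
"github.com/stretchr/testify/require":

func TestCountersRead(t *testing.T) {
	db, err := bbolt.Open(filepath.Join(t.TempDir(), "small.bolt"), 0600, nil)
	require.NoError(t, err)
	defer db.Close()

	// put three keys into the default bucket
	require.NoError(t, db.Update(func(tx *bbolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists(defaultBucket)
		if err != nil {
			return err
		}
		for _, k := range []string{"a", "b", "c"} {
			if err := b.Put([]byte(k), []byte{1}); err != nil {
				return err
			}
		}
		return nil
	}))

	x := &counters{db: db}
	require.NoError(t, x.Read())
	require.EqualValues(t, 3, x.DB())
	require.EqualValues(t, 3, x.FS()) // mirrored until FSTree can report its own count
}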