All checks were successful
ci/woodpecker/pr/pre-commit Pipeline was successful
Do not use write-cache as a read cache: always remove objects from the WC, not only if an object hasn't been used for some time (LRU cache is dropped). Use object size (in bytes) as a metric of used space, not an approximate (and too inaccurate) maximum stored objects number. Signed-off-by: Pavel Karpy <p.karpy@yadro.com>
449 lines
9.6 KiB
Go
449 lines
9.6 KiB
Go
package writecache
|
|
|
|
import (
|
|
"bytes"
|
|
"context"
|
|
"errors"
|
|
"time"
|
|
|
|
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
|
|
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
|
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
|
|
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
|
|
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
|
|
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
|
|
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
|
|
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
"github.com/mr-tron/base58"
|
|
"github.com/nspcc-dev/neo-go/pkg/util/slice"
|
|
"go.etcd.io/bbolt"
|
|
"go.opentelemetry.io/otel/attribute"
|
|
"go.opentelemetry.io/otel/trace"
|
|
"go.uber.org/zap"
|
|
)
|
|
|
|
const (
	// flushBatchSize is amount of keys which will be read from cache to be flushed
	// to the main storage. It is used to reduce contention between cache put
	// and cache persist.
	flushBatchSize = 512
	// defaultFlushWorkersCount is number of workers for putting objects in main storage.
	defaultFlushWorkersCount = 20
	// defaultFlushInterval is default time interval between successive flushes.
	defaultFlushInterval = time.Second
)
|
|
|
|
// objWithData couples a decoded object with its original marshaled
// representation so that flushing can pass the raw bytes to the blobstor
// without re-marshaling.
type objWithData struct {
	// obj is the decoded object.
	obj *object.Object
	// data is the raw marshaled form of obj as read from the cache.
	data []byte
}
|
|
|
|
// runFlushLoop starts background workers which periodically flush objects to the blobstor.
|
|
func (c *Cache) runFlushLoop() {
|
|
for i := 0; i < c.workersCount; i++ {
|
|
c.wg.Add(1)
|
|
go c.smallObjectsFlusher()
|
|
}
|
|
|
|
c.wg.Add(1)
|
|
go func() {
|
|
c.flushBigObjects(context.TODO())
|
|
c.wg.Done()
|
|
}()
|
|
|
|
c.wg.Add(1)
|
|
go func() {
|
|
defer c.wg.Done()
|
|
|
|
tt := time.NewTimer(defaultFlushInterval)
|
|
defer tt.Stop()
|
|
|
|
for {
|
|
select {
|
|
case <-tt.C:
|
|
c.flushSmallObjects()
|
|
tt.Reset(defaultFlushInterval)
|
|
case <-c.workersChan:
|
|
return
|
|
}
|
|
}
|
|
}()
|
|
}
|
|
|
|
// flushSmallObjects drains the in-memory DB in batches of at most
// flushBatchSize objects and hands each decoded object to the flush
// workers via c.smallFlushCh. It returns when the cache is shutting
// down (c.workersChan is closed) or when a full pass over the DB
// produces no flushable objects.
func (c *Cache) flushSmallObjects() {
	var lastKey []byte
	var m []objectInfo
	for {
		select {
		case <-c.workersChan:
			return
		default:
		}

		// Reuse the batch slice between iterations.
		m = m[:0]

		// We put objects in batches of fixed size to not interfere with main put cycle a lot.
		_ = c.db.View(func(tx *bbolt.Tx) error {
			b := tx.Bucket(defaultBucket)
			cs := b.Cursor()

			var k, v []byte

			if len(lastKey) == 0 {
				k, v = cs.First()
			} else {
				// Resume after the last key of the previous batch; Seek
				// may land exactly on lastKey, which must then be skipped.
				k, v = cs.Seek(lastKey)
				if bytes.Equal(k, lastKey) {
					k, v = cs.Next()
				}
			}

			for ; k != nil && len(m) < flushBatchSize; k, v = cs.Next() {
				// Reuse lastKey's backing array when lengths match to
				// avoid an allocation per visited key.
				if len(lastKey) == len(k) {
					copy(lastKey, k)
				} else {
					lastKey = slice.Copy(k)
				}

				// k and v are only valid within this transaction, so the
				// value is copied out.
				m = append(m, objectInfo{
					addr: string(k),
					data: slice.Copy(v),
				})
			}
			return nil
		})

		var count int
		for i := range m {
			obj := object.New()
			data := m[i].data

			// Entries that fail to unmarshal are skipped (and left in the DB).
			if err := obj.Unmarshal(data); err != nil {
				continue
			}

			count++
			select {
			case c.smallFlushCh <- objWithData{obj: obj, data: data}:
			case <-c.workersChan:
				return
			}
		}

		// Nothing was handed to the workers: the pass is finished.
		if count == 0 {
			break
		}

		c.log.Debug(logs.WritecacheTriedToFlushItemsFromWritecache,
			zap.Int("count", count),
			zap.String("start", base58.Encode(lastKey)))
	}
}
|
|
|
|
func (c *Cache) flushBigObjects(ctx context.Context) {
|
|
tick := time.NewTicker(defaultFlushInterval * 10)
|
|
for {
|
|
select {
|
|
case <-tick.C:
|
|
c.modeMtx.RLock()
|
|
if c.readOnly() {
|
|
c.modeMtx.RUnlock()
|
|
break
|
|
}
|
|
|
|
_ = c.flushFSTree(ctx, true)
|
|
|
|
c.modeMtx.RUnlock()
|
|
case <-c.workersChan:
|
|
return
|
|
}
|
|
}
|
|
}
|
|
|
|
func (c *Cache) reportFlushError(msg string, addr string, err error) {
|
|
if c.reportError != nil {
|
|
c.reportError(msg, err)
|
|
} else {
|
|
c.log.Error(msg,
|
|
zap.String("address", addr),
|
|
zap.Error(err))
|
|
}
|
|
}
|
|
|
|
// flushFSTree iterates over every object stored in the FSTree part of the
// write-cache, flushes each one to the main storage and, on success, removes
// it from the FSTree (accounting len(data) bytes as freed). With ignoreErrors
// set, per-object failures are reported via reportFlushError and skipped
// instead of aborting the iteration. A nil error is returned when iteration
// is stopped by worker shutdown.
func (c *Cache) flushFSTree(ctx context.Context, ignoreErrors bool) error {
	var prm common.IteratePrm
	prm.IgnoreErrors = ignoreErrors
	prm.LazyHandler = func(addr oid.Address, f func() ([]byte, error)) error {
		sAddr := addr.EncodeToString()

		// Abort early on worker shutdown or context cancellation.
		select {
		case <-c.workersChan:
			return stopIter
		case <-ctx.Done():
			return ctx.Err()
		default:
		}

		data, err := f()
		if err != nil {
			c.reportFlushError("can't read a file", sAddr, err)
			if ignoreErrors {
				return nil
			}
			return err
		}

		var obj object.Object
		err = obj.Unmarshal(data)
		if err != nil {
			c.reportFlushError("can't unmarshal an object", sAddr, err)
			if ignoreErrors {
				return nil
			}
			return err
		}

		// flushObject reports its own failures, so no report here.
		err = c.flushObject(ctx, objWithData{obj: &obj, data: data})
		if err != nil {
			if ignoreErrors {
				return nil
			}
			return err
		}

		// len(data) is the stored size used for space accounting.
		err = c.dropBigObject(ctx, addr, len(data))
		if err != nil {
			c.reportFlushError("can't drop an object from FSTree", sAddr, err)
			if ignoreErrors {
				return nil
			}
			return err
		}

		return nil
	}

	_, err := c.fsTree.Iterate(prm)
	// stopIter is a sentinel signaling shutdown, not a real failure.
	if errors.Is(err, stopIter) {
		return nil
	}

	return err
}
|
|
|
|
// smallObjectsFlusher writes small objects to the main storage.
|
|
func (c *Cache) smallObjectsFlusher() {
|
|
defer c.wg.Done()
|
|
|
|
var objAndData objWithData
|
|
for {
|
|
// Give priority to direct put.
|
|
select {
|
|
case objAndData = <-c.smallFlushCh:
|
|
case <-c.workersChan:
|
|
return
|
|
}
|
|
|
|
err := c.flushObject(context.TODO(), objAndData)
|
|
if err == nil {
|
|
addr := objectCore.AddressOf(objAndData.obj)
|
|
|
|
err = c.dropSmallObject(context.TODO(), addr)
|
|
if err != nil {
|
|
c.reportFlushError("can't drop object from write-cache",
|
|
addr.EncodeToString(), err)
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
// flushObject is used to write object directly to the main storage.
// data must be the marshaled form of obj; it is handed to blobstor as raw
// data to avoid re-marshaling. After a successful put the object's storage
// ID is updated in the metabase. If the metabase reports the object as
// missing or already removed, the cache entry is considered outdated and
// nil is returned so callers may drop it.
func (c *Cache) flushObject(ctx context.Context, objAndData objWithData) error {
	obj := objAndData.obj
	data := objAndData.data
	addr := objectCore.AddressOf(obj)

	var prm common.PutPrm
	prm.Object = obj
	prm.RawData = data

	res, err := c.blobstor.Put(ctx, prm)
	if err != nil {
		// No-space, read-only and no-place conditions are expected
		// operational states, not errors worth reporting.
		if !errors.Is(err, common.ErrNoSpace) && !errors.Is(err, common.ErrReadOnly) &&
			!errors.Is(err, blobstor.ErrNoPlaceFound) {
			c.reportFlushError("can't flush an object to blobstor",
				addr.EncodeToString(), err)
		}
		return err
	}

	var updPrm meta.UpdateStorageIDPrm
	updPrm.SetAddress(addr)
	updPrm.SetStorageID(res.StorageID)

	_, err = c.metabase.UpdateStorageID(updPrm)
	if err != nil {
		if errors.As(err, new(apistatus.ObjectNotFound)) || errors.As(err, new(apistatus.ObjectAlreadyRemoved)) {
			// object info is outdated in the WC
			return nil
		}

		c.reportFlushError("can't update object storage ID",
			addr.EncodeToString(), err)
	}
	// err is either nil or the UpdateStorageID failure reported above.
	return err
}
|
|
|
|
// Flush flushes all objects from the write-cache to the main storage.
|
|
// Write-cache must be in readonly mode to ensure correctness of an operation and
|
|
// to prevent interference with background flush workers.
|
|
func (c *Cache) Flush(ctx context.Context, ignoreErrors bool) error {
|
|
ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Flush",
|
|
trace.WithAttributes(
|
|
attribute.Bool("ignore_errors", ignoreErrors),
|
|
))
|
|
defer span.End()
|
|
|
|
c.modeMtx.RLock()
|
|
defer c.modeMtx.RUnlock()
|
|
|
|
return c.flush(ctx, ignoreErrors)
|
|
}
|
|
|
|
func (c *Cache) flush(ctx context.Context, ignoreErrors bool) error {
|
|
if err := c.flushFSTree(ctx, ignoreErrors); err != nil {
|
|
return err
|
|
}
|
|
|
|
var dbFunc func(func(*bbolt.Tx) error) error
|
|
if c.readOnly() {
|
|
dbFunc = c.db.View
|
|
} else {
|
|
dbFunc = c.db.Update
|
|
}
|
|
|
|
return dbFunc(func(tx *bbolt.Tx) error {
|
|
var addr oid.Address
|
|
|
|
b := tx.Bucket(defaultBucket)
|
|
cs := b.Cursor()
|
|
for k, data := cs.Seek(nil); k != nil; k, data = cs.Next() {
|
|
sa := string(k)
|
|
|
|
if err := addr.DecodeString(sa); err != nil {
|
|
c.reportFlushError("can't decode object address from the DB", sa, err)
|
|
if ignoreErrors {
|
|
continue
|
|
}
|
|
return err
|
|
}
|
|
|
|
var obj object.Object
|
|
if err := obj.Unmarshal(data); err != nil {
|
|
c.reportFlushError("can't unmarshal an object from the DB", sa, err)
|
|
if ignoreErrors {
|
|
continue
|
|
}
|
|
return err
|
|
}
|
|
|
|
err := c.flushObject(ctx, objWithData{obj: &obj, data: data})
|
|
if err != nil {
|
|
if ignoreErrors {
|
|
continue
|
|
}
|
|
|
|
return err
|
|
}
|
|
|
|
if c.readOnly() {
|
|
continue
|
|
}
|
|
|
|
removed, err := dropObject(tx, k)
|
|
if err != nil {
|
|
c.reportFlushError("can't drop an object from the DB", sa, err)
|
|
if ignoreErrors {
|
|
continue
|
|
}
|
|
}
|
|
|
|
storagelog.Write(c.log,
|
|
storagelog.AddressField(addr),
|
|
storagelog.StorageTypeField(wcStorageType),
|
|
storagelog.OpField("db DELETE"),
|
|
)
|
|
c.objCounters.decDB(removed)
|
|
}
|
|
return nil
|
|
})
|
|
}
|
|
|
|
func (c *Cache) dropSmallObject(ctx context.Context, addr oid.Address) error {
|
|
var removedBytes int
|
|
key := []byte(addr.EncodeToString())
|
|
var err error
|
|
|
|
err = c.db.Batch(func(tx *bbolt.Tx) error {
|
|
select {
|
|
case <-c.workersChan:
|
|
return nil
|
|
case <-ctx.Done():
|
|
return ctx.Err()
|
|
default:
|
|
}
|
|
|
|
removedBytes, err = dropObject(tx, key)
|
|
|
|
return err
|
|
|
|
})
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
storagelog.Write(c.log,
|
|
storagelog.AddressField(addr),
|
|
storagelog.StorageTypeField(wcStorageType),
|
|
storagelog.OpField("db DELETE"),
|
|
)
|
|
|
|
if removedBytes > 0 {
|
|
c.objCounters.decDB(removedBytes)
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
func dropObject(tx *bbolt.Tx, key []byte) (int, error) {
|
|
b := tx.Bucket(defaultBucket)
|
|
|
|
removedBytes := len(b.Get(key))
|
|
if removedBytes > 0 {
|
|
return removedBytes, b.Delete(key)
|
|
}
|
|
|
|
return 0, nil
|
|
}
|
|
|
|
func (c *Cache) dropBigObject(ctx context.Context, addr oid.Address, size int) error {
|
|
_, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr})
|
|
if err != nil {
|
|
if errors.As(err, new(apistatus.ObjectNotFound)) {
|
|
return nil
|
|
}
|
|
|
|
return err
|
|
}
|
|
|
|
storagelog.Write(c.log,
|
|
storagelog.AddressField(addr),
|
|
storagelog.StorageTypeField(wcStorageType),
|
|
storagelog.OpField("fstree DELETE"),
|
|
)
|
|
c.objCounters.decFS(size)
|
|
|
|
return nil
|
|
}
|