WIP: Revert writecache improvements #1609
3 changed files with 11 additions and 62 deletions

@@ -518,6 +518,5 @@ const (
     FailedToSealWritecacheAsync = "failed to seal writecache async"
     WritecacheShrinkSkippedNotEmpty = "writecache shrink skipped: not empty"
     BlobovniczatreeFailedToRemoveRebuildTempFile = "failed to remove rebuild temp file"
-    WritecacheCantGetObject = "can't get an object from fstree"
     FailedToUpdateMultinetConfiguration = "failed to update multinet configuration"
 )

@@ -10,7 +10,7 @@ import (
     "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
     "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
     "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
-    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+    objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
     "go.etcd.io/bbolt"
     "go.uber.org/zap"
 )

@@ -37,8 +37,9 @@ type cache struct {
 const wcStorageType = "write-cache"

 type objectInfo struct {
-    addr oid.Address
-    size uint64
+    addr string
+    data []byte
+    obj *objectSDK.Object
 }

 const (

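To make the struct change easier to read in isolation, here is a minimal sketch of the two shapes of the flush-queue entry that this hunk toggles between. The field names and types are taken from the diff; the type names `objectInfoByRef` and `objectInfoInline`, the comments, and the claim about when a second fstree read is needed are my own framing (in the code both variants are simply called `objectInfo`).

```go
package writecache

import (
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// Removed (left-hand) shape: the queue entry only references the object, so a
// flush worker has to read the payload back from fstree before flushing it.
type objectInfoByRef struct {
	addr oid.Address
	size uint64
}

// Added (right-hand) shape: the entry carries the string-encoded address, the
// raw payload and the decoded object, so no second read is needed at flush time.
type objectInfoInline struct {
	addr string
	data []byte
	obj  *objectSDK.Object
}
```

The import swap in the previous hunk (`oid "…/object/id"` replaced by `objectSDK "…/object"`) follows directly from this field change.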
@@ -13,12 +13,10 @@ import (
     objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
     "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
     "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
-    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
     "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
     meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
     "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
     "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
-    apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
     objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
     oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
     "go.etcd.io/bbolt"

@@ -31,7 +29,7 @@ const (
     // defaultFlushWorkersCount is number of workers for putting objects in main storage.
     defaultFlushWorkersCount = 20
     // defaultFlushInterval is default time interval between successive flushes.
-    defaultFlushInterval = 10 * time.Second
+    defaultFlushInterval = time.Second
 )

 var errIterationCompleted = errors.New("iteration completed")

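One arithmetic note on this constant, since it feeds the ticker in the hunk below: on the left the base interval is 10s and the flush loop ticks on `defaultFlushInterval` directly, while on the right the base drops to 1s but that loop multiplies it by 10. The period of that particular loop therefore works out to 10 seconds on both sides; only code paths using the bare constant would see the shorter 1s interval. A tiny sketch of the arithmetic:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Left-hand side of the diff: 10s base, the ticker uses the constant directly.
	oldBase := 10 * time.Second
	fmt.Println("old loop period:", oldBase) // 10s

	// Right-hand side: 1s base, the loop in the next hunk multiplies it by 10.
	newBase := time.Second
	fmt.Println("new loop period:", newBase*10) // still 10s
	fmt.Println("new base interval:", newBase)  // 1s where the constant is used directly
}
```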
@@ -43,41 +41,23 @@ func (c *cache) runFlushLoop(ctx context.Context) {
     }
     c.wg.Add(1)
     go func() {
-        defer c.wg.Done()
-        c.pushToFlushQueue(ctx)
+        c.workerFlushBig(ctx)
+        c.wg.Done()
     }()

-    for range c.workersCount {
-        c.wg.Add(1)
-        go c.workerFlush(ctx)
-    }
 }

-func (c *cache) pushToFlushQueue(ctx context.Context) {
-    tick := time.NewTicker(defaultFlushInterval)
+func (c *cache) workerFlushBig(ctx context.Context) {
+    tick := time.NewTicker(defaultFlushInterval * 10)
     for {
         select {
         case <-tick.C:
             c.modeMtx.RLock()
             if c.readOnly() || c.noMetabase() {
                 c.modeMtx.RUnlock()
-                continue
+                break
             }

-            err := c.fsTree.IterateInfo(ctx, func(oi fstree.ObjectInfo) error {
-                select {
-                case c.flushCh <- objectInfo{
-                    addr: oi.Address,
-                    size: oi.DataSize,
-                }:
-                    return nil
-                case <-ctx.Done():
-                    return ctx.Err()
-                }
-            })
-            if err != nil {
-                c.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, zap.Error(err))
-            }
-
+            _ = c.flushFSTree(ctx, true)
             c.modeMtx.RUnlock()
         case <-ctx.Done():

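For readers skimming the hunk above: the left-hand side implements a ticker-driven producer/consumer pipeline (one goroutine iterates fstree metadata and pushes `objectInfo` references into `flushCh`, while a pool of `workersCount` goroutines drains it), and the right-hand side collapses that into a single `workerFlushBig` loop that calls `flushFSTree` directly under the mode lock. Below is a compact, self-contained sketch of the removed pattern; `flushRef`, `store`, and `runPipeline` are hypothetical stand-ins for illustration, not frostfs-node APIs.

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// flushRef is a hypothetical stand-in for the queue entry: an address plus a
// size, mirroring the removed objectInfo{addr, size} produced by IterateInfo.
type flushRef struct {
	addr string
	size uint64
}

// runPipeline sketches the removed pattern: one producer goroutine ticks,
// walks the store and feeds refs into flushCh; a pool of workers drains it.
func runPipeline(ctx context.Context, store map[string]uint64, workers int, flush func(flushRef)) *sync.WaitGroup {
	flushCh := make(chan flushRef)
	var wg sync.WaitGroup

	wg.Add(1)
	go func() { // producer, analogous to pushToFlushQueue
		defer wg.Done()
		tick := time.NewTicker(100 * time.Millisecond)
		defer tick.Stop()
		for {
			select {
			case <-tick.C:
				for addr, size := range store {
					select {
					case flushCh <- flushRef{addr: addr, size: size}:
					case <-ctx.Done():
						return
					}
				}
			case <-ctx.Done():
				return
			}
		}
	}()

	for i := 0; i < workers; i++ { // worker pool, analogous to workerFlush
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				select {
				case ref := <-flushCh:
					flush(ref) // re-read, flush to main storage, delete from cache
				case <-ctx.Done():
					return
				}
			}
		}()
	}
	return &wg
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond)
	defer cancel()
	store := map[string]uint64{"obj-1": 42, "obj-2": 7}
	wg := runPipeline(ctx, store, 2, func(r flushRef) { fmt.Println("flushing", r.addr, r.size) })
	wg.Wait()
}
```

Nothing here is the actual frostfs-node flush logic; the sketch only illustrates why removing the pipeline touches `flushCh`, `workersCount`, and the separate `workerFlush` function (deleted in the next hunk) all at once.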
@@ -86,37 +66,6 @@ func (c *cache) pushToFlushQueue(ctx context.Context) {
     }
 }

-func (c *cache) workerFlush(ctx context.Context) {
-    defer c.wg.Done()
-
-    var objInfo objectInfo
-    for {
-        select {
-        case objInfo = <-c.flushCh:
-        case <-ctx.Done():
-            return
-        }
-
-        res, err := c.fsTree.Get(ctx, common.GetPrm{
-            Address: objInfo.addr,
-        })
-        if err != nil {
-            if !errors.As(err, new(*apistatus.ObjectNotFound)) {
-                c.reportFlushError(ctx, logs.WritecacheCantGetObject, objInfo.addr.EncodeToString(), metaerr.Wrap(err))
-            }
-            continue
-        }
-
-        err = c.flushObject(ctx, res.Object, res.RawData, StorageTypeFSTree)
-        if err != nil {
-            // Error is handled in flushObject.
-            continue
-        }
-
-        c.deleteFromDisk(ctx, objInfo.addr)
-    }
-}
-
 func (c *cache) reportFlushError(ctx context.Context, msg string, addr string, err error) {
     if c.reportError != nil {
         c.reportError(ctx, msg, err)
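One Go idiom in the removed `workerFlush` that is easy to misread: `errors.As(err, new(*apistatus.ObjectNotFound))` passes a freshly allocated `**apistatus.ObjectNotFound` as the target, which is the usual way to ask whether a typed error sits anywhere in the wrap chain when the concrete value itself is not needed. A minimal, self-contained illustration; `notFoundError` is a hypothetical stand-in, not the SDK status type:

```go
package main

import (
	"errors"
	"fmt"
)

// notFoundError stands in for a typed error such as apistatus.ObjectNotFound;
// only the errors.As idiom is the point here.
type notFoundError struct{}

func (*notFoundError) Error() string { return "object not found" }

func main() {
	err := fmt.Errorf("get object: %w", &notFoundError{})

	// new(*notFoundError) allocates a **notFoundError target, so errors.As
	// reports whether a *notFoundError is anywhere in the wrap chain without
	// the caller declaring a variable for it.
	if errors.As(err, new(*notFoundError)) {
		fmt.Println("not found: skip error reporting") // this branch runs
	}

	// The removed worker inverts the check to report only unexpected errors;
	// this branch is not reached for the wrapped not-found error above.
	if !errors.As(err, new(*notFoundError)) {
		fmt.Println("unexpected error: would call reportFlushError")
	}
}
```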