From 82a30c0775cd46824299d61d293467778cf1bf8d Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Thu, 24 Aug 2023 17:44:09 +0300 Subject: [PATCH 01/11] [#645] blobstor: Add simple blobtree impl Signed-off-by: Dmitrii Stepanov --- .../blobstor/blobtree/blobtree.go | 80 ++++++++ .../blobstor/blobtree/config.go | 14 ++ .../blobstor/blobtree/content.go | 191 ++++++++++++++++++ .../blobstor/blobtree/control.go | 98 +++++++++ .../blobstor/blobtree/delete.go | 124 ++++++++++++ .../blobstor/blobtree/dispatcher.go | 94 +++++++++ .../blobstor/blobtree/dispatcher_test.go | 29 +++ .../blobstor/blobtree/exists.go | 54 +++++ .../blobstor/blobtree/generic_test.go | 39 ++++ .../blobstor/blobtree/get.go | 107 ++++++++++ .../blobstor/blobtree/get_range.go | 28 +++ .../blobstor/blobtree/iterate.go | 96 +++++++++ .../blobstor/blobtree/option.go | 29 +++ .../blobstor/blobtree/put.go | 88 ++++++++ .../blobstor/perf_test.go | 10 + pkg/util/sync/key_locker.go | 46 ++++- pkg/util/sync/key_locker_test.go | 16 +- 17 files changed, 1133 insertions(+), 10 deletions(-) create mode 100644 pkg/local_object_storage/blobstor/blobtree/blobtree.go create mode 100644 pkg/local_object_storage/blobstor/blobtree/config.go create mode 100644 pkg/local_object_storage/blobstor/blobtree/content.go create mode 100644 pkg/local_object_storage/blobstor/blobtree/control.go create mode 100644 pkg/local_object_storage/blobstor/blobtree/delete.go create mode 100644 pkg/local_object_storage/blobstor/blobtree/dispatcher.go create mode 100644 pkg/local_object_storage/blobstor/blobtree/dispatcher_test.go create mode 100644 pkg/local_object_storage/blobstor/blobtree/exists.go create mode 100644 pkg/local_object_storage/blobstor/blobtree/generic_test.go create mode 100644 pkg/local_object_storage/blobstor/blobtree/get.go create mode 100644 pkg/local_object_storage/blobstor/blobtree/get_range.go create mode 100644 pkg/local_object_storage/blobstor/blobtree/iterate.go create mode 100644 pkg/local_object_storage/blobstor/blobtree/option.go create mode 100644 pkg/local_object_storage/blobstor/blobtree/put.go diff --git a/pkg/local_object_storage/blobstor/blobtree/blobtree.go b/pkg/local_object_storage/blobstor/blobtree/blobtree.go new file mode 100644 index 000000000..3f51c345f --- /dev/null +++ b/pkg/local_object_storage/blobstor/blobtree/blobtree.go @@ -0,0 +1,80 @@ +package blobtree + +import ( + "errors" + "path/filepath" + "strings" + "sync/atomic" + "syscall" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" + utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" +) + +var _ common.Storage = &BlobTree{} + +type BlobTree struct { + cfg cfg + dirLock *utilSync.KeyLocker[string] + fileLock *utilSync.KeyLocker[string] + compressor *compression.Config + dispatcher *rootDispatcher + suffix atomic.Uint64 +} + +func New(opts ...Option) *BlobTree { + b := &BlobTree{ + cfg: cfg{ + targetFileSizeBytes: 4 * 1024 * 1024, + rootPath: "./", + depth: 3, + permissions: 0700, + initWorkersCount: 1000, + }, + dirLock: utilSync.NewKeyLocker[string](), + fileLock: utilSync.NewKeyLocker[string](), + } + + for _, opt := range opts { + opt(&b.cfg) + } + + b.dispatcher = newRootDispatcher() + + return b +} + +func (b *BlobTree) getDirectoryPath(addr oid.Address) string { + sAddr := 
addr.Object().EncodeToString() + "." + addr.Container().EncodeToString() + var sb strings.Builder + size := int(1+b.cfg.depth*(directoryLength+1)) + len(b.cfg.rootPath) // /path + slash + (character + slash for every level) + sb.Grow(size) + sb.WriteString(b.cfg.rootPath) + + for i := uint64(0); i < b.cfg.depth; i++ { + sb.WriteRune(filepath.Separator) + sb.WriteString(sAddr[:directoryLength]) + sAddr = sAddr[directoryLength:] + } + + sb.WriteRune(filepath.Separator) + return sb.String() +} + +func (b *BlobTree) createDir(dir string) error { + b.dirLock.Lock(dir) + defer b.dirLock.Unlock(dir) + + if err := util.MkdirAllX(dir, b.cfg.permissions); err != nil { + if errors.Is(err, syscall.ENOSPC) { + err = common.ErrNoSpace + return err + } + return err + } + + return nil +} diff --git a/pkg/local_object_storage/blobstor/blobtree/config.go b/pkg/local_object_storage/blobstor/blobtree/config.go new file mode 100644 index 000000000..823c5b6f7 --- /dev/null +++ b/pkg/local_object_storage/blobstor/blobtree/config.go @@ -0,0 +1,14 @@ +package blobtree + +import "io/fs" + +var directoryLength uint64 = 1 + +type cfg struct { + rootPath string + depth uint64 + targetFileSizeBytes uint64 + permissions fs.FileMode + readOnly bool + initWorkersCount int +} diff --git a/pkg/local_object_storage/blobstor/blobtree/content.go b/pkg/local_object_storage/blobstor/blobtree/content.go new file mode 100644 index 000000000..3760d1648 --- /dev/null +++ b/pkg/local_object_storage/blobstor/blobtree/content.go @@ -0,0 +1,191 @@ +package blobtree + +import ( + "crypto/sha256" + "encoding/binary" + "errors" + "fmt" + "os" + "path/filepath" + "strconv" + + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" +) + +const ( + defaultVersion = 0 + + sizeOfVersion = 1 + sizeOfCount = 8 + sizeOfDataLength = 8 + sizeOfContainerID = sha256.Size + sizeOfObjectID = sha256.Size +) + +var ( + errFileToSmall = errors.New("invalid file content: not enough bytes to read count of records") + errInvalidFileContentVersion = errors.New("invalid file content: not enough bytes to read record version") + errInvalidFileContentContainerID = errors.New("invalid file content: not enough bytes to read container ID") + errInvalidFileContentObjectID = errors.New("invalid file content: not enough bytes to read object ID") + errInvalidFileContentLength = errors.New("invalid file content: not enough bytes to read data length") + errInvalidFileContentData = errors.New("invalid file content: not enough bytes to read data") +) + +type objectData struct { + Version byte + Address oid.Address + Data []byte +} + +func (b *BlobTree) readFileContent(path string) ([]objectData, error) { + rawData, err := os.ReadFile(path) + if err != nil { + if os.IsNotExist(err) { + return []objectData{}, nil + } + return nil, err + } + return b.unmarshalSlice(rawData) +} + +func (b *BlobTree) unmarshalSlice(data []byte) ([]objectData, error) { + if len(data) < sizeOfCount { + return nil, errFileToSmall + } + count := binary.LittleEndian.Uint64(data[:8]) + result := make([]objectData, 0, count) + + data = data[sizeOfCount:] + var idx uint64 + for idx = 0; idx < count; idx++ { + record, read, err := b.unmarshalRecord(data) + if err != nil { + return nil, err + } + result = append(result, record) + data = data[read:] + } + + return result, nil +} + +func (b *BlobTree) unmarshalRecord(data []byte) (objectData, uint64, error) { + if len(data) < sizeOfVersion { + return objectData{}, 0, 
errInvalidFileContentVersion + } + var result objectData + var read uint64 + result.Version = data[0] + if result.Version != defaultVersion { + return objectData{}, 0, fmt.Errorf("invalid file content: unknown version %d", result.Version) + } + read += sizeOfVersion + + if len(data[read:]) < sizeOfContainerID { + return objectData{}, 0, errInvalidFileContentContainerID + } + var contID cid.ID + if err := contID.Decode(data[read : read+sizeOfContainerID]); err != nil { + return objectData{}, 0, fmt.Errorf("invalid file content: failed to read container ID: %w", err) + } + read += sizeOfContainerID + + if len(data[read:]) < sizeOfObjectID { + return objectData{}, 0, errInvalidFileContentObjectID + } + var objID oid.ID + if err := objID.Decode(data[read : read+sizeOfObjectID]); err != nil { + return objectData{}, 0, fmt.Errorf("invalid file content: failed to read object ID: %w", err) + } + read += sizeOfObjectID + + result.Address.SetContainer(contID) + result.Address.SetObject(objID) + + if len(data[read:]) < sizeOfDataLength { + return objectData{}, 0, errInvalidFileContentLength + } + dataLength := binary.LittleEndian.Uint64(data[read : read+sizeOfDataLength]) + read += sizeOfDataLength + + if uint64(len(data[read:])) < dataLength { + return objectData{}, 0, errInvalidFileContentData + } + result.Data = make([]byte, dataLength) + copy(result.Data, data[read:read+dataLength]) + read += dataLength + + return result, read, nil +} + +func (b *BlobTree) saveContentToFile(records []objectData, path string) (uint64, error) { + data, err := b.marshalSlice(records) + if err != nil { + return 0, err + } + return uint64(len(data)), b.writeFile(path, data) +} + +func (b *BlobTree) writeFile(p string, data []byte) error { + f, err := os.OpenFile(p, os.O_WRONLY|os.O_CREATE|os.O_TRUNC|os.O_EXCL|os.O_SYNC, b.cfg.permissions) + if err != nil { + return err + } + _, err = f.Write(data) + if err1 := f.Close(); err1 != nil && err == nil { + err = err1 + } + return err +} + +func (b *BlobTree) marshalSlice(records []objectData) ([]byte, error) { + buf := make([]byte, b.estimateSize(records)) + result := buf + binary.LittleEndian.PutUint64(buf, uint64(len(records))) + buf = buf[sizeOfCount:] + for _, record := range records { + written := b.marshalRecord(record, buf) + buf = buf[written:] + } + return result, nil +} + +func (b *BlobTree) marshalRecord(record objectData, dst []byte) uint64 { + var written uint64 + + dst[0] = record.Version + dst = dst[sizeOfVersion:] + written += sizeOfVersion + + record.Address.Container().Encode(dst) + dst = dst[sizeOfContainerID:] + written += sizeOfContainerID + + record.Address.Object().Encode(dst) + dst = dst[sizeOfObjectID:] + written += sizeOfObjectID + + binary.LittleEndian.PutUint64(dst, uint64(len(record.Data))) + dst = dst[sizeOfDataLength:] + written += sizeOfDataLength + + copy(dst, record.Data) + written += uint64(len(record.Data)) + + return written +} + +func (b *BlobTree) estimateSize(records []objectData) uint64 { + var result uint64 + result += sizeOfCount + for _, record := range records { + result += (sizeOfVersion + sizeOfContainerID + sizeOfObjectID + sizeOfDataLength) + result += uint64(len(record.Data)) + } + return result +} + +func (b *BlobTree) getFilePath(dir string, idx uint64) string { + return filepath.Join(dir, strconv.FormatUint(idx, 16)) +} diff --git a/pkg/local_object_storage/blobstor/blobtree/control.go b/pkg/local_object_storage/blobstor/blobtree/control.go new file mode 100644 index 000000000..ec33ef2cf --- /dev/null +++ 
b/pkg/local_object_storage/blobstor/blobtree/control.go @@ -0,0 +1,98 @@ +package blobtree + +import ( + "os" + "path/filepath" + "strconv" + "strings" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" + "golang.org/x/sync/errgroup" +) + +var Type = "blobtree" + +func (b *BlobTree) Open(readOnly bool) error { + b.cfg.readOnly = readOnly + return nil +} + +func (b *BlobTree) Init() error { + if err := b.createDir(b.cfg.rootPath); err != nil { + return err + } + + var eg errgroup.Group + eg.SetLimit(b.cfg.initWorkersCount) + eg.Go(func() error { + return b.initDir(&eg, b.cfg.rootPath, 0) + }) + return eg.Wait() +} + +func (b *BlobTree) initDir(eg *errgroup.Group, dir string, depth uint64) error { + entities, err := os.ReadDir(dir) + if err != nil { + return err + } + for _, entity := range entities { + if depth < b.cfg.depth && entity.IsDir() { + eg.Go(func() error { + return b.initDir(eg, filepath.Join(dir, entity.Name()), depth+1) + }) + continue + } + + if depth != b.cfg.depth { + continue + } + + if b.isTempFile(entity.Name()) { + if err = os.Remove(filepath.Join(dir, entity.Name())); err != nil { + return err + } + continue + } + + idx, err := b.parseIdx(entity.Name()) + if err != nil { + continue + } + b.dispatcher.Init(dir, idx) + + stat, err := os.Stat(filepath.Join(dir, entity.Name())) + if err != nil { + return err + } + if stat.Size() < int64(b.cfg.targetFileSizeBytes) { + b.dispatcher.ReturnIdx(dir, idx) + } + } + return nil +} + +func (b *BlobTree) isTempFile(name string) bool { + return strings.Contains(name, tempFileSymbols) +} + +func (b *BlobTree) parseIdx(name string) (uint64, error) { + return strconv.ParseUint(name, 16, 64) +} + +func (b *BlobTree) Close() error { + return nil +} + +func (b *BlobTree) Type() string { return Type } +func (b *BlobTree) Path() string { return b.cfg.rootPath } + +func (b *BlobTree) SetCompressor(cc *compression.Config) { + b.compressor = cc +} + +func (b *BlobTree) Compressor() *compression.Config { + return b.compressor +} + +func (b *BlobTree) SetReportErrorFunc(_ func(string, error)) {} +func (b *BlobTree) SetParentID(_ string) {} diff --git a/pkg/local_object_storage/blobstor/blobtree/delete.go b/pkg/local_object_storage/blobstor/blobtree/delete.go new file mode 100644 index 000000000..482a8855a --- /dev/null +++ b/pkg/local_object_storage/blobstor/blobtree/delete.go @@ -0,0 +1,124 @@ +package blobtree + +import ( + "context" + "encoding/binary" + "os" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" +) + +func (b *BlobTree) Delete(_ context.Context, prm common.DeletePrm) (common.DeleteRes, error) { + if b.cfg.readOnly { + return common.DeleteRes{}, common.ErrReadOnly + } + + if len(prm.StorageID) == storageIDLength { + return b.deleteFromIdx(prm.Address, binary.LittleEndian.Uint64(prm.StorageID)) + } + return b.findAndDelete(prm.Address) +} + +func (b *BlobTree) deleteFromIdx(addr oid.Address, idx uint64) (common.DeleteRes, error) { + dir := b.getDirectoryPath(addr) + path := b.getFilePath(dir, idx) + + b.fileLock.Lock(path) + defer b.fileLock.Unlock(path) + + records, err := b.readFileContent(path) + if err != nil { + return common.DeleteRes{}, err + } + + deleteIdx := -1 + for i := range records { + if records[i].Address.Equals(addr) 
{ + deleteIdx = i + break + } + } + + if deleteIdx == -1 { + return common.DeleteRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound)) + } + + if len(records) == 1 { + err = os.Remove(path) + if err == nil { + b.dispatcher.ReturnIdx(dir, idx) + // decrease files metric + } + return common.DeleteRes{}, err + } + + records = append(records[:deleteIdx], records[deleteIdx+1:]...) + size, err := b.writeToTmpAndRename(records, path) + if err != nil { + return common.DeleteRes{}, err + } + if size < b.cfg.targetFileSizeBytes { + b.dispatcher.ReturnIdx(dir, idx) + } + return common.DeleteRes{}, nil +} + +func (b *BlobTree) findAndDelete(addr oid.Address) (common.DeleteRes, error) { + dir := b.getDirectoryPath(addr) + idx, err := b.findFileIdx(dir, addr) + if err != nil { + return common.DeleteRes{}, err + } + return b.deleteFromIdx(addr, idx) +} + +func (b *BlobTree) findFileIdx(dir string, addr oid.Address) (uint64, error) { + entities, err := os.ReadDir(dir) + if err != nil { + if os.IsNotExist(err) { + return 0, logicerr.Wrap(new(apistatus.ObjectNotFound)) + } + return 0, err + } + for _, entity := range entities { + if entity.IsDir() { + continue + } + if b.isTempFile(entity.Name()) { + continue + } + idx, err := b.parseIdx(entity.Name()) + if err != nil { + continue + } + path := b.getFilePath(dir, idx) + contains, err := b.fileContainsObject(path, addr) + if err != nil { + return 0, err + } + if contains { + return idx, nil + } + } + return 0, logicerr.Wrap(new(apistatus.ObjectNotFound)) +} + +func (b *BlobTree) fileContainsObject(path string, addr oid.Address) (bool, error) { + b.fileLock.RLock(path) + defer b.fileLock.RUnlock(path) + + records, err := b.readFileContent(path) + if err != nil { + return false, err + } + + for i := range records { + if records[i].Address.Equals(addr) { + return true, nil + } + } + return false, nil +} diff --git a/pkg/local_object_storage/blobstor/blobtree/dispatcher.go b/pkg/local_object_storage/blobstor/blobtree/dispatcher.go new file mode 100644 index 000000000..1ce5d9da9 --- /dev/null +++ b/pkg/local_object_storage/blobstor/blobtree/dispatcher.go @@ -0,0 +1,94 @@ +package blobtree + +import ( + "sync" +) + +type rootDispatcher struct { + dispatchers map[string]*dirDispatcher + guard sync.Mutex +} + +func newRootDispatcher() *rootDispatcher { + return &rootDispatcher{ + dispatchers: make(map[string]*dirDispatcher), + } +} + +func (d *rootDispatcher) GetIdxForWrite(dir string) uint64 { + return d.getDirDispatcher(dir).GetIdxForWrite() +} + +func (d *rootDispatcher) ReturnIdx(dir string, idx uint64) { + d.getDirDispatcher(dir).ReturnIdx(idx) +} + +func (d *rootDispatcher) Init(dir string, idx uint64) { + d.getDirDispatcher(dir).Init(idx) +} + +func (d *rootDispatcher) getDirDispatcher(dir string) *dirDispatcher { + d.guard.Lock() + defer d.guard.Unlock() + + if result, ok := d.dispatchers[dir]; ok { + return result + } + + result := newDirDispatcher(dir) + d.dispatchers[dir] = result + return result +} + +type dirDispatcher struct { + dir string + guard sync.Mutex + indicies map[uint64]struct{} + nextIndex uint64 +} + +func newDirDispatcher(dir string) *dirDispatcher { + return &dirDispatcher{ + dir: dir, + indicies: make(map[uint64]struct{}), + } +} + +func (d *dirDispatcher) GetIdxForWrite() uint64 { + d.guard.Lock() + defer d.guard.Unlock() + + var result uint64 + var found bool + + for idx := range d.indicies { + result = idx + found = true + break + } + + if found { + delete(d.indicies, result) + return result + } + + result = d.nextIndex + d.nextIndex++ + return 
result +} + +func (d *dirDispatcher) ReturnIdx(idx uint64) { + d.guard.Lock() + defer d.guard.Unlock() + + d.indicies[idx] = struct{}{} +} + +func (d *dirDispatcher) Init(idx uint64) { + d.guard.Lock() + defer d.guard.Unlock() + + if d.nextIndex <= idx { + d.nextIndex = idx + 1 + } +} diff --git a/pkg/local_object_storage/blobstor/blobtree/dispatcher_test.go b/pkg/local_object_storage/blobstor/blobtree/dispatcher_test.go new file mode 100644 index 000000000..ed72bf948 --- /dev/null +++ b/pkg/local_object_storage/blobstor/blobtree/dispatcher_test.go @@ -0,0 +1,29 @@ +package blobtree + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestDispatcher(t *testing.T) { + t.Parallel() + + d := newRootDispatcher() + idx := d.GetIdxForWrite("/dir1") + require.Equal(t, uint64(0), idx) + d.ReturnIdx("/dir1", idx) + + idx = d.GetIdxForWrite("/dir1") + require.Equal(t, uint64(0), idx) + + idx = d.GetIdxForWrite("/dir1") + require.Equal(t, uint64(1), idx) + + d.Init("/dir2", 5) + idx = d.GetIdxForWrite("/dir2") + require.Equal(t, uint64(6), idx) + + idx = d.GetIdxForWrite("/dir2") + require.Equal(t, uint64(7), idx) +} diff --git a/pkg/local_object_storage/blobstor/blobtree/exists.go b/pkg/local_object_storage/blobstor/blobtree/exists.go new file mode 100644 index 000000000..d42dfa766 --- /dev/null +++ b/pkg/local_object_storage/blobstor/blobtree/exists.go @@ -0,0 +1,54 @@ +package blobtree + +import ( + "context" + "encoding/binary" + "errors" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" +) + +func (b *BlobTree) Exists(_ context.Context, prm common.ExistsPrm) (common.ExistsRes, error) { + if len(prm.StorageID) == storageIDLength { + return b.existsFromIdx(prm.Address, binary.LittleEndian.Uint64(prm.StorageID)) + } + return b.findAndCheck(prm.Address) +} + +func (b *BlobTree) existsFromIdx(addr oid.Address, idx uint64) (common.ExistsRes, error) { + dir := b.getDirectoryPath(addr) + path := b.getFilePath(dir, idx) + + b.fileLock.RLock(path) + defer b.fileLock.RUnlock(path) + + records, err := b.readFileContent(path) + if err != nil { + return common.ExistsRes{}, err + } + + for i := range records { + if records[i].Address.Equals(addr) { + return common.ExistsRes{ + Exists: true, + }, nil + } + } + + return common.ExistsRes{}, nil +} + +func (b *BlobTree) findAndCheck(addr oid.Address) (common.ExistsRes, error) { + dir := b.getDirectoryPath(addr) + _, err := b.findFileIdx(dir, addr) + if err != nil { + var notFound *apistatus.ObjectNotFound + if errors.As(err, ¬Found) { + return common.ExistsRes{}, nil + } + return common.ExistsRes{}, err + } + return common.ExistsRes{Exists: true}, nil +} diff --git a/pkg/local_object_storage/blobstor/blobtree/generic_test.go b/pkg/local_object_storage/blobstor/blobtree/generic_test.go new file mode 100644 index 000000000..f0c24aee5 --- /dev/null +++ b/pkg/local_object_storage/blobstor/blobtree/generic_test.go @@ -0,0 +1,39 @@ +package blobtree + +import ( + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest" +) + +func TestGeneric(t *testing.T) { + newTreeFromPath := func(path string) common.Storage { + return New( + WithPath(path), + WithDepth(2)) + } + + newTree := func(t *testing.T) common.Storage { + return 
newTreeFromPath(t.TempDir()) + } + + blobstortest.TestAll(t, newTree, 2048, 16*1024) + + t.Run("info", func(t *testing.T) { + path := t.TempDir() + blobstortest.TestInfo(t, func(*testing.T) common.Storage { + return newTreeFromPath(path) + }, Type, path) + }) +} + +func TestControl(t *testing.T) { + newTree := func(t *testing.T) common.Storage { + return New( + WithPath(t.TempDir()), + WithDepth(2)) + } + + blobstortest.TestControl(t, newTree, 2048, 2048) +} diff --git a/pkg/local_object_storage/blobstor/blobtree/get.go b/pkg/local_object_storage/blobstor/blobtree/get.go new file mode 100644 index 000000000..41ee462c9 --- /dev/null +++ b/pkg/local_object_storage/blobstor/blobtree/get.go @@ -0,0 +1,107 @@ +package blobtree + +import ( + "context" + "encoding/binary" + "os" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" +) + +func (b *BlobTree) Get(_ context.Context, prm common.GetPrm) (common.GetRes, error) { + if len(prm.StorageID) == storageIDLength { + return b.getFromIdx(prm.Address, binary.LittleEndian.Uint64(prm.StorageID)) + } + return b.findAndGet(prm.Address) +} + +func (b *BlobTree) getFromIdx(addr oid.Address, idx uint64) (common.GetRes, error) { + dir := b.getDirectoryPath(addr) + path := b.getFilePath(dir, idx) + + b.fileLock.RLock(path) + defer b.fileLock.RUnlock(path) + + records, err := b.readFileContent(path) + if err != nil { + return common.GetRes{}, err + } + + for _, record := range records { + if record.Address.Equals(addr) { + return b.unmarshalGetRes(record) + } + } + + return common.GetRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound)) +} + +func (b *BlobTree) unmarshalGetRes(record objectData) (common.GetRes, error) { + data, err := b.compressor.Decompress(record.Data) + if err != nil { + return common.GetRes{}, err + } + + obj := objectSDK.New() + if err := obj.Unmarshal(data); err != nil { + return common.GetRes{}, err + } + return common.GetRes{Object: obj, RawData: data}, nil +} + +func (b *BlobTree) findAndGet(addr oid.Address) (common.GetRes, error) { + dir := b.getDirectoryPath(addr) + entities, err := os.ReadDir(dir) + if err != nil { + if os.IsNotExist(err) { + return common.GetRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound)) + } + return common.GetRes{}, err + } + for _, entity := range entities { + if entity.IsDir() { + continue + } + if b.isTempFile(entity.Name()) { + continue + } + idx, err := b.parseIdx(entity.Name()) + if err != nil { + continue + } + path := b.getFilePath(dir, idx) + res, err := b.tryReadObject(path, addr) + if err != nil { + return common.GetRes{}, err + } + if res.Object != nil { + return res, nil + } + } + return common.GetRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound)) +} + +func (b *BlobTree) tryReadObject(path string, addr oid.Address) (common.GetRes, error) { + b.fileLock.RLock(path) + defer b.fileLock.RUnlock(path) + + records, err := b.readFileContent(path) + if err != nil { + return common.GetRes{}, err + } + + for _, record := range records { + if record.Address.Equals(addr) { + res, err := b.unmarshalGetRes(record) + if err != nil { + return common.GetRes{}, err + } + return res, nil + } + } + return common.GetRes{}, nil +} diff --git 
a/pkg/local_object_storage/blobstor/blobtree/get_range.go b/pkg/local_object_storage/blobstor/blobtree/get_range.go new file mode 100644 index 000000000..8740d33f9 --- /dev/null +++ b/pkg/local_object_storage/blobstor/blobtree/get_range.go @@ -0,0 +1,28 @@ +package blobtree + +import ( + "context" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" +) + +func (b *BlobTree) GetRange(ctx context.Context, prm common.GetRangePrm) (common.GetRangeRes, error) { + res, err := b.Get(ctx, common.GetPrm{Address: prm.Address, StorageID: prm.StorageID}) + if err != nil { + return common.GetRangeRes{}, err + } + + payload := res.Object.Payload() + from := prm.Range.GetOffset() + to := from + prm.Range.GetLength() + + if pLen := uint64(len(payload)); to < from || pLen < from || pLen < to { + return common.GetRangeRes{}, logicerr.Wrap(new(apistatus.ObjectOutOfRange)) + } + + return common.GetRangeRes{ + Data: payload[from:to], + }, nil +} diff --git a/pkg/local_object_storage/blobstor/blobtree/iterate.go b/pkg/local_object_storage/blobstor/blobtree/iterate.go new file mode 100644 index 000000000..af39a4a5f --- /dev/null +++ b/pkg/local_object_storage/blobstor/blobtree/iterate.go @@ -0,0 +1,96 @@ +package blobtree + +import ( + "context" + "encoding/binary" + "os" + "path/filepath" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" +) + +func (b *BlobTree) Iterate(_ context.Context, prm common.IteratePrm) (common.IterateRes, error) { + return common.IterateRes{}, b.iterateDir(b.cfg.rootPath, 0, prm) +} + +func (b *BlobTree) iterateDir(dir string, depth uint64, prm common.IteratePrm) error { + entities, err := os.ReadDir(dir) + if err != nil { + if prm.IgnoreErrors { + return nil + } + return err + } + + for _, entity := range entities { + if depth < b.cfg.depth && entity.IsDir() { + err := b.iterateDir(filepath.Join(dir, entity.Name()), depth+1, prm) + if err != nil { + return err + } + } + if depth != b.cfg.depth { + continue + } + if b.isTempFile(entity.Name()) { + continue + } + idx, err := b.parseIdx(entity.Name()) + if err != nil { + continue + } + path := b.getFilePath(dir, idx) + err = b.iterateRecords(idx, path, prm) + if err != nil { + return err + } + } + return nil +} + +func (b *BlobTree) iterateRecords(idx uint64, path string, prm common.IteratePrm) error { + b.fileLock.RLock(path) + defer b.fileLock.RUnlock(path) + + records, err := b.readFileContent(path) + if err != nil { + if prm.IgnoreErrors { + return nil + } + return err + } + + for _, record := range records { + if prm.LazyHandler != nil { + if err = prm.LazyHandler(record.Address, func() ([]byte, error) { + return record.Data, nil + }); err != nil { + return err + } + continue + } + + record.Data, err = b.compressor.Decompress(record.Data) + if err != nil { + if prm.IgnoreErrors { + if prm.ErrorHandler != nil { + return prm.ErrorHandler(record.Address, err) + } + continue + } + return err + } + + storageID := make([]byte, storageIDLength) + binary.LittleEndian.PutUint64(storageID, idx) + err = prm.Handler(common.IterationElement{ + Address: record.Address, + ObjectData: record.Data, + StorageID: storageID, + }) + if err != nil { + return err + } + } + return nil +} diff --git a/pkg/local_object_storage/blobstor/blobtree/option.go b/pkg/local_object_storage/blobstor/blobtree/option.go new file mode 
100644 index 000000000..9bd8557ea --- /dev/null +++ b/pkg/local_object_storage/blobstor/blobtree/option.go @@ -0,0 +1,29 @@ +package blobtree + +import "io/fs" + +type Option func(*cfg) + +func WithPath(path string) Option { + return func(c *cfg) { + c.rootPath = path + } +} + +func WithDepth(depth uint64) Option { + return func(c *cfg) { + c.depth = depth + } +} + +func WithPerm(p fs.FileMode) Option { + return func(c *cfg) { + c.permissions = p + } +} + +func WithTargetSize(size uint64) Option { + return func(c *cfg) { + c.targetFileSizeBytes = size + } +} diff --git a/pkg/local_object_storage/blobstor/blobtree/put.go b/pkg/local_object_storage/blobstor/blobtree/put.go new file mode 100644 index 000000000..9c2c6779b --- /dev/null +++ b/pkg/local_object_storage/blobstor/blobtree/put.go @@ -0,0 +1,88 @@ +package blobtree + +import ( + "context" + "encoding/binary" + "os" + "strconv" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" +) + +const ( + tempFileSymbols = "###" + storageIDLength = 8 +) + +func (b *BlobTree) Put(_ context.Context, prm common.PutPrm) (common.PutRes, error) { + if b.cfg.readOnly { + return common.PutRes{}, common.ErrReadOnly + } + + dir := b.getDirectoryPath(prm.Address) + + if err := b.createDir(dir); err != nil { + return common.PutRes{}, err + } + + if !prm.DontCompress { + prm.RawData = b.compressor.Compress(prm.RawData) + } + + idx, err := b.saveToFile(prm, dir) + if err != nil { + return common.PutRes{}, err + } + + storageID := make([]byte, storageIDLength) + binary.LittleEndian.PutUint64(storageID, idx) + return common.PutRes{StorageID: storageID}, nil +} + +func (b *BlobTree) saveToFile(prm common.PutPrm, dir string) (uint64, error) { + returnIdx := true + idx := b.dispatcher.GetIdxForWrite(dir) + path := b.getFilePath(dir, idx) + + b.fileLock.Lock(path) + defer b.fileLock.Unlock(path) + + defer func() { + if returnIdx { + b.dispatcher.ReturnIdx(dir, idx) + } + }() + + currentContent, err := b.readFileContent(path) + if err != nil { + return 0, err + } + var newRecord objectData + newRecord.Address = prm.Address + newRecord.Data = prm.RawData + + size, err := b.writeToTmpAndRename(append(currentContent, newRecord), path) + if err != nil { + return 0, err + } + returnIdx = size < b.cfg.targetFileSizeBytes + + return idx, nil +} + +func (b *BlobTree) writeToTmpAndRename(records []objectData, path string) (uint64, error) { + tmpFile := path + tempFileSymbols + strconv.FormatUint(b.suffix.Add(1), 16) + + size, err := b.saveContentToFile(records, tmpFile) + if err != nil { + _ = os.Remove(tmpFile) + return 0, err + } + + if err := os.Rename(tmpFile, path); err != nil { + _ = os.Remove(tmpFile) + return 0, err + } + + return size, nil +} diff --git a/pkg/local_object_storage/blobstor/perf_test.go b/pkg/local_object_storage/blobstor/perf_test.go index c773ea0ee..9b1c8a1ec 100644 --- a/pkg/local_object_storage/blobstor/perf_test.go +++ b/pkg/local_object_storage/blobstor/perf_test.go @@ -6,6 +6,7 @@ import ( "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobtree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/memstore" @@ -81,6 +82,15 @@ var storages = []storage{ ) }, }, + { + desc: 
"blobtree", + create: func(dir string) common.Storage { + return blobtree.New( + blobtree.WithDepth(2), + blobtree.WithPath(dir), + ) + }, + }, } func BenchmarkSubstorageReadPerf(b *testing.B) { diff --git a/pkg/util/sync/key_locker.go b/pkg/util/sync/key_locker.go index 97de0386d..2a5545569 100644 --- a/pkg/util/sync/key_locker.go +++ b/pkg/util/sync/key_locker.go @@ -3,8 +3,8 @@ package sync import "sync" type locker struct { - mtx sync.Mutex - waiters int // not protected by mtx, must used outer mutex to update concurrently + mtx sync.RWMutex + userCount int // not protected by mtx, must used outer mutex to update concurrently } type KeyLocker[K comparable] struct { @@ -19,26 +19,50 @@ func NewKeyLocker[K comparable]() *KeyLocker[K] { } func (l *KeyLocker[K]) Lock(key K) { + l.lock(key, false) +} + +func (l *KeyLocker[K]) RLock(key K) { + l.lock(key, true) +} + +func (l *KeyLocker[K]) lock(key K, read bool) { l.lockersMtx.Lock() if locker, found := l.lockers[key]; found { - locker.waiters++ + locker.userCount++ l.lockersMtx.Unlock() - locker.mtx.Lock() + if read { + locker.mtx.RLock() + } else { + locker.mtx.Lock() + } return } locker := &locker{ - waiters: 1, + userCount: 1, + } + if read { + locker.mtx.RLock() + } else { + locker.mtx.Lock() } - locker.mtx.Lock() l.lockers[key] = locker l.lockersMtx.Unlock() } func (l *KeyLocker[K]) Unlock(key K) { + l.unlock(key, false) +} + +func (l *KeyLocker[K]) RUnlock(key K) { + l.unlock(key, true) +} + +func (l *KeyLocker[K]) unlock(key K, read bool) { l.lockersMtx.Lock() defer l.lockersMtx.Unlock() @@ -47,10 +71,14 @@ func (l *KeyLocker[K]) Unlock(key K) { return } - if locker.waiters == 1 { + if locker.userCount == 1 { delete(l.lockers, key) } - locker.waiters-- + locker.userCount-- - locker.mtx.Unlock() + if read { + locker.mtx.RUnlock() + } else { + locker.mtx.Unlock() + } } diff --git a/pkg/util/sync/key_locker_test.go b/pkg/util/sync/key_locker_test.go index 3b3e6a694..f4ba3e19d 100644 --- a/pkg/util/sync/key_locker_test.go +++ b/pkg/util/sync/key_locker_test.go @@ -9,7 +9,7 @@ import ( "golang.org/x/sync/errgroup" ) -func TestKeyLocker(t *testing.T) { +func TestKeyLockerWrite(t *testing.T) { taken := false eg, _ := errgroup.WithContext(context.Background()) keyLocker := NewKeyLocker[int]() @@ -30,3 +30,17 @@ func TestKeyLocker(t *testing.T) { } require.NoError(t, eg.Wait()) } + +func TestKeyLockerRead(t *testing.T) { + eg, _ := errgroup.WithContext(context.Background()) + keyLocker := NewKeyLocker[int]() + for i := 0; i < 100; i++ { + eg.Go(func() error { + keyLocker.RLock(0) + defer keyLocker.RUnlock(0) + + return nil + }) + } + require.NoError(t, eg.Wait()) +} -- 2.45.2 From 76855bddac33a6c44c9ab077ca0ab623721cca95 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 4 Oct 2023 13:28:24 +0300 Subject: [PATCH 02/11] [#645] node: Allow to add blobtree substorage from config Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/config.go | 19 ++++++ .../engine/shard/blobstor/blobtree/config.go | 60 +++++++++++++++++++ cmd/frostfs-node/validate.go | 3 +- 3 files changed, 81 insertions(+), 1 deletion(-) create mode 100644 cmd/frostfs-node/config/engine/shard/blobstor/blobtree/config.go diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index cc106cf95..ea0973e7d 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -22,6 +22,7 @@ import ( engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine" shardconfig 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard" blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza" + blobtreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobtree" fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree" loggerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/logger" nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node" @@ -33,6 +34,7 @@ import ( netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobtree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" @@ -302,6 +304,10 @@ func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, ol sub := fstreeconfig.From((*config.Config)(storagesCfg[i])) sCfg.depth = sub.Depth() sCfg.noSync = sub.NoSync() + case blobtree.Type: + sub := blobtreeconfig.From((*config.Config)(storagesCfg[i])) + sCfg.depth = sub.Depth() + sCfg.size = sub.Size() default: return fmt.Errorf("invalid storage type: %s", storagesCfg[i].Type()) } @@ -834,6 +840,19 @@ func (c *cfg) getSubstorageOpts(shCfg shardCfg) []blobstor.SubStorage { return true }, }) + case blobtree.Type: + blobTreeOpts := []blobtree.Option{ + blobtree.WithPath(sRead.path), + blobtree.WithPerm(sRead.perm), + blobtree.WithDepth(sRead.depth), + blobtree.WithTargetSize(sRead.size), + } + ss = append(ss, blobstor.SubStorage{ + Storage: blobtree.New(blobTreeOpts...), + Policy: func(_ *objectSDK.Object, data []byte) bool { + return uint64(len(data)) < shCfg.smallSizeObjectLimit + }, + }) default: // should never happen, that has already // been handled: when the config was read diff --git a/cmd/frostfs-node/config/engine/shard/blobstor/blobtree/config.go b/cmd/frostfs-node/config/engine/shard/blobstor/blobtree/config.go new file mode 100644 index 000000000..47b653a45 --- /dev/null +++ b/cmd/frostfs-node/config/engine/shard/blobstor/blobtree/config.go @@ -0,0 +1,60 @@ +package blobtreeconfig + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobtree" +) + +// Config is a wrapper over the config section +// which provides access to Blobtree configurations. +type Config config.Config + +const ( + // SizeDefault is a default limit of estimates of single Blobtree file size. + SizeDefault = 4 * 1024 * 1024 + + // DepthDefault is a default shallow dir depth. + DepthDefault = 8 +) + +// From wraps config section into Config. +func From(c *config.Config) *Config { + return (*Config)(c) +} + +// Type returns the storage type. +func (x *Config) Type() string { + return blobtree.Type +} + +// Size returns the value of "size" config parameter. +// +// Returns SizeDefault if the value is not a positive number. 
+func (x *Config) Size() uint64 { + s := config.SizeInBytesSafe( + (*config.Config)(x), + "size", + ) + + if s > 0 { + return s + } + + return SizeDefault +} + +// ShallowDepth returns the value of "depth" config parameter. +// +// Returns ShallowDepthDefault if the value is not a positive number. +func (x *Config) Depth() uint64 { + d := config.UintSafe( + (*config.Config)(x), + "depth", + ) + + if d > 0 { + return d + } + + return DepthDefault +} diff --git a/cmd/frostfs-node/validate.go b/cmd/frostfs-node/validate.go index 80c90ec44..b3472a074 100644 --- a/cmd/frostfs-node/validate.go +++ b/cmd/frostfs-node/validate.go @@ -10,6 +10,7 @@ import ( loggerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/logger" treeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobtree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" ) @@ -55,7 +56,7 @@ func validateConfig(c *config.Config) error { } for i := range blobstor { switch blobstor[i].Type() { - case fstree.Type, blobovniczatree.Type: + case fstree.Type, blobovniczatree.Type, blobtree.Type: default: return fmt.Errorf("unexpected storage type: %s (shard %d)", blobstor[i].Type(), shardNum) } -- 2.45.2 From fba369ec346b1af0933d9057d1557a12e2da87ed Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 4 Oct 2023 14:35:13 +0300 Subject: [PATCH 03/11] [#645] blobtree: Add metrics Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/config.go | 7 + .../blobstor/blobtree/blobtree.go | 1 + .../blobstor/blobtree/config.go | 1 + .../blobstor/blobtree/control.go | 8 +- .../blobstor/blobtree/delete.go | 20 ++- .../blobstor/blobtree/exists.go | 18 ++- .../blobstor/blobtree/get.go | 17 +++ .../blobstor/blobtree/get_range.go | 21 ++- .../blobstor/blobtree/iterate.go | 12 +- .../blobstor/blobtree/metrics.go | 34 +++++ .../blobstor/blobtree/option.go | 6 + .../blobstor/blobtree/put.go | 27 ++++ pkg/local_object_storage/metrics/blobtree.go | 74 +++++++++++ pkg/metrics/blobtree.go | 120 ++++++++++++++++++ pkg/metrics/consts.go | 1 + pkg/metrics/node.go | 6 + 16 files changed, 362 insertions(+), 11 deletions(-) create mode 100644 pkg/local_object_storage/blobstor/blobtree/metrics.go create mode 100644 pkg/local_object_storage/metrics/blobtree.go create mode 100644 pkg/metrics/blobtree.go diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index ea0973e7d..b3c1892f7 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -847,6 +847,13 @@ func (c *cfg) getSubstorageOpts(shCfg shardCfg) []blobstor.SubStorage { blobtree.WithDepth(sRead.depth), blobtree.WithTargetSize(sRead.size), } + if c.metricsCollector != nil { + blobTreeOpts = append(blobTreeOpts, + blobtree.WithMetrics( + lsmetrics.NewBlobTreeMetrics(sRead.path, c.metricsCollector.BlobTreeMetrics()), + ), + ) + } ss = append(ss, blobstor.SubStorage{ Storage: blobtree.New(blobTreeOpts...), Policy: func(_ *objectSDK.Object, data []byte) bool { diff --git a/pkg/local_object_storage/blobstor/blobtree/blobtree.go b/pkg/local_object_storage/blobstor/blobtree/blobtree.go index 3f51c345f..1270c54bf 100644 --- a/pkg/local_object_storage/blobstor/blobtree/blobtree.go +++ b/pkg/local_object_storage/blobstor/blobtree/blobtree.go @@ -33,6 +33,7 @@ func 
New(opts ...Option) *BlobTree { depth: 3, permissions: 0700, initWorkersCount: 1000, + metrics: &noopMetrics{}, }, dirLock: utilSync.NewKeyLocker[string](), fileLock: utilSync.NewKeyLocker[string](), diff --git a/pkg/local_object_storage/blobstor/blobtree/config.go b/pkg/local_object_storage/blobstor/blobtree/config.go index 823c5b6f7..daceab7d1 100644 --- a/pkg/local_object_storage/blobstor/blobtree/config.go +++ b/pkg/local_object_storage/blobstor/blobtree/config.go @@ -11,4 +11,5 @@ type cfg struct { permissions fs.FileMode readOnly bool initWorkersCount int + metrics Metrics } diff --git a/pkg/local_object_storage/blobstor/blobtree/control.go b/pkg/local_object_storage/blobstor/blobtree/control.go index ec33ef2cf..af3ba2431 100644 --- a/pkg/local_object_storage/blobstor/blobtree/control.go +++ b/pkg/local_object_storage/blobstor/blobtree/control.go @@ -14,6 +14,7 @@ var Type = "blobtree" func (b *BlobTree) Open(readOnly bool) error { b.cfg.readOnly = readOnly + b.cfg.metrics.SetMode(readOnly) return nil } @@ -59,6 +60,7 @@ func (b *BlobTree) initDir(eg *errgroup.Group, dir string, depth uint64) error { continue } b.dispatcher.Init(dir, idx) + b.cfg.metrics.IncFilesCount() stat, err := os.Stat(filepath.Join(dir, entity.Name())) if err != nil { @@ -80,6 +82,7 @@ func (b *BlobTree) parseIdx(name string) (uint64, error) { } func (b *BlobTree) Close() error { + b.cfg.metrics.Close() return nil } @@ -95,4 +98,7 @@ func (b *BlobTree) Compressor() *compression.Config { } func (b *BlobTree) SetReportErrorFunc(_ func(string, error)) {} -func (b *BlobTree) SetParentID(_ string) {} + +func (b *BlobTree) SetParentID(parentID string) { + b.cfg.metrics.SetParentID(parentID) +} diff --git a/pkg/local_object_storage/blobstor/blobtree/delete.go b/pkg/local_object_storage/blobstor/blobtree/delete.go index 482a8855a..3d33b8f2a 100644 --- a/pkg/local_object_storage/blobstor/blobtree/delete.go +++ b/pkg/local_object_storage/blobstor/blobtree/delete.go @@ -4,6 +4,7 @@ import ( "context" "encoding/binary" "os" + "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" @@ -12,14 +13,27 @@ import ( ) func (b *BlobTree) Delete(_ context.Context, prm common.DeletePrm) (common.DeleteRes, error) { + var ( + success = false + startedAt = time.Now() + ) + defer func() { + b.cfg.metrics.Delete(time.Since(startedAt), success, prm.StorageID != nil) + }() + if b.cfg.readOnly { return common.DeleteRes{}, common.ErrReadOnly } + var res common.DeleteRes + var err error if len(prm.StorageID) == storageIDLength { - return b.deleteFromIdx(prm.Address, binary.LittleEndian.Uint64(prm.StorageID)) + res, err = b.deleteFromIdx(prm.Address, binary.LittleEndian.Uint64(prm.StorageID)) + } else { + res, err = b.findAndDelete(prm.Address) } - return b.findAndDelete(prm.Address) + success = err == nil + return res, err } func (b *BlobTree) deleteFromIdx(addr oid.Address, idx uint64) (common.DeleteRes, error) { @@ -50,7 +64,7 @@ func (b *BlobTree) deleteFromIdx(addr oid.Address, idx uint64) (common.DeleteRes err = os.Remove(path) if err == nil { b.dispatcher.ReturnIdx(dir, idx) - // decrease files metric + b.cfg.metrics.DecFilesCount() } return common.DeleteRes{}, err } diff --git a/pkg/local_object_storage/blobstor/blobtree/exists.go b/pkg/local_object_storage/blobstor/blobtree/exists.go index d42dfa766..08ba3a113 100644 --- a/pkg/local_object_storage/blobstor/blobtree/exists.go +++ 
b/pkg/local_object_storage/blobstor/blobtree/exists.go @@ -4,6 +4,7 @@ import ( "context" "encoding/binary" "errors" + "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -11,10 +12,23 @@ import ( ) func (b *BlobTree) Exists(_ context.Context, prm common.ExistsPrm) (common.ExistsRes, error) { + var ( + startedAt = time.Now() + success = false + ) + defer func() { + b.cfg.metrics.Exists(time.Since(startedAt), success, prm.StorageID != nil) + }() + + var res common.ExistsRes + var err error if len(prm.StorageID) == storageIDLength { - return b.existsFromIdx(prm.Address, binary.LittleEndian.Uint64(prm.StorageID)) + res, err = b.existsFromIdx(prm.Address, binary.LittleEndian.Uint64(prm.StorageID)) + } else { + res, err = b.findAndCheck(prm.Address) } - return b.findAndCheck(prm.Address) + success = err == nil + return res, err } func (b *BlobTree) existsFromIdx(addr oid.Address, idx uint64) (common.ExistsRes, error) { diff --git a/pkg/local_object_storage/blobstor/blobtree/get.go b/pkg/local_object_storage/blobstor/blobtree/get.go index 41ee462c9..a1ed3c110 100644 --- a/pkg/local_object_storage/blobstor/blobtree/get.go +++ b/pkg/local_object_storage/blobstor/blobtree/get.go @@ -4,6 +4,7 @@ import ( "context" "encoding/binary" "os" + "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" @@ -13,6 +14,22 @@ import ( ) func (b *BlobTree) Get(_ context.Context, prm common.GetPrm) (common.GetRes, error) { + var ( + startedAt = time.Now() + success = false + size = 0 + ) + defer func() { + b.cfg.metrics.Get(time.Since(startedAt), size, success, prm.StorageID != nil) + }() + + res, err := b.get(prm) + success = err == nil + size = len(res.RawData) + return res, err +} + +func (b *BlobTree) get(prm common.GetPrm) (common.GetRes, error) { if len(prm.StorageID) == storageIDLength { return b.getFromIdx(prm.Address, binary.LittleEndian.Uint64(prm.StorageID)) } diff --git a/pkg/local_object_storage/blobstor/blobtree/get_range.go b/pkg/local_object_storage/blobstor/blobtree/get_range.go index 8740d33f9..190d2f5e9 100644 --- a/pkg/local_object_storage/blobstor/blobtree/get_range.go +++ b/pkg/local_object_storage/blobstor/blobtree/get_range.go @@ -2,6 +2,7 @@ package blobtree import ( "context" + "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" @@ -9,12 +10,21 @@ import ( ) func (b *BlobTree) GetRange(ctx context.Context, prm common.GetRangePrm) (common.GetRangeRes, error) { - res, err := b.Get(ctx, common.GetPrm{Address: prm.Address, StorageID: prm.StorageID}) + var ( + startedAt = time.Now() + success = false + size = 0 + ) + defer func() { + b.cfg.metrics.GetRange(time.Since(startedAt), size, success, prm.StorageID != nil) + }() + + gRes, err := b.get(common.GetPrm{Address: prm.Address, StorageID: prm.StorageID}) if err != nil { return common.GetRangeRes{}, err } - payload := res.Object.Payload() + payload := gRes.Object.Payload() from := prm.Range.GetOffset() to := from + prm.Range.GetLength() @@ -22,7 +32,10 @@ func (b *BlobTree) GetRange(ctx context.Context, prm common.GetRangePrm) (common return common.GetRangeRes{}, logicerr.Wrap(new(apistatus.ObjectOutOfRange)) } - return common.GetRangeRes{ + res := common.GetRangeRes{ Data: 
payload[from:to], - }, nil + } + size = len(res.Data) + success = true + return res, nil } diff --git a/pkg/local_object_storage/blobstor/blobtree/iterate.go b/pkg/local_object_storage/blobstor/blobtree/iterate.go index af39a4a5f..95f2ace3b 100644 --- a/pkg/local_object_storage/blobstor/blobtree/iterate.go +++ b/pkg/local_object_storage/blobstor/blobtree/iterate.go @@ -5,12 +5,22 @@ import ( "encoding/binary" "os" "path/filepath" + "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" ) func (b *BlobTree) Iterate(_ context.Context, prm common.IteratePrm) (common.IterateRes, error) { - return common.IterateRes{}, b.iterateDir(b.cfg.rootPath, 0, prm) + var ( + startedAt = time.Now() + err error + ) + defer func() { + b.cfg.metrics.Iterate(time.Since(startedAt), err == nil) + }() + + err = b.iterateDir(b.cfg.rootPath, 0, prm) + return common.IterateRes{}, err } func (b *BlobTree) iterateDir(dir string, depth uint64, prm common.IteratePrm) error { diff --git a/pkg/local_object_storage/blobstor/blobtree/metrics.go b/pkg/local_object_storage/blobstor/blobtree/metrics.go new file mode 100644 index 000000000..2008ad605 --- /dev/null +++ b/pkg/local_object_storage/blobstor/blobtree/metrics.go @@ -0,0 +1,34 @@ +package blobtree + +import "time" + +type Metrics interface { + SetParentID(parentID string) + + SetMode(readOnly bool) + Close() + + Delete(d time.Duration, success, withStorageID bool) + Exists(d time.Duration, success, withStorageID bool) + GetRange(d time.Duration, size int, success, withStorageID bool) + Get(d time.Duration, size int, success, withStorageID bool) + Iterate(d time.Duration, success bool) + Put(d time.Duration, size int, success bool) + + IncFilesCount() + DecFilesCount() +} + +type noopMetrics struct{} + +func (m *noopMetrics) SetParentID(string) {} +func (m *noopMetrics) SetMode(bool) {} +func (m *noopMetrics) Close() {} +func (m *noopMetrics) Delete(time.Duration, bool, bool) {} +func (m *noopMetrics) Exists(time.Duration, bool, bool) {} +func (m *noopMetrics) GetRange(time.Duration, int, bool, bool) {} +func (m *noopMetrics) Get(time.Duration, int, bool, bool) {} +func (m *noopMetrics) Iterate(time.Duration, bool) {} +func (m *noopMetrics) Put(time.Duration, int, bool) {} +func (m *noopMetrics) IncFilesCount() {} +func (m *noopMetrics) DecFilesCount() {} diff --git a/pkg/local_object_storage/blobstor/blobtree/option.go b/pkg/local_object_storage/blobstor/blobtree/option.go index 9bd8557ea..786fc3f4c 100644 --- a/pkg/local_object_storage/blobstor/blobtree/option.go +++ b/pkg/local_object_storage/blobstor/blobtree/option.go @@ -27,3 +27,9 @@ func WithTargetSize(size uint64) Option { c.targetFileSizeBytes = size } } + +func WithMetrics(m Metrics) Option { + return func(c *cfg) { + c.metrics = m + } +} diff --git a/pkg/local_object_storage/blobstor/blobtree/put.go b/pkg/local_object_storage/blobstor/blobtree/put.go index 9c2c6779b..eee169b19 100644 --- a/pkg/local_object_storage/blobstor/blobtree/put.go +++ b/pkg/local_object_storage/blobstor/blobtree/put.go @@ -5,6 +5,7 @@ import ( "encoding/binary" "os" "strconv" + "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" ) @@ -15,6 +16,15 @@ const ( ) func (b *BlobTree) Put(_ context.Context, prm common.PutPrm) (common.PutRes, error) { + var ( + success bool + size int + startedAt = time.Now() + ) + defer func() { + b.cfg.metrics.Put(time.Since(startedAt), size, success) + }() + if b.cfg.readOnly { return common.PutRes{}, common.ErrReadOnly } @@ 
-34,6 +44,9 @@ func (b *BlobTree) Put(_ context.Context, prm common.PutPrm) (common.PutRes, err return common.PutRes{}, err } + success = true + size = len(prm.RawData) + storageID := make([]byte, storageIDLength) binary.LittleEndian.PutUint64(storageID, idx) return common.PutRes{StorageID: storageID}, nil @@ -79,10 +92,24 @@ func (b *BlobTree) writeToTmpAndRename(records []objectData, path string) (uint6 return 0, err } + newFile := false + _, err = os.Stat(path) + if err != nil { + if os.IsNotExist(err) { + newFile = true + } else { + return 0, err + } + } + if err := os.Rename(tmpFile, path); err != nil { _ = os.Remove(tmpFile) return 0, err } + if newFile { + b.cfg.metrics.IncFilesCount() + } + return size, nil } diff --git a/pkg/local_object_storage/metrics/blobtree.go b/pkg/local_object_storage/metrics/blobtree.go new file mode 100644 index 000000000..e78685fae --- /dev/null +++ b/pkg/local_object_storage/metrics/blobtree.go @@ -0,0 +1,74 @@ +package metrics + +import ( + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobtree" + metrics_impl "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics" +) + +func NewBlobTreeMetrics(path string, m metrics_impl.BlobTreeMetrics) blobtree.Metrics { + return &blobTreeMetrics{ + path: path, + m: m, + } +} + +type blobTreeMetrics struct { + shardID string + path string + m metrics_impl.BlobTreeMetrics +} + +func (m *blobTreeMetrics) SetParentID(parentID string) { + m.shardID = parentID +} + +func (m *blobTreeMetrics) SetMode(readOnly bool) { + m.m.SetBlobTreeMode(m.shardID, m.path, readOnly) +} + +func (m *blobTreeMetrics) Close() { + m.m.CloseBlobTree(m.shardID, m.path) +} + +func (m *blobTreeMetrics) Delete(d time.Duration, success, withStorageID bool) { + m.m.BlobTreeMethodDuration(m.shardID, m.path, "Delete", d, success, metrics_impl.NullBool{Valid: true, Bool: withStorageID}) +} + +func (m *blobTreeMetrics) Exists(d time.Duration, success, withStorageID bool) { + m.m.BlobTreeMethodDuration(m.shardID, m.path, "Exists", d, success, metrics_impl.NullBool{Valid: true, Bool: withStorageID}) +} + +func (m *blobTreeMetrics) GetRange(d time.Duration, size int, success, withStorageID bool) { + m.m.BlobTreeMethodDuration(m.shardID, m.path, "GetRange", d, success, metrics_impl.NullBool{Valid: true, Bool: withStorageID}) + if success { + m.m.AddBlobTreeGet(m.shardID, m.path, size) + } +} + +func (m *blobTreeMetrics) Get(d time.Duration, size int, success, withStorageID bool) { + m.m.BlobTreeMethodDuration(m.shardID, m.path, "Get", d, success, metrics_impl.NullBool{Valid: true, Bool: withStorageID}) + if success { + m.m.AddBlobTreeGet(m.shardID, m.path, size) + } +} + +func (m *blobTreeMetrics) Iterate(d time.Duration, success bool) { + m.m.BlobTreeMethodDuration(m.shardID, m.path, "Iterate", d, success, metrics_impl.NullBool{}) +} + +func (m *blobTreeMetrics) Put(d time.Duration, size int, success bool) { + m.m.BlobTreeMethodDuration(m.shardID, m.path, "Put", d, success, metrics_impl.NullBool{}) + if success { + m.m.AddBlobTreePut(m.shardID, m.path, size) + } +} + +func (m *blobTreeMetrics) IncFilesCount() { + m.m.IncBlobTreeFilesCount(m.shardID, m.path) +} + +func (m *blobTreeMetrics) DecFilesCount() { + m.m.DecBlobTreeFilesCount(m.shardID, m.path) +} diff --git a/pkg/metrics/blobtree.go b/pkg/metrics/blobtree.go new file mode 100644 index 000000000..798263a7c --- /dev/null +++ b/pkg/metrics/blobtree.go @@ -0,0 +1,120 @@ +package metrics + +import ( + "strconv" + "time" + + 
"git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" + "github.com/prometheus/client_golang/prometheus" +) + +type BlobTreeMetrics interface { + SetBlobTreeMode(shardID, path string, readOnly bool) + CloseBlobTree(shardID, path string) + BlobTreeMethodDuration(shardID, path string, method string, d time.Duration, success bool, withStorageID NullBool) + IncBlobTreeFilesCount(shardID, path string) + DecBlobTreeFilesCount(shardID, path string) + AddBlobTreePut(shardID, path string, size int) + AddBlobTreeGet(shardID, path string, size int) +} + +type blobTreeMetrics struct { + mode *shardIDPathModeValue + reqDuration *prometheus.HistogramVec + put *prometheus.CounterVec + get *prometheus.CounterVec + filesCount *prometheus.GaugeVec +} + +func newBlobTreeMetrics() *blobTreeMetrics { + return &blobTreeMetrics{ + mode: newShardIDPathMode(blobTreeSubSystem, "mode", "Blob tree mode"), + + reqDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: blobTreeSubSystem, + Name: "request_duration_seconds", + Help: "Accumulated Blob tree request process duration", + }, []string{shardIDLabel, pathLabel, successLabel, methodLabel, withStorageIDLabel}), + put: metrics.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: blobTreeSubSystem, + Name: "put_bytes", + Help: "Accumulated payload size written to Blob tree", + }, []string{shardIDLabel, pathLabel}), + get: metrics.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: blobTreeSubSystem, + Name: "get_bytes", + Help: "Accumulated payload size read from Blob tree", + }, []string{shardIDLabel, pathLabel}), + filesCount: metrics.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: blobTreeSubSystem, + Name: "files_count", + Help: "Count of data files in Blob tree", + }, []string{shardIDLabel, pathLabel}), + } +} + +func (b *blobTreeMetrics) SetBlobTreeMode(shardID, path string, readOnly bool) { + b.mode.SetMode(shardID, path, modeFromBool(readOnly)) +} + +func (b *blobTreeMetrics) CloseBlobTree(shardID, path string) { + b.mode.SetMode(shardID, path, closedMode) + b.reqDuration.DeletePartialMatch(prometheus.Labels{ + shardIDLabel: shardID, + pathLabel: path, + }) + b.get.DeletePartialMatch(prometheus.Labels{ + shardIDLabel: shardID, + pathLabel: path, + }) + b.put.DeletePartialMatch(prometheus.Labels{ + shardIDLabel: shardID, + pathLabel: path, + }) + b.filesCount.DeletePartialMatch(prometheus.Labels{ + shardIDLabel: shardID, + pathLabel: path, + }) +} + +func (b *blobTreeMetrics) BlobTreeMethodDuration(shardID, path string, method string, d time.Duration, success bool, withStorageID NullBool) { + b.reqDuration.With(prometheus.Labels{ + shardIDLabel: shardID, + pathLabel: path, + successLabel: strconv.FormatBool(success), + methodLabel: method, + withStorageIDLabel: withStorageID.String(), + }).Observe(d.Seconds()) +} + +func (b *blobTreeMetrics) IncBlobTreeFilesCount(shardID, path string) { + b.filesCount.With(prometheus.Labels{ + shardIDLabel: shardID, + pathLabel: path, + }).Inc() +} + +func (b *blobTreeMetrics) DecBlobTreeFilesCount(shardID, path string) { + b.filesCount.With(prometheus.Labels{ + shardIDLabel: shardID, + pathLabel: path, + }).Dec() +} + +func (b *blobTreeMetrics) AddBlobTreePut(shardID, path string, size int) { + b.put.With(prometheus.Labels{ + shardIDLabel: shardID, + pathLabel: path, + }).Add(float64(size)) +} + +func (b *blobTreeMetrics) AddBlobTreeGet(shardID, path string, size int) { + b.get.With(prometheus.Labels{ + 
shardIDLabel: shardID, + pathLabel: path, + }).Add(float64(size)) +} diff --git a/pkg/metrics/consts.go b/pkg/metrics/consts.go index f7a8fd771..708ae45a9 100644 --- a/pkg/metrics/consts.go +++ b/pkg/metrics/consts.go @@ -7,6 +7,7 @@ const ( fstreeSubSystem = "fstree" blobstoreSubSystem = "blobstore" blobovniczaTreeSubSystem = "blobovnicza_tree" + blobTreeSubSystem = "blobtree" metabaseSubSystem = "metabase" piloramaSubSystem = "pilorama" engineSubsystem = "engine" diff --git a/pkg/metrics/node.go b/pkg/metrics/node.go index 0dd86d90e..04952f654 100644 --- a/pkg/metrics/node.go +++ b/pkg/metrics/node.go @@ -20,6 +20,7 @@ type NodeMetrics struct { metabase *metabaseMetrics pilorama *piloramaMetrics grpc *grpcServerMetrics + blobTree *blobTreeMetrics policer *policerMetrics morphClient *morphClientMetrics morphCache *morphCacheMetrics @@ -49,6 +50,7 @@ func NewNodeMetrics() *NodeMetrics { morphClient: newMorphClientMetrics(), morphCache: newMorphCacheMetrics(namespace), log: logger.NewLogMetrics(namespace), + blobTree: newBlobTreeMetrics(), } } @@ -116,3 +118,7 @@ func (m *NodeMetrics) MorphCacheMetrics() MorphCacheMetrics { func (m *NodeMetrics) LogMetrics() logger.LogMetrics { return m.log } + +func (m *NodeMetrics) BlobTreeMetrics() BlobTreeMetrics { + return m.blobTree +} -- 2.45.2 From 49668c3bfa75ea522c809460375b972b3afcc15d Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 4 Oct 2023 14:41:42 +0300 Subject: [PATCH 04/11] [#645] blobovnicza: Fix metrics Fix naming. Add `metrics.Close` call. Fix `metrics.Close` implemetation. Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/config.go | 2 +- .../blobovnicza/blobovnicza_test.go | 2 +- .../blobstor/blobovniczatree/control.go | 2 +- .../blobstor/blobovniczatree/put.go | 2 +- .../metrics/blobovnicza.go | 28 +++++++-------- pkg/metrics/blobovnicza.go | 34 +++++++++++++------ pkg/metrics/node.go | 28 +++++++-------- 7 files changed, 55 insertions(+), 43 deletions(-) diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index b3c1892f7..ef626350b 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -808,7 +808,7 @@ func (c *cfg) getSubstorageOpts(shCfg shardCfg) []blobstor.SubStorage { if c.metricsCollector != nil { blobTreeOpts = append(blobTreeOpts, blobovniczatree.WithMetrics( - lsmetrics.NewBlobovniczaTreeMetrics(sRead.path, c.metricsCollector.BlobobvnizcaTreeMetrics()), + lsmetrics.NewBlobovniczaTreeMetrics(sRead.path, c.metricsCollector.BlobovniczaTreeMetrics()), ), ) } diff --git a/pkg/local_object_storage/blobovnicza/blobovnicza_test.go b/pkg/local_object_storage/blobovnicza/blobovnicza_test.go index 8d701ae5c..7fe6a7657 100644 --- a/pkg/local_object_storage/blobovnicza/blobovnicza_test.go +++ b/pkg/local_object_storage/blobovnicza/blobovnicza_test.go @@ -97,7 +97,7 @@ func TestBlobovnicza(t *testing.T) { testPutGet(t, blz, oidtest.Address(), objSizeLim, nil, nil) } - // blobovnizca accepts object event if full + // blobovnicza accepts object event if full testPutGet(t, blz, oidtest.Address(), 1024, func(err error) bool { return err == nil }, nil) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control.go b/pkg/local_object_storage/blobstor/blobovniczatree/control.go index d993767b7..722ec0002 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/control.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/control.go @@ -50,7 +50,7 @@ func (b *Blobovniczas) Close() error { b.dbCache.Close() // order important b.activeDBManager.Close() 
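CloseBlobTree above (and, after this patch, CloseBlobovniczaTree as well) relies on DeletePartialMatch to drop every time series that belongs to a closed (shard, path) pair, so stale gauges do not linger after a substorage shuts down. A stand-alone sketch of that pattern written directly against client_golang (label names and values here are illustrative, not the node's constants):

    package main

    import (
    	"fmt"

    	"github.com/prometheus/client_golang/prometheus"
    )

    func main() {
    	filesCount := prometheus.NewGaugeVec(prometheus.GaugeOpts{
    		Namespace: "frostfs_node",
    		Subsystem: "blobtree",
    		Name:      "files_count",
    		Help:      "Count of data files in Blob tree",
    	}, []string{"shard_id", "path"})

    	filesCount.With(prometheus.Labels{"shard_id": "shard-1", "path": "/srv/a"}).Set(10)
    	filesCount.With(prometheus.Labels{"shard_id": "shard-1", "path": "/srv/b"}).Set(20)

    	// On Close, remove every series that matches the closed (shard, path)
    	// pair; series for other paths of the same shard are left intact.
    	deleted := filesCount.DeletePartialMatch(prometheus.Labels{
    		"shard_id": "shard-1",
    		"path":     "/srv/a",
    	})
    	fmt.Println("series removed:", deleted) // 1
    }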
b.commondbManager.Close() - + b.metrics.Close() return nil } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/put.go b/pkg/local_object_storage/blobstor/blobovniczatree/put.go index 6f9c8c0de..a02fef13e 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/put.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/put.go @@ -15,7 +15,7 @@ import ( "go.uber.org/zap" ) -// Put saves object in the maximum weight blobobnicza. +// Put saves object in the maximum weight blobovnicza. // // returns error if could not save object in any blobovnicza. func (b *Blobovniczas) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, error) { diff --git a/pkg/local_object_storage/metrics/blobovnicza.go b/pkg/local_object_storage/metrics/blobovnicza.go index 0d0318b3b..16f21cf44 100644 --- a/pkg/local_object_storage/metrics/blobovnicza.go +++ b/pkg/local_object_storage/metrics/blobovnicza.go @@ -8,7 +8,7 @@ import ( metrics_impl "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics" ) -func NewBlobovniczaTreeMetrics(path string, m metrics_impl.BlobobvnizcaMetrics) blobovniczatree.Metrics { +func NewBlobovniczaTreeMetrics(path string, m metrics_impl.BlobovniczaMetrics) blobovniczatree.Metrics { return &blobovniczaTreeMetrics{ path: path, shardID: undefined, @@ -19,7 +19,7 @@ func NewBlobovniczaTreeMetrics(path string, m metrics_impl.BlobobvnizcaMetrics) type blobovniczaTreeMetrics struct { shardID string path string - m metrics_impl.BlobobvnizcaMetrics + m metrics_impl.BlobovniczaMetrics } func (m *blobovniczaTreeMetrics) Blobovnicza() blobovnicza.Metrics { @@ -35,48 +35,48 @@ func (m *blobovniczaTreeMetrics) SetParentID(parentID string) { } func (m *blobovniczaTreeMetrics) SetMode(readOnly bool) { - m.m.SetBlobobvnizcaTreeMode(m.shardID, m.path, readOnly) + m.m.SetBlobovniczaTreeMode(m.shardID, m.path, readOnly) } func (m *blobovniczaTreeMetrics) Close() { - m.m.CloseBlobobvnizcaTree(m.shardID, m.path) + m.m.CloseBlobovniczaTree(m.shardID, m.path) } func (m *blobovniczaTreeMetrics) Delete(d time.Duration, success, withStorageID bool) { - m.m.BlobobvnizcaTreeMethodDuration(m.shardID, m.path, "Delete", d, success, metrics_impl.NullBool{Valid: true, Bool: withStorageID}) + m.m.BlobovniczaTreeMethodDuration(m.shardID, m.path, "Delete", d, success, metrics_impl.NullBool{Valid: true, Bool: withStorageID}) } func (m *blobovniczaTreeMetrics) Exists(d time.Duration, success, withStorageID bool) { - m.m.BlobobvnizcaTreeMethodDuration(m.shardID, m.path, "Exists", d, success, metrics_impl.NullBool{Valid: true, Bool: withStorageID}) + m.m.BlobovniczaTreeMethodDuration(m.shardID, m.path, "Exists", d, success, metrics_impl.NullBool{Valid: true, Bool: withStorageID}) } func (m *blobovniczaTreeMetrics) GetRange(d time.Duration, size int, success, withStorageID bool) { - m.m.BlobobvnizcaTreeMethodDuration(m.shardID, m.path, "GetRange", d, success, metrics_impl.NullBool{Valid: true, Bool: withStorageID}) + m.m.BlobovniczaTreeMethodDuration(m.shardID, m.path, "GetRange", d, success, metrics_impl.NullBool{Valid: true, Bool: withStorageID}) if success { - m.m.AddBlobobvnizcaTreeGet(m.shardID, m.path, size) + m.m.AddBlobovniczaTreeGet(m.shardID, m.path, size) } } func (m *blobovniczaTreeMetrics) Get(d time.Duration, size int, success, withStorageID bool) { - m.m.BlobobvnizcaTreeMethodDuration(m.shardID, m.path, "Get", d, success, metrics_impl.NullBool{Valid: true, Bool: withStorageID}) + m.m.BlobovniczaTreeMethodDuration(m.shardID, m.path, "Get", d, success, metrics_impl.NullBool{Valid: true, 
Bool: withStorageID}) if success { - m.m.AddBlobobvnizcaTreeGet(m.shardID, m.path, size) + m.m.AddBlobovniczaTreeGet(m.shardID, m.path, size) } } func (m *blobovniczaTreeMetrics) Iterate(d time.Duration, success bool) { - m.m.BlobobvnizcaTreeMethodDuration(m.shardID, m.path, "Iterate", d, success, metrics_impl.NullBool{}) + m.m.BlobovniczaTreeMethodDuration(m.shardID, m.path, "Iterate", d, success, metrics_impl.NullBool{}) } func (m *blobovniczaTreeMetrics) Put(d time.Duration, size int, success bool) { - m.m.BlobobvnizcaTreeMethodDuration(m.shardID, m.path, "Put", d, success, metrics_impl.NullBool{}) + m.m.BlobovniczaTreeMethodDuration(m.shardID, m.path, "Put", d, success, metrics_impl.NullBool{}) if success { - m.m.AddBlobobvnizcaTreePut(m.shardID, m.path, size) + m.m.AddBlobovniczaTreePut(m.shardID, m.path, size) } } type blobovniczaMetrics struct { - m metrics_impl.BlobobvnizcaMetrics + m metrics_impl.BlobovniczaMetrics shardID func() string path string } diff --git a/pkg/metrics/blobovnicza.go b/pkg/metrics/blobovnicza.go index a1ecbc700..7179a1bc4 100644 --- a/pkg/metrics/blobovnicza.go +++ b/pkg/metrics/blobovnicza.go @@ -8,12 +8,12 @@ import ( "github.com/prometheus/client_golang/prometheus" ) -type BlobobvnizcaMetrics interface { - SetBlobobvnizcaTreeMode(shardID, path string, readOnly bool) - CloseBlobobvnizcaTree(shardID, path string) - BlobobvnizcaTreeMethodDuration(shardID, path string, method string, d time.Duration, success bool, withStorageID NullBool) - AddBlobobvnizcaTreePut(shardID, path string, size int) - AddBlobobvnizcaTreeGet(shardID, path string, size int) +type BlobovniczaMetrics interface { + SetBlobovniczaTreeMode(shardID, path string, readOnly bool) + CloseBlobovniczaTree(shardID, path string) + BlobovniczaTreeMethodDuration(shardID, path string, method string, d time.Duration, success bool, withStorageID NullBool) + AddBlobovniczaTreePut(shardID, path string, size int) + AddBlobovniczaTreeGet(shardID, path string, size int) AddOpenBlobovniczaSize(shardID, path string, size uint64) SubOpenBlobovniczaSize(shardID, path string, size uint64) @@ -78,11 +78,11 @@ func newBlobovnicza() *blobovnicza { } } -func (b *blobovnicza) SetBlobobvnizcaTreeMode(shardID, path string, readOnly bool) { +func (b *blobovnicza) SetBlobovniczaTreeMode(shardID, path string, readOnly bool) { b.treeMode.SetMode(shardID, path, modeFromBool(readOnly)) } -func (b *blobovnicza) CloseBlobobvnizcaTree(shardID, path string) { +func (b *blobovnicza) CloseBlobovniczaTree(shardID, path string) { b.treeMode.SetMode(shardID, path, closedMode) b.treeReqDuration.DeletePartialMatch(prometheus.Labels{ shardIDLabel: shardID, @@ -96,9 +96,21 @@ func (b *blobovnicza) CloseBlobobvnizcaTree(shardID, path string) { shardIDLabel: shardID, pathLabel: path, }) + b.treeOpenSize.DeletePartialMatch(prometheus.Labels{ + shardIDLabel: shardID, + pathLabel: path, + }) + b.treeOpenItems.DeletePartialMatch(prometheus.Labels{ + shardIDLabel: shardID, + pathLabel: path, + }) + b.treeOpenCounter.DeletePartialMatch(prometheus.Labels{ + shardIDLabel: shardID, + pathLabel: path, + }) } -func (b *blobovnicza) BlobobvnizcaTreeMethodDuration(shardID, path string, method string, d time.Duration, success bool, withStorageID NullBool) { +func (b *blobovnicza) BlobovniczaTreeMethodDuration(shardID, path string, method string, d time.Duration, success bool, withStorageID NullBool) { b.treeReqDuration.With(prometheus.Labels{ shardIDLabel: shardID, pathLabel: path, @@ -108,14 +120,14 @@ func (b *blobovnicza) 
BlobobvnizcaTreeMethodDuration(shardID, path string, metho }).Observe(d.Seconds()) } -func (b *blobovnicza) AddBlobobvnizcaTreePut(shardID, path string, size int) { +func (b *blobovnicza) AddBlobovniczaTreePut(shardID, path string, size int) { b.treePut.With(prometheus.Labels{ shardIDLabel: shardID, pathLabel: path, }).Add(float64(size)) } -func (b *blobovnicza) AddBlobobvnizcaTreeGet(shardID, path string, size int) { +func (b *blobovnicza) AddBlobovniczaTreeGet(shardID, path string, size int) { b.treeGet.With(prometheus.Labels{ shardIDLabel: shardID, pathLabel: path, diff --git a/pkg/metrics/node.go b/pkg/metrics/node.go index 04952f654..8e82207aa 100644 --- a/pkg/metrics/node.go +++ b/pkg/metrics/node.go @@ -16,7 +16,7 @@ type NodeMetrics struct { epoch prometheus.Gauge fstree *fstreeMetrics blobstore *blobstoreMetrics - blobobvnizca *blobovnicza + blobovnicza *blobovnicza metabase *metabaseMetrics pilorama *piloramaMetrics grpc *grpcServerMetrics @@ -40,17 +40,17 @@ func NewNodeMetrics() *NodeMetrics { Name: "epoch", Help: "Current epoch as seen by inner-ring node.", }), - fstree: newFSTreeMetrics(), - blobstore: newBlobstoreMetrics(), - blobobvnizca: newBlobovnicza(), - metabase: newMetabaseMetrics(), - pilorama: newPiloramaMetrics(), - grpc: newGrpcServerMetrics(), - policer: newPolicerMetrics(), - morphClient: newMorphClientMetrics(), - morphCache: newMorphCacheMetrics(namespace), - log: logger.NewLogMetrics(namespace), - blobTree: newBlobTreeMetrics(), + fstree: newFSTreeMetrics(), + blobstore: newBlobstoreMetrics(), + blobovnicza: newBlobovnicza(), + metabase: newMetabaseMetrics(), + pilorama: newPiloramaMetrics(), + grpc: newGrpcServerMetrics(), + policer: newPolicerMetrics(), + morphClient: newMorphClientMetrics(), + morphCache: newMorphCacheMetrics(namespace), + log: logger.NewLogMetrics(namespace), + blobTree: newBlobTreeMetrics(), } } @@ -87,8 +87,8 @@ func (m *NodeMetrics) Blobstore() BlobstoreMetrics { return m.blobstore } -func (m *NodeMetrics) BlobobvnizcaTreeMetrics() BlobobvnizcaMetrics { - return m.blobobvnizca +func (m *NodeMetrics) BlobovniczaTreeMetrics() BlobovniczaMetrics { + return m.blobovnicza } func (m *NodeMetrics) MetabaseMetrics() MetabaseMetrics { -- 2.45.2 From 41f94fe18fe9426f93d7bb07c2207774e23e00c4 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 4 Oct 2023 16:08:33 +0300 Subject: [PATCH 05/11] [#645] blobtree: Add tracing Signed-off-by: Dmitrii Stepanov --- .../blobstor/blobtree/delete.go | 18 +++++++++--- .../blobstor/blobtree/exists.go | 18 +++++++++--- .../blobstor/blobtree/get.go | 19 ++++++++++--- .../blobstor/blobtree/get_range.go | 14 ++++++++++ .../blobstor/blobtree/iterate.go | 17 +++++++---- .../blobstor/blobtree/put.go | 18 ++++++++---- .../blobstor/blobtree/storage_id.go | 28 +++++++++++++++++++ 7 files changed, 109 insertions(+), 23 deletions(-) create mode 100644 pkg/local_object_storage/blobstor/blobtree/storage_id.go diff --git a/pkg/local_object_storage/blobstor/blobtree/delete.go b/pkg/local_object_storage/blobstor/blobtree/delete.go index 3d33b8f2a..cdda9d39f 100644 --- a/pkg/local_object_storage/blobstor/blobtree/delete.go +++ b/pkg/local_object_storage/blobstor/blobtree/delete.go @@ -2,17 +2,19 @@ package blobtree import ( "context" - "encoding/binary" "os" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" + "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" apistatus 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) -func (b *BlobTree) Delete(_ context.Context, prm common.DeletePrm) (common.DeleteRes, error) { +func (b *BlobTree) Delete(ctx context.Context, prm common.DeletePrm) (common.DeleteRes, error) { var ( success = false startedAt = time.Now() @@ -21,14 +23,22 @@ func (b *BlobTree) Delete(_ context.Context, prm common.DeletePrm) (common.Delet b.cfg.metrics.Delete(time.Since(startedAt), success, prm.StorageID != nil) }() + _, span := tracing.StartSpanFromContext(ctx, "BlobTree.Delete", + trace.WithAttributes( + attribute.String("path", b.cfg.rootPath), + attribute.String("address", prm.Address.EncodeToString()), + attribute.String("storage_id", storageIDToIdxStringSafe(prm.StorageID)), + )) + defer span.End() + if b.cfg.readOnly { return common.DeleteRes{}, common.ErrReadOnly } var res common.DeleteRes var err error - if len(prm.StorageID) == storageIDLength { - res, err = b.deleteFromIdx(prm.Address, binary.LittleEndian.Uint64(prm.StorageID)) + if idx, ok := tryParseIdxFromStorageID(prm.StorageID); ok { + res, err = b.deleteFromIdx(prm.Address, idx) } else { res, err = b.findAndDelete(prm.Address) } diff --git a/pkg/local_object_storage/blobstor/blobtree/exists.go b/pkg/local_object_storage/blobstor/blobtree/exists.go index 08ba3a113..433578b32 100644 --- a/pkg/local_object_storage/blobstor/blobtree/exists.go +++ b/pkg/local_object_storage/blobstor/blobtree/exists.go @@ -2,16 +2,18 @@ package blobtree import ( "context" - "encoding/binary" "errors" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) -func (b *BlobTree) Exists(_ context.Context, prm common.ExistsPrm) (common.ExistsRes, error) { +func (b *BlobTree) Exists(ctx context.Context, prm common.ExistsPrm) (common.ExistsRes, error) { var ( startedAt = time.Now() success = false @@ -20,10 +22,18 @@ func (b *BlobTree) Exists(_ context.Context, prm common.ExistsPrm) (common.Exist b.cfg.metrics.Exists(time.Since(startedAt), success, prm.StorageID != nil) }() + _, span := tracing.StartSpanFromContext(ctx, "BlobTree.Exists", + trace.WithAttributes( + attribute.String("path", b.cfg.rootPath), + attribute.String("address", prm.Address.EncodeToString()), + attribute.String("storage_id", storageIDToIdxStringSafe(prm.StorageID)), + )) + defer span.End() + var res common.ExistsRes var err error - if len(prm.StorageID) == storageIDLength { - res, err = b.existsFromIdx(prm.Address, binary.LittleEndian.Uint64(prm.StorageID)) + if idx, ok := tryParseIdxFromStorageID(prm.StorageID); ok { + res, err = b.existsFromIdx(prm.Address, idx) } else { res, err = b.findAndCheck(prm.Address) } diff --git a/pkg/local_object_storage/blobstor/blobtree/get.go b/pkg/local_object_storage/blobstor/blobtree/get.go index a1ed3c110..1569a0687 100644 --- a/pkg/local_object_storage/blobstor/blobtree/get.go +++ b/pkg/local_object_storage/blobstor/blobtree/get.go @@ -2,18 +2,20 @@ package blobtree import ( "context" - "encoding/binary" "os" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" + "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) -func (b *BlobTree) Get(_ context.Context, prm common.GetPrm) (common.GetRes, error) { +func (b *BlobTree) Get(ctx context.Context, prm common.GetPrm) (common.GetRes, error) { var ( startedAt = time.Now() success = false @@ -23,6 +25,15 @@ func (b *BlobTree) Get(_ context.Context, prm common.GetPrm) (common.GetRes, err b.cfg.metrics.Get(time.Since(startedAt), size, success, prm.StorageID != nil) }() + _, span := tracing.StartSpanFromContext(ctx, "BlobTree.Get", + trace.WithAttributes( + attribute.String("path", b.cfg.rootPath), + attribute.String("address", prm.Address.EncodeToString()), + attribute.String("storage_id", storageIDToIdxStringSafe(prm.StorageID)), + attribute.Bool("raw", prm.Raw), + )) + defer span.End() + res, err := b.get(prm) success = err == nil size = len(res.RawData) @@ -30,8 +41,8 @@ func (b *BlobTree) Get(_ context.Context, prm common.GetPrm) (common.GetRes, err } func (b *BlobTree) get(prm common.GetPrm) (common.GetRes, error) { - if len(prm.StorageID) == storageIDLength { - return b.getFromIdx(prm.Address, binary.LittleEndian.Uint64(prm.StorageID)) + if idx, ok := tryParseIdxFromStorageID(prm.StorageID); ok { + return b.getFromIdx(prm.Address, idx) } return b.findAndGet(prm.Address) } diff --git a/pkg/local_object_storage/blobstor/blobtree/get_range.go b/pkg/local_object_storage/blobstor/blobtree/get_range.go index 190d2f5e9..0c6071c59 100644 --- a/pkg/local_object_storage/blobstor/blobtree/get_range.go +++ b/pkg/local_object_storage/blobstor/blobtree/get_range.go @@ -2,11 +2,15 @@ package blobtree import ( "context" + "strconv" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" + "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) func (b *BlobTree) GetRange(ctx context.Context, prm common.GetRangePrm) (common.GetRangeRes, error) { @@ -19,6 +23,16 @@ func (b *BlobTree) GetRange(ctx context.Context, prm common.GetRangePrm) (common b.cfg.metrics.GetRange(time.Since(startedAt), size, success, prm.StorageID != nil) }() + _, span := tracing.StartSpanFromContext(ctx, "BlobTree.GetRange", + trace.WithAttributes( + attribute.String("path", b.cfg.rootPath), + attribute.String("address", prm.Address.EncodeToString()), + attribute.String("storage_id", storageIDToIdxStringSafe(prm.StorageID)), + attribute.String("offset", strconv.FormatUint(prm.Range.GetOffset(), 10)), + attribute.String("length", strconv.FormatUint(prm.Range.GetLength(), 10)), + )) + defer span.End() + gRes, err := b.get(common.GetPrm{Address: prm.Address, StorageID: prm.StorageID}) if err != nil { return common.GetRangeRes{}, err diff --git a/pkg/local_object_storage/blobstor/blobtree/iterate.go b/pkg/local_object_storage/blobstor/blobtree/iterate.go index 95f2ace3b..1fc1418d9 100644 --- a/pkg/local_object_storage/blobstor/blobtree/iterate.go +++ b/pkg/local_object_storage/blobstor/blobtree/iterate.go @@ -2,15 +2,17 @@ 
package blobtree import ( "context" - "encoding/binary" "os" "path/filepath" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) -func (b *BlobTree) Iterate(_ context.Context, prm common.IteratePrm) (common.IterateRes, error) { +func (b *BlobTree) Iterate(ctx context.Context, prm common.IteratePrm) (common.IterateRes, error) { var ( startedAt = time.Now() err error @@ -19,6 +21,13 @@ func (b *BlobTree) Iterate(_ context.Context, prm common.IteratePrm) (common.Ite b.cfg.metrics.Iterate(time.Since(startedAt), err == nil) }() + _, span := tracing.StartSpanFromContext(ctx, "BlobTree.Iterate", + trace.WithAttributes( + attribute.String("path", b.cfg.rootPath), + attribute.Bool("ignore_errors", prm.IgnoreErrors), + )) + defer span.End() + err = b.iterateDir(b.cfg.rootPath, 0, prm) return common.IterateRes{}, err } @@ -91,12 +100,10 @@ func (b *BlobTree) iterateRecords(idx uint64, path string, prm common.IteratePrm return err } - storageID := make([]byte, storageIDLength) - binary.LittleEndian.PutUint64(storageID, idx) err = prm.Handler(common.IterationElement{ Address: record.Address, ObjectData: record.Data, - StorageID: storageID, + StorageID: idxToStorageID(idx), }) if err != nil { return err diff --git a/pkg/local_object_storage/blobstor/blobtree/put.go b/pkg/local_object_storage/blobstor/blobtree/put.go index eee169b19..8359b771a 100644 --- a/pkg/local_object_storage/blobstor/blobtree/put.go +++ b/pkg/local_object_storage/blobstor/blobtree/put.go @@ -2,20 +2,21 @@ package blobtree import ( "context" - "encoding/binary" "os" "strconv" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) const ( tempFileSymbols = "###" - storageIDLength = 8 ) -func (b *BlobTree) Put(_ context.Context, prm common.PutPrm) (common.PutRes, error) { +func (b *BlobTree) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, error) { var ( success bool size int @@ -25,6 +26,13 @@ func (b *BlobTree) Put(_ context.Context, prm common.PutPrm) (common.PutRes, err b.cfg.metrics.Put(time.Since(startedAt), size, success) }() + _, span := tracing.StartSpanFromContext(ctx, "BlobTree.Put", + trace.WithAttributes( + attribute.String("address", prm.Address.EncodeToString()), + attribute.Bool("dont_compress", prm.DontCompress), + )) + defer span.End() + if b.cfg.readOnly { return common.PutRes{}, common.ErrReadOnly } @@ -47,9 +55,7 @@ func (b *BlobTree) Put(_ context.Context, prm common.PutPrm) (common.PutRes, err success = true size = len(prm.RawData) - storageID := make([]byte, storageIDLength) - binary.LittleEndian.PutUint64(storageID, idx) - return common.PutRes{StorageID: storageID}, nil + return common.PutRes{StorageID: idxToStorageID(idx)}, nil } func (b *BlobTree) saveToFile(prm common.PutPrm, dir string) (uint64, error) { diff --git a/pkg/local_object_storage/blobstor/blobtree/storage_id.go b/pkg/local_object_storage/blobstor/blobtree/storage_id.go new file mode 100644 index 000000000..cd176980b --- /dev/null +++ b/pkg/local_object_storage/blobstor/blobtree/storage_id.go @@ -0,0 +1,28 @@ +package blobtree + +import ( + "encoding/binary" + "strconv" +) + +const storageIDLength = 8 + +func tryParseIdxFromStorageID(storageID []byte) (uint64, bool) { + if 
len(storageID) == storageIDLength { + return binary.LittleEndian.Uint64(storageID), true + } + return 0, false +} + +func storageIDToIdxStringSafe(storageID []byte) string { + if len(storageID) == storageIDLength { + return strconv.FormatUint(binary.LittleEndian.Uint64(storageID), 10) + } + return "" +} + +func idxToStorageID(idx uint64) []byte { + storageID := make([]byte, storageIDLength) + binary.LittleEndian.PutUint64(storageID, idx) + return storageID +} -- 2.45.2 From 2f6d4a49dca097ef1f5d80f238d68efd279679d1 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 9 Oct 2023 10:13:21 +0300 Subject: [PATCH 06/11] [#645] blobtree: Add `.data` extension for data files Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/blobstor/blobtree/content.go | 4 +++- pkg/local_object_storage/blobstor/blobtree/control.go | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/pkg/local_object_storage/blobstor/blobtree/content.go b/pkg/local_object_storage/blobstor/blobtree/content.go index 3760d1648..c59ff4ed6 100644 --- a/pkg/local_object_storage/blobstor/blobtree/content.go +++ b/pkg/local_object_storage/blobstor/blobtree/content.go @@ -21,6 +21,8 @@ const ( sizeOfDataLength = 8 sizeOfContainerID = sha256.Size sizeOfObjectID = sha256.Size + + dataExtension = ".data" ) var ( @@ -187,5 +189,5 @@ func (b *BlobTree) estimateSize(records []objectData) uint64 { } func (b *BlobTree) getFilePath(dir string, idx uint64) string { - return filepath.Join(dir, strconv.FormatUint(idx, 16)) + return filepath.Join(dir, strconv.FormatUint(idx, 16)+dataExtension) } diff --git a/pkg/local_object_storage/blobstor/blobtree/control.go b/pkg/local_object_storage/blobstor/blobtree/control.go index af3ba2431..d2f8f3fc2 100644 --- a/pkg/local_object_storage/blobstor/blobtree/control.go +++ b/pkg/local_object_storage/blobstor/blobtree/control.go @@ -78,7 +78,7 @@ func (b *BlobTree) isTempFile(name string) bool { } func (b *BlobTree) parseIdx(name string) (uint64, error) { - return strconv.ParseUint(name, 16, 64) + return strconv.ParseUint(strings.TrimSuffix(name, dataExtension), 16, 64) } func (b *BlobTree) Close() error { -- 2.45.2 From 4daef7077439688c3e8fa4124943405698590701 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 9 Oct 2023 11:45:42 +0300 Subject: [PATCH 07/11] [#645] blobtree: Store path as storageID Signed-off-by: Dmitrii Stepanov --- .../blobstor/blobtree/blobtree.go | 16 ++++++---- .../blobstor/blobtree/content.go | 18 ++++++++++- .../blobstor/blobtree/control.go | 22 ++++++------- .../blobstor/blobtree/delete.go | 27 +++++++++------- .../blobstor/blobtree/exists.go | 32 +++++++++---------- .../blobstor/blobtree/get.go | 22 +++++-------- .../blobstor/blobtree/get_range.go | 2 +- .../blobstor/blobtree/iterate.go | 26 +++++++-------- .../blobstor/blobtree/put.go | 28 ++++++++-------- .../blobstor/blobtree/storage_id.go | 25 ++++----------- 10 files changed, 106 insertions(+), 112 deletions(-) diff --git a/pkg/local_object_storage/blobstor/blobtree/blobtree.go b/pkg/local_object_storage/blobstor/blobtree/blobtree.go index 1270c54bf..a50114676 100644 --- a/pkg/local_object_storage/blobstor/blobtree/blobtree.go +++ b/pkg/local_object_storage/blobstor/blobtree/blobtree.go @@ -48,27 +48,29 @@ func New(opts ...Option) *BlobTree { return b } -func (b *BlobTree) getDirectoryPath(addr oid.Address) string { +func (b *BlobTree) getDir(addr oid.Address) string { sAddr := addr.Object().EncodeToString() + "." 
+ addr.Container().EncodeToString() var sb strings.Builder - size := int(1+b.cfg.depth*(directoryLength+1)) + len(b.cfg.rootPath) // /path + slash + (character + slash for every level) + size := int(b.cfg.depth * (directoryLength + 1)) // (character + slash for every level) sb.Grow(size) - sb.WriteString(b.cfg.rootPath) for i := uint64(0); i < b.cfg.depth; i++ { - sb.WriteRune(filepath.Separator) + if i > 0 { + sb.WriteRune(filepath.Separator) + } sb.WriteString(sAddr[:directoryLength]) sAddr = sAddr[directoryLength:] } - - sb.WriteRune(filepath.Separator) return sb.String() } -func (b *BlobTree) createDir(dir string) error { +func (b *BlobTree) createDir(dir string, isSystemPath bool) error { b.dirLock.Lock(dir) defer b.dirLock.Unlock(dir) + if !isSystemPath { + dir = b.getSystemPath(dir) + } if err := util.MkdirAllX(dir, b.cfg.permissions); err != nil { if errors.Is(err, syscall.ENOSPC) { err = common.ErrNoSpace diff --git a/pkg/local_object_storage/blobstor/blobtree/content.go b/pkg/local_object_storage/blobstor/blobtree/content.go index c59ff4ed6..b3a457e54 100644 --- a/pkg/local_object_storage/blobstor/blobtree/content.go +++ b/pkg/local_object_storage/blobstor/blobtree/content.go @@ -8,6 +8,7 @@ import ( "os" "path/filepath" "strconv" + "strings" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -41,7 +42,7 @@ type objectData struct { } func (b *BlobTree) readFileContent(path string) ([]objectData, error) { - rawData, err := os.ReadFile(path) + rawData, err := os.ReadFile(b.getSystemPath(path)) if err != nil { if os.IsNotExist(err) { return []objectData{}, nil @@ -130,6 +131,7 @@ func (b *BlobTree) saveContentToFile(records []objectData, path string) (uint64, } func (b *BlobTree) writeFile(p string, data []byte) error { + p = b.getSystemPath(p) f, err := os.OpenFile(p, os.O_WRONLY|os.O_CREATE|os.O_TRUNC|os.O_EXCL|os.O_SYNC, b.cfg.permissions) if err != nil { return err @@ -141,6 +143,10 @@ func (b *BlobTree) writeFile(p string, data []byte) error { return err } +func (b *BlobTree) getSystemPath(path string) string { + return filepath.Join(b.cfg.rootPath, path) +} + func (b *BlobTree) marshalSlice(records []objectData) ([]byte, error) { buf := make([]byte, b.estimateSize(records)) result := buf @@ -191,3 +197,13 @@ func (b *BlobTree) estimateSize(records []objectData) uint64 { func (b *BlobTree) getFilePath(dir string, idx uint64) string { return filepath.Join(dir, strconv.FormatUint(idx, 16)+dataExtension) } + +func (b *BlobTree) parsePath(path string) (string, uint64, error) { + dir := filepath.Dir(path) + fileName := strings.TrimSuffix(filepath.Base(path), dataExtension) + idx, err := strconv.ParseUint(fileName, 16, 64) + if err != nil { + return "", 0, fmt.Errorf("failed to parse blobtree path: %w", err) + } + return dir, idx, nil +} diff --git a/pkg/local_object_storage/blobstor/blobtree/control.go b/pkg/local_object_storage/blobstor/blobtree/control.go index d2f8f3fc2..584421b43 100644 --- a/pkg/local_object_storage/blobstor/blobtree/control.go +++ b/pkg/local_object_storage/blobstor/blobtree/control.go @@ -19,37 +19,35 @@ func (b *BlobTree) Open(readOnly bool) error { } func (b *BlobTree) Init() error { - if err := b.createDir(b.cfg.rootPath); err != nil { + if err := b.createDir(b.cfg.rootPath, true); err != nil { return err } var eg errgroup.Group eg.SetLimit(b.cfg.initWorkersCount) eg.Go(func() error { - return b.initDir(&eg, b.cfg.rootPath, 0) + return b.initDir(&eg, "") }) return eg.Wait() } 
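getDir above now returns a path relative to the tree root; the root is prepended later by getSystemPath. Roughly, the fan-out construction works like this (illustrative sketch with one character per directory level):

    package main

    import (
    	"fmt"
    	"path/filepath"
    	"strings"
    )

    // fanOutDir turns the first depth*length characters of the textual address
    // into nested directory levels, length characters per level.
    func fanOutDir(addr string, depth, length int) string {
    	var sb strings.Builder
    	sb.Grow(depth * (length + 1))
    	for i := 0; i < depth; i++ {
    		if i > 0 {
    			sb.WriteRune(filepath.Separator)
    		}
    		sb.WriteString(addr[:length])
    		addr = addr[length:]
    	}
    	return sb.String()
    }

    func main() {
    	// With depth=3 and one character per level this yields "a/b/c".
    	fmt.Println(fanOutDir("abcdef.0123", 3, 1))
    }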
-func (b *BlobTree) initDir(eg *errgroup.Group, dir string, depth uint64) error { - entities, err := os.ReadDir(dir) +func (b *BlobTree) initDir(eg *errgroup.Group, dir string) error { + entities, err := os.ReadDir(b.getSystemPath(dir)) if err != nil { return err } for _, entity := range entities { - if depth < b.cfg.depth && entity.IsDir() { + if entity.IsDir() { eg.Go(func() error { - return b.initDir(eg, filepath.Join(dir, entity.Name()), depth+1) + return b.initDir(eg, filepath.Join(dir, entity.Name())) }) continue } - if depth != b.cfg.depth { - continue - } + path := filepath.Join(dir, entity.Name()) if b.isTempFile(entity.Name()) { - if err = os.Remove(filepath.Join(dir, entity.Name())); err != nil { + if err = os.Remove(b.getSystemPath(path)); err != nil { return err } continue @@ -57,12 +55,12 @@ func (b *BlobTree) initDir(eg *errgroup.Group, dir string, depth uint64) error { idx, err := b.parseIdx(entity.Name()) if err != nil { - continue + return err } b.dispatcher.Init(dir, idx) b.cfg.metrics.IncFilesCount() - stat, err := os.Stat(filepath.Join(dir, entity.Name())) + stat, err := os.Stat(b.getSystemPath(path)) if err != nil { return err } diff --git a/pkg/local_object_storage/blobstor/blobtree/delete.go b/pkg/local_object_storage/blobstor/blobtree/delete.go index cdda9d39f..bfc46ef2b 100644 --- a/pkg/local_object_storage/blobstor/blobtree/delete.go +++ b/pkg/local_object_storage/blobstor/blobtree/delete.go @@ -3,6 +3,7 @@ package blobtree import ( "context" "os" + "path/filepath" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" @@ -27,7 +28,7 @@ func (b *BlobTree) Delete(ctx context.Context, prm common.DeletePrm) (common.Del trace.WithAttributes( attribute.String("path", b.cfg.rootPath), attribute.String("address", prm.Address.EncodeToString()), - attribute.String("storage_id", storageIDToIdxStringSafe(prm.StorageID)), + attribute.String("storage_id", string(prm.StorageID)), )) defer span.End() @@ -37,8 +38,8 @@ func (b *BlobTree) Delete(ctx context.Context, prm common.DeletePrm) (common.Del var res common.DeleteRes var err error - if idx, ok := tryParseIdxFromStorageID(prm.StorageID); ok { - res, err = b.deleteFromIdx(prm.Address, idx) + if path, ok := getPathFromStorageID(prm.StorageID); ok { + res, err = b.deleteFromPath(prm.Address, path) } else { res, err = b.findAndDelete(prm.Address) } @@ -46,13 +47,15 @@ func (b *BlobTree) Delete(ctx context.Context, prm common.DeletePrm) (common.Del return res, err } -func (b *BlobTree) deleteFromIdx(addr oid.Address, idx uint64) (common.DeleteRes, error) { - dir := b.getDirectoryPath(addr) - path := b.getFilePath(dir, idx) - +func (b *BlobTree) deleteFromPath(addr oid.Address, path string) (common.DeleteRes, error) { b.fileLock.Lock(path) defer b.fileLock.Unlock(path) + dir, idx, err := b.parsePath(path) + if err != nil { + return common.DeleteRes{}, err + } + records, err := b.readFileContent(path) if err != nil { return common.DeleteRes{}, err @@ -71,7 +74,7 @@ func (b *BlobTree) deleteFromIdx(addr oid.Address, idx uint64) (common.DeleteRes } if len(records) == 1 { - err = os.Remove(path) + err = os.Remove(b.getSystemPath(path)) if err == nil { b.dispatcher.ReturnIdx(dir, idx) b.cfg.metrics.DecFilesCount() @@ -79,7 +82,7 @@ func (b *BlobTree) deleteFromIdx(addr oid.Address, idx uint64) (common.DeleteRes return common.DeleteRes{}, err } - records = append(records[:idx], records[idx+1:]...) + records = append(records[:deleteIdx], records[deleteIdx+1:]...) 
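Note that the record is now cut out of the slice by its position in the slice (deleteIdx) rather than by the file index that was used before. The order-preserving removal itself is the usual append idiom, for example:

    package main

    import "fmt"

    // removeAt drops the element at position i while preserving order,
    // the append(s[:i], s[i+1:]...) idiom used above.
    func removeAt[T any](s []T, i int) []T {
    	return append(s[:i], s[i+1:]...)
    }

    func main() {
    	recs := []string{"obj-a", "obj-b", "obj-c"}
    	fmt.Println(removeAt(recs, 1)) // [obj-a obj-c]
    }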
size, err := b.writeToTmpAndRename(records, path) if err != nil { return common.DeleteRes{}, err @@ -91,16 +94,16 @@ func (b *BlobTree) deleteFromIdx(addr oid.Address, idx uint64) (common.DeleteRes } func (b *BlobTree) findAndDelete(addr oid.Address) (common.DeleteRes, error) { - dir := b.getDirectoryPath(addr) + dir := b.getDir(addr) idx, err := b.findFileIdx(dir, addr) if err != nil { return common.DeleteRes{}, err } - return b.deleteFromIdx(addr, idx) + return b.deleteFromPath(addr, b.getFilePath(dir, idx)) } func (b *BlobTree) findFileIdx(dir string, addr oid.Address) (uint64, error) { - entities, err := os.ReadDir(dir) + entities, err := os.ReadDir(filepath.Join(b.cfg.rootPath, dir)) if err != nil { if os.IsNotExist(err) { return 0, logicerr.Wrap(new(apistatus.ObjectNotFound)) diff --git a/pkg/local_object_storage/blobstor/blobtree/exists.go b/pkg/local_object_storage/blobstor/blobtree/exists.go index 433578b32..559cbf57e 100644 --- a/pkg/local_object_storage/blobstor/blobtree/exists.go +++ b/pkg/local_object_storage/blobstor/blobtree/exists.go @@ -26,25 +26,22 @@ func (b *BlobTree) Exists(ctx context.Context, prm common.ExistsPrm) (common.Exi trace.WithAttributes( attribute.String("path", b.cfg.rootPath), attribute.String("address", prm.Address.EncodeToString()), - attribute.String("storage_id", storageIDToIdxStringSafe(prm.StorageID)), + attribute.String("storage_id", string(prm.StorageID)), )) defer span.End() var res common.ExistsRes var err error - if idx, ok := tryParseIdxFromStorageID(prm.StorageID); ok { - res, err = b.existsFromIdx(prm.Address, idx) + if path, ok := getPathFromStorageID(prm.StorageID); ok { + res, err = b.existsFromPath(prm.Address, path) } else { - res, err = b.findAndCheck(prm.Address) + res, err = b.findAndCheckExistance(prm.Address) } success = err == nil return res, err } -func (b *BlobTree) existsFromIdx(addr oid.Address, idx uint64) (common.ExistsRes, error) { - dir := b.getDirectoryPath(addr) - path := b.getFilePath(dir, idx) - +func (b *BlobTree) existsFromPath(addr oid.Address, path string) (common.ExistsRes, error) { b.fileLock.RLock(path) defer b.fileLock.RUnlock(path) @@ -64,15 +61,16 @@ func (b *BlobTree) existsFromIdx(addr oid.Address, idx uint64) (common.ExistsRes return common.ExistsRes{}, nil } -func (b *BlobTree) findAndCheck(addr oid.Address) (common.ExistsRes, error) { - dir := b.getDirectoryPath(addr) +func (b *BlobTree) findAndCheckExistance(addr oid.Address) (common.ExistsRes, error) { + dir := b.getDir(addr) _, err := b.findFileIdx(dir, addr) - if err != nil { - var notFound *apistatus.ObjectNotFound - if errors.As(err, ¬Found) { - return common.ExistsRes{}, nil - } - return common.ExistsRes{}, err + if err == nil { + return common.ExistsRes{Exists: true}, nil } - return common.ExistsRes{Exists: true}, nil + + var notFound *apistatus.ObjectNotFound + if errors.As(err, ¬Found) { + return common.ExistsRes{}, nil + } + return common.ExistsRes{}, err } diff --git a/pkg/local_object_storage/blobstor/blobtree/get.go b/pkg/local_object_storage/blobstor/blobtree/get.go index 1569a0687..de92b0239 100644 --- a/pkg/local_object_storage/blobstor/blobtree/get.go +++ b/pkg/local_object_storage/blobstor/blobtree/get.go @@ -3,6 +3,7 @@ package blobtree import ( "context" "os" + "path/filepath" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" @@ -29,7 +30,7 @@ func (b *BlobTree) Get(ctx context.Context, prm common.GetPrm) (common.GetRes, e trace.WithAttributes( attribute.String("path", b.cfg.rootPath), 
attribute.String("address", prm.Address.EncodeToString()), - attribute.String("storage_id", storageIDToIdxStringSafe(prm.StorageID)), + attribute.String("storage_id", string(prm.StorageID)), attribute.Bool("raw", prm.Raw), )) defer span.End() @@ -41,16 +42,13 @@ func (b *BlobTree) Get(ctx context.Context, prm common.GetPrm) (common.GetRes, e } func (b *BlobTree) get(prm common.GetPrm) (common.GetRes, error) { - if idx, ok := tryParseIdxFromStorageID(prm.StorageID); ok { - return b.getFromIdx(prm.Address, idx) + if path, ok := getPathFromStorageID(prm.StorageID); ok { + return b.getFromPath(prm.Address, path) } return b.findAndGet(prm.Address) } -func (b *BlobTree) getFromIdx(addr oid.Address, idx uint64) (common.GetRes, error) { - dir := b.getDirectoryPath(addr) - path := b.getFilePath(dir, idx) - +func (b *BlobTree) getFromPath(addr oid.Address, path string) (common.GetRes, error) { b.fileLock.RLock(path) defer b.fileLock.RUnlock(path) @@ -82,8 +80,8 @@ func (b *BlobTree) unmarshalGetRes(record objectData) (common.GetRes, error) { } func (b *BlobTree) findAndGet(addr oid.Address) (common.GetRes, error) { - dir := b.getDirectoryPath(addr) - entities, err := os.ReadDir(dir) + dir := b.getDir(addr) + entities, err := os.ReadDir(filepath.Join(b.cfg.rootPath, dir)) if err != nil { if os.IsNotExist(err) { return common.GetRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound)) @@ -97,11 +95,7 @@ func (b *BlobTree) findAndGet(addr oid.Address) (common.GetRes, error) { if b.isTempFile(entity.Name()) { continue } - idx, err := b.parseIdx(entity.Name()) - if err != nil { - continue - } - path := b.getFilePath(dir, idx) + path := filepath.Join(dir, entity.Name()) res, err := b.tryReadObject(path, addr) if err != nil { return common.GetRes{}, err diff --git a/pkg/local_object_storage/blobstor/blobtree/get_range.go b/pkg/local_object_storage/blobstor/blobtree/get_range.go index 0c6071c59..a89693c9f 100644 --- a/pkg/local_object_storage/blobstor/blobtree/get_range.go +++ b/pkg/local_object_storage/blobstor/blobtree/get_range.go @@ -27,7 +27,7 @@ func (b *BlobTree) GetRange(ctx context.Context, prm common.GetRangePrm) (common trace.WithAttributes( attribute.String("path", b.cfg.rootPath), attribute.String("address", prm.Address.EncodeToString()), - attribute.String("storage_id", storageIDToIdxStringSafe(prm.StorageID)), + attribute.String("storage_id", string(prm.StorageID)), attribute.String("offset", strconv.FormatUint(prm.Range.GetOffset(), 10)), attribute.String("length", strconv.FormatUint(prm.Range.GetLength(), 10)), )) diff --git a/pkg/local_object_storage/blobstor/blobtree/iterate.go b/pkg/local_object_storage/blobstor/blobtree/iterate.go index 1fc1418d9..61a00f7d5 100644 --- a/pkg/local_object_storage/blobstor/blobtree/iterate.go +++ b/pkg/local_object_storage/blobstor/blobtree/iterate.go @@ -28,12 +28,12 @@ func (b *BlobTree) Iterate(ctx context.Context, prm common.IteratePrm) (common.I )) defer span.End() - err = b.iterateDir(b.cfg.rootPath, 0, prm) + err = b.iterateDir("", prm) return common.IterateRes{}, err } -func (b *BlobTree) iterateDir(dir string, depth uint64, prm common.IteratePrm) error { - entities, err := os.ReadDir(dir) +func (b *BlobTree) iterateDir(dir string, prm common.IteratePrm) error { + entities, err := os.ReadDir(filepath.Join(b.cfg.rootPath, dir)) if err != nil { if prm.IgnoreErrors { return nil @@ -42,24 +42,20 @@ func (b *BlobTree) iterateDir(dir string, depth uint64, prm common.IteratePrm) e } for _, entity := range entities { - if depth < b.cfg.depth && entity.IsDir() { 
- err := b.iterateDir(filepath.Join(dir, entity.Name()), depth+1, prm) + if entity.IsDir() { + err := b.iterateDir(filepath.Join(dir, entity.Name()), prm) if err != nil { return err } - } - if depth != b.cfg.depth { continue } + if b.isTempFile(entity.Name()) { continue } - idx, err := b.parseIdx(entity.Name()) - if err != nil { - continue - } - path := b.getFilePath(dir, idx) - err = b.iterateRecords(idx, path, prm) + + path := filepath.Join(dir, entity.Name()) + err = b.iterateRecords(path, prm) if err != nil { return err } @@ -67,7 +63,7 @@ func (b *BlobTree) iterateDir(dir string, depth uint64, prm common.IteratePrm) e return nil } -func (b *BlobTree) iterateRecords(idx uint64, path string, prm common.IteratePrm) error { +func (b *BlobTree) iterateRecords(path string, prm common.IteratePrm) error { b.fileLock.RLock(path) defer b.fileLock.RUnlock(path) @@ -103,7 +99,7 @@ func (b *BlobTree) iterateRecords(idx uint64, path string, prm common.IteratePrm err = prm.Handler(common.IterationElement{ Address: record.Address, ObjectData: record.Data, - StorageID: idxToStorageID(idx), + StorageID: pathToStorageID(path), }) if err != nil { return err diff --git a/pkg/local_object_storage/blobstor/blobtree/put.go b/pkg/local_object_storage/blobstor/blobtree/put.go index 8359b771a..6d6653171 100644 --- a/pkg/local_object_storage/blobstor/blobtree/put.go +++ b/pkg/local_object_storage/blobstor/blobtree/put.go @@ -37,9 +37,9 @@ func (b *BlobTree) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, e return common.PutRes{}, common.ErrReadOnly } - dir := b.getDirectoryPath(prm.Address) + dir := b.getDir(prm.Address) - if err := b.createDir(dir); err != nil { + if err := b.createDir(dir, false); err != nil { return common.PutRes{}, err } @@ -47,7 +47,7 @@ func (b *BlobTree) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, e prm.RawData = b.compressor.Compress(prm.RawData) } - idx, err := b.saveToFile(prm, dir) + path, err := b.saveToLocalDir(prm, dir) if err != nil { return common.PutRes{}, err } @@ -55,10 +55,10 @@ func (b *BlobTree) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, e success = true size = len(prm.RawData) - return common.PutRes{StorageID: idxToStorageID(idx)}, nil + return common.PutRes{StorageID: pathToStorageID(path)}, nil } -func (b *BlobTree) saveToFile(prm common.PutPrm, dir string) (uint64, error) { +func (b *BlobTree) saveToLocalDir(prm common.PutPrm, dir string) (string, error) { returnIdx := true idx := b.dispatcher.GetIdxForWrite(dir) path := b.getFilePath(dir, idx) @@ -74,7 +74,7 @@ func (b *BlobTree) saveToFile(prm common.PutPrm, dir string) (uint64, error) { currentContent, err := b.readFileContent(path) if err != nil { - return 0, err + return "", err } var newRecord objectData newRecord.Address = prm.Address @@ -82,24 +82,24 @@ func (b *BlobTree) saveToFile(prm common.PutPrm, dir string) (uint64, error) { size, err := b.writeToTmpAndRename(append(currentContent, newRecord), path) if err != nil { - return 0, err + return "", err } returnIdx = size < b.cfg.targetFileSizeBytes - return idx, nil + return path, nil } func (b *BlobTree) writeToTmpAndRename(records []objectData, path string) (uint64, error) { - tmpFile := path + tempFileSymbols + strconv.FormatUint(b.suffix.Add(1), 16) + tmpPath := path + tempFileSymbols + strconv.FormatUint(b.suffix.Add(1), 16) - size, err := b.saveContentToFile(records, tmpFile) + size, err := b.saveContentToFile(records, tmpPath) if err != nil { - _ = os.Remove(tmpFile) + _ = os.Remove(b.getSystemPath(tmpPath)) 
return 0, err } newFile := false - _, err = os.Stat(path) + _, err = os.Stat(b.getSystemPath(path)) if err != nil { if os.IsNotExist(err) { newFile = true @@ -108,8 +108,8 @@ func (b *BlobTree) writeToTmpAndRename(records []objectData, path string) (uint6 } } - if err := os.Rename(tmpFile, path); err != nil { - _ = os.Remove(tmpFile) + if err := os.Rename(b.getSystemPath(tmpPath), b.getSystemPath(path)); err != nil { + _ = os.Remove(b.getSystemPath(tmpPath)) return 0, err } diff --git a/pkg/local_object_storage/blobstor/blobtree/storage_id.go b/pkg/local_object_storage/blobstor/blobtree/storage_id.go index cd176980b..64a268eb2 100644 --- a/pkg/local_object_storage/blobstor/blobtree/storage_id.go +++ b/pkg/local_object_storage/blobstor/blobtree/storage_id.go @@ -1,28 +1,15 @@ package blobtree import ( - "encoding/binary" - "strconv" + "strings" ) -const storageIDLength = 8 +const storageIDPrefix = "blobtree:" -func tryParseIdxFromStorageID(storageID []byte) (uint64, bool) { - if len(storageID) == storageIDLength { - return binary.LittleEndian.Uint64(storageID), true - } - return 0, false +func getPathFromStorageID(storageID []byte) (string, bool) { + return strings.CutPrefix(string(storageID), storageIDPrefix) } -func storageIDToIdxStringSafe(storageID []byte) string { - if len(storageID) == storageIDLength { - return strconv.FormatUint(binary.LittleEndian.Uint64(storageID), 10) - } - return "" -} - -func idxToStorageID(idx uint64) []byte { - storageID := make([]byte, storageIDLength) - binary.LittleEndian.PutUint64(storageID, idx) - return storageID +func pathToStorageID(path string) []byte { + return []byte(storageIDPrefix + path) } -- 2.45.2 From 6cbea91c06801bcfd1c6e521f868c96e56ad7d42 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 25 Oct 2023 18:50:50 +0300 Subject: [PATCH 08/11] [#645] blobstor: Add Badger store Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/config.go | 35 ++++++ .../shard/blobstor/badgerstore/config.go | 85 +++++++++++++ cmd/frostfs-node/validate.go | 3 +- .../blobstor/badgerstore/config.go | 116 ++++++++++++++++++ .../blobstor/badgerstore/control.go | 91 ++++++++++++++ .../blobstor/badgerstore/delete.go | 44 +++++++ .../blobstor/badgerstore/exists.go | 36 ++++++ .../blobstor/badgerstore/generic_test.go | 39 ++++++ .../blobstor/badgerstore/get.go | 107 ++++++++++++++++ .../blobstor/badgerstore/iterate.go | 112 +++++++++++++++++ .../blobstor/badgerstore/keys.go | 28 +++++ .../blobstor/badgerstore/put.go | 38 ++++++ .../blobstor/badgerstore/store.go | 66 ++++++++++ .../blobstor/blobtree/iterate.go | 12 -- .../blobstor/perf_test.go | 9 ++ 15 files changed, 808 insertions(+), 13 deletions(-) create mode 100644 cmd/frostfs-node/config/engine/shard/blobstor/badgerstore/config.go create mode 100644 pkg/local_object_storage/blobstor/badgerstore/config.go create mode 100644 pkg/local_object_storage/blobstor/badgerstore/control.go create mode 100644 pkg/local_object_storage/blobstor/badgerstore/delete.go create mode 100644 pkg/local_object_storage/blobstor/badgerstore/exists.go create mode 100644 pkg/local_object_storage/blobstor/badgerstore/generic_test.go create mode 100644 pkg/local_object_storage/blobstor/badgerstore/get.go create mode 100644 pkg/local_object_storage/blobstor/badgerstore/iterate.go create mode 100644 pkg/local_object_storage/blobstor/badgerstore/keys.go create mode 100644 pkg/local_object_storage/blobstor/badgerstore/put.go create mode 100644 pkg/local_object_storage/blobstor/badgerstore/store.go diff --git a/cmd/frostfs-node/config.go 
b/cmd/frostfs-node/config.go index ef626350b..4916d7274 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -21,6 +21,7 @@ import ( contractsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/contracts" engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine" shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard" + badgerstoreconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/badgerstore" blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza" blobtreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobtree" fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree" @@ -33,6 +34,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/badgerstore" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobtree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" @@ -183,6 +185,14 @@ type subStorageCfg struct { width uint64 leafWidth uint64 openedCacheSize int + + //badgerstore-specific + indexCacheSize int64 + memTablesCount int + compactorsCount int + gcInterval time.Duration + gcDiscardRatio float64 + valueLogFileSize int64 } // readConfig fills applicationConfiguration with raw configuration values @@ -308,6 +318,14 @@ func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, ol sub := blobtreeconfig.From((*config.Config)(storagesCfg[i])) sCfg.depth = sub.Depth() sCfg.size = sub.Size() + case badgerstore.Type: + sub := badgerstoreconfig.From((*config.Config)(storagesCfg[i])) + sCfg.indexCacheSize = sub.IndexCacheSize() + sCfg.memTablesCount = sub.MemTablesCount() + sCfg.compactorsCount = sub.CompactorsCount() + sCfg.gcInterval = sub.GCInterval() + sCfg.gcDiscardRatio = sub.GCDiscardRatio() + sCfg.valueLogFileSize = sub.ValueLogFileSize() default: return fmt.Errorf("invalid storage type: %s", storagesCfg[i].Type()) } @@ -860,6 +878,23 @@ func (c *cfg) getSubstorageOpts(shCfg shardCfg) []blobstor.SubStorage { return uint64(len(data)) < shCfg.smallSizeObjectLimit }, }) + case badgerstore.Type: + badgerStoreOpts := []badgerstore.Option{ + badgerstore.WithPath(sRead.path), + badgerstore.WithPermissions(sRead.perm), + badgerstore.WithCompactorsCount(sRead.compactorsCount), + badgerstore.WithGCDiscardRatio(sRead.gcDiscardRatio), + badgerstore.WithGCInterval(sRead.gcInterval), + badgerstore.WithIndexCacheSize(sRead.indexCacheSize), + badgerstore.WithMemTablesCount(sRead.memTablesCount), + badgerstore.WithValueLogSize(sRead.valueLogFileSize), + } + ss = append(ss, blobstor.SubStorage{ + Storage: badgerstore.New(badgerStoreOpts...), + Policy: func(_ *objectSDK.Object, data []byte) bool { + return uint64(len(data)) < shCfg.smallSizeObjectLimit + }, + }) default: // should never happen, that has already // been handled: when the config was read diff --git a/cmd/frostfs-node/config/engine/shard/blobstor/badgerstore/config.go 
b/cmd/frostfs-node/config/engine/shard/blobstor/badgerstore/config.go new file mode 100644 index 000000000..fe8f4f629 --- /dev/null +++ b/cmd/frostfs-node/config/engine/shard/blobstor/badgerstore/config.go @@ -0,0 +1,85 @@ +package badgerstore + +import ( + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/badgerstore" +) + +type Config config.Config + +const ( + IndexCacheSizeDefault = 256 << 20 //256MB + MemTablesCountDefault = 32 + CompactorsCountDefault = 64 + GCIntervalDefault = 10 * time.Minute + GCDiscardRatioDefault = 0.2 + ValueLogSizeDefault = 1 << 30 //1GB +) + +// From wraps config section into Config. +func From(c *config.Config) *Config { + return (*Config)(c) +} + +// Type returns the storage type. +func (x *Config) Type() string { + return badgerstore.Type +} + +// IndexCacheSize returns `index_cache_size` value or IndexCacheSizeDefault. +func (x *Config) IndexCacheSize() int64 { + s := config.SizeInBytesSafe((*config.Config)(x), "index_cache_size") + if s > 0 { + return int64(s) + } + + return IndexCacheSizeDefault +} + +// MemTablesCount returns `mem_tables_count` value or MemTablesCountDefault. +func (x *Config) MemTablesCount() int { + v := config.IntSafe((*config.Config)(x), "mem_tables_count") + if v > 0 { + return int(v) + } + return MemTablesCountDefault +} + +// CompactorsCount returns `mem_tables_count` value or CompactorsCountDefault. +func (x *Config) CompactorsCount() int { + v := config.IntSafe((*config.Config)(x), "compactors_count") + if v > 0 { + return int(v) + } + return CompactorsCountDefault +} + +// GCInterval returns `gc_interval` value or GCIntervalDefault. +func (x *Config) GCInterval() time.Duration { + v := config.DurationSafe((*config.Config)(x), "gc_interval") + if v > 0 { + return v + } + return GCIntervalDefault +} + +// GCDiscardRatio returns `gc_discard_percent` value as ratio value (in range (0.0; 1.0)) or GCDiscardRatioDefault. +func (x *Config) GCDiscardRatio() float64 { + v := config.Uint32Safe((*config.Config)(x), "gc_discard_percent") + if v > 0 && v < 100 { + return float64(v) / (float64(100)) + } + return GCDiscardRatioDefault +} + +// ValueLogFileSize returns `value_log_file_size` value or ValueLogSizeDefault. 
+func (x *Config) ValueLogFileSize() int64 { + s := config.SizeInBytesSafe((*config.Config)(x), "value_log_file_size") + if s > 0 { + return int64(s) + } + + return ValueLogSizeDefault +} diff --git a/cmd/frostfs-node/validate.go b/cmd/frostfs-node/validate.go index b3472a074..60690d2a6 100644 --- a/cmd/frostfs-node/validate.go +++ b/cmd/frostfs-node/validate.go @@ -9,6 +9,7 @@ import ( shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard" loggerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/logger" treeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tree" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/badgerstore" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobtree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" @@ -56,7 +57,7 @@ func validateConfig(c *config.Config) error { } for i := range blobstor { switch blobstor[i].Type() { - case fstree.Type, blobovniczatree.Type, blobtree.Type: + case fstree.Type, blobovniczatree.Type, blobtree.Type, badgerstore.Type: default: return fmt.Errorf("unexpected storage type: %s (shard %d)", blobstor[i].Type(), shardNum) } diff --git a/pkg/local_object_storage/blobstor/badgerstore/config.go b/pkg/local_object_storage/blobstor/badgerstore/config.go new file mode 100644 index 000000000..53306f995 --- /dev/null +++ b/pkg/local_object_storage/blobstor/badgerstore/config.go @@ -0,0 +1,116 @@ +package badgerstore + +import ( + "io/fs" + "math" + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" + "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v4/options" +) + +type cfg struct { + permissions fs.FileMode + compression *compression.Config + db badger.Options + gcTimeout time.Duration + gcDiscardRatio float64 +} + +type Option func(*cfg) + +// defaultCfg creates default options to create Store. +// Default Badger options: +// BaseTableSize: 2MB +// BaseLevelSize: 10MB +// TableSizeMultiplier: 2 +// LevelSizeMultiplier: 10 +// MaxLevels: 7 +// NumLevelZeroTables: 5 +// ValueLogFileSize: 1GB +// +// Badger flushes MemTable directly to Level0. +// So for Level0 MemTableSize is used as TableSize https://github.com/dgraph-io/badger/blob/v4.1.0/levels.go#L403. +// There is no total size limit for Level0, only NumLevelZeroTables +// +// Badger uses Dynamic Level Sizes like RocksDB. +// See https://github.com/facebook/rocksdb/blob/v3.11/include/rocksdb/options.h#L366 for explanation. 
+func defaultCfg() *cfg { + opts := badger.DefaultOptions("/") + opts.BlockCacheSize = 0 // compression and encryption are disabled, so block cache should be disabled + opts.IndexCacheSize = 256 << 20 // 256MB, to not to keep all indicies in memory + opts.Compression = options.None // performed by cfg.compressor + opts.Logger = nil + opts.MetricsEnabled = false + opts.NumLevelZeroTablesStall = math.MaxInt // to not to stall because of Level0 slow compaction + opts.NumMemtables = 32 // default memtable size is 64MB, so max memory consumption will be 2GB before stall + opts.NumCompactors = 64 + opts.SyncWrites = true + opts.ValueLogMaxEntries = math.MaxUint32 // default vLog file size is 1GB, so size is more clear than entries count + opts.ValueThreshold = 0 // to store all values in vLog + + return &cfg{ + permissions: 0700, + db: opts, + gcTimeout: 10 * time.Minute, + gcDiscardRatio: 0.2, // for 1GB vLog file GC will perform only if around 200MB could be free + } +} + +// WithPath sets BadgerStore directory. +func WithPath(dir string) Option { + return func(c *cfg) { + c.db.Dir = dir + c.db.ValueDir = dir + } +} + +// WithPermissions sets persmission flags. +func WithPermissions(p fs.FileMode) Option { + return func(c *cfg) { + c.permissions = p + } +} + +// WithIndexCacheSize sets BadgerStore index cache size. +func WithIndexCacheSize(sz int64) Option { + return func(c *cfg) { + c.db.IndexCacheSize = sz + } +} + +// WithMemTablesCount sets maximum count of memtables. +func WithMemTablesCount(count int) Option { + return func(c *cfg) { + c.db.NumMemtables = count + } +} + +// WithCompactorsCount sets count of concurrent compactors. +func WithCompactorsCount(count int) Option { + return func(c *cfg) { + c.db.NumCompactors = count + } +} + +// WithGCInterval sets GC interval value. +func WithGCInterval(d time.Duration) Option { + return func(c *cfg) { + c.gcTimeout = d + } +} + +// WithGCDiscardRatio sets GC discard ratio. +func WithGCDiscardRatio(r float64) Option { + return func(c *cfg) { + c.gcDiscardRatio = r + } +} + +// WithValueLogSize sets max value log size. +func WithValueLogSize(sz int64) Option { + return func(c *cfg) { + c.db.ValueLogFileSize = sz + } +} diff --git a/pkg/local_object_storage/blobstor/badgerstore/control.go b/pkg/local_object_storage/blobstor/badgerstore/control.go new file mode 100644 index 000000000..a7933225a --- /dev/null +++ b/pkg/local_object_storage/blobstor/badgerstore/control.go @@ -0,0 +1,91 @@ +package badgerstore + +import ( + "context" + "fmt" + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" + "github.com/dgraph-io/badger/v4" +) + +// Close implements common.Storage. +func (s *Store) Close() error { + s.modeMtx.Lock() + defer s.modeMtx.Unlock() + + if !s.opened { + return nil + } + + if s.gcCancel != nil { + s.gcCancel() + } + s.wg.Wait() + + if err := s.db.Close(); err != nil { + return err + } + s.opened = false + return nil +} + +// Init implements common.Storage. 
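+//
+// Init must be called on an opened store; it only starts the background
+// value log GC routine. A typical lifecycle, as a rough sketch:
+//
+//	s := New(WithPath(dir))
+//	if err := s.Open(false); err != nil {
+//		return err
+//	}
+//	if err := s.Init(); err != nil {
+//		return err
+//	}
+//	defer func() { _ = s.Close() }()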
+func (s *Store) Init() error { + s.modeMtx.Lock() + defer s.modeMtx.Unlock() + + if !s.opened { + return fmt.Errorf("store must be opened before initialization") + } + + s.startGC() + + return nil +} + +func (s *Store) startGC() { + ctx, cancel := context.WithCancel(context.Background()) + s.gcCancel = cancel + + t := time.NewTicker(s.cfg.gcTimeout) + s.wg.Add(1) + + go func() { + defer s.wg.Done() + + select { + case <-ctx.Done(): + return + case <-t.C: + if err := s.db.RunValueLogGC(s.cfg.gcDiscardRatio); err == nil { + _ = s.db.RunValueLogGC(s.cfg.gcDiscardRatio) // see https://dgraph.io/docs/badger/get-started/#garbage-collection + } + } + }() +} + +// Open implements common.Storage. +func (s *Store) Open(readOnly bool) error { + s.modeMtx.Lock() + defer s.modeMtx.Unlock() + + if s.opened { + return nil + } + + err := util.MkdirAllX(s.cfg.db.Dir, s.cfg.permissions) + if err != nil { + return err + } + s.cfg.db.ReadOnly = readOnly + if s.db, err = badger.Open(s.cfg.db); err != nil { + return err + } + s.opened = true + return nil +} + +func (s *Store) readOnly() bool { + return s.cfg.db.ReadOnly +} diff --git a/pkg/local_object_storage/blobstor/badgerstore/delete.go b/pkg/local_object_storage/blobstor/badgerstore/delete.go new file mode 100644 index 000000000..0bad88117 --- /dev/null +++ b/pkg/local_object_storage/blobstor/badgerstore/delete.go @@ -0,0 +1,44 @@ +package badgerstore + +import ( + "context" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" + "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + "github.com/dgraph-io/badger/v4" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// Delete implements common.Storage. +func (s *Store) Delete(ctx context.Context, prm common.DeletePrm) (common.DeleteRes, error) { + _, span := tracing.StartSpanFromContext(ctx, "BadgerStore.Delete", + trace.WithAttributes( + attribute.String("path", s.cfg.db.Dir), + attribute.String("address", prm.Address.EncodeToString()), + )) + defer span.End() + + if s.readOnly() { + return common.DeleteRes{}, common.ErrReadOnly + } + + tx := s.db.NewTransaction(true) + defer tx.Discard() + + _, err := tx.Get(key(prm.Address)) + if err != nil { + if err == badger.ErrKeyNotFound { + return common.DeleteRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound)) + } + return common.DeleteRes{}, err + } + + err = tx.Delete(key(prm.Address)) + if err != nil { + return common.DeleteRes{}, err + } + return common.DeleteRes{}, tx.Commit() +} diff --git a/pkg/local_object_storage/blobstor/badgerstore/exists.go b/pkg/local_object_storage/blobstor/badgerstore/exists.go new file mode 100644 index 000000000..5bf879f82 --- /dev/null +++ b/pkg/local_object_storage/blobstor/badgerstore/exists.go @@ -0,0 +1,36 @@ +package badgerstore + +import ( + "context" + "encoding/hex" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + "github.com/dgraph-io/badger/v4" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// Exists implements common.Storage. 
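+//
+// Exists performs a read-only lookup of the address key; a missing key is
+// reported as Exists: false rather than as an error.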
+func (s *Store) Exists(ctx context.Context, prm common.ExistsPrm) (common.ExistsRes, error) { + _, span := tracing.StartSpanFromContext(ctx, "BadgerStore.Exists", + trace.WithAttributes( + attribute.String("path", s.cfg.db.Dir), + attribute.String("address", prm.Address.EncodeToString()), + attribute.String("storage_id", hex.EncodeToString(prm.StorageID)), + )) + defer span.End() + + tx := s.db.NewTransaction(false) + defer tx.Discard() + + _, err := tx.Get(key(prm.Address)) + if err != nil { + if err == badger.ErrKeyNotFound { + return common.ExistsRes{Exists: false}, nil + } + return common.ExistsRes{}, err + } + + return common.ExistsRes{Exists: true}, nil +} diff --git a/pkg/local_object_storage/blobstor/badgerstore/generic_test.go b/pkg/local_object_storage/blobstor/badgerstore/generic_test.go new file mode 100644 index 000000000..d34981471 --- /dev/null +++ b/pkg/local_object_storage/blobstor/badgerstore/generic_test.go @@ -0,0 +1,39 @@ +package badgerstore + +import ( + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest" +) + +func TestGeneric(t *testing.T) { + const maxObjectSize = 1 << 16 + + helper := func(t *testing.T, dir string) common.Storage { + return New(WithPath(dir)) + } + + newStore := func(t *testing.T) common.Storage { + return helper(t, t.TempDir()) + } + + blobstortest.TestAll(t, newStore, 1024, maxObjectSize) + + t.Run("info", func(t *testing.T) { + dir := t.TempDir() + blobstortest.TestInfo(t, func(t *testing.T) common.Storage { + return helper(t, dir) + }, Type, dir) + }) +} + +func TestControl(t *testing.T) { + const maxObjectSize = 2048 + + newStore := func(t *testing.T) common.Storage { + return New(WithPath(t.TempDir())) + } + + blobstortest.TestControl(t, newStore, 1024, maxObjectSize) +} diff --git a/pkg/local_object_storage/blobstor/badgerstore/get.go b/pkg/local_object_storage/blobstor/badgerstore/get.go new file mode 100644 index 000000000..ba028e3c2 --- /dev/null +++ b/pkg/local_object_storage/blobstor/badgerstore/get.go @@ -0,0 +1,107 @@ +package badgerstore + +import ( + "context" + "encoding/hex" + "fmt" + "strconv" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" + "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/dgraph-io/badger/v4" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// Get implements common.Storage. 
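+//
+// Get loads the value stored under the address key, decompresses it with the
+// configured compressor and unmarshals it into an object; a missing key is
+// reported as an ObjectNotFound logic error.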
+func (s *Store) Get(ctx context.Context, prm common.GetPrm) (common.GetRes, error) { + _, span := tracing.StartSpanFromContext(ctx, "BadgerStore.Get", + trace.WithAttributes( + attribute.String("path", s.cfg.db.Dir), + attribute.String("address", prm.Address.EncodeToString()), + attribute.String("storage_id", hex.EncodeToString(prm.StorageID)), + attribute.Bool("raw", prm.Raw), + )) + defer span.End() + + data, err := s.getObjectData(prm.Address) + if err != nil { + return common.GetRes{}, err + } + + data, err = s.cfg.compression.Decompress(data) + if err != nil { + return common.GetRes{}, fmt.Errorf("could not decompress object data: %w", err) + } + + obj := objectSDK.New() + if err := obj.Unmarshal(data); err != nil { + return common.GetRes{}, fmt.Errorf("could not unmarshal the object: %w", err) + } + + return common.GetRes{Object: obj, RawData: data}, nil +} + +// GetRange implements common.Storage. +func (s *Store) GetRange(ctx context.Context, prm common.GetRangePrm) (common.GetRangeRes, error) { + _, span := tracing.StartSpanFromContext(ctx, "BadgerStore.GetRange", + trace.WithAttributes( + attribute.String("path", s.cfg.db.Dir), + attribute.String("address", prm.Address.EncodeToString()), + attribute.String("storage_id", hex.EncodeToString(prm.StorageID)), + attribute.String("offset", strconv.FormatUint(prm.Range.GetOffset(), 10)), + attribute.String("length", strconv.FormatUint(prm.Range.GetLength(), 10)), + )) + defer span.End() + + data, err := s.getObjectData(prm.Address) + if err != nil { + return common.GetRangeRes{}, err + } + + data, err = s.cfg.compression.Decompress(data) + if err != nil { + return common.GetRangeRes{}, fmt.Errorf("could not decompress object data: %w", err) + } + + obj := objectSDK.New() + if err := obj.Unmarshal(data); err != nil { + return common.GetRangeRes{}, fmt.Errorf("could not unmarshal the object: %w", err) + } + + from := prm.Range.GetOffset() + to := from + prm.Range.GetLength() + payload := obj.Payload() + + if pLen := uint64(len(payload)); to < from || pLen < from || pLen < to { + return common.GetRangeRes{}, logicerr.Wrap(new(apistatus.ObjectOutOfRange)) + } + + return common.GetRangeRes{ + Data: payload[from:to], + }, nil +} + +func (s *Store) getObjectData(addr oid.Address) ([]byte, error) { + var data []byte + tx := s.db.NewTransaction(false) + defer tx.Discard() + + item, err := tx.Get(key(addr)) + if err != nil { + if err == badger.ErrKeyNotFound { + return nil, logicerr.Wrap(new(apistatus.ObjectNotFound)) + } + return nil, err + } + + data, err = item.ValueCopy(nil) + if err != nil { + return nil, err + } + return data, nil +} diff --git a/pkg/local_object_storage/blobstor/badgerstore/iterate.go b/pkg/local_object_storage/blobstor/badgerstore/iterate.go new file mode 100644 index 000000000..77f55afb3 --- /dev/null +++ b/pkg/local_object_storage/blobstor/badgerstore/iterate.go @@ -0,0 +1,112 @@ +package badgerstore + +import ( + "bytes" + "context" + "fmt" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + "github.com/dgraph-io/badger/v4" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// Iterate implements common.Storage. 
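+//
+// Iterate walks over all stored objects in batches: each read-only View
+// transaction resumes from the last processed key, copies up to a
+// prefetch-sized batch of key/value pairs, and the batch is then decompressed
+// and passed to prm.Handler outside of the transaction.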
+func (s *Store) Iterate(ctx context.Context, prm common.IteratePrm) (common.IterateRes, error) { + _, span := tracing.StartSpanFromContext(ctx, "BadgerStore.Iterate", + trace.WithAttributes( + attribute.String("path", s.cfg.db.Dir), + attribute.Bool("ignore_errors", prm.IgnoreErrors), + )) + defer span.End() + + var last []byte + opts := badger.DefaultIteratorOptions + batch := make([]keyValue, 0, opts.PrefetchSize) + opts.PrefetchSize++ // to skip last + for { + select { + case <-ctx.Done(): + return common.IterateRes{}, ctx.Err() + default: + } + + batch = batch[:0] + err := s.db.View(func(tx *badger.Txn) error { + it := tx.NewIterator(opts) + defer it.Close() + + for it.Seek(last); it.Valid(); it.Next() { + if bytes.Equal(last, it.Item().Key()) { + continue + } + + var kv keyValue + var err error + kv.key = it.Item().KeyCopy(nil) + kv.value, err = it.Item().ValueCopy(nil) + if err != nil { + if prm.IgnoreErrors { + continue + } + return err + } + batch = append(batch, kv) + last = kv.key + if len(batch) == opts.PrefetchSize-1 { + break + } + } + return nil + }) + if err != nil { + return common.IterateRes{}, err + } + + select { + case <-ctx.Done(): + return common.IterateRes{}, ctx.Err() + default: + } + + if len(batch) == 0 { + break + } + if err := s.iterateBatch(batch, prm); err != nil { + return common.IterateRes{}, err + } + } + + return common.IterateRes{}, nil +} + +func (s *Store) iterateBatch(batch []keyValue, prm common.IteratePrm) error { + for _, kv := range batch { + addr, err := address(kv.key) + if err != nil { + if prm.IgnoreErrors { + continue + } + } + data, err := s.cfg.compression.Decompress(kv.value) + if err != nil { + if prm.IgnoreErrors { + continue + } + return fmt.Errorf("could not decompress object data: %w", err) + } + + if err := prm.Handler(common.IterationElement{ + Address: addr, + ObjectData: data, + }); err != nil { + return err + } + } + return nil +} + +type keyValue struct { + key, value []byte +} diff --git a/pkg/local_object_storage/blobstor/badgerstore/keys.go b/pkg/local_object_storage/blobstor/badgerstore/keys.go new file mode 100644 index 000000000..0cbd4acb4 --- /dev/null +++ b/pkg/local_object_storage/blobstor/badgerstore/keys.go @@ -0,0 +1,28 @@ +package badgerstore + +import ( + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" +) + +func key(add oid.Address) []byte { + res := make([]byte, 64) + add.Container().Encode(res) + add.Object().Encode(res[32:]) + return res +} + +func address(k []byte) (oid.Address, error) { + var res oid.Address + var containerID cid.ID + var objectID oid.ID + if err := containerID.Decode(k[:32]); err != nil { + return res, err + } + if err := objectID.Decode(k[32:]); err != nil { + return res, err + } + res.SetContainer(containerID) + res.SetObject(objectID) + return res, nil +} diff --git a/pkg/local_object_storage/blobstor/badgerstore/put.go b/pkg/local_object_storage/blobstor/badgerstore/put.go new file mode 100644 index 000000000..0daa39533 --- /dev/null +++ b/pkg/local_object_storage/blobstor/badgerstore/put.go @@ -0,0 +1,38 @@ +package badgerstore + +import ( + "context" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// Put implements common.Storage. 
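+//
+// Put compresses the raw object data (unless prm.DontCompress is set) and
+// writes it under a 64-byte key composed of the container ID followed by the
+// object ID (see keys.go), using a single-entry write batch.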
+func (s *Store) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, error) { + _, span := tracing.StartSpanFromContext(ctx, "BadgerStore.Put", + trace.WithAttributes( + attribute.String("path", s.cfg.db.Dir), + attribute.String("address", prm.Address.EncodeToString()), + attribute.Bool("dont_compress", prm.DontCompress), + )) + defer span.End() + + if s.readOnly() { + return common.PutRes{}, common.ErrReadOnly + } + + if !prm.DontCompress { + prm.RawData = s.cfg.compression.Compress(prm.RawData) + } + + b := s.db.NewWriteBatch() + defer b.Cancel() + + err := b.Set(key(prm.Address), prm.RawData) + if err != nil { + return common.PutRes{}, err + } + return common.PutRes{}, b.Flush() +} diff --git a/pkg/local_object_storage/blobstor/badgerstore/store.go b/pkg/local_object_storage/blobstor/badgerstore/store.go new file mode 100644 index 000000000..92a055212 --- /dev/null +++ b/pkg/local_object_storage/blobstor/badgerstore/store.go @@ -0,0 +1,66 @@ +package badgerstore + +import ( + "context" + "sync" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" + "github.com/dgraph-io/badger/v4" +) + +const ( + Type = "badgerstore" +) + +var _ common.Storage = (*Store)(nil) + +type Store struct { + cfg *cfg + db *badger.DB + + modeMtx *sync.Mutex // protects fields in group below + opened bool + gcCancel context.CancelFunc + + wg *sync.WaitGroup +} + +// New returns new Store instance with opts applied. +func New(opts ...Option) *Store { + s := &Store{ + cfg: defaultCfg(), + modeMtx: &sync.Mutex{}, + wg: &sync.WaitGroup{}, + } + for _, opt := range opts { + opt(s.cfg) + } + return s +} + +// Compressor implements common.Storage. +func (s *Store) Compressor() *compression.Config { + return s.cfg.compression +} + +// Path implements common.Storage. +func (s *Store) Path() string { + return s.cfg.db.Dir +} + +// SetCompressor implements common.Storage. +func (s *Store) SetCompressor(cc *compression.Config) { + s.cfg.compression = cc +} + +// SetParentID implements common.Storage. +func (*Store) SetParentID(parentID string) {} + +// SetReportErrorFunc implements common.Storage. +func (*Store) SetReportErrorFunc(func(string, error)) {} + +// Type implements common.Storage. 
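+//
+// Type reports the "badgerstore" storage type string, the same value that is
+// accepted for this substorage in the shard blobstor configuration.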
+func (*Store) Type() string { + return Type +} diff --git a/pkg/local_object_storage/blobstor/blobtree/iterate.go b/pkg/local_object_storage/blobstor/blobtree/iterate.go index 61a00f7d5..4ea9d7ab8 100644 --- a/pkg/local_object_storage/blobstor/blobtree/iterate.go +++ b/pkg/local_object_storage/blobstor/blobtree/iterate.go @@ -76,21 +76,9 @@ func (b *BlobTree) iterateRecords(path string, prm common.IteratePrm) error { } for _, record := range records { - if prm.LazyHandler != nil { - if err = prm.LazyHandler(record.Address, func() ([]byte, error) { - return record.Data, nil - }); err != nil { - return err - } - continue - } - record.Data, err = b.compressor.Decompress(record.Data) if err != nil { if prm.IgnoreErrors { - if prm.ErrorHandler != nil { - return prm.ErrorHandler(record.Address, err) - } continue } return err diff --git a/pkg/local_object_storage/blobstor/perf_test.go b/pkg/local_object_storage/blobstor/perf_test.go index 9b1c8a1ec..573b87cc2 100644 --- a/pkg/local_object_storage/blobstor/perf_test.go +++ b/pkg/local_object_storage/blobstor/perf_test.go @@ -5,6 +5,7 @@ import ( "fmt" "testing" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/badgerstore" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobtree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" @@ -91,6 +92,14 @@ var storages = []storage{ ) }, }, + { + desc: "badger", + create: func(dir string) common.Storage { + return badgerstore.New( + badgerstore.WithPath(dir), + ) + }, + }, } func BenchmarkSubstorageReadPerf(b *testing.B) { -- 2.45.2 From 588113b7d6f269ebf9cfe6507bd81bd073a9bd28 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Thu, 26 Oct 2023 14:31:15 +0300 Subject: [PATCH 09/11] [#645] metrics: Add badgerstore metrics Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/config.go | 5 + .../blobstor/badgerstore/config.go | 9 ++ .../blobstor/badgerstore/control.go | 2 + .../blobstor/badgerstore/delete.go | 17 +++- .../blobstor/badgerstore/exists.go | 10 ++ .../blobstor/badgerstore/get.go | 26 ++++- .../blobstor/badgerstore/iterate.go | 9 ++ .../blobstor/badgerstore/metrics.go | 31 ++++++ .../blobstor/badgerstore/put.go | 16 ++- .../blobstor/badgerstore/store.go | 4 +- .../metrics/badgerstore.go | 74 ++++++++++++++ pkg/metrics/badgerstore.go | 98 +++++++++++++++++++ pkg/metrics/consts.go | 1 + pkg/metrics/node.go | 6 ++ 14 files changed, 300 insertions(+), 8 deletions(-) create mode 100644 pkg/local_object_storage/blobstor/badgerstore/metrics.go create mode 100644 pkg/local_object_storage/metrics/badgerstore.go create mode 100644 pkg/metrics/badgerstore.go diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 4916d7274..e6b861f79 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -889,6 +889,11 @@ func (c *cfg) getSubstorageOpts(shCfg shardCfg) []blobstor.SubStorage { badgerstore.WithMemTablesCount(sRead.memTablesCount), badgerstore.WithValueLogSize(sRead.valueLogFileSize), } + if c.metricsCollector != nil { + badgerStoreOpts = append(badgerStoreOpts, + badgerstore.WithMetrics( + lsmetrics.NewBadgerStoreMetrics(sRead.path, c.metricsCollector.BadgerStoreMetrics()))) + } ss = append(ss, blobstor.SubStorage{ Storage: badgerstore.New(badgerStoreOpts...), Policy: func(_ *objectSDK.Object, data []byte) bool { diff --git a/pkg/local_object_storage/blobstor/badgerstore/config.go 
b/pkg/local_object_storage/blobstor/badgerstore/config.go index 53306f995..714a685ed 100644 --- a/pkg/local_object_storage/blobstor/badgerstore/config.go +++ b/pkg/local_object_storage/blobstor/badgerstore/config.go @@ -16,6 +16,7 @@ type cfg struct { db badger.Options gcTimeout time.Duration gcDiscardRatio float64 + metrics Metrics } type Option func(*cfg) @@ -55,6 +56,7 @@ func defaultCfg() *cfg { db: opts, gcTimeout: 10 * time.Minute, gcDiscardRatio: 0.2, // for 1GB vLog file GC will perform only if around 200MB could be free + metrics: &noopMetrics{}, } } @@ -114,3 +116,10 @@ func WithValueLogSize(sz int64) Option { c.db.ValueLogFileSize = sz } } + +// WithMetrics sets metrics. +func WithMetrics(m Metrics) Option { + return func(c *cfg) { + c.metrics = m + } +} diff --git a/pkg/local_object_storage/blobstor/badgerstore/control.go b/pkg/local_object_storage/blobstor/badgerstore/control.go index a7933225a..07ddf235b 100644 --- a/pkg/local_object_storage/blobstor/badgerstore/control.go +++ b/pkg/local_object_storage/blobstor/badgerstore/control.go @@ -27,6 +27,7 @@ func (s *Store) Close() error { return err } s.opened = false + s.cfg.metrics.Close() return nil } @@ -83,6 +84,7 @@ func (s *Store) Open(readOnly bool) error { return err } s.opened = true + s.cfg.metrics.SetMode(readOnly) return nil } diff --git a/pkg/local_object_storage/blobstor/badgerstore/delete.go b/pkg/local_object_storage/blobstor/badgerstore/delete.go index 0bad88117..34fcf986f 100644 --- a/pkg/local_object_storage/blobstor/badgerstore/delete.go +++ b/pkg/local_object_storage/blobstor/badgerstore/delete.go @@ -2,6 +2,7 @@ package badgerstore import ( "context" + "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" @@ -14,6 +15,13 @@ import ( // Delete implements common.Storage. func (s *Store) Delete(ctx context.Context, prm common.DeletePrm) (common.DeleteRes, error) { + success := false + startedAt := time.Now() + + defer func() { + s.cfg.metrics.Delete(time.Since(startedAt), success) + }() + _, span := tracing.StartSpanFromContext(ctx, "BadgerStore.Delete", trace.WithAttributes( attribute.String("path", s.cfg.db.Dir), @@ -36,9 +44,12 @@ func (s *Store) Delete(ctx context.Context, prm common.DeletePrm) (common.Delete return common.DeleteRes{}, err } - err = tx.Delete(key(prm.Address)) - if err != nil { + if err = tx.Delete(key(prm.Address)); err != nil { return common.DeleteRes{}, err } - return common.DeleteRes{}, tx.Commit() + if err = tx.Commit(); err != nil { + return common.DeleteRes{}, err + } + success = true + return common.DeleteRes{}, nil } diff --git a/pkg/local_object_storage/blobstor/badgerstore/exists.go b/pkg/local_object_storage/blobstor/badgerstore/exists.go index 5bf879f82..12f85801f 100644 --- a/pkg/local_object_storage/blobstor/badgerstore/exists.go +++ b/pkg/local_object_storage/blobstor/badgerstore/exists.go @@ -3,6 +3,7 @@ package badgerstore import ( "context" "encoding/hex" + "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" @@ -13,6 +14,13 @@ import ( // Exists implements common.Storage. 
func (s *Store) Exists(ctx context.Context, prm common.ExistsPrm) (common.ExistsRes, error) { + success := false + startedAt := time.Now() + + defer func() { + s.cfg.metrics.Exists(time.Since(startedAt), success) + }() + _, span := tracing.StartSpanFromContext(ctx, "BadgerStore.Exists", trace.WithAttributes( attribute.String("path", s.cfg.db.Dir), @@ -27,10 +35,12 @@ func (s *Store) Exists(ctx context.Context, prm common.ExistsPrm) (common.Exists _, err := tx.Get(key(prm.Address)) if err != nil { if err == badger.ErrKeyNotFound { + success = true return common.ExistsRes{Exists: false}, nil } return common.ExistsRes{}, err } + success = true return common.ExistsRes{Exists: true}, nil } diff --git a/pkg/local_object_storage/blobstor/badgerstore/get.go b/pkg/local_object_storage/blobstor/badgerstore/get.go index ba028e3c2..c56ec86c0 100644 --- a/pkg/local_object_storage/blobstor/badgerstore/get.go +++ b/pkg/local_object_storage/blobstor/badgerstore/get.go @@ -5,6 +5,7 @@ import ( "encoding/hex" "fmt" "strconv" + "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" @@ -19,6 +20,14 @@ import ( // Get implements common.Storage. func (s *Store) Get(ctx context.Context, prm common.GetPrm) (common.GetRes, error) { + success := false + size := 0 + startedAt := time.Now() + + defer func() { + s.cfg.metrics.Get(time.Since(startedAt), size, success) + }() + _, span := tracing.StartSpanFromContext(ctx, "BadgerStore.Get", trace.WithAttributes( attribute.String("path", s.cfg.db.Dir), @@ -43,11 +52,21 @@ func (s *Store) Get(ctx context.Context, prm common.GetPrm) (common.GetRes, erro return common.GetRes{}, fmt.Errorf("could not unmarshal the object: %w", err) } + success = true + size = len(data) return common.GetRes{Object: obj, RawData: data}, nil } // GetRange implements common.Storage. func (s *Store) GetRange(ctx context.Context, prm common.GetRangePrm) (common.GetRangeRes, error) { + success := false + size := 0 + startedAt := time.Now() + + defer func() { + s.cfg.metrics.GetRange(time.Since(startedAt), size, success) + }() + _, span := tracing.StartSpanFromContext(ctx, "BadgerStore.GetRange", trace.WithAttributes( attribute.String("path", s.cfg.db.Dir), @@ -81,9 +100,10 @@ func (s *Store) GetRange(ctx context.Context, prm common.GetRangePrm) (common.Ge return common.GetRangeRes{}, logicerr.Wrap(new(apistatus.ObjectOutOfRange)) } - return common.GetRangeRes{ - Data: payload[from:to], - }, nil + res := common.GetRangeRes{Data: payload[from:to]} + success = true + size = len(res.Data) + return res, nil } func (s *Store) getObjectData(addr oid.Address) ([]byte, error) { diff --git a/pkg/local_object_storage/blobstor/badgerstore/iterate.go b/pkg/local_object_storage/blobstor/badgerstore/iterate.go index 77f55afb3..be1e7f836 100644 --- a/pkg/local_object_storage/blobstor/badgerstore/iterate.go +++ b/pkg/local_object_storage/blobstor/badgerstore/iterate.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" @@ -14,6 +15,13 @@ import ( // Iterate implements common.Storage. 
func (s *Store) Iterate(ctx context.Context, prm common.IteratePrm) (common.IterateRes, error) { + success := false + startedAt := time.Now() + + defer func() { + s.cfg.metrics.Iterate(time.Since(startedAt), success) + }() + _, span := tracing.StartSpanFromContext(ctx, "BadgerStore.Iterate", trace.WithAttributes( attribute.String("path", s.cfg.db.Dir), @@ -78,6 +86,7 @@ func (s *Store) Iterate(ctx context.Context, prm common.IteratePrm) (common.Iter } } + success = true return common.IterateRes{}, nil } diff --git a/pkg/local_object_storage/blobstor/badgerstore/metrics.go b/pkg/local_object_storage/blobstor/badgerstore/metrics.go new file mode 100644 index 000000000..051b087f0 --- /dev/null +++ b/pkg/local_object_storage/blobstor/badgerstore/metrics.go @@ -0,0 +1,31 @@ +package badgerstore + +import "time" + +type Metrics interface { + SetParentID(parentID string) + + SetMode(readOnly bool) + Close() + + Delete(d time.Duration, success bool) + Exists(d time.Duration, success bool) + GetRange(d time.Duration, size int, success bool) + Get(d time.Duration, size int, success bool) + Iterate(d time.Duration, success bool) + Put(d time.Duration, size int, success bool) +} + +var _ Metrics = (*noopMetrics)(nil) + +type noopMetrics struct{} + +func (*noopMetrics) Close() {} +func (*noopMetrics) Delete(time.Duration, bool) {} +func (*noopMetrics) Exists(time.Duration, bool) {} +func (*noopMetrics) Get(time.Duration, int, bool) {} +func (*noopMetrics) GetRange(time.Duration, int, bool) {} +func (*noopMetrics) Iterate(time.Duration, bool) {} +func (*noopMetrics) Put(time.Duration, int, bool) {} +func (*noopMetrics) SetMode(bool) {} +func (*noopMetrics) SetParentID(string) {} diff --git a/pkg/local_object_storage/blobstor/badgerstore/put.go b/pkg/local_object_storage/blobstor/badgerstore/put.go index 0daa39533..c05445100 100644 --- a/pkg/local_object_storage/blobstor/badgerstore/put.go +++ b/pkg/local_object_storage/blobstor/badgerstore/put.go @@ -2,6 +2,7 @@ package badgerstore import ( "context" + "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" @@ -11,6 +12,14 @@ import ( // Put implements common.Storage. func (s *Store) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, error) { + success := false + size := 0 + startedAt := time.Now() + + defer func() { + s.cfg.metrics.Put(time.Since(startedAt), size, success) + }() + _, span := tracing.StartSpanFromContext(ctx, "BadgerStore.Put", trace.WithAttributes( attribute.String("path", s.cfg.db.Dir), @@ -34,5 +43,10 @@ func (s *Store) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, erro if err != nil { return common.PutRes{}, err } - return common.PutRes{}, b.Flush() + if err = b.Flush(); err != nil { + return common.PutRes{}, err + } + success = true + size = len(prm.RawData) + return common.PutRes{}, nil } diff --git a/pkg/local_object_storage/blobstor/badgerstore/store.go b/pkg/local_object_storage/blobstor/badgerstore/store.go index 92a055212..450ced888 100644 --- a/pkg/local_object_storage/blobstor/badgerstore/store.go +++ b/pkg/local_object_storage/blobstor/badgerstore/store.go @@ -55,7 +55,9 @@ func (s *Store) SetCompressor(cc *compression.Config) { } // SetParentID implements common.Storage. -func (*Store) SetParentID(parentID string) {} +func (s *Store) SetParentID(parentID string) { + s.cfg.metrics.SetParentID(parentID) +} // SetReportErrorFunc implements common.Storage. 
func (*Store) SetReportErrorFunc(func(string, error)) {} diff --git a/pkg/local_object_storage/metrics/badgerstore.go b/pkg/local_object_storage/metrics/badgerstore.go new file mode 100644 index 000000000..41a8111ac --- /dev/null +++ b/pkg/local_object_storage/metrics/badgerstore.go @@ -0,0 +1,74 @@ +package metrics + +import ( + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/badgerstore" + metrics_impl "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics" +) + +func NewBadgerStoreMetrics(path string, m metrics_impl.BadgerStoreMetrics) badgerstore.Metrics { + return &badgerStoreMetrics{ + path: path, + m: m, + } +} + +type badgerStoreMetrics struct { + path, shardID string + m metrics_impl.BadgerStoreMetrics +} + +// Close implements badgerstore.Metrics. +func (m *badgerStoreMetrics) Close() { + m.m.Close(m.shardID, m.path) +} + +// Delete implements badgerstore.Metrics. +func (m *badgerStoreMetrics) Delete(d time.Duration, success bool) { + m.m.MethodDuration(m.shardID, m.path, "Delete", d, success) +} + +// Exists implements badgerstore.Metrics. +func (m *badgerStoreMetrics) Exists(d time.Duration, success bool) { + m.m.MethodDuration(m.shardID, m.path, "Exists", d, success) +} + +// Get implements badgerstore.Metrics. +func (m *badgerStoreMetrics) Get(d time.Duration, size int, success bool) { + m.m.MethodDuration(m.shardID, m.path, "Get", d, success) + if success { + m.m.AddGet(m.shardID, m.path, size) + } +} + +// GetRange implements badgerstore.Metrics. +func (m *badgerStoreMetrics) GetRange(d time.Duration, size int, success bool) { + m.m.MethodDuration(m.shardID, m.path, "GetRange", d, success) + if success { + m.m.AddGet(m.shardID, m.path, size) + } +} + +// Iterate implements badgerstore.Metrics. +func (m *badgerStoreMetrics) Iterate(d time.Duration, success bool) { + m.m.MethodDuration(m.shardID, m.path, "Iterate", d, success) +} + +// Put implements badgerstore.Metrics. +func (m *badgerStoreMetrics) Put(d time.Duration, size int, success bool) { + m.m.MethodDuration(m.shardID, m.path, "Put", d, success) + if success { + m.m.AddPut(m.shardID, m.path, size) + } +} + +// SetMode implements badgerstore.Metrics. +func (m *badgerStoreMetrics) SetMode(readOnly bool) { + m.m.SetMode(m.shardID, m.path, readOnly) +} + +// SetParentID implements badgerstore.Metrics. 
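+//
+// SetParentID records the shard ID that subsequent metric updates are
+// labeled with.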
+func (m *badgerStoreMetrics) SetParentID(parentID string) { + m.shardID = parentID +} diff --git a/pkg/metrics/badgerstore.go b/pkg/metrics/badgerstore.go new file mode 100644 index 000000000..4f18f58e9 --- /dev/null +++ b/pkg/metrics/badgerstore.go @@ -0,0 +1,98 @@ +package metrics + +import ( + "strconv" + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" + "github.com/prometheus/client_golang/prometheus" +) + +type BadgerStoreMetrics interface { + SetMode(shardID, path string, readOnly bool) + Close(shardID, path string) + MethodDuration(shardID, path string, method string, d time.Duration, success bool) + AddPut(shardID, path string, size int) + AddGet(shardID, path string, size int) +} + +var _ BadgerStoreMetrics = (*badgerStoreMetrics)(nil) + +type badgerStoreMetrics struct { + mode *shardIDPathModeValue + reqDuration *prometheus.HistogramVec + put *prometheus.CounterVec + get *prometheus.CounterVec +} + +func newbadgerStoreMetrics() *badgerStoreMetrics { + return &badgerStoreMetrics{ + mode: newShardIDPathMode(badgerStoreSubSystem, "mode", "BadgerStore mode"), + reqDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: badgerStoreSubSystem, + Name: "request_duration_seconds", + Help: "Accumulated BadgerStore request process duration", + }, []string{shardIDLabel, pathLabel, successLabel, methodLabel}), + put: metrics.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: badgerStoreSubSystem, + Name: "put_bytes", + Help: "Accumulated payload size written to BadgerStore", + }, []string{shardIDLabel, pathLabel}), + get: metrics.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: badgerStoreSubSystem, + Name: "get_bytes", + Help: "Accumulated payload size read from BadgerStore", + }, []string{shardIDLabel, pathLabel}), + } +} + +// AddGet implements BadgerStoreMetrics. +func (b *badgerStoreMetrics) AddGet(shardID string, path string, size int) { + b.get.With(prometheus.Labels{ + shardIDLabel: shardID, + pathLabel: path, + }).Add(float64(size)) +} + +// AddPut implements BadgerStoreMetrics. +func (b *badgerStoreMetrics) AddPut(shardID string, path string, size int) { + b.put.With(prometheus.Labels{ + shardIDLabel: shardID, + pathLabel: path, + }).Add(float64(size)) +} + +// Close implements BadgerStoreMetrics. +func (b *badgerStoreMetrics) Close(shardID string, path string) { + b.mode.SetMode(shardID, path, closedMode) + b.reqDuration.DeletePartialMatch(prometheus.Labels{ + shardIDLabel: shardID, + pathLabel: path, + }) + b.get.DeletePartialMatch(prometheus.Labels{ + shardIDLabel: shardID, + pathLabel: path, + }) + b.put.DeletePartialMatch(prometheus.Labels{ + shardIDLabel: shardID, + pathLabel: path, + }) +} + +// MethodDuration implements BadgerStoreMetrics. +func (b *badgerStoreMetrics) MethodDuration(shardID string, path string, method string, d time.Duration, success bool) { + b.reqDuration.With(prometheus.Labels{ + shardIDLabel: shardID, + pathLabel: path, + successLabel: strconv.FormatBool(success), + methodLabel: method, + }).Observe(d.Seconds()) +} + +// SetMode implements BadgerStoreMetrics. 
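+//
+// SetMode sets the per-shard, per-path mode gauge according to the
+// read-only flag.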
+func (b *badgerStoreMetrics) SetMode(shardID string, path string, readOnly bool) { + b.mode.SetMode(shardID, path, modeFromBool(readOnly)) +} diff --git a/pkg/metrics/consts.go b/pkg/metrics/consts.go index 708ae45a9..9a9598e2d 100644 --- a/pkg/metrics/consts.go +++ b/pkg/metrics/consts.go @@ -22,6 +22,7 @@ const ( writeCacheSubsystem = "writecache" grpcServerSubsystem = "grpc_server" policerSubsystem = "policer" + badgerStoreSubSystem = "badgerstore" successLabel = "success" shardIDLabel = "shard_id" diff --git a/pkg/metrics/node.go b/pkg/metrics/node.go index 8e82207aa..90ab444d6 100644 --- a/pkg/metrics/node.go +++ b/pkg/metrics/node.go @@ -21,6 +21,7 @@ type NodeMetrics struct { pilorama *piloramaMetrics grpc *grpcServerMetrics blobTree *blobTreeMetrics + badgerStore *badgerStoreMetrics policer *policerMetrics morphClient *morphClientMetrics morphCache *morphCacheMetrics @@ -51,6 +52,7 @@ func NewNodeMetrics() *NodeMetrics { morphCache: newMorphCacheMetrics(namespace), log: logger.NewLogMetrics(namespace), blobTree: newBlobTreeMetrics(), + badgerStore: newbadgerStoreMetrics(), } } @@ -122,3 +124,7 @@ func (m *NodeMetrics) LogMetrics() logger.LogMetrics { func (m *NodeMetrics) BlobTreeMetrics() BlobTreeMetrics { return m.blobTree } + +func (m *NodeMetrics) BadgerStoreMetrics() BadgerStoreMetrics { + return m.badgerStore +} -- 2.45.2 From e7c379044f55e9077b25dc9496803c586dfb05ad Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Fri, 27 Oct 2023 17:52:22 +0300 Subject: [PATCH 10/11] [#645] config: Resolve funlen linter Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/config.go | 145 +++++++++++++++++++++---------------- 1 file changed, 82 insertions(+), 63 deletions(-) diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index e6b861f79..c52c44e15 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -186,7 +186,7 @@ type subStorageCfg struct { leafWidth uint64 openedCacheSize int - //badgerstore-specific + // badgerstore-specific indexCacheSize int64 memTablesCount int compactorsCount int @@ -812,46 +812,15 @@ func (c *cfg) getSubstorageOpts(shCfg shardCfg) []blobstor.SubStorage { for _, sRead := range shCfg.subStorages { switch sRead.typ { case blobovniczatree.Type: - blobTreeOpts := []blobovniczatree.Option{ - blobovniczatree.WithRootPath(sRead.path), - blobovniczatree.WithPermissions(sRead.perm), - blobovniczatree.WithBlobovniczaSize(sRead.size), - blobovniczatree.WithBlobovniczaShallowDepth(sRead.depth), - blobovniczatree.WithBlobovniczaShallowWidth(sRead.width), - blobovniczatree.WithBlobovniczaLeafWidth(sRead.leafWidth), - blobovniczatree.WithOpenedCacheSize(sRead.openedCacheSize), - blobovniczatree.WithLogger(c.log), - } - - if c.metricsCollector != nil { - blobTreeOpts = append(blobTreeOpts, - blobovniczatree.WithMetrics( - lsmetrics.NewBlobovniczaTreeMetrics(sRead.path, c.metricsCollector.BlobovniczaTreeMetrics()), - ), - ) - } + blobovniczaTreeOpts := c.getBlobovniczaTreeOpts(sRead) ss = append(ss, blobstor.SubStorage{ - Storage: blobovniczatree.NewBlobovniczaTree(blobTreeOpts...), + Storage: blobovniczatree.NewBlobovniczaTree(blobovniczaTreeOpts...), Policy: func(_ *objectSDK.Object, data []byte) bool { return uint64(len(data)) < shCfg.smallSizeObjectLimit }, }) case fstree.Type: - fstreeOpts := []fstree.Option{ - fstree.WithPath(sRead.path), - fstree.WithPerm(sRead.perm), - fstree.WithDepth(sRead.depth), - fstree.WithNoSync(sRead.noSync), - fstree.WithLogger(c.log), - } - if c.metricsCollector != nil { - fstreeOpts = 
append(fstreeOpts, - fstree.WithMetrics( - lsmetrics.NewFSTreeMetricsWithoutShardID(sRead.path, c.metricsCollector.FSTree()), - ), - ) - } - + fstreeOpts := c.getFSTreeOpts(sRead) ss = append(ss, blobstor.SubStorage{ Storage: fstree.New(fstreeOpts...), Policy: func(_ *objectSDK.Object, data []byte) bool { @@ -859,19 +828,7 @@ func (c *cfg) getSubstorageOpts(shCfg shardCfg) []blobstor.SubStorage { }, }) case blobtree.Type: - blobTreeOpts := []blobtree.Option{ - blobtree.WithPath(sRead.path), - blobtree.WithPerm(sRead.perm), - blobtree.WithDepth(sRead.depth), - blobtree.WithTargetSize(sRead.size), - } - if c.metricsCollector != nil { - blobTreeOpts = append(blobTreeOpts, - blobtree.WithMetrics( - lsmetrics.NewBlobTreeMetrics(sRead.path, c.metricsCollector.BlobTreeMetrics()), - ), - ) - } + blobTreeOpts := c.getBlobTreeOpts(sRead) ss = append(ss, blobstor.SubStorage{ Storage: blobtree.New(blobTreeOpts...), Policy: func(_ *objectSDK.Object, data []byte) bool { @@ -879,21 +836,7 @@ func (c *cfg) getSubstorageOpts(shCfg shardCfg) []blobstor.SubStorage { }, }) case badgerstore.Type: - badgerStoreOpts := []badgerstore.Option{ - badgerstore.WithPath(sRead.path), - badgerstore.WithPermissions(sRead.perm), - badgerstore.WithCompactorsCount(sRead.compactorsCount), - badgerstore.WithGCDiscardRatio(sRead.gcDiscardRatio), - badgerstore.WithGCInterval(sRead.gcInterval), - badgerstore.WithIndexCacheSize(sRead.indexCacheSize), - badgerstore.WithMemTablesCount(sRead.memTablesCount), - badgerstore.WithValueLogSize(sRead.valueLogFileSize), - } - if c.metricsCollector != nil { - badgerStoreOpts = append(badgerStoreOpts, - badgerstore.WithMetrics( - lsmetrics.NewBadgerStoreMetrics(sRead.path, c.metricsCollector.BadgerStoreMetrics()))) - } + badgerStoreOpts := c.getBadgerStoreOpts(sRead) ss = append(ss, blobstor.SubStorage{ Storage: badgerstore.New(badgerStoreOpts...), Policy: func(_ *objectSDK.Object, data []byte) bool { @@ -908,6 +851,82 @@ func (c *cfg) getSubstorageOpts(shCfg shardCfg) []blobstor.SubStorage { return ss } +func (c *cfg) getBadgerStoreOpts(sRead subStorageCfg) []badgerstore.Option { + badgerStoreOpts := []badgerstore.Option{ + badgerstore.WithPath(sRead.path), + badgerstore.WithPermissions(sRead.perm), + badgerstore.WithCompactorsCount(sRead.compactorsCount), + badgerstore.WithGCDiscardRatio(sRead.gcDiscardRatio), + badgerstore.WithGCInterval(sRead.gcInterval), + badgerstore.WithIndexCacheSize(sRead.indexCacheSize), + badgerstore.WithMemTablesCount(sRead.memTablesCount), + badgerstore.WithValueLogSize(sRead.valueLogFileSize), + } + if c.metricsCollector != nil { + badgerStoreOpts = append(badgerStoreOpts, + badgerstore.WithMetrics( + lsmetrics.NewBadgerStoreMetrics(sRead.path, c.metricsCollector.BadgerStoreMetrics()))) + } + return badgerStoreOpts +} + +func (c *cfg) getBlobTreeOpts(sRead subStorageCfg) []blobtree.Option { + blobTreeOpts := []blobtree.Option{ + blobtree.WithPath(sRead.path), + blobtree.WithPerm(sRead.perm), + blobtree.WithDepth(sRead.depth), + blobtree.WithTargetSize(sRead.size), + } + if c.metricsCollector != nil { + blobTreeOpts = append(blobTreeOpts, + blobtree.WithMetrics( + lsmetrics.NewBlobTreeMetrics(sRead.path, c.metricsCollector.BlobTreeMetrics()), + ), + ) + } + return blobTreeOpts +} + +func (c *cfg) getFSTreeOpts(sRead subStorageCfg) []fstree.Option { + fstreeOpts := []fstree.Option{ + fstree.WithPath(sRead.path), + fstree.WithPerm(sRead.perm), + fstree.WithDepth(sRead.depth), + fstree.WithNoSync(sRead.noSync), + fstree.WithLogger(c.log), + } + if c.metricsCollector 
!= nil { + fstreeOpts = append(fstreeOpts, + fstree.WithMetrics( + lsmetrics.NewFSTreeMetricsWithoutShardID(sRead.path, c.metricsCollector.FSTree()), + ), + ) + } + return fstreeOpts +} + +func (c *cfg) getBlobovniczaTreeOpts(sRead subStorageCfg) []blobovniczatree.Option { + blobTreeOpts := []blobovniczatree.Option{ + blobovniczatree.WithRootPath(sRead.path), + blobovniczatree.WithPermissions(sRead.perm), + blobovniczatree.WithBlobovniczaSize(sRead.size), + blobovniczatree.WithBlobovniczaShallowDepth(sRead.depth), + blobovniczatree.WithBlobovniczaShallowWidth(sRead.width), + blobovniczatree.WithBlobovniczaLeafWidth(sRead.leafWidth), + blobovniczatree.WithOpenedCacheSize(sRead.openedCacheSize), + blobovniczatree.WithLogger(c.log), + } + + if c.metricsCollector != nil { + blobTreeOpts = append(blobTreeOpts, + blobovniczatree.WithMetrics( + lsmetrics.NewBlobovniczaTreeMetrics(sRead.path, c.metricsCollector.BlobovniczaTreeMetrics()), + ), + ) + } + return blobTreeOpts +} + func (c *cfg) getShardOpts(shCfg shardCfg) shardOptsWithID { writeCacheOpts := c.getWriteCacheOpts(shCfg) piloramaOpts := c.getPiloramaOpts(shCfg) -- 2.45.2 From dc5b741b1d632a39cb33cc00b40e1affe6bdd6a1 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 30 Oct 2023 14:07:28 +0300 Subject: [PATCH 11/11] [#645] badgerstore: Add logger. Signed-off-by: Dmitrii Stepanov --- internal/logs/logs.go | 1 + .../blobstor/badgerstore/config.go | 13 ++++++++++++- .../blobstor/badgerstore/control.go | 4 ++++ 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/internal/logs/logs.go b/internal/logs/logs.go index e8472357c..7a7fc1a98 100644 --- a/internal/logs/logs.go +++ b/internal/logs/logs.go @@ -515,5 +515,6 @@ const ( RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped" FailedToCountWritecacheItems = "failed to count writecache items" AttemtToCloseAlreadyClosedBlobovnicza = "attempt to close an already closed blobovnicza" + BadgerStoreGCFailed = "failed to run GC on badgerstore" FailedToGetContainerCounters = "failed to get container counters values" ) diff --git a/pkg/local_object_storage/blobstor/badgerstore/config.go b/pkg/local_object_storage/blobstor/badgerstore/config.go index 714a685ed..9156f63f2 100644 --- a/pkg/local_object_storage/blobstor/badgerstore/config.go +++ b/pkg/local_object_storage/blobstor/badgerstore/config.go @@ -6,8 +6,10 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "github.com/dgraph-io/badger/v4" "github.com/dgraph-io/badger/v4/options" + "go.uber.org/zap" ) type cfg struct { @@ -17,6 +19,7 @@ type cfg struct { gcTimeout time.Duration gcDiscardRatio float64 metrics Metrics + logger *logger.Logger } type Option func(*cfg) @@ -52,11 +55,12 @@ func defaultCfg() *cfg { opts.ValueThreshold = 0 // to store all values in vLog return &cfg{ - permissions: 0700, + permissions: 0o700, db: opts, gcTimeout: 10 * time.Minute, gcDiscardRatio: 0.2, // for 1GB vLog file GC will perform only if around 200MB could be free metrics: &noopMetrics{}, + logger: &logger.Logger{Logger: zap.L()}, } } @@ -123,3 +127,10 @@ func WithMetrics(m Metrics) Option { c.metrics = m } } + +// WithLogger sets logger. 
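+// If it is not set, a logger wrapping the global zap logger is used.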
+func WithLogger(l *logger.Logger) Option { + return func(c *cfg) { + c.logger = l + } +} diff --git a/pkg/local_object_storage/blobstor/badgerstore/control.go b/pkg/local_object_storage/blobstor/badgerstore/control.go index 07ddf235b..321094080 100644 --- a/pkg/local_object_storage/blobstor/badgerstore/control.go +++ b/pkg/local_object_storage/blobstor/badgerstore/control.go @@ -5,8 +5,10 @@ import ( "fmt" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "github.com/dgraph-io/badger/v4" + "go.uber.org/zap" ) // Close implements common.Storage. @@ -61,6 +63,8 @@ func (s *Store) startGC() { case <-t.C: if err := s.db.RunValueLogGC(s.cfg.gcDiscardRatio); err == nil { _ = s.db.RunValueLogGC(s.cfg.gcDiscardRatio) // see https://dgraph.io/docs/badger/get-started/#garbage-collection + } else { + s.cfg.logger.Error(logs.BadgerStoreGCFailed, zap.Error(err), zap.String("path", s.cfg.db.Dir)) } } }() -- 2.45.2