[#645] blobtree: Add metrics

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>

Parent: 76855bddac
Commit: fba369ec34

16 changed files with 362 additions and 11 deletions
@@ -847,6 +847,13 @@ func (c *cfg) getSubstorageOpts(shCfg shardCfg) []blobstor.SubStorage {
 			blobtree.WithDepth(sRead.depth),
 			blobtree.WithTargetSize(sRead.size),
 		}
+		if c.metricsCollector != nil {
+			blobTreeOpts = append(blobTreeOpts,
+				blobtree.WithMetrics(
+					lsmetrics.NewBlobTreeMetrics(sRead.path, c.metricsCollector.BlobTreeMetrics()),
+				),
+			)
+		}
 		ss = append(ss, blobstor.SubStorage{
 			Storage: blobtree.New(blobTreeOpts...),
 			Policy: func(_ *objectSDK.Object, data []byte) bool {
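For orientation, the wiring above can be reproduced outside getSubstorageOpts. A minimal sketch using only the constructors and options visible in this commit; the root-path option, compression settings and error handling are omitted, and the package name, helper name and literal values are illustrative:

package example // hypothetical package for the sketch

import (
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobtree"
	lsmetrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metrics"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
)

// newBlobTreeWithMetrics is a hypothetical helper, not part of the commit.
func newBlobTreeWithMetrics(path string) *blobtree.BlobTree {
	nm := metrics.NewNodeMetrics()

	// The prometheus-backed collector from pkg/metrics is adapted to the
	// blobtree.Metrics interface, exactly as getSubstorageOpts does above.
	return blobtree.New(
		blobtree.WithDepth(3),
		blobtree.WithTargetSize(4*1024*1024), // illustrative target file size
		blobtree.WithMetrics(
			lsmetrics.NewBlobTreeMetrics(path, nm.BlobTreeMetrics()),
		),
	)
}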
@@ -33,6 +33,7 @@ func New(opts ...Option) *BlobTree {
 			depth:            3,
 			permissions:      0700,
 			initWorkersCount: 1000,
+			metrics:          &noopMetrics{},
 		},
 		dirLock:  utilSync.NewKeyLocker[string](),
 		fileLock: utilSync.NewKeyLocker[string](),
@@ -11,4 +11,5 @@ type cfg struct {
 	permissions      fs.FileMode
 	readOnly         bool
 	initWorkersCount int
+	metrics          Metrics
 }
@@ -14,6 +14,7 @@ var Type = "blobtree"
 
 func (b *BlobTree) Open(readOnly bool) error {
 	b.cfg.readOnly = readOnly
+	b.cfg.metrics.SetMode(readOnly)
 	return nil
 }
 
@@ -59,6 +60,7 @@ func (b *BlobTree) initDir(eg *errgroup.Group, dir string, depth uint64) error {
 			continue
 		}
 		b.dispatcher.Init(dir, idx)
+		b.cfg.metrics.IncFilesCount()
 
 		stat, err := os.Stat(filepath.Join(dir, entity.Name()))
 		if err != nil {
@@ -80,6 +82,7 @@ func (b *BlobTree) parseIdx(name string) (uint64, error) {
 }
 
 func (b *BlobTree) Close() error {
+	b.cfg.metrics.Close()
 	return nil
 }
 
@@ -95,4 +98,7 @@ func (b *BlobTree) Compressor() *compression.Config {
 }
 
 func (b *BlobTree) SetReportErrorFunc(_ func(string, error)) {}
-func (b *BlobTree) SetParentID(_ string) {}
+
+func (b *BlobTree) SetParentID(parentID string) {
+	b.cfg.metrics.SetParentID(parentID)
+}
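The hooks added so far line up with the substorage lifecycle: SetParentID labels all later series with the shard ID, Open records the mode, directory initialization counts existing data files, and Close releases the per-path series. A hedged sketch of the call order, assuming a *blobtree.BlobTree built as in the earlier example:

package example // hypothetical package for the sketch

import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobtree"

// openShardSubstorage is a hypothetical helper, not part of the commit.
func openShardSubstorage(bt *blobtree.BlobTree, shardID string) error {
	bt.SetParentID(shardID) // metrics.SetParentID: later series carry this shard ID
	if err := bt.Open(false); err != nil { // metrics.SetMode(false): mode gauge shows read-write
		return err
	}
	// Directory traversal during initialization (initDir above) calls
	// IncFilesCount once per discovered data file; Close later flips the mode
	// gauge to "closed" and drops the per-path series.
	return nil
}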
@@ -4,6 +4,7 @@ import (
 	"context"
 	"encoding/binary"
 	"os"
+	"time"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
@@ -12,14 +13,27 @@
 )
 
 func (b *BlobTree) Delete(_ context.Context, prm common.DeletePrm) (common.DeleteRes, error) {
+	var (
+		success   = false
+		startedAt = time.Now()
+	)
+	defer func() {
+		b.cfg.metrics.Delete(time.Since(startedAt), success, prm.StorageID != nil)
+	}()
+
 	if b.cfg.readOnly {
 		return common.DeleteRes{}, common.ErrReadOnly
 	}
 
+	var res common.DeleteRes
+	var err error
 	if len(prm.StorageID) == storageIDLength {
-		return b.deleteFromIdx(prm.Address, binary.LittleEndian.Uint64(prm.StorageID))
+		res, err = b.deleteFromIdx(prm.Address, binary.LittleEndian.Uint64(prm.StorageID))
+	} else {
+		res, err = b.findAndDelete(prm.Address)
 	}
-	return b.findAndDelete(prm.Address)
+	success = err == nil
+	return res, err
 }
 
 func (b *BlobTree) deleteFromIdx(addr oid.Address, idx uint64) (common.DeleteRes, error) {
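The instrumentation pattern used here repeats in Exists, Get, GetRange, Iterate and Put below: capture the start time, track the outcome in locals, and publish duration and success from a single deferred closure so that every return path is reported. A compact sketch of the idea; observe is a hypothetical helper, the real handlers inline it with defer:

package example // hypothetical package for the sketch

import "time"

// observe wraps an operation and reports its duration and success exactly
// once, mirroring the defer-based pattern in the handlers of this commit.
func observe(report func(d time.Duration, success bool), op func() error) error {
	startedAt := time.Now()
	err := op()
	report(time.Since(startedAt), err == nil)
	return err
}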
@@ -50,7 +64,7 @@ func (b *BlobTree) deleteFromIdx(addr oid.Address, idx uint64) (common.DeleteRes
 	err = os.Remove(path)
 	if err == nil {
 		b.dispatcher.ReturnIdx(dir, idx)
-		// decrease files metric
+		b.cfg.metrics.DecFilesCount()
 	}
 	return common.DeleteRes{}, err
 }
@@ -4,6 +4,7 @@ import (
 	"context"
 	"encoding/binary"
 	"errors"
+	"time"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
 	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -11,10 +12,23 @@
 )
 
 func (b *BlobTree) Exists(_ context.Context, prm common.ExistsPrm) (common.ExistsRes, error) {
+	var (
+		startedAt = time.Now()
+		success   = false
+	)
+	defer func() {
+		b.cfg.metrics.Exists(time.Since(startedAt), success, prm.StorageID != nil)
+	}()
+
+	var res common.ExistsRes
+	var err error
 	if len(prm.StorageID) == storageIDLength {
-		return b.existsFromIdx(prm.Address, binary.LittleEndian.Uint64(prm.StorageID))
+		res, err = b.existsFromIdx(prm.Address, binary.LittleEndian.Uint64(prm.StorageID))
+	} else {
+		res, err = b.findAndCheck(prm.Address)
 	}
-	return b.findAndCheck(prm.Address)
+	success = err == nil
+	return res, err
 }
 
 func (b *BlobTree) existsFromIdx(addr oid.Address, idx uint64) (common.ExistsRes, error) {
@@ -4,6 +4,7 @@ import (
 	"context"
 	"encoding/binary"
 	"os"
+	"time"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
@@ -13,6 +14,22 @@
 )
 
 func (b *BlobTree) Get(_ context.Context, prm common.GetPrm) (common.GetRes, error) {
+	var (
+		startedAt = time.Now()
+		success   = false
+		size      = 0
+	)
+	defer func() {
+		b.cfg.metrics.Get(time.Since(startedAt), size, success, prm.StorageID != nil)
+	}()
+
+	res, err := b.get(prm)
+	success = err == nil
+	size = len(res.RawData)
+	return res, err
+}
+
+func (b *BlobTree) get(prm common.GetPrm) (common.GetRes, error) {
 	if len(prm.StorageID) == storageIDLength {
 		return b.getFromIdx(prm.Address, binary.LittleEndian.Uint64(prm.StorageID))
 	}
@@ -2,6 +2,7 @@ package blobtree
 
 import (
 	"context"
+	"time"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
@@ -9,12 +10,21 @@
 )
 
 func (b *BlobTree) GetRange(ctx context.Context, prm common.GetRangePrm) (common.GetRangeRes, error) {
-	res, err := b.Get(ctx, common.GetPrm{Address: prm.Address, StorageID: prm.StorageID})
+	var (
+		startedAt = time.Now()
+		success   = false
+		size      = 0
+	)
+	defer func() {
+		b.cfg.metrics.GetRange(time.Since(startedAt), size, success, prm.StorageID != nil)
+	}()
+
+	gRes, err := b.get(common.GetPrm{Address: prm.Address, StorageID: prm.StorageID})
 	if err != nil {
 		return common.GetRangeRes{}, err
 	}
 
-	payload := res.Object.Payload()
+	payload := gRes.Object.Payload()
 	from := prm.Range.GetOffset()
 	to := from + prm.Range.GetLength()
 
@@ -22,7 +32,10 @@ func (b *BlobTree) GetRange(ctx context.Context, prm common.GetRangePrm) (common
 		return common.GetRangeRes{}, logicerr.Wrap(new(apistatus.ObjectOutOfRange))
 	}
 
-	return common.GetRangeRes{
+	res := common.GetRangeRes{
 		Data: payload[from:to],
-	}, nil
+	}
+	size = len(res.Data)
+	success = true
+	return res, nil
 }
@@ -5,12 +5,22 @@ import (
 	"encoding/binary"
 	"os"
 	"path/filepath"
+	"time"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
 )
 
 func (b *BlobTree) Iterate(_ context.Context, prm common.IteratePrm) (common.IterateRes, error) {
-	return common.IterateRes{}, b.iterateDir(b.cfg.rootPath, 0, prm)
+	var (
+		startedAt = time.Now()
+		err       error
+	)
+	defer func() {
+		b.cfg.metrics.Iterate(time.Since(startedAt), err == nil)
+	}()
+
+	err = b.iterateDir(b.cfg.rootPath, 0, prm)
+	return common.IterateRes{}, err
 }
 
 func (b *BlobTree) iterateDir(dir string, depth uint64, prm common.IteratePrm) error {
pkg/local_object_storage/blobstor/blobtree/metrics.go (new file, 34 lines)
@@ -0,0 +1,34 @@
+package blobtree
+
+import "time"
+
+type Metrics interface {
+	SetParentID(parentID string)
+
+	SetMode(readOnly bool)
+	Close()
+
+	Delete(d time.Duration, success, withStorageID bool)
+	Exists(d time.Duration, success, withStorageID bool)
+	GetRange(d time.Duration, size int, success, withStorageID bool)
+	Get(d time.Duration, size int, success, withStorageID bool)
+	Iterate(d time.Duration, success bool)
+	Put(d time.Duration, size int, success bool)
+
+	IncFilesCount()
+	DecFilesCount()
+}
+
+type noopMetrics struct{}
+
+func (m *noopMetrics) SetParentID(string)                      {}
+func (m *noopMetrics) SetMode(bool)                            {}
+func (m *noopMetrics) Close()                                  {}
+func (m *noopMetrics) Delete(time.Duration, bool, bool)        {}
+func (m *noopMetrics) Exists(time.Duration, bool, bool)        {}
+func (m *noopMetrics) GetRange(time.Duration, int, bool, bool) {}
+func (m *noopMetrics) Get(time.Duration, int, bool, bool)      {}
+func (m *noopMetrics) Iterate(time.Duration, bool)             {}
+func (m *noopMetrics) Put(time.Duration, int, bool)            {}
+func (m *noopMetrics) IncFilesCount()                          {}
+func (m *noopMetrics) DecFilesCount()                          {}
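Metrics is a plain interface, so implementations other than the prometheus adapter added later in this commit can be injected through the WithMetrics option added just below. A hedged sketch of a minimal counting implementation; the package, type and field names are illustrative and not part of the commit:

package example // hypothetical package for the sketch

import (
	"sync/atomic"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobtree"
)

// countingMetrics tracks successful reads/writes and the data file count;
// every other callback is a no-op.
type countingMetrics struct {
	files int64
	gets  int64
	puts  int64
}

var _ blobtree.Metrics = (*countingMetrics)(nil)

func (m *countingMetrics) SetParentID(string) {}
func (m *countingMetrics) SetMode(bool)       {}
func (m *countingMetrics) Close()             {}

func (m *countingMetrics) Delete(time.Duration, bool, bool)        {}
func (m *countingMetrics) Exists(time.Duration, bool, bool)        {}
func (m *countingMetrics) GetRange(time.Duration, int, bool, bool) {}
func (m *countingMetrics) Iterate(time.Duration, bool)             {}

func (m *countingMetrics) Get(_ time.Duration, _ int, success, _ bool) {
	if success {
		atomic.AddInt64(&m.gets, 1)
	}
}

func (m *countingMetrics) Put(_ time.Duration, _ int, success bool) {
	if success {
		atomic.AddInt64(&m.puts, 1)
	}
}

func (m *countingMetrics) IncFilesCount() { atomic.AddInt64(&m.files, 1) }
func (m *countingMetrics) DecFilesCount() { atomic.AddInt64(&m.files, -1) }

It would be plugged in as blobtree.New(blobtree.WithMetrics(&countingMetrics{})).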
@@ -27,3 +27,9 @@ func WithTargetSize(size uint64) Option {
 		c.targetFileSizeBytes = size
 	}
 }
+
+func WithMetrics(m Metrics) Option {
+	return func(c *cfg) {
+		c.metrics = m
+	}
+}
@@ -5,6 +5,7 @@ import (
 	"encoding/binary"
 	"os"
 	"strconv"
+	"time"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
 )
@@ -15,6 +16,15 @@
 )
 
 func (b *BlobTree) Put(_ context.Context, prm common.PutPrm) (common.PutRes, error) {
+	var (
+		success   bool
+		size      int
+		startedAt = time.Now()
+	)
+	defer func() {
+		b.cfg.metrics.Put(time.Since(startedAt), size, success)
+	}()
+
 	if b.cfg.readOnly {
 		return common.PutRes{}, common.ErrReadOnly
 	}
@@ -34,6 +44,9 @@ func (b *BlobTree) Put(_ context.Context, prm common.PutPrm) (common.PutRes, err
 		return common.PutRes{}, err
 	}
 
+	success = true
+	size = len(prm.RawData)
+
 	storageID := make([]byte, storageIDLength)
 	binary.LittleEndian.PutUint64(storageID, idx)
 	return common.PutRes{StorageID: storageID}, nil
@@ -79,10 +92,24 @@ func (b *BlobTree) writeToTmpAndRename(records []objectData, path string) (uint6
 		return 0, err
 	}
 
+	newFile := false
+	_, err = os.Stat(path)
+	if err != nil {
+		if os.IsNotExist(err) {
+			newFile = true
+		} else {
+			return 0, err
+		}
+	}
+
 	if err := os.Rename(tmpFile, path); err != nil {
 		_ = os.Remove(tmpFile)
 		return 0, err
 	}
 
+	if newFile {
+		b.cfg.metrics.IncFilesCount()
+	}
+
 	return size, nil
 }
pkg/local_object_storage/metrics/blobtree.go (new file, 74 lines)
@@ -0,0 +1,74 @@
+package metrics
+
+import (
+	"time"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobtree"
+	metrics_impl "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
+)
+
+func NewBlobTreeMetrics(path string, m metrics_impl.BlobTreeMetrics) blobtree.Metrics {
+	return &blobTreeMetrics{
+		path: path,
+		m:    m,
+	}
+}
+
+type blobTreeMetrics struct {
+	shardID string
+	path    string
+	m       metrics_impl.BlobTreeMetrics
+}
+
+func (m *blobTreeMetrics) SetParentID(parentID string) {
+	m.shardID = parentID
+}
+
+func (m *blobTreeMetrics) SetMode(readOnly bool) {
+	m.m.SetBlobTreeMode(m.shardID, m.path, readOnly)
+}
+
+func (m *blobTreeMetrics) Close() {
+	m.m.CloseBlobTree(m.shardID, m.path)
+}
+
+func (m *blobTreeMetrics) Delete(d time.Duration, success, withStorageID bool) {
+	m.m.BlobTreeMethodDuration(m.shardID, m.path, "Delete", d, success, metrics_impl.NullBool{Valid: true, Bool: withStorageID})
+}
+
+func (m *blobTreeMetrics) Exists(d time.Duration, success, withStorageID bool) {
+	m.m.BlobTreeMethodDuration(m.shardID, m.path, "Exists", d, success, metrics_impl.NullBool{Valid: true, Bool: withStorageID})
+}
+
+func (m *blobTreeMetrics) GetRange(d time.Duration, size int, success, withStorageID bool) {
+	m.m.BlobTreeMethodDuration(m.shardID, m.path, "GetRange", d, success, metrics_impl.NullBool{Valid: true, Bool: withStorageID})
+	if success {
+		m.m.AddBlobTreeGet(m.shardID, m.path, size)
+	}
+}
+
+func (m *blobTreeMetrics) Get(d time.Duration, size int, success, withStorageID bool) {
+	m.m.BlobTreeMethodDuration(m.shardID, m.path, "Get", d, success, metrics_impl.NullBool{Valid: true, Bool: withStorageID})
+	if success {
+		m.m.AddBlobTreeGet(m.shardID, m.path, size)
+	}
+}
+
+func (m *blobTreeMetrics) Iterate(d time.Duration, success bool) {
+	m.m.BlobTreeMethodDuration(m.shardID, m.path, "Iterate", d, success, metrics_impl.NullBool{})
+}
+
+func (m *blobTreeMetrics) Put(d time.Duration, size int, success bool) {
+	m.m.BlobTreeMethodDuration(m.shardID, m.path, "Put", d, success, metrics_impl.NullBool{})
+	if success {
+		m.m.AddBlobTreePut(m.shardID, m.path, size)
+	}
+}
+
+func (m *blobTreeMetrics) IncFilesCount() {
+	m.m.IncBlobTreeFilesCount(m.shardID, m.path)
+}
+
+func (m *blobTreeMetrics) DecFilesCount() {
+	m.m.DecBlobTreeFilesCount(m.shardID, m.path)
+}
pkg/metrics/blobtree.go (new file, 120 lines)
@@ -0,0 +1,120 @@
+package metrics
+
+import (
+	"strconv"
+	"time"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+type BlobTreeMetrics interface {
+	SetBlobTreeMode(shardID, path string, readOnly bool)
+	CloseBlobTree(shardID, path string)
+	BlobTreeMethodDuration(shardID, path string, method string, d time.Duration, success bool, withStorageID NullBool)
+	IncBlobTreeFilesCount(shardID, path string)
+	DecBlobTreeFilesCount(shardID, path string)
+	AddBlobTreePut(shardID, path string, size int)
+	AddBlobTreeGet(shardID, path string, size int)
+}
+
+type blobTreeMetrics struct {
+	mode        *shardIDPathModeValue
+	reqDuration *prometheus.HistogramVec
+	put         *prometheus.CounterVec
+	get         *prometheus.CounterVec
+	filesCount  *prometheus.GaugeVec
+}
+
+func newBlobTreeMetrics() *blobTreeMetrics {
+	return &blobTreeMetrics{
+		mode: newShardIDPathMode(blobTreeSubSystem, "mode", "Blob tree mode"),
+
+		reqDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{
+			Namespace: namespace,
+			Subsystem: blobTreeSubSystem,
+			Name:      "request_duration_seconds",
+			Help:      "Accumulated Blob tree request process duration",
+		}, []string{shardIDLabel, pathLabel, successLabel, methodLabel, withStorageIDLabel}),
+		put: metrics.NewCounterVec(prometheus.CounterOpts{
+			Namespace: namespace,
+			Subsystem: blobTreeSubSystem,
+			Name:      "put_bytes",
+			Help:      "Accumulated payload size written to Blob tree",
+		}, []string{shardIDLabel, pathLabel}),
+		get: metrics.NewCounterVec(prometheus.CounterOpts{
+			Namespace: namespace,
+			Subsystem: blobTreeSubSystem,
+			Name:      "get_bytes",
+			Help:      "Accumulated payload size read from Blob tree",
+		}, []string{shardIDLabel, pathLabel}),
+		filesCount: metrics.NewGaugeVec(prometheus.GaugeOpts{
+			Namespace: namespace,
+			Subsystem: blobTreeSubSystem,
+			Name:      "files_count",
+			Help:      "Count of data files in Blob tree",
+		}, []string{shardIDLabel, pathLabel}),
+	}
+}
+
+func (b *blobTreeMetrics) SetBlobTreeMode(shardID, path string, readOnly bool) {
+	b.mode.SetMode(shardID, path, modeFromBool(readOnly))
+}
+
+func (b *blobTreeMetrics) CloseBlobTree(shardID, path string) {
+	b.mode.SetMode(shardID, path, closedMode)
+	b.reqDuration.DeletePartialMatch(prometheus.Labels{
+		shardIDLabel: shardID,
+		pathLabel:    path,
+	})
+	b.get.DeletePartialMatch(prometheus.Labels{
+		shardIDLabel: shardID,
+		pathLabel:    path,
+	})
+	b.put.DeletePartialMatch(prometheus.Labels{
+		shardIDLabel: shardID,
+		pathLabel:    path,
+	})
+	b.filesCount.DeletePartialMatch(prometheus.Labels{
+		shardIDLabel: shardID,
+		pathLabel:    path,
+	})
+}
+
+func (b *blobTreeMetrics) BlobTreeMethodDuration(shardID, path string, method string, d time.Duration, success bool, withStorageID NullBool) {
+	b.reqDuration.With(prometheus.Labels{
+		shardIDLabel:       shardID,
+		pathLabel:          path,
+		successLabel:       strconv.FormatBool(success),
+		methodLabel:        method,
+		withStorageIDLabel: withStorageID.String(),
+	}).Observe(d.Seconds())
+}
+
+func (b *blobTreeMetrics) IncBlobTreeFilesCount(shardID, path string) {
+	b.filesCount.With(prometheus.Labels{
+		shardIDLabel: shardID,
+		pathLabel:    path,
+	}).Inc()
+}
+
+func (b *blobTreeMetrics) DecBlobTreeFilesCount(shardID, path string) {
+	b.filesCount.With(prometheus.Labels{
+		shardIDLabel: shardID,
+		pathLabel:    path,
+	}).Dec()
+}
+
+func (b *blobTreeMetrics) AddBlobTreePut(shardID, path string, size int) {
+	b.put.With(prometheus.Labels{
+		shardIDLabel: shardID,
+		pathLabel:    path,
+	}).Add(float64(size))
+}
+
+func (b *blobTreeMetrics) AddBlobTreeGet(shardID, path string, size int) {
+	b.get.With(prometheus.Labels{
+		shardIDLabel: shardID,
+		pathLabel:    path,
+	}).Add(float64(size))
+}
@@ -7,6 +7,7 @@ const (
 	fstreeSubSystem          = "fstree"
 	blobstoreSubSystem       = "blobstore"
 	blobovniczaTreeSubSystem = "blobovnicza_tree"
+	blobTreeSubSystem        = "blobtree"
 	metabaseSubSystem        = "metabase"
 	piloramaSubSystem        = "pilorama"
 	engineSubsystem          = "engine"
@@ -20,6 +20,7 @@ type NodeMetrics struct {
 	metabase    *metabaseMetrics
 	pilorama    *piloramaMetrics
 	grpc        *grpcServerMetrics
+	blobTree    *blobTreeMetrics
 	policer     *policerMetrics
 	morphClient *morphClientMetrics
 	morphCache  *morphCacheMetrics
@@ -49,6 +50,7 @@ func NewNodeMetrics() *NodeMetrics {
 		morphClient: newMorphClientMetrics(),
 		morphCache:  newMorphCacheMetrics(namespace),
 		log:         logger.NewLogMetrics(namespace),
+		blobTree:    newBlobTreeMetrics(),
 	}
 }
 
@@ -116,3 +118,7 @@ func (m *NodeMetrics) MorphCacheMetrics() MorphCacheMetrics {
 func (m *NodeMetrics) LogMetrics() logger.LogMetrics {
 	return m.log
 }
+
+func (m *NodeMetrics) BlobTreeMetrics() BlobTreeMetrics {
+	return m.blobTree
+}
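To tie the two layers together, a hedged usage sketch of the new accessor; the shard ID and path labels are illustrative, and the zero-value NullBool marks the storage-ID label as not applicable, as the Iterate and Put adapters above do:

package main

import (
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
)

func main() {
	nm := metrics.NewNodeMetrics()
	btm := nm.BlobTreeMetrics()

	// Record a few sample observations for one shard and substorage path.
	btm.SetBlobTreeMode("shard-1", "/srv/blobtree", false)
	btm.IncBlobTreeFilesCount("shard-1", "/srv/blobtree")
	btm.AddBlobTreePut("shard-1", "/srv/blobtree", 4096)
	btm.BlobTreeMethodDuration("shard-1", "/srv/blobtree", "Put", 3*time.Millisecond, true, metrics.NullBool{})
	btm.BlobTreeMethodDuration("shard-1", "/srv/blobtree", "Get", time.Millisecond, true, metrics.NullBool{Valid: true, Bool: true})
}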