[#242] node: Add tracing spans

Add tracing spans for PUT, DELETE, and SELECT requests.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
pull/251/head
Dmitrii Stepanov 2023-04-12 17:01:29 +03:00 committed by fyrchik
parent 200fc8b882
commit d62c6e4ce6
122 changed files with 863 additions and 417 deletions
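
All changes follow a single instrumentation pattern: each storage operation gains a context.Context parameter, opens a named span carrying the request's key attributes, and closes the span on return. Below is a minimal sketch of that pattern, using the same tracing helpers as the diff; Storage, Operation, Prm, and Res are placeholders, not repository types:

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

func (s *Storage) Operation(ctx context.Context, prm Prm) (Res, error) {
	// Open a span named after the method; attributes identify the request.
	ctx, span := tracing.StartSpanFromContext(ctx, "Storage.Operation",
		trace.WithAttributes(
			attribute.String("address", prm.Address.EncodeToString()),
		))
	defer span.End() // end the span when the operation returns

	// ...existing operation logic, now passing ctx to nested calls...
	return s.inner.Operation(ctx, prm)
}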

View File

@ -36,7 +36,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) {
storageID := meta.StorageIDPrm{}
storageID.SetAddress(addr)
resStorageID, err := db.StorageID(storageID)
resStorageID, err := db.StorageID(cmd.Context(), storageID)
common.ExitOnErr(cmd, common.Errf("could not check if the obj is small: %w", err))
if id := resStorageID.StorageID(); id != nil {
@ -51,7 +51,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) {
siErr := new(object.SplitInfoError)
res, err := db.Get(prm)
res, err := db.Get(cmd.Context(), prm)
if errors.As(err, &siErr) {
link, linkSet := siErr.SplitInfo().Link()
last, lastSet := siErr.SplitInfo().LastPart()

View File

@ -42,7 +42,7 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler
for _, c := range listRes.Containers() {
selectPrm.WithContainerID(c)
selectRes, err := n.e.Select(selectPrm)
selectRes, err := n.e.Select(ctx, selectPrm)
if err != nil {
log.Error(logs.FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer,
zap.Stringer("cid", c),

View File

@ -617,20 +617,20 @@ type engineWithNotifications struct {
defaultTopic string
}
func (e engineWithNotifications) IsLocked(address oid.Address) (bool, error) {
return e.base.IsLocked(address)
func (e engineWithNotifications) IsLocked(ctx context.Context, address oid.Address) (bool, error) {
return e.base.IsLocked(ctx, address)
}
func (e engineWithNotifications) Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error {
return e.base.Delete(ctx, tombstone, toDelete)
}
func (e engineWithNotifications) Lock(locker oid.Address, toLock []oid.ID) error {
return e.base.Lock(locker, toLock)
func (e engineWithNotifications) Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error {
return e.base.Lock(ctx, locker, toLock)
}
func (e engineWithNotifications) Put(o *objectSDK.Object) error {
if err := e.base.Put(o); err != nil {
func (e engineWithNotifications) Put(ctx context.Context, o *objectSDK.Object) error {
if err := e.base.Put(ctx, o); err != nil {
return err
}
@ -654,8 +654,8 @@ type engineWithoutNotifications struct {
engine *engine.StorageEngine
}
func (e engineWithoutNotifications) IsLocked(address oid.Address) (bool, error) {
return e.engine.IsLocked(address)
func (e engineWithoutNotifications) IsLocked(ctx context.Context, address oid.Address) (bool, error) {
return e.engine.IsLocked(ctx, address)
}
func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error {
@ -673,10 +673,10 @@ func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Ad
return err
}
func (e engineWithoutNotifications) Lock(locker oid.Address, toLock []oid.ID) error {
return e.engine.Lock(locker.Container(), locker.Object(), toLock)
func (e engineWithoutNotifications) Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error {
return e.engine.Lock(ctx, locker.Container(), locker.Object(), toLock)
}
func (e engineWithoutNotifications) Put(o *objectSDK.Object) error {
return engine.Put(e.engine, o)
func (e engineWithoutNotifications) Put(ctx context.Context, o *objectSDK.Object) error {
return engine.Put(ctx, e.engine, o)
}

View File

@ -1,6 +1,7 @@
package object
import (
"context"
"crypto/ecdsa"
"errors"
"fmt"
@ -42,7 +43,7 @@ type DeleteHandler interface {
// LockSource is a source of lock relations between the objects.
type LockSource interface {
// IsLocked must report the object's lock status.
IsLocked(address oid.Address) (bool, error)
IsLocked(ctx context.Context, address oid.Address) (bool, error)
}
// Locker is an object lock storage interface.
@ -89,7 +90,7 @@ func NewFormatValidator(opts ...FormatValidatorOption) *FormatValidator {
// If unprepared is true, only fields set by user are validated.
//
// Returns nil error if the object has valid structure.
func (v *FormatValidator) Validate(obj *object.Object, unprepared bool) error {
func (v *FormatValidator) Validate(ctx context.Context, obj *object.Object, unprepared bool) error {
if obj == nil {
return errNilObject
}
@ -117,7 +118,7 @@ func (v *FormatValidator) Validate(obj *object.Object, unprepared bool) error {
return fmt.Errorf("(%T) could not validate signature key: %w", v, err)
}
if err := v.checkExpiration(obj); err != nil {
if err := v.checkExpiration(ctx, obj); err != nil {
return fmt.Errorf("object did not pass expiration check: %w", err)
}
@ -128,7 +129,7 @@ func (v *FormatValidator) Validate(obj *object.Object, unprepared bool) error {
if obj = obj.Parent(); obj != nil {
// Parent object already exists.
return v.Validate(obj, false)
return v.Validate(ctx, obj, false)
}
return nil
@ -327,7 +328,7 @@ func (v *FormatValidator) fillAndValidateTombstoneMeta(o *object.Object, meta *C
var errExpired = errors.New("object has expired")
func (v *FormatValidator) checkExpiration(obj *object.Object) error {
func (v *FormatValidator) checkExpiration(ctx context.Context, obj *object.Object) error {
exp, err := expirationEpochAttribute(obj)
if err != nil {
if errors.Is(err, errNoExpirationEpoch) {
@ -348,7 +349,7 @@ func (v *FormatValidator) checkExpiration(obj *object.Object) error {
addr.SetContainer(cID)
addr.SetObject(oID)
locked, err := v.e.IsLocked(addr)
locked, err := v.e.IsLocked(ctx, addr)
if err != nil {
return fmt.Errorf("locking status check for an expired object: %w", err)
}

View File

@ -1,6 +1,7 @@
package object
import (
"context"
"crypto/ecdsa"
"strconv"
"testing"
@ -40,7 +41,7 @@ type testLockSource struct {
m map[oid.Address]bool
}
func (t testLockSource) IsLocked(address oid.Address) (bool, error) {
func (t testLockSource) IsLocked(_ context.Context, address oid.Address) (bool, error) {
return t.m[address], nil
}
@ -62,20 +63,20 @@ func TestFormatValidator_Validate(t *testing.T) {
require.NoError(t, err)
t.Run("nil input", func(t *testing.T) {
require.Error(t, v.Validate(nil, true))
require.Error(t, v.Validate(context.Background(), nil, true))
})
t.Run("nil identifier", func(t *testing.T) {
obj := object.New()
require.ErrorIs(t, v.Validate(obj, false), errNilID)
require.ErrorIs(t, v.Validate(context.Background(), obj, false), errNilID)
})
t.Run("nil container identifier", func(t *testing.T) {
obj := object.New()
obj.SetID(oidtest.ID())
require.ErrorIs(t, v.Validate(obj, true), errNilCID)
require.ErrorIs(t, v.Validate(context.Background(), obj, true), errNilCID)
})
t.Run("unsigned object", func(t *testing.T) {
@ -83,7 +84,7 @@ func TestFormatValidator_Validate(t *testing.T) {
obj.SetContainerID(cidtest.ID())
obj.SetID(oidtest.ID())
require.Error(t, v.Validate(obj, false))
require.Error(t, v.Validate(context.Background(), obj, false))
})
t.Run("correct w/ session token", func(t *testing.T) {
@ -101,7 +102,7 @@ func TestFormatValidator_Validate(t *testing.T) {
require.NoError(t, object.SetIDWithSignature(ownerKey.PrivateKey, obj))
require.NoError(t, v.Validate(obj, false))
require.NoError(t, v.Validate(context.Background(), obj, false))
})
t.Run("correct w/o session token", func(t *testing.T) {
@ -109,7 +110,7 @@ func TestFormatValidator_Validate(t *testing.T) {
require.NoError(t, object.SetIDWithSignature(ownerKey.PrivateKey, obj))
require.NoError(t, v.Validate(obj, false))
require.NoError(t, v.Validate(context.Background(), obj, false))
})
t.Run("tombstone content", func(t *testing.T) {
@ -236,7 +237,7 @@ func TestFormatValidator_Validate(t *testing.T) {
t.Run("invalid attribute value", func(t *testing.T) {
val := "text"
err := v.Validate(fn(val), false)
err := v.Validate(context.Background(), fn(val), false)
require.Error(t, err)
})
@ -245,7 +246,7 @@ func TestFormatValidator_Validate(t *testing.T) {
obj := fn(val)
t.Run("non-locked", func(t *testing.T) {
err := v.Validate(obj, false)
err := v.Validate(context.Background(), obj, false)
require.ErrorIs(t, err, errExpired)
})
@ -258,14 +259,14 @@ func TestFormatValidator_Validate(t *testing.T) {
addr.SetObject(oID)
ls.m[addr] = true
err := v.Validate(obj, false)
err := v.Validate(context.Background(), obj, false)
require.NoError(t, err)
})
})
t.Run("alive object", func(t *testing.T) {
val := strconv.FormatUint(curEpoch, 10)
err := v.Validate(fn(val), true)
err := v.Validate(context.Background(), fn(val), true)
require.NoError(t, err)
})
})

View File

@ -88,7 +88,7 @@ func TestBlobovnicza(t *testing.T) {
var dPrm DeletePrm
dPrm.SetAddress(addr)
_, err := blz.Delete(dPrm)
_, err := blz.Delete(context.Background(), dPrm)
require.NoError(t, err)
// should return 404

View File

@ -1,10 +1,15 @@
package blobovnicza
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@ -30,7 +35,13 @@ func (p *DeletePrm) SetAddress(addr oid.Address) {
// Returns an error of type apistatus.ObjectNotFound if the object to be deleted is not in blobovnicza.
//
// Should not be called in read-only configuration.
func (b *Blobovnicza) Delete(prm DeletePrm) (DeleteRes, error) {
func (b *Blobovnicza) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
_, span := tracing.StartSpanFromContext(ctx, "Blobovnicza.Delete",
trace.WithAttributes(
attribute.String("address", prm.addr.EncodeToString()),
))
defer span.End()
addrKey := addressKey(prm.addr)
removed := false

View File

@ -1,13 +1,18 @@
package blobovniczatree
import (
"context"
"encoding/hex"
"path/filepath"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@ -15,7 +20,14 @@ import (
//
// If blobovnicza ID is specified, only this blobovnicza is processed.
// Otherwise, all Blobovniczas are processed in descending weight order.
func (b *Blobovniczas) Delete(prm common.DeletePrm) (res common.DeleteRes, err error) {
func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res common.DeleteRes, err error) {
ctx, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.Delete",
trace.WithAttributes(
attribute.String("address", prm.Address.EncodeToString()),
attribute.String("storage_id", hex.EncodeToString(prm.StorageID)),
))
defer span.End()
if b.readOnly {
return common.DeleteRes{}, common.ErrReadOnly
}
@ -30,7 +42,7 @@ func (b *Blobovniczas) Delete(prm common.DeletePrm) (res common.DeleteRes, err e
return res, err
}
return b.deleteObject(blz, bPrm, prm)
return b.deleteObject(ctx, blz, bPrm, prm)
}
activeCache := make(map[string]struct{})
@ -42,7 +54,7 @@ func (b *Blobovniczas) Delete(prm common.DeletePrm) (res common.DeleteRes, err e
// don't process active blobovnicza of the level twice
_, ok := activeCache[dirPath]
res, err = b.deleteObjectFromLevel(bPrm, p, !ok, prm)
res, err = b.deleteObjectFromLevel(ctx, bPrm, p, !ok, prm)
if err != nil {
if !blobovnicza.IsErrNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromLevel,
@ -73,7 +85,7 @@ func (b *Blobovniczas) Delete(prm common.DeletePrm) (res common.DeleteRes, err e
// tries to delete the object from a particular blobovnicza.
//
// Returns no error if the object was removed from some blobovnicza on the same level.
func (b *Blobovniczas) deleteObjectFromLevel(prm blobovnicza.DeletePrm, blzPath string, tryActive bool, dp common.DeletePrm) (common.DeleteRes, error) {
func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicza.DeletePrm, blzPath string, tryActive bool, dp common.DeletePrm) (common.DeleteRes, error) {
lvlPath := filepath.Dir(blzPath)
// try to remove from blobovnicza if it is opened
@ -81,7 +93,7 @@ func (b *Blobovniczas) deleteObjectFromLevel(prm blobovnicza.DeletePrm, blzPath
v, ok := b.opened.Get(blzPath)
b.lruMtx.Unlock()
if ok {
if res, err := b.deleteObject(v, prm, dp); err == nil {
if res, err := b.deleteObject(ctx, v, prm, dp); err == nil {
return res, err
} else if !blobovnicza.IsErrNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromOpenedBlobovnicza,
@ -100,7 +112,7 @@ func (b *Blobovniczas) deleteObjectFromLevel(prm blobovnicza.DeletePrm, blzPath
b.activeMtx.RUnlock()
if ok && tryActive {
if res, err := b.deleteObject(active.blz, prm, dp); err == nil {
if res, err := b.deleteObject(ctx, active.blz, prm, dp); err == nil {
return res, err
} else if !blobovnicza.IsErrNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromActiveBlobovnicza,
@ -125,11 +137,11 @@ func (b *Blobovniczas) deleteObjectFromLevel(prm blobovnicza.DeletePrm, blzPath
return common.DeleteRes{}, err
}
return b.deleteObject(blz, prm, dp)
return b.deleteObject(ctx, blz, prm, dp)
}
// removes object from blobovnicza and returns common.DeleteRes.
func (b *Blobovniczas) deleteObject(blz *blobovnicza.Blobovnicza, prm blobovnicza.DeletePrm, dp common.DeletePrm) (common.DeleteRes, error) {
_, err := blz.Delete(prm)
func (b *Blobovniczas) deleteObject(ctx context.Context, blz *blobovnicza.Blobovnicza, prm blobovnicza.DeletePrm, dp common.DeletePrm) (common.DeleteRes, error) {
_, err := blz.Delete(ctx, prm)
return common.DeleteRes{}, err
}

View File

@ -33,7 +33,7 @@ func TestExistsInvalidStorageID(t *testing.T) {
d, err := obj.Marshal()
require.NoError(t, err)
putRes, err := b.Put(common.PutPrm{Address: addr, RawData: d, DontCompress: true})
putRes, err := b.Put(context.Background(), common.PutPrm{Address: addr, RawData: d, DontCompress: true})
require.NoError(t, err)
t.Run("valid but wrong storage id", func(t *testing.T) {

View File

@ -1,20 +1,31 @@
package blobovniczatree
import (
"context"
"errors"
"path/filepath"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
// Put saves the object in the blobovnicza with the maximum weight.
//
// Returns an error if the object could not be saved in any blobovnicza.
func (b *Blobovniczas) Put(prm common.PutPrm) (common.PutRes, error) {
func (b *Blobovniczas) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, error) {
_, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.Put",
trace.WithAttributes(
attribute.String("address", prm.Address.EncodeToString()),
attribute.Bool("dont_compress", prm.DontCompress),
))
defer span.End()
if b.readOnly {
return common.PutRes{}, common.ErrReadOnly
}

View File

@ -75,12 +75,12 @@ func TestCompression(t *testing.T) {
testPut := func(t *testing.T, b *BlobStor, i int) {
var prm common.PutPrm
prm.Object = smallObj[i]
_, err := b.Put(prm)
_, err := b.Put(context.Background(), prm)
require.NoError(t, err)
prm = common.PutPrm{}
prm.Object = bigObj[i]
_, err = b.Put(prm)
_, err = b.Put(context.Background(), prm)
require.NoError(t, err)
}

View File

@ -23,7 +23,7 @@ type Storage interface {
Get(context.Context, GetPrm) (GetRes, error)
GetRange(context.Context, GetRangePrm) (GetRangeRes, error)
Exists(context.Context, ExistsPrm) (ExistsRes, error)
Put(PutPrm) (PutRes, error)
Delete(DeletePrm) (DeleteRes, error)
Put(context.Context, PutPrm) (PutRes, error)
Delete(context.Context, DeletePrm) (DeleteRes, error)
Iterate(IteratePrm) (IterateRes, error)
}
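
Every Storage implementation must adopt the new signatures (FSTree, Blobovniczas, the memstore, and the test store are all updated below). A hypothetical no-op stub satisfying just the two changed methods could look like this:

// noopStorage is illustrative only; it is not part of this commit.
type noopStorage struct{}

func (noopStorage) Put(_ context.Context, _ common.PutPrm) (common.PutRes, error) {
	// The context is accepted even when unused, to satisfy the interface.
	return common.PutRes{}, nil
}

func (noopStorage) Delete(_ context.Context, _ common.DeletePrm) (common.DeleteRes, error) {
	return common.DeleteRes{}, nil
}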

View File

@ -1,19 +1,31 @@
package blobstor
import (
"context"
"encoding/hex"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
func (b *BlobStor) Delete(prm common.DeletePrm) (common.DeleteRes, error) {
func (b *BlobStor) Delete(ctx context.Context, prm common.DeletePrm) (common.DeleteRes, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "BlobStor.Delete",
trace.WithAttributes(
attribute.String("address", prm.Address.EncodeToString()),
attribute.String("storage_id", hex.EncodeToString(prm.StorageID)),
))
defer span.End()
b.modeMtx.RLock()
defer b.modeMtx.RUnlock()
if prm.StorageID == nil {
for i := range b.storage {
res, err := b.storage[i].Storage.Delete(prm)
res, err := b.storage[i].Storage.Delete(ctx, prm)
if err == nil || !errors.As(err, new(apistatus.ObjectNotFound)) {
if err == nil {
logOp(b.log, deleteOp, prm.Address, b.storage[i].Storage.Type(), prm.StorageID)
@ -31,7 +43,7 @@ func (b *BlobStor) Delete(prm common.DeletePrm) (common.DeleteRes, error) {
st = b.storage[0].Storage
}
res, err := st.Delete(prm)
res, err := st.Delete(ctx, prm)
if err == nil {
logOp(b.log, deleteOp, prm.Address, st.Type(), prm.StorageID)
}

View File

@ -36,7 +36,7 @@ func TestExists(t *testing.T) {
for i := range objects {
var prm common.PutPrm
prm.Object = objects[i]
_, err := b.Put(prm)
_, err := b.Put(context.Background(), prm)
require.NoError(t, err)
}

View File

@ -196,7 +196,13 @@ func (t *FSTree) treePath(addr oid.Address) string {
}
// Delete removes the object with the specified address from the storage.
func (t *FSTree) Delete(prm common.DeletePrm) (common.DeleteRes, error) {
func (t *FSTree) Delete(ctx context.Context, prm common.DeletePrm) (common.DeleteRes, error) {
_, span := tracing.StartSpanFromContext(ctx, "FSTree.Delete",
trace.WithAttributes(
attribute.String("address", prm.Address.EncodeToString()),
))
defer span.End()
if t.readOnly {
return common.DeleteRes{}, common.ErrReadOnly
}
@ -230,7 +236,14 @@ func (t *FSTree) Exists(ctx context.Context, prm common.ExistsPrm) (common.Exist
}
// Put puts an object in the storage.
func (t *FSTree) Put(prm common.PutPrm) (common.PutRes, error) {
func (t *FSTree) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, error) {
_, span := tracing.StartSpanFromContext(ctx, "FSTree.Put",
trace.WithAttributes(
attribute.String("address", prm.Address.EncodeToString()),
attribute.Bool("dont_compress", prm.DontCompress),
))
defer span.End()
if t.readOnly {
return common.PutRes{}, common.ErrReadOnly
}

View File

@ -1,6 +1,7 @@
package blobstortest
import (
"context"
"math/rand"
"testing"
@ -67,7 +68,7 @@ func prepare(t *testing.T, count int, s common.Storage, min, max uint64) []objec
prm.Object = objects[i].obj
prm.RawData = objects[i].raw
putRes, err := s.Put(prm)
putRes, err := s.Put(context.Background(), prm)
require.NoError(t, err)
objects[i].storageID = putRes.StorageID

View File

@ -36,7 +36,7 @@ func TestControl(t *testing.T, cons Constructor, min, max uint64) {
prm.Object = NewObject(min + uint64(rand.Intn(int(max-min+1))))
prm.Address = objectCore.AddressOf(prm.Object)
_, err := s.Put(prm)
_, err := s.Put(context.Background(), prm)
require.ErrorIs(t, err, common.ErrReadOnly)
})
t.Run("delete fails", func(t *testing.T) {
@ -44,7 +44,7 @@ func TestControl(t *testing.T, cons Constructor, min, max uint64) {
prm.Address = objects[0].addr
prm.StorageID = objects[0].storageID
_, err := s.Delete(prm)
_, err := s.Delete(context.Background(), prm)
require.ErrorIs(t, err, common.ErrReadOnly)
})
}

View File

@ -22,7 +22,7 @@ func TestDelete(t *testing.T, cons Constructor, min, max uint64) {
var prm common.DeletePrm
prm.Address = oidtest.Address()
_, err := s.Delete(prm)
_, err := s.Delete(context.Background(), prm)
require.Error(t, err, new(apistatus.ObjectNotFound))
})
@ -31,7 +31,7 @@ func TestDelete(t *testing.T, cons Constructor, min, max uint64) {
prm.Address = objects[0].addr
prm.StorageID = objects[0].storageID
_, err := s.Delete(prm)
_, err := s.Delete(context.Background(), prm)
require.NoError(t, err)
t.Run("exists fail", func(t *testing.T) {
@ -55,7 +55,7 @@ func TestDelete(t *testing.T, cons Constructor, min, max uint64) {
var prm common.DeletePrm
prm.Address = objects[1].addr
_, err := s.Delete(prm)
_, err := s.Delete(context.Background(), prm)
require.NoError(t, err)
})
@ -64,10 +64,10 @@ func TestDelete(t *testing.T, cons Constructor, min, max uint64) {
prm.Address = objects[2].addr
prm.StorageID = objects[2].storageID
_, err := s.Delete(prm)
_, err := s.Delete(context.Background(), prm)
require.NoError(t, err)
_, err = s.Delete(prm)
_, err = s.Delete(context.Background(), prm)
require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
})

View File

@ -1,6 +1,7 @@
package blobstortest
import (
"context"
"errors"
"testing"
@ -22,7 +23,7 @@ func TestIterate(t *testing.T, cons Constructor, min, max uint64) {
var delPrm common.DeletePrm
delPrm.Address = objects[2].addr
delPrm.StorageID = objects[2].storageID
_, err := s.Delete(delPrm)
_, err := s.Delete(context.Background(), delPrm)
require.NoError(t, err)
objects = append(objects[:delID], objects[delID+1:]...)

View File

@ -1,6 +1,7 @@
package blobstor
import (
"context"
"encoding/binary"
"os"
"testing"
@ -63,7 +64,7 @@ func TestIterateObjects(t *testing.T) {
}
for _, v := range mObjs {
_, err := blobStor.Put(common.PutPrm{Address: v.addr, RawData: v.data})
_, err := blobStor.Put(context.Background(), common.PutPrm{Address: v.addr, RawData: v.data})
require.NoError(t, err)
}

View File

@ -91,7 +91,7 @@ func (s *memstoreImpl) Exists(_ context.Context, req common.ExistsPrm) (common.E
return common.ExistsRes{Exists: exists}, nil
}
func (s *memstoreImpl) Put(req common.PutPrm) (common.PutRes, error) {
func (s *memstoreImpl) Put(_ context.Context, req common.PutPrm) (common.PutRes, error) {
if s.readOnly {
return common.PutRes{}, common.ErrReadOnly
}
@ -108,7 +108,7 @@ func (s *memstoreImpl) Put(req common.PutPrm) (common.PutRes, error) {
return common.PutRes{StorageID: []byte(s.rootPath)}, nil
}
func (s *memstoreImpl) Delete(req common.DeletePrm) (common.DeleteRes, error) {
func (s *memstoreImpl) Delete(_ context.Context, req common.DeletePrm) (common.DeleteRes, error) {
if s.readOnly {
return common.DeleteRes{}, common.ErrReadOnly
}

View File

@ -28,7 +28,7 @@ func TestSimpleLifecycle(t *testing.T) {
require.NoError(t, err)
{
_, err := s.Put(common.PutPrm{Address: addr, RawData: d, DontCompress: true})
_, err := s.Put(context.Background(), common.PutPrm{Address: addr, RawData: d, DontCompress: true})
require.NoError(t, err)
}
@ -57,7 +57,7 @@ func TestSimpleLifecycle(t *testing.T) {
}
{
_, err := s.Delete(common.DeletePrm{Address: addr})
_, err := s.Delete(context.Background(), common.DeletePrm{Address: addr})
require.NoError(t, err)
}

View File

@ -114,7 +114,7 @@ func BenchmarkSubstorageReadPerf(b *testing.B) {
if err != nil {
return fmt.Errorf("marshal: %v", err)
}
_, err = st.Put(common.PutPrm{
_, err = st.Put(context.Background(), common.PutPrm{
Address: addr,
RawData: raw,
})
@ -165,7 +165,7 @@ func BenchmarkSubstorageWritePerf(b *testing.B) {
addr := testutil.AddressFromObject(b, obj)
raw, err := obj.Marshal()
require.NoError(b, err)
if _, err := st.Put(common.PutPrm{
if _, err := st.Put(context.Background(), common.PutPrm{
Address: addr,
RawData: raw,
}); err != nil {
@ -202,7 +202,7 @@ func BenchmarkSubstorageIteratePerf(b *testing.B) {
addr := testutil.AddressFromObject(b, obj)
raw, err := obj.Marshal()
require.NoError(b, err)
if _, err := st.Put(common.PutPrm{
if _, err := st.Put(context.Background(), common.PutPrm{
Address: addr,
RawData: raw,
}); err != nil {

View File

@ -1,12 +1,16 @@
package blobstor
import (
"context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// ErrNoPlaceFound is returned when an object can't be saved to any sub-storage component
@ -21,7 +25,14 @@ var ErrNoPlaceFound = logicerr.New("couldn't find a place to store an object")
//
// Returns any error encountered that
// prevented the object from being completely saved.
func (b *BlobStor) Put(prm common.PutPrm) (common.PutRes, error) {
func (b *BlobStor) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "BlobStor.Put",
trace.WithAttributes(
attribute.String("address", prm.Address.EncodeToString()),
attribute.Bool("dont_compress", prm.DontCompress),
))
defer span.End()
b.modeMtx.RLock()
defer b.modeMtx.RUnlock()
@ -39,7 +50,7 @@ func (b *BlobStor) Put(prm common.PutPrm) (common.PutRes, error) {
for i := range b.storage {
if b.storage[i].Policy == nil || b.storage[i].Policy(prm.Object, prm.RawData) {
res, err := b.storage[i].Storage.Put(prm)
res, err := b.storage[i].Storage.Put(ctx, prm)
if err == nil {
logOp(b.log, putOp, prm.Address, b.storage[i].Storage.Type(), res.StorageID)
}

View File

@ -176,27 +176,27 @@ func (s *TestStore) Exists(ctx context.Context, req common.ExistsPrm) (common.Ex
}
}
func (s *TestStore) Put(req common.PutPrm) (common.PutRes, error) {
func (s *TestStore) Put(ctx context.Context, req common.PutPrm) (common.PutRes, error) {
s.mu.RLock()
defer s.mu.RUnlock()
switch {
case s.overrides.Put != nil:
return s.overrides.Put(req)
case s.st != nil:
return s.st.Put(req)
return s.st.Put(ctx, req)
default:
panic(fmt.Sprintf("unexpected storage call: Put(%+v)", req))
}
}
func (s *TestStore) Delete(req common.DeletePrm) (common.DeleteRes, error) {
func (s *TestStore) Delete(ctx context.Context, req common.DeletePrm) (common.DeleteRes, error) {
s.mu.RLock()
defer s.mu.RUnlock()
switch {
case s.overrides.Delete != nil:
return s.overrides.Delete(req)
case s.st != nil:
return s.st.Delete(req)
return s.st.Delete(ctx, req)
default:
panic(fmt.Sprintf("unexpected storage call: Delete(%+v)", req))
}

View File

@ -308,7 +308,7 @@ loop:
e.removeShards(shardsToRemove...)
for _, p := range shardsToReload {
err := p.sh.Reload(p.opts...)
err := p.sh.Reload(ctx, p.opts...)
if err != nil {
e.log.Error(logs.EngineCouldNotReloadAShard,
zap.Stringer("shard id", p.sh.ID()),

View File

@ -204,7 +204,7 @@ func TestExecBlocks(t *testing.T) {
addr := object.AddressOf(obj)
require.NoError(t, Put(e, obj))
require.NoError(t, Put(context.Background(), e, obj))
// block executions
errBlock := errors.New("block exec err")

View File

@ -4,11 +4,14 @@ import (
"context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@ -47,6 +50,13 @@ func (p *DeletePrm) WithForceRemoval() {
// on operations with that object) if WithForceRemoval option has
// been provided.
func (e *StorageEngine) Delete(ctx context.Context, prm DeletePrm) (res DeleteRes, err error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Delete",
trace.WithAttributes(
attribute.String("address", prm.addr.EncodeToString()),
attribute.Bool("force_removal", prm.forceRemoval),
))
defer span.End()
err = e.execIfNotBlocked(func() error {
res, err = e.delete(ctx, prm)
return err
@ -135,7 +145,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo
}
e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
res, err := sh.Select(selectPrm)
res, err := sh.Select(ctx, selectPrm)
if err != nil {
e.log.Warn(logs.EngineErrorDuringSearchingForObjectChildren,
zap.Stringer("addr", addr),

View File

@ -59,9 +59,9 @@ func TestDeleteBigObject(t *testing.T) {
defer e.Close()
for i := range children {
require.NoError(t, Put(e, children[i]))
require.NoError(t, Put(context.Background(), e, children[i]))
}
require.NoError(t, Put(e, link))
require.NoError(t, Put(context.Background(), e, link))
var splitErr *objectSDK.SplitInfoError

View File

@ -60,7 +60,7 @@ func benchmarkExists(b *testing.B, shardNum int) {
addr := oidtest.Address()
for i := 0; i < 100; i++ {
obj := testutil.GenerateObjectWithCID(cidtest.ID())
err := Put(e, obj)
err := Put(context.Background(), e, obj)
if err != nil {
b.Fatal(err)
}
@ -69,7 +69,7 @@ func benchmarkExists(b *testing.B, shardNum int) {
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
ok, err := e.exists(addr)
ok, err := e.exists(context.Background(), addr)
if err != nil || ok {
b.Fatalf("%t %v", ok, err)
}

View File

@ -98,7 +98,7 @@ func TestErrorReporting(t *testing.T) {
var prm shard.PutPrm
prm.SetObject(obj)
te.ng.mtx.RLock()
_, err := te.ng.shards[te.shards[0].id.String()].Shard.Put(prm)
_, err := te.ng.shards[te.shards[0].id.String()].Shard.Put(context.Background(), prm)
te.ng.mtx.RUnlock()
require.NoError(t, err)
@ -132,7 +132,7 @@ func TestErrorReporting(t *testing.T) {
var prm shard.PutPrm
prm.SetObject(obj)
te.ng.mtx.RLock()
_, err := te.ng.shards[te.shards[0].id.String()].Put(prm)
_, err := te.ng.shards[te.shards[0].id.String()].Put(context.Background(), prm)
te.ng.mtx.RUnlock()
require.NoError(t, err)
@ -185,7 +185,7 @@ func TestBlobstorFailback(t *testing.T) {
var prm shard.PutPrm
prm.SetObject(obj)
te.ng.mtx.RLock()
_, err = te.ng.shards[te.shards[0].id.String()].Shard.Put(prm)
_, err = te.ng.shards[te.shards[0].id.String()].Shard.Put(context.Background(), prm)
te.ng.mtx.RUnlock()
require.NoError(t, err)
objs = append(objs, obj)

View File

@ -57,7 +57,7 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng
var putPrm shard.PutPrm
putPrm.SetObject(obj)
_, err := e.shards[sh.String()].Put(putPrm)
_, err := e.shards[sh.String()].Put(context.Background(), putPrm)
require.NoError(t, err)
}
@ -67,7 +67,7 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng
var putPrm PutPrm
putPrm.WithObject(objects[len(objects)-1])
_, err := e.Put(putPrm)
err := e.Put(context.Background(), putPrm)
require.NoError(t, err)
res, err := e.shards[ids[len(ids)-1].String()].List()

View File

@ -10,14 +10,14 @@ import (
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
func (e *StorageEngine) exists(addr oid.Address) (bool, error) {
func (e *StorageEngine) exists(ctx context.Context, addr oid.Address) (bool, error) {
var shPrm shard.ExistsPrm
shPrm.SetAddress(addr)
alreadyRemoved := false
exists := false
e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
res, err := sh.Exists(context.TODO(), shPrm)
res, err := sh.Exists(ctx, shPrm)
if err != nil {
if shard.IsErrRemoved(err) {
alreadyRemoved = true

View File

@ -48,6 +48,12 @@ func (r GetRes) Object() *objectSDK.Object {
//
// Returns an error if executions are blocked (see BlockExecution).
func (e *StorageEngine) Get(ctx context.Context, prm GetPrm) (res GetRes, err error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Get",
trace.WithAttributes(
attribute.String("address", prm.addr.EncodeToString()),
))
defer span.End()
err = e.execIfNotBlocked(func() error {
res, err = e.get(ctx, prm)
return err
@ -57,12 +63,6 @@ func (e *StorageEngine) Get(ctx context.Context, prm GetPrm) (res GetRes, err er
}
func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.get",
trace.WithAttributes(
attribute.String("address", prm.addr.EncodeToString()),
))
defer span.End()
if e.metrics != nil {
defer elapsed(e.metrics.AddGetDuration)()
}

View File

@ -55,11 +55,11 @@ func TestHeadRaw(t *testing.T) {
putPrmLink.SetObject(link)
// put most left object in one shard
_, err := s1.Put(putPrmLeft)
_, err := s1.Put(context.Background(), putPrmLeft)
require.NoError(t, err)
// put link object in another shard
_, err = s2.Put(putPrmLink)
_, err = s2.Put(context.Background(), putPrmLink)
require.NoError(t, err)
// head with raw flag should return SplitInfoError

View File

@ -4,12 +4,15 @@ import (
"context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@ -62,6 +65,9 @@ var errInhumeFailure = errors.New("inhume operation failed")
//
// Returns an error if executions are blocked (see BlockExecution).
func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) (res InhumeRes, err error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Inhume")
defer span.End()
err = e.execIfNotBlocked(func() error {
res, err = e.inhume(ctx, prm)
return err
@ -82,7 +88,7 @@ func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, e
for i := range prm.addrs {
if !prm.forceRemoval {
locked, err := e.IsLocked(prm.addrs[i])
locked, err := e.IsLocked(ctx, prm.addrs[i])
if err != nil {
e.log.Warn(logs.EngineRemovingAnObjectWithoutFullLockingCheck,
zap.Error(err),
@ -181,13 +187,19 @@ func (e *StorageEngine) inhumeAddr(ctx context.Context, addr oid.Address, prm sh
}
// IsLocked checks whether an object is locked according to StorageEngine's state.
func (e *StorageEngine) IsLocked(addr oid.Address) (bool, error) {
func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.IsLocked",
trace.WithAttributes(
attribute.String("address", addr.EncodeToString()),
))
defer span.End()
var locked bool
var err error
var outErr error
e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) {
locked, err = h.Shard.IsLocked(addr)
locked, err = h.Shard.IsLocked(ctx, addr)
if err != nil {
e.reportShardError(h, "can't check object's lockers", err, zap.Stringer("addr", addr))
outErr = err
@ -206,7 +218,7 @@ func (e *StorageEngine) IsLocked(addr oid.Address) (bool, error) {
func (e *StorageEngine) processExpiredTombstones(ctx context.Context, addrs []meta.TombstonedObject) {
e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
sh.HandleExpiredTombstones(addrs)
sh.HandleExpiredTombstones(ctx, addrs)
select {
case <-ctx.Done():

View File

@ -42,7 +42,7 @@ func TestStorageEngine_Inhume(t *testing.T) {
e := testNewEngine(t).setShardsNum(t, 1).engine
defer e.Close()
err := Put(e, parent)
err := Put(context.Background(), e, parent)
require.NoError(t, err)
var inhumePrm InhumePrm
@ -51,7 +51,7 @@ func TestStorageEngine_Inhume(t *testing.T) {
_, err = e.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
addrs, err := Select(e, cnr, fs)
addrs, err := Select(context.Background(), e, cnr, fs)
require.NoError(t, err)
require.Empty(t, addrs)
})
@ -65,12 +65,12 @@ func TestStorageEngine_Inhume(t *testing.T) {
var putChild shard.PutPrm
putChild.SetObject(child)
_, err := s1.Put(putChild)
_, err := s1.Put(context.Background(), putChild)
require.NoError(t, err)
var putLink shard.PutPrm
putLink.SetObject(link)
_, err = s2.Put(putLink)
_, err = s2.Put(context.Background(), putLink)
require.NoError(t, err)
var inhumePrm InhumePrm
@ -79,7 +79,7 @@ func TestStorageEngine_Inhume(t *testing.T) {
_, err = e.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
addrs, err := Select(e, cnr, fs)
addrs, err := Select(context.Background(), e, cnr, fs)
require.NoError(t, err)
require.Empty(t, addrs)
})

View File

@ -1,6 +1,7 @@
package engine
import (
"context"
"errors"
"os"
"sort"
@ -35,7 +36,7 @@ func TestListWithCursor(t *testing.T) {
var prm PutPrm
prm.WithObject(obj)
_, err := e.Put(prm)
err := e.Put(context.Background(), prm)
require.NoError(t, err)
expected = append(expected, object.AddressWithType{Type: objectSDK.TypeRegular, Address: object.AddressOf(obj)})
}

View File

@ -4,12 +4,15 @@ import (
"context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
var errLockFailed = errors.New("lock operation failed")
@ -20,19 +23,27 @@ var errLockFailed = errors.New("lock operation failed")
// Allows locking regular objects only (otherwise returns apistatus.LockNonRegularObject).
//
// Locked list should be unique. Panics if it is empty.
func (e *StorageEngine) Lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
func (e *StorageEngine) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Lock",
trace.WithAttributes(
attribute.String("container_id", idCnr.EncodeToString()),
attribute.String("locker", locker.EncodeToString()),
attribute.Int("locked_count", len(locked)),
))
defer span.End()
return e.execIfNotBlocked(func() error {
return e.lock(idCnr, locker, locked)
return e.lock(ctx, idCnr, locker, locked)
})
}
func (e *StorageEngine) lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
func (e *StorageEngine) lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
for i := range locked {
switch e.lockSingle(idCnr, locker, locked[i], true) {
switch e.lockSingle(ctx, idCnr, locker, locked[i], true) {
case 1:
return logicerr.Wrap(apistatus.LockNonRegularObject{})
case 0:
switch e.lockSingle(idCnr, locker, locked[i], false) {
switch e.lockSingle(ctx, idCnr, locker, locked[i], false) {
case 1:
return logicerr.Wrap(apistatus.LockNonRegularObject{})
case 0:
@ -48,7 +59,7 @@ func (e *StorageEngine) lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error
// - 0: fail
// - 1: locking irregular object
// - 2: ok
func (e *StorageEngine) lockSingle(idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8) {
func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8) {
// code is pretty similar to inhumeAddr, maybe unify?
root := false
var errIrregular apistatus.LockNonRegularObject
@ -70,7 +81,7 @@ func (e *StorageEngine) lockSingle(idCnr cid.ID, locker, locked oid.ID, checkExi
var existsPrm shard.ExistsPrm
existsPrm.SetAddress(addrLocked)
exRes, err := sh.Exists(context.TODO(), existsPrm)
exRes, err := sh.Exists(ctx, existsPrm)
if err != nil {
var siErr *objectSDK.SplitInfoError
if !errors.As(err, &siErr) {
@ -90,7 +101,7 @@ func (e *StorageEngine) lockSingle(idCnr cid.ID, locker, locked oid.ID, checkExi
}
}
err := sh.Lock(idCnr, locker, []oid.ID{locked})
err := sh.Lock(ctx, idCnr, locker, []oid.ID{locked})
if err != nil {
e.reportShardError(sh, "could not lock object in shard", err)

View File

@ -99,7 +99,7 @@ func TestLockUserScenario(t *testing.T) {
id, _ := obj.ID()
objAddr.SetObject(id)
err = Put(e, obj)
err = Put(context.Background(), e, obj)
require.NoError(t, err)
// 2.
@ -107,10 +107,10 @@ func TestLockUserScenario(t *testing.T) {
locker.WriteMembers([]oid.ID{id})
object.WriteLock(lockerObj, locker)
err = Put(e, lockerObj)
err = Put(context.Background(), e, lockerObj)
require.NoError(t, err)
err = e.Lock(cnr, lockerID, []oid.ID{id})
err = e.Lock(context.Background(), cnr, lockerID, []oid.ID{id})
require.NoError(t, err)
// 3.
@ -125,7 +125,7 @@ func TestLockUserScenario(t *testing.T) {
tombObj.SetID(tombForLockID)
tombObj.SetAttributes(a)
err = Put(e, tombObj)
err = Put(context.Background(), e, tombObj)
require.NoError(t, err)
inhumePrm.WithTarget(tombForLockAddr, lockerAddr)
@ -180,7 +180,7 @@ func TestLockExpiration(t *testing.T) {
// 1.
obj := testutil.GenerateObjectWithCID(cnr)
err = Put(e, obj)
err = Put(context.Background(), e, obj)
require.NoError(t, err)
// 2.
@ -192,13 +192,13 @@ func TestLockExpiration(t *testing.T) {
lock.SetType(object.TypeLock)
lock.SetAttributes(a)
err = Put(e, lock)
err = Put(context.Background(), e, lock)
require.NoError(t, err)
id, _ := obj.ID()
idLock, _ := lock.ID()
err = e.Lock(cnr, idLock, []oid.ID{id})
err = e.Lock(context.Background(), cnr, idLock, []oid.ID{id})
require.NoError(t, err)
var inhumePrm InhumePrm
@ -255,20 +255,20 @@ func TestLockForceRemoval(t *testing.T) {
// 1.
obj := testutil.GenerateObjectWithCID(cnr)
err = Put(e, obj)
err = Put(context.Background(), e, obj)
require.NoError(t, err)
// 2.
lock := testutil.GenerateObjectWithCID(cnr)
lock.SetType(object.TypeLock)
err = Put(e, lock)
err = Put(context.Background(), e, lock)
require.NoError(t, err)
id, _ := obj.ID()
idLock, _ := lock.ID()
err = e.Lock(cnr, idLock, []oid.ID{id})
err = e.Lock(context.Background(), cnr, idLock, []oid.ID{id})
require.NoError(t, err)
// 3.

View File

@ -4,6 +4,7 @@ import (
"context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
@ -12,6 +13,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@ -20,9 +23,6 @@ type PutPrm struct {
obj *objectSDK.Object
}
// PutRes groups the resulting values of Put operation.
type PutRes struct{}
var errPutShard = errors.New("could not put object to any shard")
// WithObject is a Put option to set object to save.
@ -40,16 +40,22 @@ func (p *PutPrm) WithObject(obj *objectSDK.Object) {
// Returns an error if executions are blocked (see BlockExecution).
//
// Returns an error of type apistatus.ObjectAlreadyRemoved if the object has been marked as removed.
func (e *StorageEngine) Put(prm PutPrm) (res PutRes, err error) {
func (e *StorageEngine) Put(ctx context.Context, prm PutPrm) (err error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Put",
trace.WithAttributes(
attribute.String("address", object.AddressOf(prm.obj).EncodeToString()),
))
defer span.End()
err = e.execIfNotBlocked(func() error {
res, err = e.put(prm)
err = e.put(ctx, prm)
return err
})
return
}
func (e *StorageEngine) put(prm PutPrm) (PutRes, error) {
func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error {
if e.metrics != nil {
defer elapsed(e.metrics.AddPutDuration)()
}
@ -58,9 +64,9 @@ func (e *StorageEngine) put(prm PutPrm) (PutRes, error) {
// In #1146 this check was parallelized, however, it became
// much slower on fast machines for 4 shards.
_, err := e.exists(addr)
_, err := e.exists(ctx, addr)
if err != nil {
return PutRes{}, err
return err
}
finished := false
@ -74,7 +80,7 @@ func (e *StorageEngine) put(prm PutPrm) (PutRes, error) {
return false
}
putDone, exists := e.putToShard(context.TODO(), sh, ind, pool, addr, prm.obj)
putDone, exists := e.putToShard(ctx, sh, ind, pool, addr, prm.obj)
finished = putDone || exists
return finished
})
@ -83,7 +89,7 @@ func (e *StorageEngine) put(prm PutPrm) (PutRes, error) {
err = errPutShard
}
return PutRes{}, err
return err
}
// putToShard puts object to sh.
@ -117,7 +123,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, ind int,
var toMoveItPrm shard.ToMoveItPrm
toMoveItPrm.SetAddress(addr)
_, err = sh.ToMoveIt(toMoveItPrm)
_, err = sh.ToMoveIt(ctx, toMoveItPrm)
if err != nil {
e.log.Warn(logs.EngineCouldNotMarkObjectForShardRelocation,
zap.Stringer("shard", sh.ID()),
@ -132,7 +138,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, ind int,
var putPrm shard.PutPrm
putPrm.SetObject(obj)
_, err = sh.Put(putPrm)
_, err = sh.Put(ctx, putPrm)
if err != nil {
if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) ||
errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) {
@ -157,11 +163,9 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, ind int,
}
// Put writes provided object to local storage.
func Put(storage *StorageEngine, obj *objectSDK.Object) error {
func Put(ctx context.Context, storage *StorageEngine, obj *objectSDK.Object) error {
var putPrm PutPrm
putPrm.WithObject(obj)
_, err := storage.Put(putPrm)
return err
return storage.Put(ctx, putPrm)
}
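
Since PutRes carried no data, callers of the package-level helper now pass a context and check only the error, as the updated tests in this commit do. For example (ctx, storageEngine, and obj are assumed to be in scope):

if err := engine.Put(ctx, storageEngine, obj); err != nil {
	// handle the storage failure
}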

View File

@ -129,7 +129,7 @@ func (e *StorageEngine) removeObjects(ctx context.Context, ch <-chan oid.Address
var deletePrm shard.DeletePrm
deletePrm.SetAddresses(addr)
_, err = shards[i].Delete(deletePrm)
_, err = shards[i].Delete(ctx, deletePrm)
if err != nil {
return err
}

View File

@ -49,10 +49,10 @@ func TestRebalance(t *testing.T) {
te.ng.mtx.RLock()
// Every 3rd object (i%3 == 0) is put to both shards, others are distributed.
if i%3 != 1 {
_, err1 = te.ng.shards[te.shards[0].id.String()].Shard.Put(prm)
_, err1 = te.ng.shards[te.shards[0].id.String()].Shard.Put(context.Background(), prm)
}
if i%3 != 2 {
_, err2 = te.ng.shards[te.shards[1].id.String()].Shard.Put(prm)
_, err2 = te.ng.shards[te.shards[1].id.String()].Shard.Put(context.Background(), prm)
}
te.ng.mtx.RUnlock()
@ -109,8 +109,8 @@ func TestRebalanceSingleThread(t *testing.T) {
var prm shard.PutPrm
prm.SetObject(obj)
te.ng.mtx.RLock()
_, err1 := te.ng.shards[te.shards[0].id.String()].Shard.Put(prm)
_, err2 := te.ng.shards[te.shards[1].id.String()].Shard.Put(prm)
_, err1 := te.ng.shards[te.shards[0].id.String()].Shard.Put(context.Background(), prm)
_, err2 := te.ng.shards[te.shards[1].id.String()].Shard.Put(context.Background(), prm)
te.ng.mtx.RUnlock()
require.NoError(t, err1)
require.NoError(t, err2)
@ -162,8 +162,8 @@ func TestRebalanceExitByContext(t *testing.T) {
prm.SetObject(objects[i])
te.ng.mtx.RLock()
_, err1 := te.ng.shards[te.shards[0].id.String()].Shard.Put(prm)
_, err2 := te.ng.shards[te.shards[1].id.String()].Shard.Put(prm)
_, err1 := te.ng.shards[te.shards[0].id.String()].Shard.Put(context.Background(), prm)
_, err2 := te.ng.shards[te.shards[1].id.String()].Shard.Put(context.Background(), prm)
te.ng.mtx.RUnlock()
require.NoError(t, err1)

View File

@ -1,11 +1,24 @@
package engine
import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// RestoreShard restores objects from dump to the shard with provided identifier.
//
// Returns an error if shard is not read-only.
func (e *StorageEngine) RestoreShard(id *shard.ID, prm shard.RestorePrm) error {
func (e *StorageEngine) RestoreShard(ctx context.Context, id *shard.ID, prm shard.RestorePrm) error {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.RestoreShard",
trace.WithAttributes(
attribute.String("shard_id", id.String()),
))
defer span.End()
e.mtx.RLock()
defer e.mtx.RUnlock()
@ -14,6 +27,6 @@ func (e *StorageEngine) RestoreShard(id *shard.ID, prm shard.RestorePrm) error {
return errShardNotFound
}
_, err := sh.Restore(prm)
_, err := sh.Restore(ctx, prm)
return err
}

View File

@ -1,10 +1,15 @@
package engine
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// SelectPrm groups the parameters of Select operation.
@ -38,16 +43,22 @@ func (r SelectRes) AddressList() []oid.Address {
// Returns any error encountered that prevented the objects from being completely selected.
//
// Returns an error if executions are blocked (see BlockExecution).
func (e *StorageEngine) Select(prm SelectPrm) (res SelectRes, err error) {
func (e *StorageEngine) Select(ctx context.Context, prm SelectPrm) (res SelectRes, err error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Select",
trace.WithAttributes(
attribute.String("container_id", prm.cnr.EncodeToString()),
))
defer span.End()
err = e.execIfNotBlocked(func() error {
res, err = e._select(prm)
res, err = e._select(ctx, prm)
return err
})
return
}
func (e *StorageEngine) _select(prm SelectPrm) (SelectRes, error) {
func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, error) {
if e.metrics != nil {
defer elapsed(e.metrics.AddSearchDuration)()
}
@ -62,7 +73,7 @@ func (e *StorageEngine) _select(prm SelectPrm) (SelectRes, error) {
shPrm.SetFilters(prm.filters)
e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
res, err := sh.Select(shPrm)
res, err := sh.Select(ctx, shPrm)
if err != nil {
e.reportShardError(sh, "could not select objects from shard", err)
return false
@ -133,12 +144,12 @@ func (e *StorageEngine) list(limit uint64) (SelectRes, error) {
}
// Select selects objects from local storage using provided filters.
func Select(storage *StorageEngine, cnr cid.ID, fs object.SearchFilters) ([]oid.Address, error) {
func Select(ctx context.Context, storage *StorageEngine, cnr cid.ID, fs object.SearchFilters) ([]oid.Address, error) {
var selectPrm SelectPrm
selectPrm.WithContainerID(cnr)
selectPrm.WithFilters(fs)
res, err := storage.Select(selectPrm)
res, err := storage.Select(ctx, selectPrm)
if err != nil {
return nil, err
}

View File

@ -1,6 +1,7 @@
package engine
import (
"context"
"strconv"
"testing"
@ -31,7 +32,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) {
for i := 0; i < objCount; i++ {
obj := testutil.GenerateObjectWithCID(cid)
testutil.AddAttribute(obj, pilorama.AttributeFilename, strconv.Itoa(i))
err := Put(te.ng, obj)
err := Put(context.Background(), te.ng, obj)
if err != nil {
b.Fatal(err)
}
@ -51,7 +52,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) {
prm.WithFilters(fs)
for i := 0; i < b.N; i++ {
res, err := te.ng.Select(prm)
res, err := te.ng.Select(context.Background(), prm)
if err != nil {
b.Fatal(err)
}

View File

@ -1,7 +1,12 @@
package engine
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// FlushWriteCachePrm groups the parameters of FlushWriteCache operation.
@ -26,7 +31,14 @@ func (p *FlushWriteCachePrm) SetIgnoreErrors(ignore bool) {
type FlushWriteCacheRes struct{}
// FlushWriteCache flushes write-cache on a single shard.
func (e *StorageEngine) FlushWriteCache(p FlushWriteCachePrm) (FlushWriteCacheRes, error) {
func (e *StorageEngine) FlushWriteCache(ctx context.Context, p FlushWriteCachePrm) (FlushWriteCacheRes, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.FlushWriteCache",
trace.WithAttributes(
attribute.String("shard)id", p.shardID.String()),
attribute.Bool("ignore_errors", p.ignoreErrors),
))
defer span.End()
e.mtx.RLock()
sh, ok := e.shards[p.shardID.String()]
e.mtx.RUnlock()
@ -38,5 +50,5 @@ func (e *StorageEngine) FlushWriteCache(p FlushWriteCachePrm) (FlushWriteCacheRe
var prm shard.FlushWriteCachePrm
prm.SetIgnoreErrors(p.ignoreErrors)
return FlushWriteCacheRes{}, sh.FlushWriteCache(prm)
return FlushWriteCacheRes{}, sh.FlushWriteCache(ctx, prm)
}

View File

@ -1,6 +1,7 @@
package meta_test
import (
"context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@ -55,6 +56,6 @@ func metaExists(db *meta.DB, addr oid.Address) (bool, error) {
var existsPrm meta.ExistsPrm
existsPrm.SetAddress(addr)
res, err := db.Exists(existsPrm)
res, err := db.Exists(context.Background(), existsPrm)
return res.Exists(), err
}

View File

@ -1,6 +1,7 @@
package meta_test
import (
"context"
"testing"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@ -38,7 +39,7 @@ func TestCounters(t *testing.T) {
for i := 0; i < objCount; i++ {
prm.SetObject(oo[i])
_, err = db.Put(prm)
_, err = db.Put(context.Background(), prm)
require.NoError(t, err)
c, err = db.ObjectCounters()
@ -58,7 +59,7 @@ func TestCounters(t *testing.T) {
for i := objCount - 1; i >= 0; i-- {
prm.SetAddresses(objectcore.AddressOf(oo[i]))
res, err := db.Delete(prm)
res, err := db.Delete(context.Background(), prm)
require.NoError(t, err)
require.Equal(t, uint64(1), res.AvailableObjectsRemoved())
@ -89,7 +90,7 @@ func TestCounters(t *testing.T) {
prm.SetTombstoneAddress(oidtest.Address())
prm.SetAddresses(inhumedObjs...)
res, err := db.Inhume(prm)
res, err := db.Inhume(context.Background(), prm)
require.NoError(t, err)
require.Equal(t, uint64(len(inhumedObjs)), res.AvailableInhumed())
@ -159,7 +160,7 @@ func TestCounters(t *testing.T) {
prm.SetTombstoneAddress(oidtest.Address())
prm.SetAddresses(inhumedObjs...)
_, err = db.Inhume(prm)
_, err = db.Inhume(context.Background(), prm)
require.NoError(t, err)
c, err = db.ObjectCounters()
@ -223,7 +224,7 @@ func TestCounters_Expired(t *testing.T) {
inhumePrm.SetGCMark()
inhumePrm.SetAddresses(oo[0])
inhumeRes, err := db.Inhume(inhumePrm)
inhumeRes, err := db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
require.Equal(t, uint64(1), inhumeRes.AvailableInhumed())
@ -240,7 +241,7 @@ func TestCounters_Expired(t *testing.T) {
var deletePrm meta.DeletePrm
deletePrm.SetAddresses(oo[0])
deleteRes, err := db.Delete(deletePrm)
deleteRes, err := db.Delete(context.Background(), deletePrm)
require.NoError(t, err)
require.Zero(t, deleteRes.AvailableObjectsRemoved())
@ -257,7 +258,7 @@ func TestCounters_Expired(t *testing.T) {
deletePrm.SetAddresses(oo[0])
deleteRes, err = db.Delete(deletePrm)
deleteRes, err = db.Delete(context.Background(), deletePrm)
require.NoError(t, err)
require.Equal(t, uint64(1), deleteRes.AvailableObjectsRemoved())
@ -284,7 +285,7 @@ func putObjs(t *testing.T, db *meta.DB, count int, withParent bool) []*object.Ob
oo = append(oo, o)
prm.SetObject(o)
_, err = db.Put(prm)
_, err = db.Put(context.Background(), prm)
require.NoError(t, err)
c, err := db.ObjectCounters()

View File

@ -2,15 +2,19 @@ package meta
import (
"bytes"
"context"
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// DeletePrm groups the parameters of Delete operation.
@ -65,7 +69,13 @@ type referenceNumber struct {
type referenceCounter map[string]*referenceNumber
// Delete removed object records from metabase indexes.
func (db *DB) Delete(prm DeletePrm) (DeleteRes, error) {
func (db *DB) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
_, span := tracing.StartSpanFromContext(ctx, "metabase.Delete",
trace.WithAttributes(
attribute.Int("addr_count", len(prm.addrs)),
))
defer span.End()
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()

View File

@ -1,6 +1,7 @@
package meta_test
import (
"context"
"errors"
"testing"
@ -139,6 +140,6 @@ func metaDelete(db *meta.DB, addrs ...oid.Address) error {
var deletePrm meta.DeletePrm
deletePrm.SetAddresses(addrs...)
_, err := db.Delete(deletePrm)
_, err := db.Delete(context.Background(), deletePrm)
return err
}

View File

@ -1,15 +1,19 @@
package meta
import (
"context"
"fmt"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// ExistsPrm groups the parameters of Exists operation.
@ -39,7 +43,13 @@ func (p ExistsRes) Exists() bool {
//
// Returns an error of type apistatus.ObjectAlreadyRemoved if object has been placed in graveyard.
// Returns the object.ErrObjectIsExpired if the object is presented but already expired.
func (db *DB) Exists(prm ExistsPrm) (res ExistsRes, err error) {
func (db *DB) Exists(ctx context.Context, prm ExistsPrm) (res ExistsRes, err error) {
_, span := tracing.StartSpanFromContext(ctx, "metabase.Exists",
trace.WithAttributes(
attribute.String("address", prm.addr.EncodeToString()),
))
defer span.End()
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()

View File

@ -1,14 +1,18 @@
package meta
import (
"context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// GetPrm groups the parameters of Get operation.
@ -46,7 +50,14 @@ func (r GetRes) Header() *objectSDK.Object {
// Returns an error of type apistatus.ObjectNotFound if object is missing in DB.
// Returns an error of type apistatus.ObjectAlreadyRemoved if object has been placed in graveyard.
// Returns the object.ErrObjectIsExpired if the object is presented but already expired.
func (db *DB) Get(prm GetPrm) (res GetRes, err error) {
func (db *DB) Get(ctx context.Context, prm GetPrm) (res GetRes, err error) {
_, span := tracing.StartSpanFromContext(ctx, "metabase.Get",
trace.WithAttributes(
attribute.String("address", prm.addr.EncodeToString()),
attribute.Bool("raw", prm.raw),
))
defer span.End()
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
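
Get records both the address and the raw flag on its span. Note that the commit only opens spans; failures still travel back solely through return values. If they should also be visible in the trace viewer, OpenTelemetry spans support RecordError and SetStatus — a hedged sketch of that extension, not something this change does:

package tracingexample

import (
	"context"
	"errors"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/trace"
)

var errNotFound = errors.New("object not found")

// tracedGet marks the span as failed when the lookup errors, so a trace
// viewer highlights the call without log correlation.
func tracedGet(ctx context.Context, addr string, raw bool) error {
	_, span := otel.Tracer("metabase").Start(ctx, "metabase.Get",
		trace.WithAttributes(
			attribute.String("address", addr),
			attribute.Bool("raw", raw),
		))
	defer span.End()

	err := errNotFound // stand-in for the real bbolt lookup
	if err != nil {
		span.RecordError(err)
		span.SetStatus(codes.Error, err.Error())
	}
	return err
}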

View File

@ -2,6 +2,7 @@ package meta_test
import (
"bytes"
"context"
"fmt"
"os"
"runtime"
@ -132,7 +133,7 @@ func TestDB_Get(t *testing.T) {
var prm meta.InhumePrm
prm.SetAddresses(obj)
_, err = db.Inhume(prm)
_, err = db.Inhume(context.Background(), prm)
require.NoError(t, err)
_, err = metaGet(db, obj, false)
require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
@ -216,7 +217,7 @@ func benchmarkGet(b *testing.B, numOfObj int) {
getPrm.SetAddress(addrs[counter%len(addrs)])
counter++
_, err := db.Get(getPrm)
_, err := db.Get(context.Background(), getPrm)
if err != nil {
b.Fatal(err)
}
@ -235,7 +236,7 @@ func benchmarkGet(b *testing.B, numOfObj int) {
var getPrm meta.GetPrm
getPrm.SetAddress(addrs[i%len(addrs)])
_, err := db.Get(getPrm)
_, err := db.Get(context.Background(), getPrm)
if err != nil {
b.Fatal(err)
}
@ -248,6 +249,6 @@ func metaGet(db *meta.DB, addr oid.Address, raw bool) (*objectSDK.Object, error)
prm.SetAddress(addr)
prm.SetRaw(raw)
res, err := db.Get(prm)
res, err := db.Get(context.Background(), prm)
return res.Header(), err
}

View File

@ -1,6 +1,7 @@
package meta_test
import (
"context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@ -68,7 +69,7 @@ func TestDB_Iterate_OffsetNotFound(t *testing.T) {
inhumePrm.SetAddresses(object.AddressOf(obj1))
inhumePrm.SetGCMark()
_, err = db.Inhume(inhumePrm)
_, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
var counter int
@ -138,14 +139,14 @@ func TestDB_IterateDeletedObjects(t *testing.T) {
inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2))
inhumePrm.SetTombstoneAddress(addrTombstone)
_, err = db.Inhume(inhumePrm)
_, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
inhumePrm.SetAddresses(object.AddressOf(obj3), object.AddressOf(obj4))
inhumePrm.SetGCMark()
// inhume with GC mark
_, err = db.Inhume(inhumePrm)
_, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
var (
@ -225,7 +226,7 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
object.AddressOf(obj3), object.AddressOf(obj4))
inhumePrm.SetTombstoneAddress(addrTombstone)
_, err = db.Inhume(inhumePrm)
_, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
expectedGraveyard := []oid.Address{
@ -320,7 +321,7 @@ func TestDB_IterateOverGarbage_Offset(t *testing.T) {
object.AddressOf(obj3), object.AddressOf(obj4))
inhumePrm.SetGCMark()
_, err = db.Inhume(inhumePrm)
_, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
expectedGarbage := []oid.Address{
@ -404,7 +405,7 @@ func TestDB_DropGraves(t *testing.T) {
inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2))
inhumePrm.SetTombstoneAddress(addrTombstone)
_, err = db.Inhume(inhumePrm)
_, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
buriedTS := make([]meta.TombstonedObject, 0)

View File

@ -2,9 +2,11 @@ package meta
import (
"bytes"
"context"
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@ -118,7 +120,10 @@ var ErrLockObjectRemoval = logicerr.New("lock object removal")
//
// NOTE: Marks any object with GC mark (despite any prohibitions on operations
// with that object) if WithForceGCMark option has been provided.
func (db *DB) Inhume(prm InhumePrm) (res InhumeRes, err error) {
func (db *DB) Inhume(ctx context.Context, prm InhumePrm) (res InhumeRes, err error) {
_, span := tracing.StartSpanFromContext(ctx, "metabase.Inhume")
defer span.End()
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()

View File

@ -1,6 +1,7 @@
package meta_test
import (
"context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@ -50,40 +51,40 @@ func TestInhumeTombOnTomb(t *testing.T) {
inhumePrm.SetTombstoneAddress(addr2)
// inhume addr1 via addr2
_, err = db.Inhume(inhumePrm)
_, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
existsPrm.SetAddress(addr1)
// addr1 should become inhumed {addr1:addr2}
_, err = db.Exists(existsPrm)
_, err = db.Exists(context.Background(), existsPrm)
require.ErrorAs(t, err, new(apistatus.ObjectAlreadyRemoved))
inhumePrm.SetAddresses(addr3)
inhumePrm.SetTombstoneAddress(addr1)
// try to inhume addr3 via addr1
_, err = db.Inhume(inhumePrm)
_, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
// record with {addr1:addr2} should be removed from graveyard
// as a tomb-on-tomb; metabase should return ObjectNotFound
// NOT ObjectAlreadyRemoved since that record has been removed
// from graveyard but addr1 is still marked with GC
_, err = db.Exists(existsPrm)
_, err = db.Exists(context.Background(), existsPrm)
require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
existsPrm.SetAddress(addr3)
// addr3 should be inhumed {addr3: addr1}
_, err = db.Exists(existsPrm)
_, err = db.Exists(context.Background(), existsPrm)
require.ErrorAs(t, err, new(apistatus.ObjectAlreadyRemoved))
inhumePrm.SetAddresses(addr1)
inhumePrm.SetTombstoneAddress(oidtest.Address())
// try to inhume addr1 (which is already a tombstone in graveyard)
_, err = db.Inhume(inhumePrm)
_, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
existsPrm.SetAddress(addr1)
@ -91,7 +92,7 @@ func TestInhumeTombOnTomb(t *testing.T) {
// record with addr1 key should not appear in graveyard
// (tomb can not be inhumed) but should be kept as object
// with GC mark
_, err = db.Exists(existsPrm)
_, err = db.Exists(context.Background(), existsPrm)
require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
}
@ -100,13 +101,13 @@ func TestInhumeLocked(t *testing.T) {
locked := oidtest.Address()
err := db.Lock(locked.Container(), oidtest.ID(), []oid.ID{locked.Object()})
err := db.Lock(context.Background(), locked.Container(), oidtest.ID(), []oid.ID{locked.Object()})
require.NoError(t, err)
var prm meta.InhumePrm
prm.SetAddresses(locked)
_, err = db.Inhume(prm)
_, err = db.Inhume(context.Background(), prm)
var e apistatus.ObjectLocked
require.ErrorAs(t, err, &e)
@ -117,6 +118,6 @@ func metaInhume(db *meta.DB, target, tomb oid.Address) error {
inhumePrm.SetAddresses(target)
inhumePrm.SetTombstoneAddress(tomb)
_, err := db.Inhume(inhumePrm)
_, err := db.Inhume(context.Background(), inhumePrm)
return err
}

View File

@ -1,6 +1,7 @@
package meta_test
import (
"context"
"strconv"
"testing"
@ -34,7 +35,7 @@ func TestDB_IterateExpired(t *testing.T) {
expiredLocked := putWithExpiration(t, db, object.TypeRegular, epoch-1)
require.NoError(t, db.Lock(expiredLocked.Container(), oidtest.ID(), []oid.ID{expiredLocked.Object()}))
require.NoError(t, db.Lock(context.Background(), expiredLocked.Container(), oidtest.ID(), []oid.ID{expiredLocked.Object()}))
err := db.IterateExpired(epoch, func(exp *meta.ExpiredObject) error {
if addr, ok := mAlive[exp.Type()]; ok {
@ -81,13 +82,13 @@ func TestDB_IterateCoveredByTombstones(t *testing.T) {
prm.SetAddresses(protected1, protected2, protectedLocked)
prm.SetTombstoneAddress(ts)
_, err = db.Inhume(prm)
_, err = db.Inhume(context.Background(), prm)
require.NoError(t, err)
prm.SetAddresses(garbage)
prm.SetGCMark()
_, err = db.Inhume(prm)
_, err = db.Inhume(context.Background(), prm)
require.NoError(t, err)
var handled []oid.Address
@ -107,7 +108,7 @@ func TestDB_IterateCoveredByTombstones(t *testing.T) {
require.Contains(t, handled, protected2)
require.Contains(t, handled, protectedLocked)
err = db.Lock(protectedLocked.Container(), oidtest.ID(), []oid.ID{protectedLocked.Object()})
err = db.Lock(context.Background(), protectedLocked.Container(), oidtest.ID(), []oid.ID{protectedLocked.Object()})
require.NoError(t, err)
handled = handled[:0]

View File

@ -2,14 +2,18 @@ package meta
import (
"bytes"
"context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
var bucketNameLocked = []byte{lockedPrefix}
@ -30,7 +34,15 @@ func bucketNameLockers(idCnr cid.ID, key []byte) []byte {
// Allows locking regular objects only (otherwise returns apistatus.LockNonRegularObject).
//
// Locked list should be unique. Panics if it is empty.
func (db *DB) Lock(cnr cid.ID, locker oid.ID, locked []oid.ID) error {
func (db *DB) Lock(ctx context.Context, cnr cid.ID, locker oid.ID, locked []oid.ID) error {
_, span := tracing.StartSpanFromContext(ctx, "metabase.Lock",
trace.WithAttributes(
attribute.String("container_id", cnr.EncodeToString()),
attribute.String("locker", locker.EncodeToString()),
attribute.Int("locked_count", len(locked)),
))
defer span.End()
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
@ -266,7 +278,13 @@ func (i IsLockedRes) Locked() bool {
// object is considered non-locked.
//
// Returns only non-logical errors related to underlying database.
func (db *DB) IsLocked(prm IsLockedPrm) (res IsLockedRes, err error) {
func (db *DB) IsLocked(ctx context.Context, prm IsLockedPrm) (res IsLockedRes, err error) {
_, span := tracing.StartSpanFromContext(ctx, "metabase.IsLocked",
trace.WithAttributes(
attribute.String("address", prm.addr.EncodeToString()),
))
defer span.End()
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()

View File

@ -1,6 +1,7 @@
package meta_test
import (
"context"
"testing"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@ -20,8 +21,8 @@ func TestDB_Lock(t *testing.T) {
db := newDB(t)
t.Run("empty locked list", func(t *testing.T) {
require.Panics(t, func() { _ = db.Lock(cnr, oid.ID{}, nil) })
require.Panics(t, func() { _ = db.Lock(cnr, oid.ID{}, []oid.ID{}) })
require.Panics(t, func() { _ = db.Lock(context.Background(), cnr, oid.ID{}, nil) })
require.Panics(t, func() { _ = db.Lock(context.Background(), cnr, oid.ID{}, []oid.ID{}) })
})
t.Run("(ir)regular", func(t *testing.T) {
@ -44,7 +45,7 @@ func TestDB_Lock(t *testing.T) {
id, _ := obj.ID()
// try to lock it
err = db.Lock(cnr, oidtest.ID(), []oid.ID{id})
err = db.Lock(context.Background(), cnr, oidtest.ID(), []oid.ID{id})
if typ == object.TypeRegular {
require.NoError(t, err, typ)
} else {
@ -65,27 +66,27 @@ func TestDB_Lock(t *testing.T) {
// check locking relation
inhumePrm.SetAddresses(objAddr)
_, err := db.Inhume(inhumePrm)
_, err := db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, new(apistatus.ObjectLocked))
inhumePrm.SetTombstoneAddress(oidtest.Address())
_, err = db.Inhume(inhumePrm)
_, err = db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, new(apistatus.ObjectLocked))
// try to remove lock object
inhumePrm.SetAddresses(lockAddr)
_, err = db.Inhume(inhumePrm)
_, err = db.Inhume(context.Background(), inhumePrm)
require.Error(t, err)
// check that locking relation has not been
// dropped
inhumePrm.SetAddresses(objAddr)
_, err = db.Inhume(inhumePrm)
_, err = db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, new(apistatus.ObjectLocked))
inhumePrm.SetTombstoneAddress(oidtest.Address())
_, err = db.Inhume(inhumePrm)
_, err = db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, new(apistatus.ObjectLocked))
})
@ -105,7 +106,7 @@ func TestDB_Lock(t *testing.T) {
inhumePrm.SetForceGCMark()
inhumePrm.SetLockObjectHandling()
res, err := db.Inhume(inhumePrm)
res, err := db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
require.Len(t, res.DeletedLockObjects(), 1)
require.Equal(t, objectcore.AddressOf(lockObj), res.DeletedLockObjects()[0])
@ -117,7 +118,7 @@ func TestDB_Lock(t *testing.T) {
inhumePrm.SetGCMark()
// now we can inhume the object
_, err = db.Inhume(inhumePrm)
_, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
})
@ -134,7 +135,7 @@ func TestDB_Lock(t *testing.T) {
inhumePrm.SetAddresses(objectcore.AddressOf(lockObj))
inhumePrm.SetLockObjectHandling()
res, err := db.Inhume(inhumePrm)
res, err := db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
require.Len(t, res.DeletedLockObjects(), 1)
require.Equal(t, objectcore.AddressOf(lockObj), res.DeletedLockObjects()[0])
@ -151,7 +152,7 @@ func TestDB_Lock(t *testing.T) {
for i := 0; i < objsNum; i++ {
inhumePrm.SetAddresses(objectcore.AddressOf(objs[i]))
res, err = db.Inhume(inhumePrm)
res, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
require.Len(t, res.DeletedLockObjects(), 0)
}
@ -164,7 +165,7 @@ func TestDB_Lock(t *testing.T) {
inhumePrm.SetForceGCMark()
inhumePrm.SetAddresses(objectcore.AddressOf(lockObj))
res, err := db.Inhume(inhumePrm)
res, err := db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
require.Len(t, res.DeletedLockObjects(), 0)
})
@ -184,7 +185,7 @@ func TestDB_Lock_Expired(t *testing.T) {
require.ErrorIs(t, err, meta.ErrObjectIsExpired)
// lock the obj
require.NoError(t, db.Lock(addr.Container(), oidtest.ID(), []oid.ID{addr.Object()}))
require.NoError(t, db.Lock(context.Background(), addr.Container(), oidtest.ID(), []oid.ID{addr.Object()}))
// object is expired but locked, thus, must be available
_, err = metaGet(db, addr, false)
@ -202,7 +203,7 @@ func TestDB_IsLocked(t *testing.T) {
for _, obj := range objs {
prm.SetAddress(objectcore.AddressOf(obj))
res, err := db.IsLocked(prm)
res, err := db.IsLocked(context.Background(), prm)
require.NoError(t, err)
require.True(t, res.Locked())
@ -212,7 +213,7 @@ func TestDB_IsLocked(t *testing.T) {
prm.SetAddress(oidtest.Address())
res, err := db.IsLocked(prm)
res, err := db.IsLocked(context.Background(), prm)
require.NoError(t, err)
require.False(t, res.Locked())
@ -224,12 +225,12 @@ func TestDB_IsLocked(t *testing.T) {
var putPrm meta.PutPrm
putPrm.SetObject(obj)
_, err = db.Put(putPrm)
_, err = db.Put(context.Background(), putPrm)
require.NoError(t, err)
prm.SetAddress(objectcore.AddressOf(obj))
res, err = db.IsLocked(prm)
res, err = db.IsLocked(context.Background(), prm)
require.NoError(t, err)
require.False(t, res.Locked())
@ -260,7 +261,7 @@ func putAndLockObj(t *testing.T, db *meta.DB, numOfLockedObjs int) ([]*object.Ob
err := putBig(db, lockObj)
require.NoError(t, err)
err = db.Lock(cnr, lockID, lockedObjIDs)
err = db.Lock(context.Background(), cnr, lockID, lockedObjIDs)
require.NoError(t, err)
return lockedObjs, lockObj

View File

@ -1,10 +1,14 @@
package meta
import (
"context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// ToMoveItPrm groups the parameters of ToMoveIt operation.
@ -48,7 +52,13 @@ func (p MovableRes) AddressList() []oid.Address {
// ToMoveIt marks objects to be moved into another shard. This is useful for
// faster HRW fetching.
func (db *DB) ToMoveIt(prm ToMoveItPrm) (res ToMoveItRes, err error) {
func (db *DB) ToMoveIt(ctx context.Context, prm ToMoveItPrm) (res ToMoveItRes, err error) {
_, span := tracing.StartSpanFromContext(ctx, "metabase.ToMoveIt",
trace.WithAttributes(
attribute.String("address", prm.addr.EncodeToString()),
))
defer span.End()
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()

View File

@ -1,6 +1,7 @@
package meta_test
import (
"context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@ -61,7 +62,7 @@ func metaToMoveIt(db *meta.DB, addr oid.Address) error {
var toMovePrm meta.ToMoveItPrm
toMovePrm.SetAddress(addr)
_, err := db.ToMoveIt(toMovePrm)
_, err := db.ToMoveIt(context.Background(), toMovePrm)
return err
}

View File

@ -1,11 +1,13 @@
package meta
import (
"context"
"encoding/binary"
"errors"
"fmt"
gio "io"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util"
@ -14,6 +16,8 @@ import (
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/nspcc-dev/neo-go/pkg/io"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
type (
@ -52,7 +56,13 @@ var (
//
// Returns an error of type apistatus.ObjectAlreadyRemoved if object has been placed in graveyard.
// Returns the object.ErrObjectIsExpired if the object is presented but already expired.
func (db *DB) Put(prm PutPrm) (res PutRes, err error) {
func (db *DB) Put(ctx context.Context, prm PutPrm) (res PutRes, err error) {
_, span := tracing.StartSpanFromContext(ctx, "metabase.Put",
trace.WithAttributes(
attribute.String("address", objectCore.AddressOf(prm.obj).EncodeToString()),
))
defer span.End()
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()

View File

@ -1,6 +1,7 @@
package meta_test
import (
"context"
"runtime"
"strconv"
"testing"
@ -117,7 +118,7 @@ func metaPut(db *meta.DB, obj *objectSDK.Object, id []byte) error {
putPrm.SetObject(obj)
putPrm.SetStorageID(id)
_, err := db.Put(putPrm)
_, err := db.Put(context.Background(), putPrm)
return err
}

View File

@ -1,17 +1,21 @@
package meta
import (
"context"
"encoding/binary"
"errors"
"fmt"
"strings"
v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@ -56,7 +60,13 @@ func (r SelectRes) AddressList() []oid.Address {
}
// Select returns list of addresses of objects that match search filters.
func (db *DB) Select(prm SelectPrm) (res SelectRes, err error) {
func (db *DB) Select(ctx context.Context, prm SelectPrm) (res SelectRes, err error) {
_, span := tracing.StartSpanFromContext(ctx, "metabase.Select",
trace.WithAttributes(
attribute.String("container_id", prm.cnr.EncodeToString()),
))
defer span.End()
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
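
Select tags its span with the container being queried. Attributes can also be attached after the work is done, which is handy for result-shaped data such as the number of matched addresses; a purely hypothetical extension of the same span:

package tracingexample

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

// tracedSelect records the container up front, as the commit does, and
// additionally the result size once it is known, via SetAttributes.
func tracedSelect(ctx context.Context, containerID string) ([]string, error) {
	_, span := otel.Tracer("metabase").Start(ctx, "metabase.Select",
		trace.WithAttributes(attribute.String("container_id", containerID)))
	defer span.End()

	addrs := []string{"addr-1"} // stand-in for the real filter walk
	span.SetAttributes(attribute.Int("result_count", len(addrs)))
	return addrs, nil
}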

View File

@ -1,6 +1,7 @@
package meta_test
import (
"context"
"encoding/hex"
"strconv"
"testing"
@ -829,7 +830,7 @@ func benchmarkSelect(b *testing.B, db *meta.DB, cid cidSDK.ID, fs objectSDK.Sear
prm.SetFilters(fs)
for i := 0; i < b.N; i++ {
res, err := db.Select(prm)
res, err := db.Select(context.Background(), prm)
if err != nil {
b.Fatal(err)
}
@ -844,6 +845,6 @@ func metaSelect(db *meta.DB, cnr cidSDK.ID, fs objectSDK.SearchFilters) ([]oid.A
prm.SetFilters(fs)
prm.SetContainerID(cnr)
res, err := db.Select(prm)
res, err := db.Select(context.Background(), prm)
return res.AddressList(), err
}

View File

@ -1,11 +1,15 @@
package meta
import (
"context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/nspcc-dev/neo-go/pkg/util/slice"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// StorageIDPrm groups the parameters of StorageID operation.
@ -30,7 +34,13 @@ func (r StorageIDRes) StorageID() []byte {
// StorageID returns storage descriptor for objects from the blobstor.
// It is put together with the object and makes get/delete operations faster.
func (db *DB) StorageID(prm StorageIDPrm) (res StorageIDRes, err error) {
func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (res StorageIDRes, err error) {
_, span := tracing.StartSpanFromContext(ctx, "metabase.StorageID",
trace.WithAttributes(
attribute.String("address", prm.addr.EncodeToString()),
))
defer span.End()
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()

View File

@ -1,6 +1,7 @@
package meta_test
import (
"context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@ -63,6 +64,6 @@ func metaStorageID(db *meta.DB, addr oid.Address) ([]byte, error) {
var sidPrm meta.StorageIDPrm
sidPrm.SetAddress(addr)
r, err := db.StorageID(sidPrm)
r, err := db.StorageID(context.Background(), sidPrm)
return r.StorageID(), err
}

View File

@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
@ -80,7 +81,10 @@ func (s *Shard) Open() error {
type metabaseSynchronizer Shard
func (x *metabaseSynchronizer) Init() error {
return (*Shard)(x).refillMetabase()
ctx, span := tracing.StartSpanFromContext(context.TODO(), "metabaseSynchronizer.Init")
defer span.End()
return (*Shard)(x).refillMetabase(ctx)
}
// Init initializes all Shard's components.
@ -158,7 +162,7 @@ func (s *Shard) Init(ctx context.Context) error {
return nil
}
func (s *Shard) refillMetabase() error {
func (s *Shard) refillMetabase(ctx context.Context) error {
err := s.metaBase.Reset()
if err != nil {
return fmt.Errorf("could not reset metabase: %w", err)
@ -177,9 +181,9 @@ func (s *Shard) refillMetabase() error {
var err error
switch obj.Type() {
case objectSDK.TypeTombstone:
err = s.refillTombstoneObject(obj)
err = s.refillTombstoneObject(ctx, obj)
case objectSDK.TypeLock:
err = s.refillLockObject(obj)
err = s.refillLockObject(ctx, obj)
default:
}
if err != nil {
@ -190,7 +194,7 @@ func (s *Shard) refillMetabase() error {
mPrm.SetObject(obj)
mPrm.SetStorageID(descriptor)
_, err = s.metaBase.Put(mPrm)
_, err = s.metaBase.Put(ctx, mPrm)
if err != nil && !meta.IsErrRemoved(err) && !errors.Is(err, meta.ErrObjectIsExpired) {
return err
}
@ -209,7 +213,7 @@ func (s *Shard) refillMetabase() error {
return nil
}
func (s *Shard) refillLockObject(obj *objectSDK.Object) error {
func (s *Shard) refillLockObject(ctx context.Context, obj *objectSDK.Object) error {
var lock objectSDK.Lock
if err := lock.Unmarshal(obj.Payload()); err != nil {
return fmt.Errorf("could not unmarshal lock content: %w", err)
@ -220,14 +224,14 @@ func (s *Shard) refillLockObject(obj *objectSDK.Object) error {
cnr, _ := obj.ContainerID()
id, _ := obj.ID()
err := s.metaBase.Lock(cnr, id, locked)
err := s.metaBase.Lock(ctx, cnr, id, locked)
if err != nil {
return fmt.Errorf("could not lock objects: %w", err)
}
return nil
}
func (s *Shard) refillTombstoneObject(obj *objectSDK.Object) error {
func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object) error {
tombstone := objectSDK.NewTombstone()
if err := tombstone.Unmarshal(obj.Payload()); err != nil {
@ -250,7 +254,7 @@ func (s *Shard) refillTombstoneObject(obj *objectSDK.Object) error {
inhumePrm.SetTombstoneAddress(tombAddr)
inhumePrm.SetAddresses(tombMembers...)
_, err := s.metaBase.Inhume(inhumePrm)
_, err := s.metaBase.Inhume(ctx, inhumePrm)
if err != nil {
return fmt.Errorf("could not inhume objects: %w", err)
}
@ -290,7 +294,10 @@ func (s *Shard) Close() error {
// Reload reloads configuration portions that are necessary.
// If a config option is invalid, it logs an error and returns nil.
// If there was a problem with applying new configuration, an error is returned.
func (s *Shard) Reload(opts ...Option) error {
func (s *Shard) Reload(ctx context.Context, opts ...Option) error {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Reload")
defer span.End()
// Do not use defaultCfg here: missing options need not be reloaded.
var c cfg
for i := range opts {
@ -314,7 +321,7 @@ func (s *Shard) Reload(opts ...Option) error {
// Here we refill metabase only if a new instance was opened. This is a feature;
// we don't want to hang for some time just because we forgot to change
// config after the node was updated.
err = s.refillMetabase()
err = s.refillMetabase(ctx)
} else {
err = s.metaBase.Init()
}
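
Two entry shapes appear in this file: metabaseSynchronizer.Init has no context parameter in its interface yet, so it starts a root span from context.TODO(), while Reload derives from the caller's context and hands the result down to refillMetabase. A sketch of both shapes (refill opening its own child span here is for illustration only; the commit's refillMetabase does not):

package tracingexample

import (
	"context"

	"go.opentelemetry.io/otel"
)

// initWithoutCaller mirrors metabaseSynchronizer.Init: no context reaches
// the method, so its span becomes the root of a fresh trace.
func initWithoutCaller() error {
	ctx, span := otel.Tracer("shard").Start(context.TODO(), "metabaseSynchronizer.Init")
	defer span.End()
	return refill(ctx)
}

// reloadWithCaller mirrors Shard.Reload: the span joins the caller's trace
// and the derived context is what refill observes.
func reloadWithCaller(ctx context.Context) error {
	ctx, span := otel.Tracer("shard").Start(ctx, "Shard.Reload")
	defer span.End()
	return refill(ctx)
}

func refill(ctx context.Context) error {
	_, span := otel.Tracer("shard").Start(ctx, "refillMetabase")
	defer span.End()
	return nil
}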

View File

@ -126,6 +126,7 @@ func TestRefillMetabaseCorrupted(t *testing.T) {
}
sh := New(
WithID(NewIDFromBytes([]byte{})),
WithBlobStorOptions(blobOpts...),
WithPiloramaOptions(pilorama.WithPath(filepath.Join(dir, "pilorama"))),
WithMetaBaseOptions(meta.WithPath(filepath.Join(dir, "meta")), meta.WithEpochState(epochState{})))
@ -138,12 +139,12 @@ func TestRefillMetabaseCorrupted(t *testing.T) {
var putPrm PutPrm
putPrm.SetObject(obj)
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
require.NoError(t, sh.Close())
addr := object.AddressOf(obj)
_, err = fsTree.Put(common.PutPrm{Address: addr, RawData: []byte("not an object")})
_, err = fsTree.Put(context.Background(), common.PutPrm{Address: addr, RawData: []byte("not an object")})
require.NoError(t, err)
sh = New(
@ -245,13 +246,13 @@ func TestRefillMetabase(t *testing.T) {
for _, v := range mObjs {
putPrm.SetObject(v.obj)
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
}
putPrm.SetObject(tombObj)
_, err = sh.Put(putPrm)
_, err = sh.Put(context.Background(), putPrm)
require.NoError(t, err)
// LOCK object handling
@ -263,11 +264,11 @@ func TestRefillMetabase(t *testing.T) {
objectSDK.WriteLock(lockObj, lock)
putPrm.SetObject(lockObj)
_, err = sh.Put(putPrm)
_, err = sh.Put(context.Background(), putPrm)
require.NoError(t, err)
lockID, _ := lockObj.ID()
require.NoError(t, sh.Lock(cnrLocked, lockID, locked))
require.NoError(t, sh.Lock(context.Background(), cnrLocked, lockID, locked))
var inhumePrm InhumePrm
inhumePrm.SetTarget(object.AddressOf(tombObj), tombMembers...)
@ -368,7 +369,7 @@ func TestRefillMetabase(t *testing.T) {
checkObj(object.AddressOf(tombObj), nil)
checkTombMembers(false)
err = sh.refillMetabase()
err = sh.refillMetabase(context.Background())
require.NoError(t, err)
c, err = sh.metaBase.ObjectCounters()

View File

@ -1,13 +1,17 @@
package shard
import (
"context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@ -28,14 +32,21 @@ func (p *DeletePrm) SetAddresses(addr ...oid.Address) {
// Delete removes data from the shard's writeCache, metaBase and
// blobStor.
func (s *Shard) Delete(prm DeletePrm) (DeleteRes, error) {
func (s *Shard) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Delete",
trace.WithAttributes(
attribute.String("shard_id", s.ID().String()),
attribute.Int("addr_count", len(prm.addr)),
))
defer span.End()
s.m.RLock()
defer s.m.RUnlock()
return s.delete(prm)
return s.delete(ctx, prm)
}
func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) {
func (s *Shard) delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
if s.info.Mode.ReadOnly() {
return DeleteRes{}, ErrReadOnlyMode
} else if s.info.Mode.NoMetabase() {
@ -48,7 +59,7 @@ func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) {
for i := range prm.addr {
if s.hasWriteCache() {
err := s.writeCache.Delete(prm.addr[i])
err := s.writeCache.Delete(ctx, prm.addr[i])
if err != nil && !IsErrNotFound(err) && !errors.Is(err, writecache.ErrReadOnly) {
s.log.Warn(logs.ShardCantDeleteObjectFromWriteCache, zap.String("error", err.Error()))
}
@ -57,7 +68,7 @@ func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) {
var sPrm meta.StorageIDPrm
sPrm.SetAddress(prm.addr[i])
res, err := s.metaBase.StorageID(sPrm)
res, err := s.metaBase.StorageID(ctx, sPrm)
if err != nil {
s.log.Debug(logs.ShardCantGetStorageIDFromMetabase,
zap.Stringer("object", prm.addr[i]),
@ -74,7 +85,7 @@ func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) {
var delPrm meta.DeletePrm
delPrm.SetAddresses(prm.addr...)
res, err := s.metaBase.Delete(delPrm)
res, err := s.metaBase.Delete(ctx, delPrm)
if err != nil {
return DeleteRes{}, err // stop on metabase error?
}
@ -99,7 +110,7 @@ func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) {
id := smalls[prm.addr[i]]
delPrm.StorageID = id
_, err = s.blobStor.Delete(delPrm)
_, err = s.blobStor.Delete(ctx, delPrm)
if err != nil {
s.log.Debug(logs.ShardCantRemoveObjectFromBlobStor,
zap.Stringer("object_address", prm.addr[i]),

View File

@ -43,13 +43,13 @@ func testShardDelete(t *testing.T, hasWriteCache bool) {
var delPrm shard.DeletePrm
delPrm.SetAddresses(object.AddressOf(obj))
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
_, err = testGet(t, sh, getPrm, hasWriteCache)
require.NoError(t, err)
_, err = sh.Delete(delPrm)
_, err = sh.Delete(context.TODO(), delPrm)
require.NoError(t, err)
_, err = sh.Get(context.Background(), getPrm)
@ -67,13 +67,13 @@ func testShardDelete(t *testing.T, hasWriteCache bool) {
var delPrm shard.DeletePrm
delPrm.SetAddresses(object.AddressOf(obj))
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
_, err = sh.Get(context.Background(), getPrm)
require.NoError(t, err)
_, err = sh.Delete(delPrm)
_, err = sh.Delete(context.Background(), delPrm)
require.NoError(t, err)
_, err = sh.Get(context.Background(), getPrm)

View File

@ -104,7 +104,7 @@ func testDump(t *testing.T, objCount int, hasWriteCache bool) {
var prm shard.PutPrm
prm.SetObject(objects[i])
_, err := sh.Put(prm)
_, err := sh.Put(context.Background(), prm)
require.NoError(t, err)
}
@ -129,13 +129,13 @@ func testDump(t *testing.T, objCount int, hasWriteCache bool) {
t.Run("empty dump", func(t *testing.T) {
var restorePrm shard.RestorePrm
restorePrm.WithPath(outEmpty)
res, err := sh.Restore(restorePrm)
res, err := sh.Restore(context.Background(), restorePrm)
require.NoError(t, err)
require.Equal(t, 0, res.Count())
})
t.Run("invalid path", func(t *testing.T) {
_, err := sh.Restore(*new(shard.RestorePrm))
_, err := sh.Restore(context.Background(), *new(shard.RestorePrm))
require.ErrorIs(t, err, os.ErrNotExist)
})
@ -147,7 +147,7 @@ func testDump(t *testing.T, objCount int, hasWriteCache bool) {
var restorePrm shard.RestorePrm
restorePrm.WithPath(out)
_, err := sh.Restore(restorePrm)
_, err := sh.Restore(context.Background(), restorePrm)
require.ErrorIs(t, err, shard.ErrInvalidMagic)
})
@ -162,7 +162,7 @@ func testDump(t *testing.T, objCount int, hasWriteCache bool) {
var restorePrm shard.RestorePrm
restorePrm.WithPath(out)
_, err := sh.Restore(restorePrm)
_, err := sh.Restore(context.Background(), restorePrm)
require.ErrorIs(t, err, io.ErrUnexpectedEOF)
})
t.Run("incomplete object data", func(t *testing.T) {
@ -173,7 +173,7 @@ func testDump(t *testing.T, objCount int, hasWriteCache bool) {
var restorePrm shard.RestorePrm
restorePrm.WithPath(out)
_, err := sh.Restore(restorePrm)
_, err := sh.Restore(context.Background(), restorePrm)
require.ErrorIs(t, err, io.EOF)
})
t.Run("invalid object", func(t *testing.T) {
@ -184,7 +184,7 @@ func testDump(t *testing.T, objCount int, hasWriteCache bool) {
var restorePrm shard.RestorePrm
restorePrm.WithPath(out)
_, err := sh.Restore(restorePrm)
_, err := sh.Restore(context.Background(), restorePrm)
require.Error(t, err)
t.Run("skip errors", func(t *testing.T) {
@ -195,7 +195,7 @@ func testDump(t *testing.T, objCount int, hasWriteCache bool) {
restorePrm.WithPath(out)
restorePrm.WithIgnoreErrors(true)
res, err := sh.Restore(restorePrm)
res, err := sh.Restore(context.Background(), restorePrm)
require.NoError(t, err)
require.Equal(t, objCount, res.Count())
require.Equal(t, 2, res.FailCount())
@ -208,7 +208,7 @@ func testDump(t *testing.T, objCount int, hasWriteCache bool) {
t.Run("must allow write", func(t *testing.T) {
require.NoError(t, sh.SetMode(mode.ReadOnly))
_, err := sh.Restore(prm)
_, err := sh.Restore(context.Background(), prm)
require.ErrorIs(t, err, shard.ErrReadOnlyMode)
})
@ -234,7 +234,7 @@ func TestStream(t *testing.T) {
var prm shard.PutPrm
prm.SetObject(objects[i])
_, err := sh1.Put(prm)
_, err := sh1.Put(context.Background(), prm)
require.NoError(t, err)
}
@ -269,7 +269,7 @@ func TestStream(t *testing.T) {
}
func checkRestore(t *testing.T, sh *shard.Shard, prm shard.RestorePrm, objects []*objectSDK.Object) {
res, err := sh.Restore(prm)
res, err := sh.Restore(context.Background(), prm)
require.NoError(t, err)
require.Equal(t, len(objects), res.Count())
@ -333,7 +333,7 @@ func TestDumpIgnoreErrors(t *testing.T) {
var prm shard.PutPrm
prm.SetObject(objects[i])
_, err := sh.Put(prm)
_, err := sh.Put(context.Background(), prm)
require.NoError(t, err)
}

View File

@ -3,9 +3,12 @@ package shard
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// ExistsPrm groups the parameters of Exists operation.
@ -36,6 +39,13 @@ func (p ExistsRes) Exists() bool {
// Returns an error of type apistatus.ObjectAlreadyRemoved if object has been marked as removed.
// Returns the object.ErrObjectIsExpired if the object is presented but already expired.
func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Exists",
trace.WithAttributes(
attribute.String("shard_id", s.ID().String()),
attribute.String("address", prm.addr.EncodeToString()),
))
defer span.End()
var exists bool
var err error
@ -54,7 +64,7 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) {
existsPrm.SetAddress(prm.addr)
var res meta.ExistsRes
res, err = s.metaBase.Exists(existsPrm)
res, err = s.metaBase.Exists(ctx, existsPrm)
exists = res.Exists()
}

View File

@ -234,7 +234,7 @@ func (s *Shard) removeGarbage() {
deletePrm.SetAddresses(buf...)
// delete accumulated objects
_, err = s.delete(deletePrm)
_, err = s.delete(context.TODO(), deletePrm)
if err != nil {
s.log.Warn(logs.ShardCouldNotDeleteTheObjects,
zap.String("error", err.Error()),
@ -320,7 +320,7 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address)
inhumePrm.SetGCMark()
// inhume the collected objects
res, err := s.metaBase.Inhume(inhumePrm)
res, err := s.metaBase.Inhume(ctx, inhumePrm)
if err != nil {
s.log.Warn(logs.ShardCouldNotInhumeTheObjects,
zap.String("error", err.Error()),
@ -485,7 +485,7 @@ func (s *Shard) selectExpired(ctx context.Context, epoch uint64, addresses []oid
// and clears up corresponding graveyard records.
//
// Does not modify tss.
func (s *Shard) HandleExpiredTombstones(tss []meta.TombstonedObject) {
func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.TombstonedObject) {
if s.GetMode().NoMetabase() {
return
}
@ -502,7 +502,7 @@ func (s *Shard) HandleExpiredTombstones(tss []meta.TombstonedObject) {
pInhume.SetAddresses(tsAddrs...)
// inhume tombstones
res, err := s.metaBase.Inhume(pInhume)
res, err := s.metaBase.Inhume(ctx, pInhume)
if err != nil {
s.log.Warn(logs.ShardCouldNotMarkTombstonesAsGarbage,
zap.String("error", err.Error()),
@ -547,7 +547,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
pInhume.SetAddresses(lockers...)
pInhume.SetForceGCMark()
res, err := s.metaBase.Inhume(pInhume)
res, err := s.metaBase.Inhume(ctx, pInhume)
if err != nil {
s.log.Warn(logs.ShardFailureToMarkLockersAsGarbage,
zap.String("error", err.Error()),

View File

@ -100,14 +100,14 @@ func Test_GCDropsLockedExpiredObject(t *testing.T) {
var putPrm shard.PutPrm
putPrm.SetObject(obj)
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
err = sh.Lock(cnr, lockID, []oid.ID{objID})
err = sh.Lock(context.Background(), cnr, lockID, []oid.ID{objID})
require.NoError(t, err)
putPrm.SetObject(lock)
_, err = sh.Put(putPrm)
_, err = sh.Put(context.Background(), putPrm)
require.NoError(t, err)
epoch.Value = 105

View File

@ -96,7 +96,7 @@ func (s *Shard) Get(ctx context.Context, prm GetPrm) (GetRes, error) {
}
skipMeta := prm.skipMeta || s.info.Mode.NoMetabase()
obj, hasMeta, err := s.fetchObjectData(prm.addr, skipMeta, cb, wc)
obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc)
return GetRes{
obj: obj,
@ -109,7 +109,7 @@ func (s *Shard) Get(ctx context.Context, prm GetPrm) (GetRes, error) {
var emptyStorageID = make([]byte, 0)
// fetchObjectData looks through writeCache and blobStor to find the object.
func (s *Shard) fetchObjectData(addr oid.Address, skipMeta bool, cb storFetcher, wc func(w writecache.Cache) (*objectSDK.Object, error)) (*objectSDK.Object, bool, error) {
func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta bool, cb storFetcher, wc func(w writecache.Cache) (*objectSDK.Object, error)) (*objectSDK.Object, bool, error) {
var (
mErr error
mRes meta.ExistsRes
@ -118,7 +118,7 @@ func (s *Shard) fetchObjectData(addr oid.Address, skipMeta bool, cb storFetcher,
if !skipMeta {
var mPrm meta.ExistsPrm
mPrm.SetAddress(addr)
mRes, mErr = s.metaBase.Exists(mPrm)
mRes, mErr = s.metaBase.Exists(ctx, mPrm)
if mErr != nil && !s.info.Mode.NoMetabase() {
return nil, false, mErr
}
@ -154,7 +154,7 @@ func (s *Shard) fetchObjectData(addr oid.Address, skipMeta bool, cb storFetcher,
var mPrm meta.StorageIDPrm
mPrm.SetAddress(addr)
mExRes, err := s.metaBase.StorageID(mPrm)
mExRes, err := s.metaBase.StorageID(ctx, mPrm)
if err != nil {
return nil, true, fmt.Errorf("can't fetch blobovnicza id from metabase: %w", err)
}

View File

@ -40,7 +40,7 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
putPrm.SetObject(obj)
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
getPrm.SetAddress(object.AddressOf(obj))
@ -58,7 +58,7 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
putPrm.SetObject(obj)
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
getPrm.SetAddress(object.AddressOf(obj))
@ -86,7 +86,7 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
putPrm.SetObject(child)
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
getPrm.SetAddress(object.AddressOf(child))

View File

@ -73,7 +73,7 @@ func (s *Shard) Head(ctx context.Context, prm HeadPrm) (HeadRes, error) {
headParams.SetRaw(prm.raw)
var res meta.GetRes
res, err = s.metaBase.Get(headParams)
res, err = s.metaBase.Get(ctx, headParams)
obj = res.Header()
}

View File

@ -37,7 +37,7 @@ func testShardHead(t *testing.T, hasWriteCache bool) {
putPrm.SetObject(obj)
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
headPrm.SetAddress(object.AddressOf(obj))
@ -62,7 +62,7 @@ func testShardHead(t *testing.T, hasWriteCache bool) {
putPrm.SetObject(child)
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
headPrm.SetAddress(object.AddressOf(parent))

View File

@ -5,9 +5,12 @@ import (
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@ -62,6 +65,12 @@ var ErrLockObjectRemoval = meta.ErrLockObjectRemoval
//
// Returns ErrReadOnlyMode error if shard is in "read-only" mode.
func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Inhume",
trace.WithAttributes(
attribute.String("shard_id", s.ID().String()),
))
defer span.End()
s.m.RLock()
if s.info.Mode.ReadOnly() {
@ -74,7 +83,7 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
if s.hasWriteCache() {
for i := range prm.target {
_ = s.writeCache.Delete(prm.target[i])
_ = s.writeCache.Delete(ctx, prm.target[i])
}
}
@ -92,7 +101,7 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
metaPrm.SetForceGCMark()
}
res, err := s.metaBase.Inhume(metaPrm)
res, err := s.metaBase.Inhume(ctx, metaPrm)
if err != nil {
if errors.Is(err, meta.ErrLockObjectRemoval) {
s.m.RUnlock()

View File

@ -42,7 +42,7 @@ func testShardInhume(t *testing.T, hasWriteCache bool) {
var getPrm shard.GetPrm
getPrm.SetAddress(object.AddressOf(obj))
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
_, err = testGet(t, sh, getPrm, hasWriteCache)

View File

@ -1,6 +1,7 @@
package shard
import (
"context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@ -85,7 +86,7 @@ func (s *Shard) List() (res SelectRes, err error) {
sPrm.SetContainerID(lst[i])
sPrm.SetFilters(filters)
sRes, err := s.metaBase.Select(sPrm) // consider making List in metabase
sRes, err := s.metaBase.Select(context.TODO(), sPrm) // consider making List in metabase
if err != nil {
s.log.Debug(logs.ShardCantSelectAllObjects,
zap.Stringer("cid", lst[i]),

View File

@ -1,6 +1,7 @@
package shard_test
import (
"context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@ -52,7 +53,7 @@ func testShardList(t *testing.T, sh *shard.Shard) {
putPrm.SetObject(obj)
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
}
}

View File

@ -1,11 +1,15 @@
package shard
import (
"context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// Lock marks objects as locked with another object. All objects from the
@ -14,7 +18,16 @@ import (
// Allows locking regular objects only (otherwise returns apistatus.LockNonRegularObject).
//
// Locked list should be unique. Panics if it is empty.
func (s *Shard) Lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
func (s *Shard) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Lock",
trace.WithAttributes(
attribute.String("shard_id", s.ID().String()),
attribute.String("container_id", idCnr.EncodeToString()),
attribute.String("locker", locker.EncodeToString()),
attribute.Int("locked_count", len(locked)),
))
defer span.End()
s.m.RLock()
defer s.m.RUnlock()
@ -25,7 +38,7 @@ func (s *Shard) Lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
return ErrDegradedMode
}
err := s.metaBase.Lock(idCnr, locker, locked)
err := s.metaBase.Lock(ctx, idCnr, locker, locked)
if err != nil {
return fmt.Errorf("metabase lock: %w", err)
}
@ -35,7 +48,14 @@ func (s *Shard) Lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
// IsLocked checks the locking relation of the provided object. An object that is not found is
// considered not locked. Requires a healthy metabase; returns ErrDegradedMode otherwise.
func (s *Shard) IsLocked(addr oid.Address) (bool, error) {
func (s *Shard) IsLocked(ctx context.Context, addr oid.Address) (bool, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.IsLocked",
trace.WithAttributes(
attribute.String("shard_id", s.ID().String()),
attribute.String("address", addr.EncodeToString()),
))
defer span.End()
m := s.GetMode()
if m.NoMetabase() {
return false, ErrDegradedMode
@ -44,7 +64,7 @@ func (s *Shard) IsLocked(addr oid.Address) (bool, error) {
var prm meta.IsLockedPrm
prm.SetAddress(addr)
res, err := s.metaBase.IsLocked(prm)
res, err := s.metaBase.IsLocked(ctx, prm)
if err != nil {
return false, err
}
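
The shard layer consistently keeps the derived context ("ctx, span := ...") because it has traced callees, while the metabase layer discards it ("_, span := ...") as the leaf of the instrumented chain. A compact sketch of why that split produces nested spans:

package tracingexample

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

// shardLock passes the span-carrying context down, so "metabase.Lock"
// appears as a child of "Shard.Lock" in the resulting trace.
func shardLock(ctx context.Context, containerID string) error {
	ctx, span := otel.Tracer("shard").Start(ctx, "Shard.Lock",
		trace.WithAttributes(attribute.String("container_id", containerID)))
	defer span.End()
	return metabaseLock(ctx, containerID)
}

// metabaseLock is a leaf: nothing below it is traced, so the derived
// context is dropped.
func metabaseLock(ctx context.Context, containerID string) error {
	_, span := otel.Tracer("metabase").Start(ctx, "metabase.Lock",
		trace.WithAttributes(attribute.String("container_id", containerID)))
	defer span.End()
	return nil // stand-in for the bbolt update
}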

View File

@ -76,16 +76,16 @@ func TestShard_Lock(t *testing.T) {
var putPrm shard.PutPrm
putPrm.SetObject(obj)
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
// lock the object
err = sh.Lock(cnr, lockID, []oid.ID{objID})
err = sh.Lock(context.Background(), cnr, lockID, []oid.ID{objID})
require.NoError(t, err)
putPrm.SetObject(lock)
_, err = sh.Put(putPrm)
_, err = sh.Put(context.Background(), putPrm)
require.NoError(t, err)
t.Run("inhuming locked objects", func(t *testing.T) {
@ -158,21 +158,21 @@ func TestShard_IsLocked(t *testing.T) {
var putPrm shard.PutPrm
putPrm.SetObject(obj)
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
// not locked object is not locked
locked, err := sh.IsLocked(objectcore.AddressOf(obj))
locked, err := sh.IsLocked(context.Background(), objectcore.AddressOf(obj))
require.NoError(t, err)
require.False(t, locked)
// locked object is locked
require.NoError(t, sh.Lock(cnrID, lockID, []oid.ID{objID}))
require.NoError(t, sh.Lock(context.Background(), cnrID, lockID, []oid.ID{objID}))
locked, err = sh.IsLocked(objectcore.AddressOf(obj))
locked, err = sh.IsLocked(context.Background(), objectcore.AddressOf(obj))
require.NoError(t, err)
require.True(t, locked)

View File

@ -109,7 +109,7 @@ func TestCounters(t *testing.T) {
for i := 0; i < objNumber; i++ {
prm.SetObject(oo[i])
_, err := sh.Put(prm)
_, err := sh.Put(context.Background(), prm)
require.NoError(t, err)
}
@ -168,7 +168,7 @@ func TestCounters(t *testing.T) {
deletedNumber := int(phy / 4)
prm.SetAddresses(addrFromObjs(oo[:deletedNumber])...)
_, err := sh.Delete(prm)
_, err := sh.Delete(context.Background(), prm)
require.NoError(t, err)
require.Equal(t, phy-uint64(deletedNumber), mm.objCounters[physical])
@ -207,6 +207,7 @@ func shardWithMetrics(t *testing.T, path string) (*shard.Shard, *metricsStore) {
}
sh := shard.New(
shard.WithID(shard.NewIDFromBytes([]byte{})),
shard.WithBlobStorOptions(blobOpts...),
shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(path, "pilorama"))),
shard.WithMetaBaseOptions(

View File

@ -1,9 +1,14 @@
package shard
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@ -23,7 +28,14 @@ func (p *ToMoveItPrm) SetAddress(addr oid.Address) {
// ToMoveIt calls the metabase.ToMoveIt method to mark an object as relocatable to
// another shard.
func (s *Shard) ToMoveIt(prm ToMoveItPrm) (ToMoveItRes, error) {
func (s *Shard) ToMoveIt(ctx context.Context, prm ToMoveItPrm) (ToMoveItRes, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.ToMoveIt",
trace.WithAttributes(
attribute.String("shard_id", s.ID().String()),
attribute.String("address", prm.addr.EncodeToString()),
))
defer span.End()
s.m.RLock()
defer s.m.RUnlock()
@ -37,7 +49,7 @@ func (s *Shard) ToMoveIt(prm ToMoveItPrm) (ToMoveItRes, error) {
var toMovePrm meta.ToMoveItPrm
toMovePrm.SetAddress(prm.addr)
_, err := s.metaBase.ToMoveIt(toMovePrm)
_, err := s.metaBase.ToMoveIt(ctx, toMovePrm)
if err != nil {
s.log.Debug(logs.ShardCouldNotMarkObjectForShardRelocationInMetabase,
zap.String("error", err.Error()),

View File

@ -1,13 +1,17 @@
package shard
import (
"context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@ -30,7 +34,14 @@ func (p *PutPrm) SetObject(obj *object.Object) {
// did not allow the object to be saved completely.
//
// Returns ErrReadOnlyMode error if shard is in "read-only" mode.
func (s *Shard) Put(prm PutPrm) (PutRes, error) {
func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Put",
trace.WithAttributes(
attribute.String("shard_id", s.ID().String()),
attribute.String("address", objectCore.AddressOf(prm.obj).EncodeToString()),
))
defer span.End()
s.m.RLock()
defer s.m.RUnlock()
@ -55,7 +66,7 @@ func (s *Shard) Put(prm PutPrm) (PutRes, error) {
// ahead of `Put` by storage engine
tryCache := s.hasWriteCache() && !m.NoMetabase()
if tryCache {
res, err = s.writeCache.Put(putPrm)
res, err = s.writeCache.Put(ctx, putPrm)
}
if err != nil || !tryCache {
if err != nil {
@ -63,7 +74,7 @@ func (s *Shard) Put(prm PutPrm) (PutRes, error) {
zap.String("err", err.Error()))
}
res, err = s.blobStor.Put(putPrm)
res, err = s.blobStor.Put(ctx, putPrm)
if err != nil {
return PutRes{}, fmt.Errorf("could not put object to BLOB storage: %w", err)
}
@ -73,7 +84,7 @@ func (s *Shard) Put(prm PutPrm) (PutRes, error) {
var pPrm meta.PutPrm
pPrm.SetObject(prm.obj)
pPrm.SetStorageID(res.StorageID)
if _, err := s.metaBase.Put(pPrm); err != nil {
if _, err := s.metaBase.Put(ctx, pPrm); err != nil {
// maybe we need to handle this case in a special way
// since the object has been successfully written to BlobStor
return PutRes{}, fmt.Errorf("could not put object to metabase: %w", err)

View File

@ -123,7 +123,7 @@ func (s *Shard) GetRange(ctx context.Context, prm RngPrm) (RngRes, error) {
}
skipMeta := prm.skipMeta || s.info.Mode.NoMetabase()
obj, hasMeta, err := s.fetchObjectData(prm.addr, skipMeta, cb, wc)
obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc)
return RngRes{
obj: obj,

View File

@ -99,7 +99,7 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) {
var putPrm shard.PutPrm
putPrm.SetObject(obj)
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
var rngPrm shard.RngPrm

View File

@ -44,6 +44,7 @@ func TestShardReload(t *testing.T) {
meta.WithEpochState(epochState{})}
opts := []Option{
WithID(NewIDFromBytes([]byte{})),
WithLogger(l),
WithBlobStorOptions(blobOpts...),
WithMetaBaseOptions(metaOpts...),
@ -75,7 +76,7 @@ func TestShardReload(t *testing.T) {
checkHasObjects(t, true)
t.Run("same config, no-op", func(t *testing.T) {
require.NoError(t, sh.Reload(opts...))
require.NoError(t, sh.Reload(context.Background(), opts...))
checkHasObjects(t, true)
})
@ -86,7 +87,7 @@ func TestShardReload(t *testing.T) {
}
newOpts := newShardOpts(filepath.Join(p, "meta1"), false)
require.NoError(t, sh.Reload(newOpts...))
require.NoError(t, sh.Reload(context.Background(), newOpts...))
checkHasObjects(t, false) // new path, but no resync
@ -97,7 +98,7 @@ func TestShardReload(t *testing.T) {
})
newOpts = newShardOpts(filepath.Join(p, "meta2"), true)
require.NoError(t, sh.Reload(newOpts...))
require.NoError(t, sh.Reload(context.Background(), newOpts...))
checkHasObjects(t, true) // all objects are restored, including the new one
@ -106,7 +107,7 @@ func TestShardReload(t *testing.T) {
require.NoError(t, os.WriteFile(badPath, []byte{1}, 0))
newOpts = newShardOpts(badPath, true)
require.Error(t, sh.Reload(newOpts...))
require.Error(t, sh.Reload(context.Background(), newOpts...))
// Cleanup is done, no panic.
obj := newObject()
@ -117,7 +118,7 @@ func TestShardReload(t *testing.T) {
// Successive reload produces no undesired effects.
require.NoError(t, os.RemoveAll(badPath))
require.NoError(t, sh.Reload(newOpts...))
require.NoError(t, sh.Reload(context.Background(), newOpts...))
obj = newObject()
require.NoError(t, putObject(sh, obj))
@ -132,7 +133,7 @@ func putObject(sh *Shard, obj *objectSDK.Object) error {
var prm PutPrm
prm.SetObject(obj)
_, err := sh.Put(prm)
_, err := sh.Put(context.Background(), prm)
return err
}

View File

@ -2,13 +2,17 @@ package shard
import (
"bytes"
"context"
"encoding/binary"
"errors"
"io"
"os"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// ErrInvalidMagic is returned when dump format is invalid.
@ -57,8 +61,15 @@ func (r RestoreRes) FailCount() int {
// Restore restores objects from the dump prepared by Dump.
//
// Returns any error encountered.
func (s *Shard) Restore(prm RestorePrm) (RestoreRes, error) {
// Disallow changing mode during restore.
func (s *Shard) Restore(ctx context.Context, prm RestorePrm) (RestoreRes, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Restore",
trace.WithAttributes(
attribute.String("shard_id", s.ID().String()),
attribute.String("path", prm.path),
attribute.Bool("ignore_errors", prm.ignoreErrors),
))
defer span.End()
s.m.RLock()
defer s.m.RUnlock()
@ -122,7 +133,7 @@ func (s *Shard) Restore(prm RestorePrm) (RestoreRes, error) {
}
putPrm.SetObject(obj)
_, err = s.Put(putPrm)
_, err = s.Put(ctx, putPrm)
if err != nil && !IsErrObjectExpired(err) && !IsErrRemoved(err) {
return RestoreRes{}, err
}

View File

@ -1,12 +1,16 @@
package shard
import (
"context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// SelectPrm groups the parameters of Select operation.
@ -39,7 +43,14 @@ func (r SelectRes) AddressList() []oid.Address {
//
// Returns any error encountered that
// did not allow the objects to be selected completely.
func (s *Shard) Select(prm SelectPrm) (SelectRes, error) {
func (s *Shard) Select(ctx context.Context, prm SelectPrm) (SelectRes, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Select",
trace.WithAttributes(
attribute.String("shard_id", s.ID().String()),
attribute.String("container_id", prm.cnr.EncodeToString()),
))
defer span.End()
s.m.RLock()
defer s.m.RUnlock()
@ -51,7 +62,7 @@ func (s *Shard) Select(prm SelectPrm) (SelectRes, error) {
selectPrm.SetFilters(prm.filters)
selectPrm.SetContainerID(prm.cnr)
mRes, err := s.metaBase.Select(selectPrm)
mRes, err := s.metaBase.Select(ctx, selectPrm)
if err != nil {
return SelectRes{}, fmt.Errorf("could not select objects from metabase: %w", err)
}
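
With the context threaded through, a single request now yields a nested trace: the engine span, the shard span added above, then the metabase span. A hedged caller sketch (the shard-level setter names are assumed from the prm.cnr and prm.filters fields used above):

// Illustrative: selecting addresses from one shard under the caller's trace.
func selectAddresses(ctx context.Context, sh *Shard, cnr cid.ID, fs object.SearchFilters) ([]oid.Address, error) {
	var prm SelectPrm
	prm.SetContainerID(cnr) // assumed setter for the prm.cnr field
	prm.SetFilters(fs)      // assumed setter for the prm.filters field

	res, err := sh.Select(ctx, prm) // "Shard.Select" nests under the caller's span
	if err != nil {
		return nil, err
	}
	return res.AddressList(), nil
}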

View File

@ -43,7 +43,7 @@ func TestWriteCacheObjectLoss(t *testing.T) {
for i := range objects {
putPrm.SetObject(objects[i])
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
}
require.NoError(t, sh.Close())

View File

@ -1,7 +1,12 @@
package shard
import (
"context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// FlushWriteCachePrm represents parameters of a `FlushWriteCache` operation.
@ -19,7 +24,14 @@ func (p *FlushWriteCachePrm) SetIgnoreErrors(ignore bool) {
var errWriteCacheDisabled = errors.New("write-cache is disabled")
// FlushWriteCache flushes all data from the write-cache.
func (s *Shard) FlushWriteCache(p FlushWriteCachePrm) error {
func (s *Shard) FlushWriteCache(ctx context.Context, p FlushWriteCachePrm) error {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.FlushWriteCache",
trace.WithAttributes(
attribute.String("shard_id", s.ID().String()),
attribute.Bool("ignore_errors", p.ignoreErrors),
))
defer span.End()
if !s.hasWriteCache() {
return errWriteCacheDisabled
}
@ -35,5 +47,5 @@ func (s *Shard) FlushWriteCache(p FlushWriteCachePrm) error {
return ErrDegradedMode
}
return s.writeCache.Flush(p.ignoreErrors)
return s.writeCache.Flush(ctx, p.ignoreErrors)
}
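
FlushWriteCache now rejects the call before doing any work when the cache is disabled or the shard is degraded, and only then delegates with the traced context. A hypothetical in-package caller (errWriteCacheDisabled is unexported, so this sketch lives in package shard):

// Illustrative: flushing a shard's write-cache from an admin handler,
// reusing the request context so the flush is visible in its trace.
func flushShardCache(ctx context.Context, sh *Shard, ignoreErrors bool) error {
	var prm FlushWriteCachePrm
	prm.SetIgnoreErrors(ignoreErrors)

	switch err := sh.FlushWriteCache(ctx, prm); {
	case errors.Is(err, errWriteCacheDisabled):
		return nil // nothing to flush on this shard
	default:
		return err
	}
}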

View File

@ -1,16 +1,27 @@
package writecache
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// Delete removes object from write-cache.
//
// Returns an error of type apistatus.ObjectNotFound if the object is missing from the write-cache.
func (c *cache) Delete(addr oid.Address) error {
func (c *cache) Delete(ctx context.Context, addr oid.Address) error {
ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Delete",
trace.WithAttributes(
attribute.String("address", addr.EncodeToString()),
))
defer span.End()
c.modeMtx.RLock()
defer c.modeMtx.RUnlock()
if c.readOnly() {
@ -45,7 +56,7 @@ func (c *cache) Delete(addr oid.Address) error {
return nil
}
_, err := c.fsTree.Delete(common.DeletePrm{Address: addr})
_, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr})
if err == nil {
storagelog.Write(c.log,
storagelog.AddressField(saddr),
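
Delete keeps the existing guard order (mode lock, read-only check) and simply forwards the context into the FSTree so the file removal is traced. A hypothetical idempotent eviction helper built on top of it, assuming the Cache interface picks up the new signature and apistatus.ObjectNotFound as documented above:

// Illustrative: treat "not found" as success so eviction can be retried.
func evict(ctx context.Context, c Cache, addr oid.Address) error {
	err := c.Delete(ctx, addr)
	if err != nil && errors.As(err, new(apistatus.ObjectNotFound)) {
		return nil // already gone from the write-cache
	}
	return err
}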

View File

@ -2,9 +2,11 @@ package writecache
import (
"bytes"
"context"
"errors"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
@ -15,6 +17,8 @@ import (
"github.com/mr-tron/base58"
"github.com/nspcc-dev/neo-go/pkg/util/slice"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@ -37,7 +41,7 @@ func (c *cache) runFlushLoop() {
}
c.wg.Add(1)
go c.flushBigObjects()
go c.flushBigObjects(context.TODO())
c.wg.Add(1)
go func() {
@ -141,7 +145,7 @@ func (c *cache) flushDB() {
}
}
func (c *cache) flushBigObjects() {
func (c *cache) flushBigObjects(ctx context.Context) {
defer c.wg.Done()
tick := time.NewTicker(defaultFlushInterval * 10)
@ -157,7 +161,7 @@ func (c *cache) flushBigObjects() {
continue
}
_ = c.flushFSTree(true)
_ = c.flushFSTree(ctx, true)
c.modeMtx.RUnlock()
case <-c.closeCh:
@ -176,7 +180,7 @@ func (c *cache) reportFlushError(msg string, addr string, err error) {
}
}
func (c *cache) flushFSTree(ignoreErrors bool) error {
func (c *cache) flushFSTree(ctx context.Context, ignoreErrors bool) error {
var prm common.IteratePrm
prm.IgnoreErrors = ignoreErrors
prm.LazyHandler = func(addr oid.Address, f func() ([]byte, error)) error {
@ -205,7 +209,7 @@ func (c *cache) flushFSTree(ignoreErrors bool) error {
return err
}
err = c.flushObject(&obj, data)
err = c.flushObject(ctx, &obj, data)
if err != nil {
if ignoreErrors {
return nil
@ -236,7 +240,7 @@ func (c *cache) flushWorker(_ int) {
return
}
err := c.flushObject(obj, nil)
err := c.flushObject(context.TODO(), obj, nil)
if err == nil {
c.flushed.Add(objectCore.AddressOf(obj).EncodeToString(), true)
}
@ -244,14 +248,14 @@ func (c *cache) flushWorker(_ int) {
}
// flushObject is used to write object directly to the main storage.
func (c *cache) flushObject(obj *object.Object, data []byte) error {
func (c *cache) flushObject(ctx context.Context, obj *object.Object, data []byte) error {
addr := objectCore.AddressOf(obj)
var prm common.PutPrm
prm.Object = obj
prm.RawData = data
res, err := c.blobstor.Put(prm)
res, err := c.blobstor.Put(ctx, prm)
if err != nil {
if !errors.Is(err, common.ErrNoSpace) && !errors.Is(err, common.ErrReadOnly) &&
!errors.Is(err, blobstor.ErrNoPlaceFound) {
@ -276,15 +280,21 @@ func (c *cache) flushObject(obj *object.Object, data []byte) error {
// Flush flushes all objects from the write-cache to the main storage.
// Write-cache must be in readonly mode to ensure correctness of the operation and
// to prevent interference with background flush workers.
func (c *cache) Flush(ignoreErrors bool) error {
func (c *cache) Flush(ctx context.Context, ignoreErrors bool) error {
ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Flush",
trace.WithAttributes(
attribute.Bool("ignore_errors", ignoreErrors),
))
defer span.End()
c.modeMtx.RLock()
defer c.modeMtx.RUnlock()
return c.flush(ignoreErrors)
return c.flush(ctx, ignoreErrors)
}
func (c *cache) flush(ignoreErrors bool) error {
if err := c.flushFSTree(ignoreErrors); err != nil {
func (c *cache) flush(ctx context.Context, ignoreErrors bool) error {
if err := c.flushFSTree(ctx, ignoreErrors); err != nil {
return err
}
@ -316,7 +326,7 @@ func (c *cache) flush(ignoreErrors bool) error {
return err
}
if err := c.flushObject(&obj, data); err != nil {
if err := c.flushObject(ctx, &obj, data); err != nil {
return err
}
}
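
The two context.TODO() call sites above mark background goroutines that have no request context to inherit; they are placeholders for follow-up wiring rather than a final state. One way such a loop can own a cancellable root context, sketched with the standard library only (this wiring is illustrative, not from this PR):

package main

import (
	"context"
	"time"
)

// flusher shows one way a background flush loop can own a root context:
// derive it once at startup and cancel it on stop, so every span and
// storage call started by the loop is released on shutdown.
type flusher struct {
	cancel context.CancelFunc
	done   chan struct{}
}

func (f *flusher) start(flush func(context.Context)) {
	ctx, cancel := context.WithCancel(context.Background())
	f.cancel = cancel
	f.done = make(chan struct{})

	go func() {
		defer close(f.done)
		tick := time.NewTicker(time.Second)
		defer tick.Stop()
		for {
			select {
			case <-tick.C:
				flush(ctx) // the context flows into storage calls and their spans
			case <-ctx.Done():
				return
			}
		}
	}()
}

func (f *flusher) stop() {
	f.cancel()
	<-f.done
}

func main() {
	var f flusher
	f.start(func(ctx context.Context) {})
	f.stop()
}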

View File

@ -89,7 +89,7 @@ func TestFlush(t *testing.T) {
var mPrm meta.StorageIDPrm
mPrm.SetAddress(objects[i].addr)
mRes, err := mb.StorageID(mPrm)
mRes, err := mb.StorageID(context.Background(), mPrm)
require.NoError(t, err)
var prm common.GetPrm
@ -112,12 +112,12 @@ func TestFlush(t *testing.T) {
wc.(*cache).flushed.Add(objects[0].addr.EncodeToString(), true)
wc.(*cache).flushed.Add(objects[1].addr.EncodeToString(), false)
require.NoError(t, wc.Flush(false))
require.NoError(t, wc.Flush(context.Background(), false))
for i := 0; i < 2; i++ {
var mPrm meta.GetPrm
mPrm.SetAddress(objects[i].addr)
_, err := mb.Get(mPrm)
_, err := mb.Get(context.Background(), mPrm)
require.Error(t, err)
_, err = bs.Get(context.Background(), common.GetPrm{Address: objects[i].addr})
@ -147,7 +147,7 @@ func TestFlush(t *testing.T) {
for i := 0; i < 2; i++ {
var mPrm meta.GetPrm
mPrm.SetAddress(objects[i].addr)
_, err := mb.Get(mPrm)
_, err := mb.Get(context.Background(), mPrm)
require.Error(t, err)
_, err = bs.Get(context.Background(), common.GetPrm{Address: objects[i].addr})
@ -171,9 +171,9 @@ func TestFlush(t *testing.T) {
require.NoError(t, mb.SetMode(mode.ReadWrite))
require.Equal(t, uint32(0), errCount.Load())
require.Error(t, wc.Flush(false))
require.Error(t, wc.Flush(context.Background(), false))
require.True(t, errCount.Load() > 0)
require.NoError(t, wc.Flush(true))
require.NoError(t, wc.Flush(context.Background(), true))
check(t, mb, bs, objects)
}
@ -202,7 +202,7 @@ func TestFlush(t *testing.T) {
prm.Address = objectCore.AddressOf(obj)
prm.RawData = data
_, err := c.fsTree.Put(prm)
_, err := c.fsTree.Put(context.Background(), prm)
require.NoError(t, err)
p := prm.Address.Object().EncodeToString() + "." + prm.Address.Container().EncodeToString()
@ -218,7 +218,7 @@ func TestFlush(t *testing.T) {
var prm common.PutPrm
prm.Address = oidtest.Address()
prm.RawData = []byte{1, 2, 3}
_, err := c.fsTree.Put(prm)
_, err := c.fsTree.Put(context.Background(), prm)
require.NoError(t, err)
})
})
@ -245,19 +245,19 @@ func TestFlush(t *testing.T) {
for i := range objects {
var prm meta.PutPrm
prm.SetObject(objects[i].obj)
_, err := mb.Put(prm)
_, err := mb.Put(context.Background(), prm)
require.NoError(t, err)
}
var inhumePrm meta.InhumePrm
inhumePrm.SetAddresses(objects[0].addr, objects[1].addr)
inhumePrm.SetTombstoneAddress(oidtest.Address())
_, err := mb.Inhume(inhumePrm)
_, err := mb.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
var deletePrm meta.DeletePrm
deletePrm.SetAddresses(objects[2].addr, objects[3].addr)
_, err = mb.Delete(deletePrm)
_, err = mb.Delete(context.Background(), deletePrm)
require.NoError(t, err)
require.NoError(t, bs.SetMode(mode.ReadOnly))
@ -294,7 +294,7 @@ func putObject(t *testing.T, c Cache, size int) objectPair {
prm.Object = obj
prm.RawData = data
_, err := c.Put(prm)
_, err := c.Put(context.Background(), prm)
require.NoError(t, err)
return objectPair{prm.Address, prm.Object}

View File

@ -15,21 +15,21 @@ import (
"go.uber.org/zap"
)
func (c *cache) initFlushMarks() {
func (c *cache) initFlushMarks(ctx context.Context) {
var localWG sync.WaitGroup
localWG.Add(1)
go func() {
defer localWG.Done()
c.fsTreeFlushMarkUpdate()
c.fsTreeFlushMarkUpdate(ctx)
}()
localWG.Add(1)
go func() {
defer localWG.Done()
c.dbFlushMarkUpdate()
c.dbFlushMarkUpdate(ctx)
}()
c.initWG.Add(1)
@ -54,7 +54,7 @@ func (c *cache) initFlushMarks() {
var errStopIter = errors.New("stop iteration")
func (c *cache) fsTreeFlushMarkUpdate() {
func (c *cache) fsTreeFlushMarkUpdate(ctx context.Context) {
c.log.Info(logs.WritecacheFillingFlushMarksForObjectsInFSTree)
var prm common.IteratePrm
@ -67,14 +67,14 @@ func (c *cache) fsTreeFlushMarkUpdate() {
default:
}
flushed, needRemove := c.flushStatus(addr)
flushed, needRemove := c.flushStatus(ctx, addr)
if flushed {
c.store.flushed.Add(addr.EncodeToString(), true)
if needRemove {
var prm common.DeletePrm
prm.Address = addr
_, err := c.fsTree.Delete(prm)
_, err := c.fsTree.Delete(ctx, prm)
if err == nil {
storagelog.Write(c.log,
storagelog.AddressField(addr),
@ -90,7 +90,7 @@ func (c *cache) fsTreeFlushMarkUpdate() {
c.log.Info(logs.WritecacheFinishedUpdatingFSTreeFlushMarks)
}
func (c *cache) dbFlushMarkUpdate() {
func (c *cache) dbFlushMarkUpdate(ctx context.Context) {
c.log.Info(logs.WritecacheFillingFlushMarksForObjectsInDatabase)
var m []string
@ -125,7 +125,7 @@ func (c *cache) dbFlushMarkUpdate() {
continue
}
flushed, needRemove := c.flushStatus(addr)
flushed, needRemove := c.flushStatus(ctx, addr)
if flushed {
c.store.flushed.Add(addr.EncodeToString(), true)
if needRemove {
@ -165,11 +165,11 @@ func (c *cache) dbFlushMarkUpdate() {
// flushStatus returns info about the object state in the main storage.
// First return value is true iff object exists.
// Second return value is true iff object can be safely removed.
func (c *cache) flushStatus(addr oid.Address) (bool, bool) {
func (c *cache) flushStatus(ctx context.Context, addr oid.Address) (bool, bool) {
var existsPrm meta.ExistsPrm
existsPrm.SetAddress(addr)
_, err := c.metabase.Exists(existsPrm)
_, err := c.metabase.Exists(ctx, existsPrm)
if err != nil {
needRemove := errors.Is(err, meta.ErrObjectIsExpired) || errors.As(err, new(apistatus.ObjectAlreadyRemoved))
return needRemove, needRemove
@ -178,7 +178,7 @@ func (c *cache) flushStatus(addr oid.Address) (bool, bool) {
var prm meta.StorageIDPrm
prm.SetAddress(addr)
mRes, _ := c.metabase.StorageID(prm)
res, err := c.blobstor.Exists(context.TODO(), common.ExistsPrm{Address: addr, StorageID: mRes.StorageID()})
mRes, _ := c.metabase.StorageID(ctx, prm)
res, err := c.blobstor.Exists(ctx, common.ExistsPrm{Address: addr, StorageID: mRes.StorageID()})
return err == nil && res.Exists, false
}
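
flushStatus folds two questions into one metabase call: does the object still exist, and may the cache forget it. On success the real code goes on to consult the blobstor with the propagated ctx (the point of the final change above); the error branch alone can be restated compactly (error types as used above; the helper is illustrative):

// Illustrative: how flushStatus maps a failed metabase Exists lookup.
// Expired or already-removed objects count as flushed and may be dropped;
// any other error keeps the object in the cache.
func classifyExistsErr(err error) (flushed, removable bool) {
	needRemove := errors.Is(err, meta.ErrObjectIsExpired) ||
		errors.As(err, new(apistatus.ObjectAlreadyRemoved))
	return needRemove, needRemove
}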

Some files were not shown because too many files have changed in this diff.