Add tracing spans to PUT, DELETE requests and tree service #242

Merged
fyrchik merged 3 commits from dstepanov-yadro/frostfs-node:tracing/put into master 2023-04-14 10:25:55 +00:00
137 changed files with 1478 additions and 624 deletions
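The diff below applies one recurring pattern: each storage operation gains a context.Context as its first parameter, opens a named span through the project's tracing helper, records request attributes, and passes the context down so nested calls produce child spans. A minimal sketch of that shape, assuming only the imports visible in the diff (the Store type and remove helper are illustrative, not part of the PR):

package store

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

// Store is a placeholder component used only to illustrate the pattern.
type Store struct{}

// Delete shows the instrumentation shape used throughout this PR:
// context first, a "<Component>.<Operation>" span with request
// attributes, closed on return, with ctx threaded into callees.
func (s *Store) Delete(ctx context.Context, address string) error {
	ctx, span := tracing.StartSpanFromContext(ctx, "Store.Delete",
		trace.WithAttributes(
			attribute.String("address", address),
		))
	defer span.End()

	// Callees receive the span-carrying ctx, so their spans nest under this one.
	return s.remove(ctx, address)
}

func (s *Store) remove(ctx context.Context, address string) error {
	_ = ctx // real removal logic would use ctx and address here
	_ = address
	return nil
}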


@ -36,7 +36,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) {
storageID := meta.StorageIDPrm{}
storageID.SetAddress(addr)
resStorageID, err := db.StorageID(storageID)
resStorageID, err := db.StorageID(cmd.Context(), storageID)
common.ExitOnErr(cmd, common.Errf("could not check if the obj is small: %w", err))
if id := resStorageID.StorageID(); id != nil {
@ -51,7 +51,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) {
siErr := new(object.SplitInfoError)
res, err := db.Get(prm)
res, err := db.Get(cmd.Context(), prm)
if errors.As(err, &siErr) {
link, linkSet := siErr.SplitInfo().Link()
last, lastSet := siErr.SplitInfo().LastPart()


@ -42,7 +42,7 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler
for _, c := range listRes.Containers() {
selectPrm.WithContainerID(c)
selectRes, err := n.e.Select(selectPrm)
selectRes, err := n.e.Select(ctx, selectPrm)
if err != nil {
log.Error(logs.FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer,
zap.Stringer("cid", c),


@ -617,20 +617,20 @@ type engineWithNotifications struct {
defaultTopic string
}
func (e engineWithNotifications) IsLocked(address oid.Address) (bool, error) {
return e.base.IsLocked(address)
func (e engineWithNotifications) IsLocked(ctx context.Context, address oid.Address) (bool, error) {
return e.base.IsLocked(ctx, address)
}
func (e engineWithNotifications) Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error {
return e.base.Delete(ctx, tombstone, toDelete)
}
func (e engineWithNotifications) Lock(locker oid.Address, toLock []oid.ID) error {
return e.base.Lock(locker, toLock)
func (e engineWithNotifications) Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error {
return e.base.Lock(ctx, locker, toLock)
}
func (e engineWithNotifications) Put(o *objectSDK.Object) error {
if err := e.base.Put(o); err != nil {
func (e engineWithNotifications) Put(ctx context.Context, o *objectSDK.Object) error {
if err := e.base.Put(ctx, o); err != nil {
return err
}
@ -654,8 +654,8 @@ type engineWithoutNotifications struct {
engine *engine.StorageEngine
}
func (e engineWithoutNotifications) IsLocked(address oid.Address) (bool, error) {
return e.engine.IsLocked(address)
func (e engineWithoutNotifications) IsLocked(ctx context.Context, address oid.Address) (bool, error) {
return e.engine.IsLocked(ctx, address)
}
func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error {
@ -673,10 +673,10 @@ func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Ad
return err
}
func (e engineWithoutNotifications) Lock(locker oid.Address, toLock []oid.ID) error {
return e.engine.Lock(locker.Container(), locker.Object(), toLock)
func (e engineWithoutNotifications) Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error {
return e.engine.Lock(ctx, locker.Container(), locker.Object(), toLock)
}
func (e engineWithoutNotifications) Put(o *objectSDK.Object) error {
return engine.Put(e.engine, o)
func (e engineWithoutNotifications) Put(ctx context.Context, o *objectSDK.Object) error {
return engine.Put(ctx, e.engine, o)
}


@ -490,7 +490,7 @@ const (
NetmapCantInvokeNetmapUpdatePeer = "can't invoke netmap.UpdatePeer" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapNonAlphabetModeIgnoreRemoveNodeFromSubnetNotification = "non alphabet mode, ignore remove node from subnet notification" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCouldNotGetNetworkMapCandidates = "could not get network map candidates" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCouldNotUnmarshalSubnetId = "could not unmarshal subnet id" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCouldNotUnmarshalSubnetID = "could not unmarshal subnet id" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapGotZeroSubnetInRemoveNodeNotification = "got zero subnet in remove node notification" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCouldNotIterateOverSubnetworksOfTheNode = "could not iterate over subnetworks of the node" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCouldNotInvokeNetmapUpdateState = "could not invoke netmap.UpdateState" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go


@ -1,6 +1,7 @@
package object
import (
"context"
"crypto/ecdsa"
"errors"
"fmt"
@ -42,7 +43,7 @@ type DeleteHandler interface {
// LockSource is a source of lock relations between the objects.
type LockSource interface {
// IsLocked must clarify object's lock status.
IsLocked(address oid.Address) (bool, error)
IsLocked(ctx context.Context, address oid.Address) (bool, error)
}
// Locker is an object lock storage interface.
@ -89,7 +90,7 @@ func NewFormatValidator(opts ...FormatValidatorOption) *FormatValidator {
// If unprepared is true, only fields set by user are validated.
//
// Returns nil error if the object has valid structure.
func (v *FormatValidator) Validate(obj *object.Object, unprepared bool) error {
func (v *FormatValidator) Validate(ctx context.Context, obj *object.Object, unprepared bool) error {
if obj == nil {
return errNilObject
}
@ -117,7 +118,7 @@ func (v *FormatValidator) Validate(obj *object.Object, unprepared bool) error {
return fmt.Errorf("(%T) could not validate signature key: %w", v, err)
}
if err := v.checkExpiration(obj); err != nil {
if err := v.checkExpiration(ctx, obj); err != nil {
return fmt.Errorf("object did not pass expiration check: %w", err)
}
@ -128,7 +129,7 @@ func (v *FormatValidator) Validate(obj *object.Object, unprepared bool) error {
if obj = obj.Parent(); obj != nil {
// Parent object already exists.
return v.Validate(obj, false)
return v.Validate(ctx, obj, false)
}
return nil
@ -327,7 +328,7 @@ func (v *FormatValidator) fillAndValidateTombstoneMeta(o *object.Object, meta *C
var errExpired = errors.New("object has expired")
func (v *FormatValidator) checkExpiration(obj *object.Object) error {
func (v *FormatValidator) checkExpiration(ctx context.Context, obj *object.Object) error {
exp, err := expirationEpochAttribute(obj)
if err != nil {
if errors.Is(err, errNoExpirationEpoch) {
@ -348,7 +349,7 @@ func (v *FormatValidator) checkExpiration(obj *object.Object) error {
addr.SetContainer(cID)
addr.SetObject(oID)
locked, err := v.e.IsLocked(addr)
locked, err := v.e.IsLocked(ctx, addr)
if err != nil {
return fmt.Errorf("locking status check for an expired object: %w", err)
}
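For reference, a minimal LockSource implementation under the new context-aware signature could look like the sketch below; it mirrors the map-backed testLockSource used in the validator tests later in this diff (mapLockSource and its mutex are illustrative additions):

package object

import (
	"context"
	"sync"

	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// mapLockSource is an in-memory LockSource for illustration; the real
// engine answers IsLocked by consulting its shards.
type mapLockSource struct {
	mu sync.RWMutex
	m  map[oid.Address]bool
}

func (s *mapLockSource) IsLocked(_ context.Context, addr oid.Address) (bool, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.m[addr], nil
}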


@ -1,6 +1,7 @@
package object
import (
"context"
"crypto/ecdsa"
"strconv"
"testing"
@ -40,7 +41,7 @@ type testLockSource struct {
m map[oid.Address]bool
}
func (t testLockSource) IsLocked(address oid.Address) (bool, error) {
func (t testLockSource) IsLocked(_ context.Context, address oid.Address) (bool, error) {
return t.m[address], nil
}
@ -62,20 +63,20 @@ func TestFormatValidator_Validate(t *testing.T) {
require.NoError(t, err)
t.Run("nil input", func(t *testing.T) {
require.Error(t, v.Validate(nil, true))
require.Error(t, v.Validate(context.Background(), nil, true))
})
t.Run("nil identifier", func(t *testing.T) {
obj := object.New()
require.ErrorIs(t, v.Validate(obj, false), errNilID)
require.ErrorIs(t, v.Validate(context.Background(), obj, false), errNilID)
})
t.Run("nil container identifier", func(t *testing.T) {
obj := object.New()
obj.SetID(oidtest.ID())
require.ErrorIs(t, v.Validate(obj, true), errNilCID)
require.ErrorIs(t, v.Validate(context.Background(), obj, true), errNilCID)
})
t.Run("unsigned object", func(t *testing.T) {
@ -83,7 +84,7 @@ func TestFormatValidator_Validate(t *testing.T) {
obj.SetContainerID(cidtest.ID())
obj.SetID(oidtest.ID())
require.Error(t, v.Validate(obj, false))
require.Error(t, v.Validate(context.Background(), obj, false))
})
t.Run("correct w/ session token", func(t *testing.T) {
@ -101,7 +102,7 @@ func TestFormatValidator_Validate(t *testing.T) {
require.NoError(t, object.SetIDWithSignature(ownerKey.PrivateKey, obj))
require.NoError(t, v.Validate(obj, false))
require.NoError(t, v.Validate(context.Background(), obj, false))
})
t.Run("correct w/o session token", func(t *testing.T) {
@ -109,7 +110,7 @@ func TestFormatValidator_Validate(t *testing.T) {
require.NoError(t, object.SetIDWithSignature(ownerKey.PrivateKey, obj))
require.NoError(t, v.Validate(obj, false))
require.NoError(t, v.Validate(context.Background(), obj, false))
})
t.Run("tombstone content", func(t *testing.T) {
@ -236,7 +237,7 @@ func TestFormatValidator_Validate(t *testing.T) {
t.Run("invalid attribute value", func(t *testing.T) {
val := "text"
err := v.Validate(fn(val), false)
err := v.Validate(context.Background(), fn(val), false)
require.Error(t, err)
})
@ -245,7 +246,7 @@ func TestFormatValidator_Validate(t *testing.T) {
obj := fn(val)
t.Run("non-locked", func(t *testing.T) {
err := v.Validate(obj, false)
err := v.Validate(context.Background(), obj, false)
require.ErrorIs(t, err, errExpired)
})
@ -258,14 +259,14 @@ func TestFormatValidator_Validate(t *testing.T) {
addr.SetObject(oID)
ls.m[addr] = true
err := v.Validate(obj, false)
err := v.Validate(context.Background(), obj, false)
require.NoError(t, err)
})
})
t.Run("alive object", func(t *testing.T) {
val := strconv.FormatUint(curEpoch, 10)
err := v.Validate(fn(val), true)
err := v.Validate(context.Background(), fn(val), true)
require.NoError(t, err)
})
})


@ -159,7 +159,7 @@ func (np *Processor) processRemoveSubnetNode(ev subnetEvent.RemoveNode) {
err = subnetToRemoveFrom.Unmarshal(rawSubnet)
if err != nil {
np.log.Warn(logs.NetmapCouldNotUnmarshalSubnetId,
np.log.Warn(logs.NetmapCouldNotUnmarshalSubnetID,
zap.Error(err),
)
return


@ -88,7 +88,7 @@ func TestBlobovnicza(t *testing.T) {
var dPrm DeletePrm
dPrm.SetAddress(addr)
_, err := blz.Delete(dPrm)
_, err := blz.Delete(context.Background(), dPrm)
require.NoError(t, err)
// should return 404


@ -1,10 +1,15 @@
package blobovnicza
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@ -30,7 +35,13 @@ func (p *DeletePrm) SetAddress(addr oid.Address) {
// Returns an error of type apistatus.ObjectNotFound if the object to be deleted is not in blobovnicza.
//
// Should not be called in read-only configuration.
func (b *Blobovnicza) Delete(prm DeletePrm) (DeleteRes, error) {
func (b *Blobovnicza) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
_, span := tracing.StartSpanFromContext(ctx, "Blobovnicza.Delete",
trace.WithAttributes(
attribute.String("address", prm.addr.EncodeToString()),
))
defer span.End()
addrKey := addressKey(prm.addr)
removed := false


@ -1,13 +1,18 @@
package blobovniczatree
import (
"context"
"encoding/hex"
"path/filepath"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@ -15,7 +20,14 @@ import (
//
// If a blobovnicza ID is specified, only that blobovnicza is processed.
// Otherwise, all Blobovniczas are processed in descending weight order.
func (b *Blobovniczas) Delete(prm common.DeletePrm) (res common.DeleteRes, err error) {
func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res common.DeleteRes, err error) {
ctx, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.Delete",
trace.WithAttributes(
attribute.String("address", prm.Address.EncodeToString()),
attribute.String("storage_id", hex.EncodeToString(prm.StorageID)),
))
defer span.End()
if b.readOnly {
return common.DeleteRes{}, common.ErrReadOnly
}
@ -30,7 +42,7 @@ func (b *Blobovniczas) Delete(prm common.DeletePrm) (res common.DeleteRes, err e
return res, err
}
return b.deleteObject(blz, bPrm, prm)
return b.deleteObject(ctx, blz, bPrm, prm)
}
activeCache := make(map[string]struct{})
@ -42,7 +54,7 @@ func (b *Blobovniczas) Delete(prm common.DeletePrm) (res common.DeleteRes, err e
// don't process active blobovnicza of the level twice
_, ok := activeCache[dirPath]
res, err = b.deleteObjectFromLevel(bPrm, p, !ok, prm)
res, err = b.deleteObjectFromLevel(ctx, bPrm, p, !ok, prm)
if err != nil {
if !blobovnicza.IsErrNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromLevel,
@ -73,7 +85,7 @@ func (b *Blobovniczas) Delete(prm common.DeletePrm) (res common.DeleteRes, err e
// tries to delete object from particular blobovnicza.
//
// returns no error if object was removed from some blobovnicza of the same level.
func (b *Blobovniczas) deleteObjectFromLevel(prm blobovnicza.DeletePrm, blzPath string, tryActive bool, dp common.DeletePrm) (common.DeleteRes, error) {
func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicza.DeletePrm, blzPath string, tryActive bool, dp common.DeletePrm) (common.DeleteRes, error) {
lvlPath := filepath.Dir(blzPath)
// try to remove from blobovnicza if it is opened
@ -81,7 +93,7 @@ func (b *Blobovniczas) deleteObjectFromLevel(prm blobovnicza.DeletePrm, blzPath
v, ok := b.opened.Get(blzPath)
b.lruMtx.Unlock()
if ok {
if res, err := b.deleteObject(v, prm, dp); err == nil {
if res, err := b.deleteObject(ctx, v, prm, dp); err == nil {
return res, err
} else if !blobovnicza.IsErrNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromOpenedBlobovnicza,
@ -100,7 +112,7 @@ func (b *Blobovniczas) deleteObjectFromLevel(prm blobovnicza.DeletePrm, blzPath
b.activeMtx.RUnlock()
if ok && tryActive {
if res, err := b.deleteObject(active.blz, prm, dp); err == nil {
if res, err := b.deleteObject(ctx, active.blz, prm, dp); err == nil {
return res, err
} else if !blobovnicza.IsErrNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromActiveBlobovnicza,
@ -125,11 +137,11 @@ func (b *Blobovniczas) deleteObjectFromLevel(prm blobovnicza.DeletePrm, blzPath
return common.DeleteRes{}, err
}
return b.deleteObject(blz, prm, dp)
return b.deleteObject(ctx, blz, prm, dp)
}
// removes object from blobovnicza and returns common.DeleteRes.
func (b *Blobovniczas) deleteObject(blz *blobovnicza.Blobovnicza, prm blobovnicza.DeletePrm, dp common.DeletePrm) (common.DeleteRes, error) {
_, err := blz.Delete(prm)
func (b *Blobovniczas) deleteObject(ctx context.Context, blz *blobovnicza.Blobovnicza, prm blobovnicza.DeletePrm, dp common.DeletePrm) (common.DeleteRes, error) {
_, err := blz.Delete(ctx, prm)
return common.DeleteRes{}, err
}


@ -33,7 +33,7 @@ func TestExistsInvalidStorageID(t *testing.T) {
d, err := obj.Marshal()
require.NoError(t, err)
putRes, err := b.Put(common.PutPrm{Address: addr, RawData: d, DontCompress: true})
putRes, err := b.Put(context.Background(), common.PutPrm{Address: addr, RawData: d, DontCompress: true})
require.NoError(t, err)
t.Run("valid but wrong storage id", func(t *testing.T) {


@ -1,20 +1,31 @@
package blobovniczatree
import (
"context"
"errors"
"path/filepath"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
// Put saves the object in the maximum-weight blobovnicza.
//
// Returns an error if the object could not be saved in any blobovnicza.
func (b *Blobovniczas) Put(prm common.PutPrm) (common.PutRes, error) {
func (b *Blobovniczas) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, error) {
_, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.Put",
trace.WithAttributes(
attribute.String("address", prm.Address.EncodeToString()),
attribute.Bool("dont_compress", prm.DontCompress),
))
defer span.End()
if b.readOnly {
return common.PutRes{}, common.ErrReadOnly
}


@ -75,12 +75,12 @@ func TestCompression(t *testing.T) {
testPut := func(t *testing.T, b *BlobStor, i int) {
var prm common.PutPrm
prm.Object = smallObj[i]
_, err := b.Put(prm)
_, err := b.Put(context.Background(), prm)
require.NoError(t, err)
prm = common.PutPrm{}
prm.Object = bigObj[i]
_, err = b.Put(prm)
_, err = b.Put(context.Background(), prm)
require.NoError(t, err)
}


@ -23,7 +23,7 @@ type Storage interface {
Get(context.Context, GetPrm) (GetRes, error)
GetRange(context.Context, GetRangePrm) (GetRangeRes, error)
Exists(context.Context, ExistsPrm) (ExistsRes, error)
Put(PutPrm) (PutRes, error)
Delete(DeletePrm) (DeleteRes, error)
Put(context.Context, PutPrm) (PutRes, error)
Delete(context.Context, DeletePrm) (DeleteRes, error)
Iterate(IteratePrm) (IterateRes, error)
}
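Every common.Storage implementation (FSTree, Blobovniczas, the test memstore) has to adopt the new signatures; implementations with nothing to trace can simply ignore the context, as in this hedged stub (noopStorage is illustrative, and only the two changed methods are shown — a full implementation would also provide Get, GetRange, Exists, Iterate and the rest of the contract):

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
)

type noopStorage struct{}

func (noopStorage) Put(_ context.Context, _ common.PutPrm) (common.PutRes, error) {
	return common.PutRes{}, nil // nothing stored; ctx ignored, no span needed
}

func (noopStorage) Delete(_ context.Context, _ common.DeletePrm) (common.DeleteRes, error) {
	return common.DeleteRes{}, nil // nothing to delete
}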


@ -1,19 +1,31 @@
package blobstor
import (
"context"
"encoding/hex"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
func (b *BlobStor) Delete(prm common.DeletePrm) (common.DeleteRes, error) {
func (b *BlobStor) Delete(ctx context.Context, prm common.DeletePrm) (common.DeleteRes, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "BlobStor.Delete",
trace.WithAttributes(
attribute.String("address", prm.Address.EncodeToString()),
attribute.String("storage_id", hex.EncodeToString(prm.StorageID)),
))
defer span.End()
b.modeMtx.RLock()
defer b.modeMtx.RUnlock()
if prm.StorageID == nil {
for i := range b.storage {
res, err := b.storage[i].Storage.Delete(prm)
res, err := b.storage[i].Storage.Delete(ctx, prm)
if err == nil || !errors.As(err, new(apistatus.ObjectNotFound)) {
if err == nil {
logOp(b.log, deleteOp, prm.Address, b.storage[i].Storage.Type(), prm.StorageID)
@ -31,7 +43,7 @@ func (b *BlobStor) Delete(prm common.DeletePrm) (common.DeleteRes, error) {
st = b.storage[0].Storage
}
res, err := st.Delete(prm)
res, err := st.Delete(ctx, prm)
if err == nil {
logOp(b.log, deleteOp, prm.Address, st.Type(), prm.StorageID)
}


@ -36,7 +36,7 @@ func TestExists(t *testing.T) {
for i := range objects {
var prm common.PutPrm
prm.Object = objects[i]
_, err := b.Put(prm)
_, err := b.Put(context.Background(), prm)
require.NoError(t, err)
}


@ -196,7 +196,13 @@ func (t *FSTree) treePath(addr oid.Address) string {
}
// Delete removes the object with the specified address from the storage.
func (t *FSTree) Delete(prm common.DeletePrm) (common.DeleteRes, error) {
func (t *FSTree) Delete(ctx context.Context, prm common.DeletePrm) (common.DeleteRes, error) {
_, span := tracing.StartSpanFromContext(ctx, "FSTree.Delete",
trace.WithAttributes(
attribute.String("address", prm.Address.EncodeToString()),
))
defer span.End()
if t.readOnly {
return common.DeleteRes{}, common.ErrReadOnly
}
@ -230,7 +236,14 @@ func (t *FSTree) Exists(ctx context.Context, prm common.ExistsPrm) (common.Exist
}
// Put puts an object in the storage.
func (t *FSTree) Put(prm common.PutPrm) (common.PutRes, error) {
func (t *FSTree) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, error) {
_, span := tracing.StartSpanFromContext(ctx, "FSTree.Put",
trace.WithAttributes(
attribute.String("address", prm.Address.EncodeToString()),
attribute.Bool("dont_compress", prm.DontCompress),
))
defer span.End()
if t.readOnly {
return common.PutRes{}, common.ErrReadOnly
}


@ -1,6 +1,7 @@
package blobstortest
import (
"context"
"math/rand"
"testing"
@ -67,7 +68,7 @@ func prepare(t *testing.T, count int, s common.Storage, min, max uint64) []objec
prm.Object = objects[i].obj
prm.RawData = objects[i].raw
putRes, err := s.Put(prm)
putRes, err := s.Put(context.Background(), prm)
require.NoError(t, err)
objects[i].storageID = putRes.StorageID


@ -36,7 +36,7 @@ func TestControl(t *testing.T, cons Constructor, min, max uint64) {
prm.Object = NewObject(min + uint64(rand.Intn(int(max-min+1))))
prm.Address = objectCore.AddressOf(prm.Object)
_, err := s.Put(prm)
_, err := s.Put(context.Background(), prm)
require.ErrorIs(t, err, common.ErrReadOnly)
})
t.Run("delete fails", func(t *testing.T) {
@ -44,7 +44,7 @@ func TestControl(t *testing.T, cons Constructor, min, max uint64) {
prm.Address = objects[0].addr
prm.StorageID = objects[0].storageID
_, err := s.Delete(prm)
_, err := s.Delete(context.Background(), prm)
require.ErrorIs(t, err, common.ErrReadOnly)
})
}


@ -22,7 +22,7 @@ func TestDelete(t *testing.T, cons Constructor, min, max uint64) {
var prm common.DeletePrm
prm.Address = oidtest.Address()
_, err := s.Delete(prm)
_, err := s.Delete(context.Background(), prm)
require.Error(t, err, new(apistatus.ObjectNotFound))
})
@ -31,7 +31,7 @@ func TestDelete(t *testing.T, cons Constructor, min, max uint64) {
prm.Address = objects[0].addr
prm.StorageID = objects[0].storageID
_, err := s.Delete(prm)
_, err := s.Delete(context.Background(), prm)
require.NoError(t, err)
t.Run("exists fail", func(t *testing.T) {
@ -55,7 +55,7 @@ func TestDelete(t *testing.T, cons Constructor, min, max uint64) {
var prm common.DeletePrm
prm.Address = objects[1].addr
_, err := s.Delete(prm)
_, err := s.Delete(context.Background(), prm)
require.NoError(t, err)
})
@ -64,10 +64,10 @@ func TestDelete(t *testing.T, cons Constructor, min, max uint64) {
prm.Address = objects[2].addr
prm.StorageID = objects[2].storageID
_, err := s.Delete(prm)
_, err := s.Delete(context.Background(), prm)
require.NoError(t, err)
_, err = s.Delete(prm)
_, err = s.Delete(context.Background(), prm)
require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
})


@ -1,6 +1,7 @@
package blobstortest
import (
"context"
"errors"
"testing"
@ -22,7 +23,7 @@ func TestIterate(t *testing.T, cons Constructor, min, max uint64) {
var delPrm common.DeletePrm
delPrm.Address = objects[2].addr
delPrm.StorageID = objects[2].storageID
_, err := s.Delete(delPrm)
_, err := s.Delete(context.Background(), delPrm)
require.NoError(t, err)
objects = append(objects[:delID], objects[delID+1:]...)


@ -1,6 +1,7 @@
package blobstor
import (
"context"
"encoding/binary"
"os"
"testing"
@ -63,7 +64,7 @@ func TestIterateObjects(t *testing.T) {
}
for _, v := range mObjs {
_, err := blobStor.Put(common.PutPrm{Address: v.addr, RawData: v.data})
_, err := blobStor.Put(context.Background(), common.PutPrm{Address: v.addr, RawData: v.data})
require.NoError(t, err)
}


@ -91,7 +91,7 @@ func (s *memstoreImpl) Exists(_ context.Context, req common.ExistsPrm) (common.E
return common.ExistsRes{Exists: exists}, nil
}
func (s *memstoreImpl) Put(req common.PutPrm) (common.PutRes, error) {
func (s *memstoreImpl) Put(_ context.Context, req common.PutPrm) (common.PutRes, error) {
if s.readOnly {
return common.PutRes{}, common.ErrReadOnly
}
@ -108,7 +108,7 @@ func (s *memstoreImpl) Put(req common.PutPrm) (common.PutRes, error) {
return common.PutRes{StorageID: []byte(s.rootPath)}, nil
}
func (s *memstoreImpl) Delete(req common.DeletePrm) (common.DeleteRes, error) {
func (s *memstoreImpl) Delete(_ context.Context, req common.DeletePrm) (common.DeleteRes, error) {
if s.readOnly {
return common.DeleteRes{}, common.ErrReadOnly
}


@ -28,7 +28,7 @@ func TestSimpleLifecycle(t *testing.T) {
require.NoError(t, err)
{
_, err := s.Put(common.PutPrm{Address: addr, RawData: d, DontCompress: true})
_, err := s.Put(context.Background(), common.PutPrm{Address: addr, RawData: d, DontCompress: true})
require.NoError(t, err)
}
@ -57,7 +57,7 @@ func TestSimpleLifecycle(t *testing.T) {
}
{
_, err := s.Delete(common.DeletePrm{Address: addr})
_, err := s.Delete(context.Background(), common.DeletePrm{Address: addr})
require.NoError(t, err)
}


@ -114,7 +114,7 @@ func BenchmarkSubstorageReadPerf(b *testing.B) {
if err != nil {
return fmt.Errorf("marshal: %v", err)
}
_, err = st.Put(common.PutPrm{
_, err = st.Put(context.Background(), common.PutPrm{
Address: addr,
RawData: raw,
})
@ -165,7 +165,7 @@ func BenchmarkSubstorageWritePerf(b *testing.B) {
addr := testutil.AddressFromObject(b, obj)
raw, err := obj.Marshal()
require.NoError(b, err)
if _, err := st.Put(common.PutPrm{
if _, err := st.Put(context.Background(), common.PutPrm{
Address: addr,
RawData: raw,
}); err != nil {
@ -202,7 +202,7 @@ func BenchmarkSubstorageIteratePerf(b *testing.B) {
addr := testutil.AddressFromObject(b, obj)
raw, err := obj.Marshal()
require.NoError(b, err)
if _, err := st.Put(common.PutPrm{
if _, err := st.Put(context.Background(), common.PutPrm{
Address: addr,
RawData: raw,
}); err != nil {


@ -1,12 +1,16 @@
package blobstor
import (
"context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// ErrNoPlaceFound is returned when object can't be saved to any sub-storage component
@ -21,7 +25,14 @@ var ErrNoPlaceFound = logicerr.New("couldn't find a place to store an object")
//
// Returns any error encountered that
// did not allow to completely save the object.
func (b *BlobStor) Put(prm common.PutPrm) (common.PutRes, error) {
func (b *BlobStor) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "BlobStor.Put",
trace.WithAttributes(
attribute.String("address", prm.Address.EncodeToString()),
attribute.Bool("dont_compress", prm.DontCompress),
))
defer span.End()
b.modeMtx.RLock()
defer b.modeMtx.RUnlock()
@ -39,7 +50,7 @@ func (b *BlobStor) Put(prm common.PutPrm) (common.PutRes, error) {
for i := range b.storage {
if b.storage[i].Policy == nil || b.storage[i].Policy(prm.Object, prm.RawData) {
res, err := b.storage[i].Storage.Put(prm)
res, err := b.storage[i].Storage.Put(ctx, prm)
if err == nil {
logOp(b.log, putOp, prm.Address, b.storage[i].Storage.Type(), res.StorageID)
}
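BlobStor.Put surfaces ErrNoPlaceFound when no sub-storage accepts the object; the engine (see the engine/put.go changes further down) treats it like a read-only or no-space condition and moves on to the next shard. A hedged caller-side sketch (putOrSkip is a hypothetical helper, not part of the PR):

import (
	"context"
	"errors"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
)

// putOrSkip distinguishes "this blobstor has no place" from hard failures,
// mirroring the check the engine performs in putToShard.
func putOrSkip(ctx context.Context, b *blobstor.BlobStor, prm common.PutPrm) (tryNextShard bool, err error) {
	_, err = b.Put(ctx, prm)
	switch {
	case err == nil:
		return false, nil // stored successfully
	case errors.Is(err, blobstor.ErrNoPlaceFound):
		return true, nil // benign here: let the caller pick another shard
	default:
		return false, err // real failure
	}
}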


@ -176,27 +176,27 @@ func (s *TestStore) Exists(ctx context.Context, req common.ExistsPrm) (common.Ex
}
}
func (s *TestStore) Put(req common.PutPrm) (common.PutRes, error) {
func (s *TestStore) Put(ctx context.Context, req common.PutPrm) (common.PutRes, error) {
s.mu.RLock()
defer s.mu.RUnlock()
switch {
case s.overrides.Put != nil:
return s.overrides.Put(req)
case s.st != nil:
return s.st.Put(req)
return s.st.Put(ctx, req)
default:
panic(fmt.Sprintf("unexpected storage call: Put(%+v)", req))
}
}
func (s *TestStore) Delete(req common.DeletePrm) (common.DeleteRes, error) {
func (s *TestStore) Delete(ctx context.Context, req common.DeletePrm) (common.DeleteRes, error) {
s.mu.RLock()
defer s.mu.RUnlock()
switch {
case s.overrides.Delete != nil:
return s.overrides.Delete(req)
case s.st != nil:
return s.st.Delete(req)
return s.st.Delete(ctx, req)
default:
panic(fmt.Sprintf("unexpected storage call: Delete(%+v)", req))
}


@ -308,7 +308,7 @@ loop:
e.removeShards(shardsToRemove...)
for _, p := range shardsToReload {
err := p.sh.Reload(p.opts...)
err := p.sh.Reload(ctx, p.opts...)
if err != nil {
e.log.Error(logs.EngineCouldNotReloadAShard,
zap.Stringer("shard id", p.sh.ID()),


@ -204,7 +204,7 @@ func TestExecBlocks(t *testing.T) {
addr := object.AddressOf(obj)
require.NoError(t, Put(e, obj))
require.NoError(t, Put(context.Background(), e, obj))
// block executions
errBlock := errors.New("block exec err")


@ -4,11 +4,14 @@ import (
"context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@ -47,6 +50,13 @@ func (p *DeletePrm) WithForceRemoval() {
// on operations with that object) if WithForceRemoval option has
// been provided.
func (e *StorageEngine) Delete(ctx context.Context, prm DeletePrm) (res DeleteRes, err error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Delete",
trace.WithAttributes(
attribute.String("address", prm.addr.EncodeToString()),
attribute.Bool("force_removal", prm.forceRemoval),
))
defer span.End()
err = e.execIfNotBlocked(func() error {
res, err = e.delete(ctx, prm)
return err
@ -135,7 +145,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo
}
e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
res, err := sh.Select(selectPrm)
res, err := sh.Select(ctx, selectPrm)
if err != nil {
e.log.Warn(logs.EngineErrorDuringSearchingForObjectChildren,
zap.Stringer("addr", addr),


@ -59,9 +59,9 @@ func TestDeleteBigObject(t *testing.T) {
defer e.Close()
for i := range children {
require.NoError(t, Put(e, children[i]))
require.NoError(t, Put(context.Background(), e, children[i]))
}
require.NoError(t, Put(e, link))
require.NoError(t, Put(context.Background(), e, link))
var splitErr *objectSDK.SplitInfoError


@ -60,7 +60,7 @@ func benchmarkExists(b *testing.B, shardNum int) {
addr := oidtest.Address()
for i := 0; i < 100; i++ {
obj := testutil.GenerateObjectWithCID(cidtest.ID())
err := Put(e, obj)
err := Put(context.Background(), e, obj)
if err != nil {
b.Fatal(err)
}
@ -69,7 +69,7 @@ func benchmarkExists(b *testing.B, shardNum int) {
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
ok, err := e.exists(addr)
ok, err := e.exists(context.Background(), addr)
if err != nil || ok {
b.Fatalf("%t %v", ok, err)
}


@ -98,7 +98,7 @@ func TestErrorReporting(t *testing.T) {
var prm shard.PutPrm
prm.SetObject(obj)
te.ng.mtx.RLock()
_, err := te.ng.shards[te.shards[0].id.String()].Shard.Put(prm)
_, err := te.ng.shards[te.shards[0].id.String()].Shard.Put(context.Background(), prm)
te.ng.mtx.RUnlock()
require.NoError(t, err)
@ -132,7 +132,7 @@ func TestErrorReporting(t *testing.T) {
var prm shard.PutPrm
prm.SetObject(obj)
te.ng.mtx.RLock()
_, err := te.ng.shards[te.shards[0].id.String()].Put(prm)
_, err := te.ng.shards[te.shards[0].id.String()].Put(context.Background(), prm)
te.ng.mtx.RUnlock()
require.NoError(t, err)
@ -185,7 +185,7 @@ func TestBlobstorFailback(t *testing.T) {
var prm shard.PutPrm
prm.SetObject(obj)
te.ng.mtx.RLock()
_, err = te.ng.shards[te.shards[0].id.String()].Shard.Put(prm)
_, err = te.ng.shards[te.shards[0].id.String()].Shard.Put(context.Background(), prm)
te.ng.mtx.RUnlock()
require.NoError(t, err)
objs = append(objs, obj)


@ -57,7 +57,7 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng
var putPrm shard.PutPrm
putPrm.SetObject(obj)
_, err := e.shards[sh.String()].Put(putPrm)
_, err := e.shards[sh.String()].Put(context.Background(), putPrm)
require.NoError(t, err)
}
@ -67,7 +67,7 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng
var putPrm PutPrm
putPrm.WithObject(objects[len(objects)-1])
_, err := e.Put(putPrm)
err := e.Put(context.Background(), putPrm)
require.NoError(t, err)
res, err := e.shards[ids[len(ids)-1].String()].List()


@ -10,14 +10,14 @@ import (
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
func (e *StorageEngine) exists(addr oid.Address) (bool, error) {
func (e *StorageEngine) exists(ctx context.Context, addr oid.Address) (bool, error) {
var shPrm shard.ExistsPrm
shPrm.SetAddress(addr)
alreadyRemoved := false
exists := false
e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
res, err := sh.Exists(context.TODO(), shPrm)
res, err := sh.Exists(ctx, shPrm)
if err != nil {
if shard.IsErrRemoved(err) {
alreadyRemoved = true


@ -48,6 +48,12 @@ func (r GetRes) Object() *objectSDK.Object {
//
// Returns an error if executions are blocked (see BlockExecution).
func (e *StorageEngine) Get(ctx context.Context, prm GetPrm) (res GetRes, err error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Get",
trace.WithAttributes(
attribute.String("address", prm.addr.EncodeToString()),
))
defer span.End()
err = e.execIfNotBlocked(func() error {
res, err = e.get(ctx, prm)
return err
@ -57,12 +63,6 @@ func (e *StorageEngine) Get(ctx context.Context, prm GetPrm) (res GetRes, err er
}
func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.get",
trace.WithAttributes(
attribute.String("address", prm.addr.EncodeToString()),
))
defer span.End()
if e.metrics != nil {
defer elapsed(e.metrics.AddGetDuration)()
}


@ -55,11 +55,11 @@ func TestHeadRaw(t *testing.T) {
putPrmLink.SetObject(link)
// put most left object in one shard
_, err := s1.Put(putPrmLeft)
_, err := s1.Put(context.Background(), putPrmLeft)
require.NoError(t, err)
// put link object in another shard
_, err = s2.Put(putPrmLink)
_, err = s2.Put(context.Background(), putPrmLink)
require.NoError(t, err)
// head with raw flag should return SplitInfoError


@ -4,12 +4,15 @@ import (
"context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@ -62,6 +65,9 @@ var errInhumeFailure = errors.New("inhume operation failed")
//
// Returns an error if executions are blocked (see BlockExecution).
func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) (res InhumeRes, err error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Inhume")
defer span.End()
err = e.execIfNotBlocked(func() error {
res, err = e.inhume(ctx, prm)
return err
@ -82,7 +88,7 @@ func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, e
for i := range prm.addrs {
if !prm.forceRemoval {
locked, err := e.IsLocked(prm.addrs[i])
locked, err := e.IsLocked(ctx, prm.addrs[i])
if err != nil {
e.log.Warn(logs.EngineRemovingAnObjectWithoutFullLockingCheck,
zap.Error(err),
@ -181,13 +187,19 @@ func (e *StorageEngine) inhumeAddr(ctx context.Context, addr oid.Address, prm sh
}
// IsLocked checks whether an object is locked according to StorageEngine's state.
func (e *StorageEngine) IsLocked(addr oid.Address) (bool, error) {
func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.IsLocked",
trace.WithAttributes(
attribute.String("address", addr.EncodeToString()),
))
defer span.End()
var locked bool
var err error
var outErr error
e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) {
locked, err = h.Shard.IsLocked(addr)
locked, err = h.Shard.IsLocked(ctx, addr)
if err != nil {
e.reportShardError(h, "can't check object's lockers", err, zap.Stringer("addr", addr))
outErr = err
@ -206,7 +218,7 @@ func (e *StorageEngine) IsLocked(addr oid.Address) (bool, error) {
func (e *StorageEngine) processExpiredTombstones(ctx context.Context, addrs []meta.TombstonedObject) {
e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
sh.HandleExpiredTombstones(addrs)
sh.HandleExpiredTombstones(ctx, addrs)
select {
case <-ctx.Done():


@ -42,7 +42,7 @@ func TestStorageEngine_Inhume(t *testing.T) {
e := testNewEngine(t).setShardsNum(t, 1).engine
defer e.Close()
err := Put(e, parent)
err := Put(context.Background(), e, parent)
require.NoError(t, err)
var inhumePrm InhumePrm
@ -51,7 +51,7 @@ func TestStorageEngine_Inhume(t *testing.T) {
_, err = e.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
addrs, err := Select(e, cnr, fs)
addrs, err := Select(context.Background(), e, cnr, fs)
require.NoError(t, err)
require.Empty(t, addrs)
})
@ -65,12 +65,12 @@ func TestStorageEngine_Inhume(t *testing.T) {
var putChild shard.PutPrm
putChild.SetObject(child)
_, err := s1.Put(putChild)
_, err := s1.Put(context.Background(), putChild)
require.NoError(t, err)
var putLink shard.PutPrm
putLink.SetObject(link)
_, err = s2.Put(putLink)
_, err = s2.Put(context.Background(), putLink)
require.NoError(t, err)
var inhumePrm InhumePrm
@ -79,7 +79,7 @@ func TestStorageEngine_Inhume(t *testing.T) {
_, err = e.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
addrs, err := Select(e, cnr, fs)
addrs, err := Select(context.Background(), e, cnr, fs)
require.NoError(t, err)
require.Empty(t, addrs)
})


@ -1,6 +1,7 @@
package engine
import (
"context"
"errors"
"os"
"sort"
@ -35,7 +36,7 @@ func TestListWithCursor(t *testing.T) {
var prm PutPrm
prm.WithObject(obj)
_, err := e.Put(prm)
err := e.Put(context.Background(), prm)
require.NoError(t, err)
expected = append(expected, object.AddressWithType{Type: objectSDK.TypeRegular, Address: object.AddressOf(obj)})
}


@ -4,12 +4,15 @@ import (
"context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
var errLockFailed = errors.New("lock operation failed")
@ -20,19 +23,27 @@ var errLockFailed = errors.New("lock operation failed")
// Allows locking regular objects only (otherwise returns apistatus.LockNonRegularObject).
//
// Locked list should be unique. Panics if it is empty.
func (e *StorageEngine) Lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
func (e *StorageEngine) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Lock",
trace.WithAttributes(
attribute.String("container_id", idCnr.EncodeToString()),
attribute.String("locker", locker.EncodeToString()),
attribute.Int("locked_count", len(locked)),
))
defer span.End()
return e.execIfNotBlocked(func() error {
return e.lock(idCnr, locker, locked)
return e.lock(ctx, idCnr, locker, locked)
})
}
func (e *StorageEngine) lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
func (e *StorageEngine) lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
for i := range locked {
switch e.lockSingle(idCnr, locker, locked[i], true) {
switch e.lockSingle(ctx, idCnr, locker, locked[i], true) {
case 1:
return logicerr.Wrap(apistatus.LockNonRegularObject{})
case 0:
switch e.lockSingle(idCnr, locker, locked[i], false) {
switch e.lockSingle(ctx, idCnr, locker, locked[i], false) {
case 1:
return logicerr.Wrap(apistatus.LockNonRegularObject{})
case 0:
@ -48,7 +59,7 @@ func (e *StorageEngine) lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error
// - 0: fail
// - 1: locking irregular object
// - 2: ok
func (e *StorageEngine) lockSingle(idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8) {
func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8) {
// code is pretty similar to inhumeAddr, maybe unify?
root := false
var errIrregular apistatus.LockNonRegularObject
@ -70,7 +81,7 @@ func (e *StorageEngine) lockSingle(idCnr cid.ID, locker, locked oid.ID, checkExi
var existsPrm shard.ExistsPrm
existsPrm.SetAddress(addrLocked)
exRes, err := sh.Exists(context.TODO(), existsPrm)
exRes, err := sh.Exists(ctx, existsPrm)
if err != nil {
var siErr *objectSDK.SplitInfoError
if !errors.As(err, &siErr) {
@ -90,7 +101,7 @@ func (e *StorageEngine) lockSingle(idCnr cid.ID, locker, locked oid.ID, checkExi
}
}
err := sh.Lock(idCnr, locker, []oid.ID{locked})
err := sh.Lock(ctx, idCnr, locker, []oid.ID{locked})
if err != nil {
e.reportShardError(sh, "could not lock object in shard", err)


@ -99,7 +99,7 @@ func TestLockUserScenario(t *testing.T) {
id, _ := obj.ID()
objAddr.SetObject(id)
err = Put(e, obj)
err = Put(context.Background(), e, obj)
require.NoError(t, err)
// 2.
@ -107,10 +107,10 @@ func TestLockUserScenario(t *testing.T) {
locker.WriteMembers([]oid.ID{id})
object.WriteLock(lockerObj, locker)
err = Put(e, lockerObj)
err = Put(context.Background(), e, lockerObj)
require.NoError(t, err)
err = e.Lock(cnr, lockerID, []oid.ID{id})
err = e.Lock(context.Background(), cnr, lockerID, []oid.ID{id})
require.NoError(t, err)
// 3.
@ -125,7 +125,7 @@ func TestLockUserScenario(t *testing.T) {
tombObj.SetID(tombForLockID)
tombObj.SetAttributes(a)
err = Put(e, tombObj)
err = Put(context.Background(), e, tombObj)
require.NoError(t, err)
inhumePrm.WithTarget(tombForLockAddr, lockerAddr)
@ -180,7 +180,7 @@ func TestLockExpiration(t *testing.T) {
// 1.
obj := testutil.GenerateObjectWithCID(cnr)
err = Put(e, obj)
err = Put(context.Background(), e, obj)
require.NoError(t, err)
// 2.
@ -192,13 +192,13 @@ func TestLockExpiration(t *testing.T) {
lock.SetType(object.TypeLock)
lock.SetAttributes(a)
err = Put(e, lock)
err = Put(context.Background(), e, lock)
require.NoError(t, err)
id, _ := obj.ID()
idLock, _ := lock.ID()
err = e.Lock(cnr, idLock, []oid.ID{id})
err = e.Lock(context.Background(), cnr, idLock, []oid.ID{id})
require.NoError(t, err)
var inhumePrm InhumePrm
@ -255,20 +255,20 @@ func TestLockForceRemoval(t *testing.T) {
// 1.
obj := testutil.GenerateObjectWithCID(cnr)
err = Put(e, obj)
err = Put(context.Background(), e, obj)
require.NoError(t, err)
// 2.
lock := testutil.GenerateObjectWithCID(cnr)
lock.SetType(object.TypeLock)
err = Put(e, lock)
err = Put(context.Background(), e, lock)
require.NoError(t, err)
id, _ := obj.ID()
idLock, _ := lock.ID()
err = e.Lock(cnr, idLock, []oid.ID{id})
err = e.Lock(context.Background(), cnr, idLock, []oid.ID{id})
require.NoError(t, err)
// 3.


@ -4,6 +4,7 @@ import (
"context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
@ -12,6 +13,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@ -20,9 +23,6 @@ type PutPrm struct {
obj *objectSDK.Object
}
// PutRes groups the resulting values of Put operation.
type PutRes struct{}
var errPutShard = errors.New("could not put object to any shard")
// WithObject is a Put option to set object to save.
@ -40,16 +40,22 @@ func (p *PutPrm) WithObject(obj *objectSDK.Object) {
// Returns an error if executions are blocked (see BlockExecution).
//
// Returns an error of type apistatus.ObjectAlreadyRemoved if the object has been marked as removed.
func (e *StorageEngine) Put(prm PutPrm) (res PutRes, err error) {
func (e *StorageEngine) Put(ctx context.Context, prm PutPrm) (err error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Put",
trace.WithAttributes(
attribute.String("address", object.AddressOf(prm.obj).EncodeToString()),
))
defer span.End()
err = e.execIfNotBlocked(func() error {
res, err = e.put(prm)
err = e.put(ctx, prm)
return err
})
return
}
func (e *StorageEngine) put(prm PutPrm) (PutRes, error) {
func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error {
if e.metrics != nil {
defer elapsed(e.metrics.AddPutDuration)()
}
@ -58,9 +64,9 @@ func (e *StorageEngine) put(prm PutPrm) (PutRes, error) {
// In #1146 this check was parallelized, however, it became
// much slower on fast machines for 4 shards.
_, err := e.exists(addr)
_, err := e.exists(ctx, addr)
if err != nil {
return PutRes{}, err
return err
}
finished := false
@ -74,7 +80,7 @@ func (e *StorageEngine) put(prm PutPrm) (PutRes, error) {
return false
}
putDone, exists := e.putToShard(context.TODO(), sh, ind, pool, addr, prm.obj)
putDone, exists := e.putToShard(ctx, sh, ind, pool, addr, prm.obj)
finished = putDone || exists
return finished
})
@ -83,7 +89,7 @@ func (e *StorageEngine) put(prm PutPrm) (PutRes, error) {
err = errPutShard
}
return PutRes{}, err
return err
}
// putToShard puts object to sh.
@ -117,7 +123,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, ind int,
var toMoveItPrm shard.ToMoveItPrm
toMoveItPrm.SetAddress(addr)
_, err = sh.ToMoveIt(toMoveItPrm)
_, err = sh.ToMoveIt(ctx, toMoveItPrm)
if err != nil {
e.log.Warn(logs.EngineCouldNotMarkObjectForShardRelocation,
zap.Stringer("shard", sh.ID()),
@ -132,7 +138,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, ind int,
var putPrm shard.PutPrm
putPrm.SetObject(obj)
_, err = sh.Put(putPrm)
_, err = sh.Put(ctx, putPrm)
if err != nil {
if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) ||
errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) {
@ -157,11 +163,9 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, ind int,
}
// Put writes provided object to local storage.
func Put(storage *StorageEngine, obj *objectSDK.Object) error {
func Put(ctx context.Context, storage *StorageEngine, obj *objectSDK.Object) error {
var putPrm PutPrm
putPrm.WithObject(obj)
_, err := storage.Put(putPrm)
return err
return storage.Put(ctx, putPrm)
}
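With PutRes removed, the package-level helper returns only an error and takes the caller's context, so the "StorageEngine.Put" span chains under whatever request span ctx already carries. A hedged usage sketch (storeObject is a hypothetical caller):

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)

// storeObject writes obj through the engine, propagating the request trace.
func storeObject(ctx context.Context, e *engine.StorageEngine, obj *objectSDK.Object) error {
	return engine.Put(ctx, e, obj)
}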


@ -129,7 +129,7 @@ func (e *StorageEngine) removeObjects(ctx context.Context, ch <-chan oid.Address
var deletePrm shard.DeletePrm
deletePrm.SetAddresses(addr)
_, err = shards[i].Delete(deletePrm)
_, err = shards[i].Delete(ctx, deletePrm)
if err != nil {
return err
}


@ -49,10 +49,10 @@ func TestRebalance(t *testing.T) {
te.ng.mtx.RLock()
// Every 3rd object (i%3 == 0) is put to both shards, others are distributed.
if i%3 != 1 {
_, err1 = te.ng.shards[te.shards[0].id.String()].Shard.Put(prm)
_, err1 = te.ng.shards[te.shards[0].id.String()].Shard.Put(context.Background(), prm)
}
if i%3 != 2 {
_, err2 = te.ng.shards[te.shards[1].id.String()].Shard.Put(prm)
_, err2 = te.ng.shards[te.shards[1].id.String()].Shard.Put(context.Background(), prm)
}
te.ng.mtx.RUnlock()
@ -109,8 +109,8 @@ func TestRebalanceSingleThread(t *testing.T) {
var prm shard.PutPrm
prm.SetObject(obj)
te.ng.mtx.RLock()
_, err1 := te.ng.shards[te.shards[0].id.String()].Shard.Put(prm)
_, err2 := te.ng.shards[te.shards[1].id.String()].Shard.Put(prm)
_, err1 := te.ng.shards[te.shards[0].id.String()].Shard.Put(context.Background(), prm)
_, err2 := te.ng.shards[te.shards[1].id.String()].Shard.Put(context.Background(), prm)
te.ng.mtx.RUnlock()
require.NoError(t, err1)
require.NoError(t, err2)
@ -162,8 +162,8 @@ func TestRebalanceExitByContext(t *testing.T) {
prm.SetObject(objects[i])
te.ng.mtx.RLock()
_, err1 := te.ng.shards[te.shards[0].id.String()].Shard.Put(prm)
_, err2 := te.ng.shards[te.shards[1].id.String()].Shard.Put(prm)
_, err1 := te.ng.shards[te.shards[0].id.String()].Shard.Put(context.Background(), prm)
_, err2 := te.ng.shards[te.shards[1].id.String()].Shard.Put(context.Background(), prm)
te.ng.mtx.RUnlock()
require.NoError(t, err1)


@ -1,11 +1,24 @@
package engine
import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// RestoreShard restores objects from dump to the shard with provided identifier.
//
// Returns an error if shard is not read-only.
func (e *StorageEngine) RestoreShard(id *shard.ID, prm shard.RestorePrm) error {
func (e *StorageEngine) RestoreShard(ctx context.Context, id *shard.ID, prm shard.RestorePrm) error {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.RestoreShard",
trace.WithAttributes(
attribute.String("shard_id", id.String()),
))
defer span.End()
e.mtx.RLock()
defer e.mtx.RUnlock()
@ -14,6 +27,6 @@ func (e *StorageEngine) RestoreShard(id *shard.ID, prm shard.RestorePrm) error {
return errShardNotFound
}
_, err := sh.Restore(prm)
_, err := sh.Restore(ctx, prm)
return err
}


@ -1,10 +1,15 @@
package engine
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// SelectPrm groups the parameters of Select operation.
@ -38,16 +43,22 @@ func (r SelectRes) AddressList() []oid.Address {
// Returns any error encountered that did not allow to completely select the objects.
//
// Returns an error if executions are blocked (see BlockExecution).
func (e *StorageEngine) Select(prm SelectPrm) (res SelectRes, err error) {
func (e *StorageEngine) Select(ctx context.Context, prm SelectPrm) (res SelectRes, err error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Select",
trace.WithAttributes(
attribute.String("container_id", prm.cnr.EncodeToString()),
))
defer span.End()
err = e.execIfNotBlocked(func() error {
res, err = e._select(prm)
res, err = e._select(ctx, prm)
return err
})
return
}
func (e *StorageEngine) _select(prm SelectPrm) (SelectRes, error) {
func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, error) {
if e.metrics != nil {
defer elapsed(e.metrics.AddSearchDuration)()
}
@ -62,7 +73,7 @@ func (e *StorageEngine) _select(prm SelectPrm) (SelectRes, error) {
shPrm.SetFilters(prm.filters)
e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
res, err := sh.Select(shPrm)
res, err := sh.Select(ctx, shPrm)
if err != nil {
e.reportShardError(sh, "could not select objects from shard", err)
return false
@ -133,12 +144,12 @@ func (e *StorageEngine) list(limit uint64) (SelectRes, error) {
}
// Select selects objects from local storage using provided filters.
func Select(storage *StorageEngine, cnr cid.ID, fs object.SearchFilters) ([]oid.Address, error) {
func Select(ctx context.Context, storage *StorageEngine, cnr cid.ID, fs object.SearchFilters) ([]oid.Address, error) {
var selectPrm SelectPrm
selectPrm.WithContainerID(cnr)
selectPrm.WithFilters(fs)
res, err := storage.Select(selectPrm)
res, err := storage.Select(ctx, selectPrm)
if err != nil {
return nil, err
}


@ -1,24 +1,39 @@
package engine
import (
"context"
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
var _ pilorama.Forest = (*StorageEngine)(nil)
// TreeMove implements the pilorama.Forest interface.
func (e *StorageEngine) TreeMove(d pilorama.CIDDescriptor, treeID string, m *pilorama.Move) (*pilorama.Move, error) {
index, lst, err := e.getTreeShard(d.CID, treeID)
func (e *StorageEngine) TreeMove(ctx context.Context, d pilorama.CIDDescriptor, treeID string, m *pilorama.Move) (*pilorama.Move, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeMove",
trace.WithAttributes(
attribute.String("container_id", d.CID.EncodeToString()),
attribute.Int("position", d.Position),
attribute.Int("size", d.Size),
attribute.String("tree_id", treeID),
),
)
defer span.End()
index, lst, err := e.getTreeShard(ctx, d.CID, treeID)
if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
return nil, err
}
lm, err := lst[index].TreeMove(d, treeID, m)
lm, err := lst[index].TreeMove(ctx, d, treeID, m)
if err != nil {
if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled {
e.reportShardError(lst[index], "can't perform `TreeMove`", err,
@ -32,13 +47,26 @@ func (e *StorageEngine) TreeMove(d pilorama.CIDDescriptor, treeID string, m *pil
}
// TreeAddByPath implements the pilorama.Forest interface.
func (e *StorageEngine) TreeAddByPath(d pilorama.CIDDescriptor, treeID string, attr string, path []string, m []pilorama.KeyValue) ([]pilorama.Move, error) {
index, lst, err := e.getTreeShard(d.CID, treeID)
func (e *StorageEngine) TreeAddByPath(ctx context.Context, d pilorama.CIDDescriptor, treeID string, attr string, path []string, m []pilorama.KeyValue) ([]pilorama.Move, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeAddByPath",
trace.WithAttributes(
attribute.String("container_id", d.CID.EncodeToString()),
attribute.Int("position", d.Position),
attribute.Int("size", d.Size),
attribute.String("tree_id", treeID),
attribute.String("attr", attr),
attribute.Int("path_count", len(path)),
attribute.Int("meta_count", len(m)),
),
)
defer span.End()
index, lst, err := e.getTreeShard(ctx, d.CID, treeID)
if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
return nil, err
}
lm, err := lst[index].TreeAddByPath(d, treeID, attr, path, m)
lm, err := lst[index].TreeAddByPath(ctx, d, treeID, attr, path, m)
if err != nil {
if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled {
e.reportShardError(lst[index], "can't perform `TreeAddByPath`", err,
@ -51,13 +79,22 @@ func (e *StorageEngine) TreeAddByPath(d pilorama.CIDDescriptor, treeID string, a
}
// TreeApply implements the pilorama.Forest interface.
func (e *StorageEngine) TreeApply(cnr cidSDK.ID, treeID string, m *pilorama.Move, backgroundSync bool) error {
index, lst, err := e.getTreeShard(cnr, treeID)
func (e *StorageEngine) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *pilorama.Move, backgroundSync bool) error {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeApply",
trace.WithAttributes(
attribute.String("container_id", cnr.EncodeToString()),
attribute.String("tree_id", treeID),
attribute.Bool("background", backgroundSync),
),
)
defer span.End()
index, lst, err := e.getTreeShard(ctx, cnr, treeID)
if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
return err
}
err = lst[index].TreeApply(cnr, treeID, m, backgroundSync)
err = lst[index].TreeApply(ctx, cnr, treeID, m, backgroundSync)
if err != nil {
if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled {
e.reportShardError(lst[index], "can't perform `TreeApply`", err,
@ -70,11 +107,22 @@ func (e *StorageEngine) TreeApply(cnr cidSDK.ID, treeID string, m *pilorama.Move
}
// TreeGetByPath implements the pilorama.Forest interface.
func (e *StorageEngine) TreeGetByPath(cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]pilorama.Node, error) {
func (e *StorageEngine) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]pilorama.Node, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeGetByPath",
trace.WithAttributes(
attribute.String("container_id", cid.EncodeToString()),
attribute.String("tree_id", treeID),
attribute.String("attr", attr),
attribute.Int("path_count", len(path)),
attribute.Bool("latest", latest),
),
)
defer span.End()
var err error
var nodes []pilorama.Node
for _, sh := range e.sortShardsByWeight(cid) {
nodes, err = sh.TreeGetByPath(cid, treeID, attr, path, latest)
nodes, err = sh.TreeGetByPath(ctx, cid, treeID, attr, path, latest)
if err != nil {
if err == shard.ErrPiloramaDisabled {
break
@ -92,12 +140,21 @@ func (e *StorageEngine) TreeGetByPath(cid cidSDK.ID, treeID string, attr string,
}
// TreeGetMeta implements the pilorama.Forest interface.
func (e *StorageEngine) TreeGetMeta(cid cidSDK.ID, treeID string, nodeID pilorama.Node) (pilorama.Meta, uint64, error) {
func (e *StorageEngine) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.Node) (pilorama.Meta, uint64, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeGetMeta",
trace.WithAttributes(
attribute.String("container_id", cid.EncodeToString()),
attribute.String("tree_id", treeID),
attribute.String("node_id", fmt.Sprintf("%d", nodeID)),
),
)
defer span.End()
var err error
var m pilorama.Meta
var p uint64
for _, sh := range e.sortShardsByWeight(cid) {
m, p, err = sh.TreeGetMeta(cid, treeID, nodeID)
m, p, err = sh.TreeGetMeta(ctx, cid, treeID, nodeID)
if err != nil {
if err == shard.ErrPiloramaDisabled {
break
@ -115,11 +172,20 @@ func (e *StorageEngine) TreeGetMeta(cid cidSDK.ID, treeID string, nodeID piloram
}
// TreeGetChildren implements the pilorama.Forest interface.
func (e *StorageEngine) TreeGetChildren(cid cidSDK.ID, treeID string, nodeID pilorama.Node) ([]uint64, error) {
func (e *StorageEngine) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.Node) ([]uint64, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeGetChildren",
trace.WithAttributes(
attribute.String("container_id", cid.EncodeToString()),
attribute.String("tree_id", treeID),
attribute.String("node_id", fmt.Sprintf("%d", nodeID)),
),
)
defer span.End()
var err error
var nodes []uint64
for _, sh := range e.sortShardsByWeight(cid) {
nodes, err = sh.TreeGetChildren(cid, treeID, nodeID)
nodes, err = sh.TreeGetChildren(ctx, cid, treeID, nodeID)
if err != nil {
if err == shard.ErrPiloramaDisabled {
break
@ -137,11 +203,20 @@ func (e *StorageEngine) TreeGetChildren(cid cidSDK.ID, treeID string, nodeID pil
}
// TreeGetOpLog implements the pilorama.Forest interface.
func (e *StorageEngine) TreeGetOpLog(cid cidSDK.ID, treeID string, height uint64) (pilorama.Move, error) {
func (e *StorageEngine) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (pilorama.Move, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeGetOpLog",
trace.WithAttributes(
attribute.String("container_id", cid.EncodeToString()),
attribute.String("tree_id", treeID),
attribute.String("height", fmt.Sprintf("%d", height)),
),
)
defer span.End()
var err error
var lm pilorama.Move
for _, sh := range e.sortShardsByWeight(cid) {
lm, err = sh.TreeGetOpLog(cid, treeID, height)
lm, err = sh.TreeGetOpLog(ctx, cid, treeID, height)
if err != nil {
if err == shard.ErrPiloramaDisabled {
break
@ -159,10 +234,18 @@ func (e *StorageEngine) TreeGetOpLog(cid cidSDK.ID, treeID string, height uint64
}
// TreeDrop implements the pilorama.Forest interface.
func (e *StorageEngine) TreeDrop(cid cidSDK.ID, treeID string) error {
func (e *StorageEngine) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string) error {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeDrop",
trace.WithAttributes(
attribute.String("container_id", cid.EncodeToString()),
attribute.String("tree_id", treeID),
),
)
defer span.End()
var err error
for _, sh := range e.sortShardsByWeight(cid) {
err = sh.TreeDrop(cid, treeID)
err = sh.TreeDrop(ctx, cid, treeID)
if err != nil {
if err == shard.ErrPiloramaDisabled {
break
@ -180,11 +263,18 @@ func (e *StorageEngine) TreeDrop(cid cidSDK.ID, treeID string) error {
}
// TreeList implements the pilorama.Forest interface.
func (e *StorageEngine) TreeList(cid cidSDK.ID) ([]string, error) {
func (e *StorageEngine) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeList",
trace.WithAttributes(
attribute.String("container_id", cid.EncodeToString()),
),
)
defer span.End()
var resIDs []string
for _, sh := range e.unsortedShards() {
ids, err := sh.TreeList(cid)
ids, err := sh.TreeList(ctx, cid)
if err != nil {
if errors.Is(err, shard.ErrPiloramaDisabled) || errors.Is(err, shard.ErrReadOnlyMode) {
return nil, err
@ -205,8 +295,16 @@ func (e *StorageEngine) TreeList(cid cidSDK.ID) ([]string, error) {
}
// TreeExists implements the pilorama.Forest interface.
func (e *StorageEngine) TreeExists(cid cidSDK.ID, treeID string) (bool, error) {
_, _, err := e.getTreeShard(cid, treeID)
func (e *StorageEngine) TreeExists(ctx context.Context, cid cidSDK.ID, treeID string) (bool, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeExists",
trace.WithAttributes(
attribute.String("container_id", cid.EncodeToString()),
attribute.String("tree_id", treeID),
),
)
defer span.End()
_, _, err := e.getTreeShard(ctx, cid, treeID)
if errors.Is(err, pilorama.ErrTreeNotFound) {
return false, nil
}
@ -214,13 +312,22 @@ func (e *StorageEngine) TreeExists(cid cidSDK.ID, treeID string) (bool, error) {
}
// TreeUpdateLastSyncHeight implements the pilorama.Forest interface.
func (e *StorageEngine) TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, height uint64) error {
index, lst, err := e.getTreeShard(cid, treeID)
func (e *StorageEngine) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) error {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeUpdateLastSyncHeight",
trace.WithAttributes(
attribute.String("container_id", cid.EncodeToString()),
attribute.String("tree_id", treeID),
attribute.String("height", fmt.Sprintf("%d", height)),
),
)
defer span.End()
index, lst, err := e.getTreeShard(ctx, cid, treeID)
if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
return err
}
err = lst[index].TreeUpdateLastSyncHeight(cid, treeID, height)
err = lst[index].TreeUpdateLastSyncHeight(ctx, cid, treeID, height)
if err != nil && !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled {
e.reportShardError(lst[index], "can't update tree synchronization height", err,
zap.Stringer("cid", cid),
@ -230,11 +337,19 @@ func (e *StorageEngine) TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, h
}
// TreeLastSyncHeight implements the pilorama.Forest interface.
func (e *StorageEngine) TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64, error) {
func (e *StorageEngine) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string) (uint64, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeLastSyncHeight",
trace.WithAttributes(
attribute.String("container_id", cid.EncodeToString()),
attribute.String("tree_id", treeID),
),
)
defer span.End()
var err error
var height uint64
for _, sh := range e.sortShardsByWeight(cid) {
height, err = sh.TreeLastSyncHeight(cid, treeID)
height, err = sh.TreeLastSyncHeight(ctx, cid, treeID)
if err != nil {
if err == shard.ErrPiloramaDisabled {
break
@ -251,10 +366,10 @@ func (e *StorageEngine) TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64
return height, err
}
func (e *StorageEngine) getTreeShard(cid cidSDK.ID, treeID string) (int, []hashedShard, error) {
func (e *StorageEngine) getTreeShard(ctx context.Context, cid cidSDK.ID, treeID string) (int, []hashedShard, error) {
lst := e.sortShardsByWeight(cid)
for i, sh := range lst {
exists, err := sh.TreeExists(cid, treeID)
exists, err := sh.TreeExists(ctx, cid, treeID)
if err != nil {
return 0, nil, err
}
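Note: every engine Tree* method above applies the same pattern: derive a child span from the incoming context, attach identifying attributes, and end the span on return. A caller-side sketch of how the spans nest once a traced context is supplied; eng, d, treeID and m are illustrative placeholders, not code from this PR:

// Assuming a tracer provider is registered, each layer opens a child span,
// e.g. StorageEngine.TreeMove -> boltForest.TreeMove on the selected shard.
ctx, span := tracing.StartSpanFromContext(context.Background(), "example.TreeMove")
defer span.End()

if _, err := eng.TreeMove(ctx, d, treeID, m); err != nil {
	return err // shard-level failures are still logged via reportShardError
}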

View file

@ -1,6 +1,7 @@
package engine
import (
"context"
"strconv"
"testing"
@ -31,11 +32,11 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) {
for i := 0; i < objCount; i++ {
obj := testutil.GenerateObjectWithCID(cid)
testutil.AddAttribute(obj, pilorama.AttributeFilename, strconv.Itoa(i))
err := Put(te.ng, obj)
err := Put(context.Background(), te.ng, obj)
if err != nil {
b.Fatal(err)
}
_, err = te.ng.TreeAddByPath(d, treeID, pilorama.AttributeFilename, nil,
_, err = te.ng.TreeAddByPath(context.Background(), d, treeID, pilorama.AttributeFilename, nil,
[]pilorama.KeyValue{{pilorama.AttributeFilename, []byte(strconv.Itoa(i))}})
if err != nil {
b.Fatal(err)
@ -51,7 +52,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) {
prm.WithFilters(fs)
for i := 0; i < b.N; i++ {
res, err := te.ng.Select(prm)
res, err := te.ng.Select(context.Background(), prm)
if err != nil {
b.Fatal(err)
}
@ -62,7 +63,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) {
})
b.Run("TreeGetByPath", func(b *testing.B) {
for i := 0; i < b.N; i++ {
nodes, err := te.ng.TreeGetByPath(cid, treeID, pilorama.AttributeFilename, []string{strconv.Itoa(objCount / 2)}, true)
nodes, err := te.ng.TreeGetByPath(context.Background(), cid, treeID, pilorama.AttributeFilename, []string{strconv.Itoa(objCount / 2)}, true)
if err != nil {
b.Fatal(err)
}

View file

@ -1,7 +1,12 @@
package engine
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// FlushWriteCachePrm groups the parameters of FlushWriteCache operation.
@ -26,7 +31,14 @@ func (p *FlushWriteCachePrm) SetIgnoreErrors(ignore bool) {
type FlushWriteCacheRes struct{}
// FlushWriteCache flushes write-cache on a single shard.
func (e *StorageEngine) FlushWriteCache(p FlushWriteCachePrm) (FlushWriteCacheRes, error) {
func (e *StorageEngine) FlushWriteCache(ctx context.Context, p FlushWriteCachePrm) (FlushWriteCacheRes, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.FlushWriteCache",
trace.WithAttributes(
attribute.String("shard)id", p.shardID.String()),
attribute.Bool("ignore_errors", p.ignoreErrors),
))
defer span.End()
e.mtx.RLock()
sh, ok := e.shards[p.shardID.String()]
e.mtx.RUnlock()
@ -38,5 +50,5 @@ func (e *StorageEngine) FlushWriteCache(p FlushWriteCachePrm) (FlushWriteCacheRe
var prm shard.FlushWriteCachePrm
prm.SetIgnoreErrors(p.ignoreErrors)
return FlushWriteCacheRes{}, sh.FlushWriteCache(prm)
return FlushWriteCacheRes{}, sh.FlushWriteCache(ctx, prm)
}
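Note: a hedged call-site sketch for the updated signature. Only SetIgnoreErrors is visible in this diff, so the shard-ID setter below is an assumed placeholder:

var prm engine.FlushWriteCachePrm
prm.SetShardID(shID) // assumed setter, not shown in this diff
prm.SetIgnoreErrors(true)
_, err := eng.FlushWriteCache(ctx, prm)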

View file

@ -1,6 +1,7 @@
package meta_test
import (
"context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@ -55,6 +56,6 @@ func metaExists(db *meta.DB, addr oid.Address) (bool, error) {
var existsPrm meta.ExistsPrm
existsPrm.SetAddress(addr)
res, err := db.Exists(existsPrm)
res, err := db.Exists(context.Background(), existsPrm)
return res.Exists(), err
}

View file

@ -1,6 +1,7 @@
package meta_test
import (
"context"
"testing"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@ -38,7 +39,7 @@ func TestCounters(t *testing.T) {
for i := 0; i < objCount; i++ {
prm.SetObject(oo[i])
_, err = db.Put(prm)
_, err = db.Put(context.Background(), prm)
require.NoError(t, err)
c, err = db.ObjectCounters()
@ -58,7 +59,7 @@ func TestCounters(t *testing.T) {
for i := objCount - 1; i >= 0; i-- {
prm.SetAddresses(objectcore.AddressOf(oo[i]))
res, err := db.Delete(prm)
res, err := db.Delete(context.Background(), prm)
require.NoError(t, err)
require.Equal(t, uint64(1), res.AvailableObjectsRemoved())
@ -89,7 +90,7 @@ func TestCounters(t *testing.T) {
prm.SetTombstoneAddress(oidtest.Address())
prm.SetAddresses(inhumedObjs...)
res, err := db.Inhume(prm)
res, err := db.Inhume(context.Background(), prm)
require.NoError(t, err)
require.Equal(t, uint64(len(inhumedObjs)), res.AvailableInhumed())
@ -159,7 +160,7 @@ func TestCounters(t *testing.T) {
prm.SetTombstoneAddress(oidtest.Address())
prm.SetAddresses(inhumedObjs...)
_, err = db.Inhume(prm)
_, err = db.Inhume(context.Background(), prm)
require.NoError(t, err)
c, err = db.ObjectCounters()
@ -223,7 +224,7 @@ func TestCounters_Expired(t *testing.T) {
inhumePrm.SetGCMark()
inhumePrm.SetAddresses(oo[0])
inhumeRes, err := db.Inhume(inhumePrm)
inhumeRes, err := db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
require.Equal(t, uint64(1), inhumeRes.AvailableInhumed())
@ -240,7 +241,7 @@ func TestCounters_Expired(t *testing.T) {
var deletePrm meta.DeletePrm
deletePrm.SetAddresses(oo[0])
deleteRes, err := db.Delete(deletePrm)
deleteRes, err := db.Delete(context.Background(), deletePrm)
require.NoError(t, err)
require.Zero(t, deleteRes.AvailableObjectsRemoved())
@ -257,7 +258,7 @@ func TestCounters_Expired(t *testing.T) {
deletePrm.SetAddresses(oo[0])
deleteRes, err = db.Delete(deletePrm)
deleteRes, err = db.Delete(context.Background(), deletePrm)
require.NoError(t, err)
require.Equal(t, uint64(1), deleteRes.AvailableObjectsRemoved())
@ -284,7 +285,7 @@ func putObjs(t *testing.T, db *meta.DB, count int, withParent bool) []*object.Ob
oo = append(oo, o)
prm.SetObject(o)
_, err = db.Put(prm)
_, err = db.Put(context.Background(), prm)
require.NoError(t, err)
c, err := db.ObjectCounters()

View file

@ -2,15 +2,19 @@ package meta
import (
"bytes"
"context"
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// DeletePrm groups the parameters of Delete operation.
@ -65,7 +69,13 @@ type referenceNumber struct {
type referenceCounter map[string]*referenceNumber
// Delete removes object records from metabase indexes.
func (db *DB) Delete(prm DeletePrm) (DeleteRes, error) {
func (db *DB) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
_, span := tracing.StartSpanFromContext(ctx, "metabase.Delete",
trace.WithAttributes(
attribute.Int("addr_count", len(prm.addrs)),
))
defer span.End()
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
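Note: usage sketch for the traced Delete; the span records only the batch size (addr_count), never individual addresses. db and addrs are placeholders:

var prm meta.DeletePrm
prm.SetAddresses(addrs...) // addr_count attribute = len(addrs)
res, err := db.Delete(ctx, prm)
if err == nil {
	removed := res.AvailableObjectsRemoved()
	_ = removed
}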

View file

@ -1,6 +1,7 @@
package meta_test
import (
"context"
"errors"
"testing"
@ -139,6 +140,6 @@ func metaDelete(db *meta.DB, addrs ...oid.Address) error {
var deletePrm meta.DeletePrm
deletePrm.SetAddresses(addrs...)
_, err := db.Delete(deletePrm)
_, err := db.Delete(context.Background(), deletePrm)
return err
}

View file

@ -1,15 +1,19 @@
package meta
import (
"context"
"fmt"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// ExistsPrm groups the parameters of Exists operation.
@ -39,7 +43,13 @@ func (p ExistsRes) Exists() bool {
//
// Returns an error of type apistatus.ObjectAlreadyRemoved if object has been placed in graveyard.
// Returns the object.ErrObjectIsExpired if the object is presented but already expired.
func (db *DB) Exists(prm ExistsPrm) (res ExistsRes, err error) {
func (db *DB) Exists(ctx context.Context, prm ExistsPrm) (res ExistsRes, err error) {
_, span := tracing.StartSpanFromContext(ctx, "metabase.Exists",
trace.WithAttributes(
attribute.String("address", prm.addr.EncodeToString()),
))
defer span.End()
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()

View file

@ -1,14 +1,18 @@
package meta
import (
"context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// GetPrm groups the parameters of Get operation.
@ -46,7 +50,14 @@ func (r GetRes) Header() *objectSDK.Object {
// Returns an error of type apistatus.ObjectNotFound if object is missing in DB.
// Returns an error of type apistatus.ObjectAlreadyRemoved if object has been placed in graveyard.
// Returns the object.ErrObjectIsExpired if the object is presented but already expired.
func (db *DB) Get(prm GetPrm) (res GetRes, err error) {
func (db *DB) Get(ctx context.Context, prm GetPrm) (res GetRes, err error) {
_, span := tracing.StartSpanFromContext(ctx, "metabase.Get",
trace.WithAttributes(
attribute.String("address", prm.addr.EncodeToString()),
attribute.Bool("raw", prm.raw),
))
defer span.End()
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()

View file

@ -2,6 +2,7 @@ package meta_test
import (
"bytes"
"context"
"fmt"
"os"
"runtime"
@ -132,7 +133,7 @@ func TestDB_Get(t *testing.T) {
var prm meta.InhumePrm
prm.SetAddresses(obj)
_, err = db.Inhume(prm)
_, err = db.Inhume(context.Background(), prm)
require.NoError(t, err)
_, err = metaGet(db, obj, false)
require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
@ -216,7 +217,7 @@ func benchmarkGet(b *testing.B, numOfObj int) {
getPrm.SetAddress(addrs[counter%len(addrs)])
counter++
_, err := db.Get(getPrm)
_, err := db.Get(context.Background(), getPrm)
if err != nil {
b.Fatal(err)
}
@ -235,7 +236,7 @@ func benchmarkGet(b *testing.B, numOfObj int) {
var getPrm meta.GetPrm
getPrm.SetAddress(addrs[i%len(addrs)])
_, err := db.Get(getPrm)
_, err := db.Get(context.Background(), getPrm)
if err != nil {
b.Fatal(err)
}
@ -248,6 +249,6 @@ func metaGet(db *meta.DB, addr oid.Address, raw bool) (*objectSDK.Object, error)
prm.SetAddress(addr)
prm.SetRaw(raw)
res, err := db.Get(prm)
res, err := db.Get(context.Background(), prm)
return res.Header(), err
}

View file

@ -1,6 +1,7 @@
package meta_test
import (
"context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@ -68,7 +69,7 @@ func TestDB_Iterate_OffsetNotFound(t *testing.T) {
inhumePrm.SetAddresses(object.AddressOf(obj1))
inhumePrm.SetGCMark()
_, err = db.Inhume(inhumePrm)
_, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
var counter int
@ -138,14 +139,14 @@ func TestDB_IterateDeletedObjects(t *testing.T) {
inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2))
inhumePrm.SetTombstoneAddress(addrTombstone)
_, err = db.Inhume(inhumePrm)
_, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
inhumePrm.SetAddresses(object.AddressOf(obj3), object.AddressOf(obj4))
inhumePrm.SetGCMark()
// inhume with GC mark
_, err = db.Inhume(inhumePrm)
_, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
var (
@ -225,7 +226,7 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
object.AddressOf(obj3), object.AddressOf(obj4))
inhumePrm.SetTombstoneAddress(addrTombstone)
_, err = db.Inhume(inhumePrm)
_, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
expectedGraveyard := []oid.Address{
@ -320,7 +321,7 @@ func TestDB_IterateOverGarbage_Offset(t *testing.T) {
object.AddressOf(obj3), object.AddressOf(obj4))
inhumePrm.SetGCMark()
_, err = db.Inhume(inhumePrm)
_, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
expectedGarbage := []oid.Address{
@ -404,7 +405,7 @@ func TestDB_DropGraves(t *testing.T) {
inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2))
inhumePrm.SetTombstoneAddress(addrTombstone)
_, err = db.Inhume(inhumePrm)
_, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
buriedTS := make([]meta.TombstonedObject, 0)

View file

@ -2,9 +2,11 @@ package meta
import (
"bytes"
"context"
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@ -118,7 +120,10 @@ var ErrLockObjectRemoval = logicerr.New("lock object removal")
//
// NOTE: Marks any object with GC mark (despite any prohibitions on operations
// with that object) if WithForceGCMark option has been provided.
func (db *DB) Inhume(prm InhumePrm) (res InhumeRes, err error) {
func (db *DB) Inhume(ctx context.Context, prm InhumePrm) (res InhumeRes, err error) {
_, span := tracing.StartSpanFromContext(ctx, "metabase.Inhume")
defer span.End()
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
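Note: metabase.Inhume is the only new span started without attributes. The two inhume modes exercised by the tests below, as a compact sketch; db, addr and tomb are placeholders:

var prm meta.InhumePrm
prm.SetAddresses(addr)
prm.SetTombstoneAddress(tomb) // bury under a tombstone...
_, err := db.Inhume(ctx, prm)

prm.SetGCMark() // ...or mark for GC instead, as the tests below do
_, err = db.Inhume(ctx, prm)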

View file

@ -1,6 +1,7 @@
package meta_test
import (
"context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@ -50,40 +51,40 @@ func TestInhumeTombOnTomb(t *testing.T) {
inhumePrm.SetTombstoneAddress(addr2)
// inhume addr1 via addr2
_, err = db.Inhume(inhumePrm)
_, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
existsPrm.SetAddress(addr1)
// addr1 should become inhumed {addr1:addr2}
_, err = db.Exists(existsPrm)
_, err = db.Exists(context.Background(), existsPrm)
require.ErrorAs(t, err, new(apistatus.ObjectAlreadyRemoved))
inhumePrm.SetAddresses(addr3)
inhumePrm.SetTombstoneAddress(addr1)
// try to inhume addr3 via addr1
_, err = db.Inhume(inhumePrm)
_, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
// record with {addr1:addr2} should be removed from graveyard
// as a tomb-on-tomb; metabase should return ObjectNotFound
// NOT ObjectAlreadyRemoved since that record has been removed
// from graveyard but addr1 is still marked with GC
_, err = db.Exists(existsPrm)
_, err = db.Exists(context.Background(), existsPrm)
require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
existsPrm.SetAddress(addr3)
// addr3 should be inhumed {addr3: addr1}
_, err = db.Exists(existsPrm)
_, err = db.Exists(context.Background(), existsPrm)
require.ErrorAs(t, err, new(apistatus.ObjectAlreadyRemoved))
inhumePrm.SetAddresses(addr1)
inhumePrm.SetTombstoneAddress(oidtest.Address())
// try to inhume addr1 (which is already a tombstone in graveyard)
_, err = db.Inhume(inhumePrm)
_, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
existsPrm.SetAddress(addr1)
@ -91,7 +92,7 @@ func TestInhumeTombOnTomb(t *testing.T) {
// record with addr1 key should not appear in graveyard
// (tomb can not be inhumed) but should be kept as object
// with GC mark
_, err = db.Exists(existsPrm)
_, err = db.Exists(context.Background(), existsPrm)
require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
}
@ -100,13 +101,13 @@ func TestInhumeLocked(t *testing.T) {
locked := oidtest.Address()
err := db.Lock(locked.Container(), oidtest.ID(), []oid.ID{locked.Object()})
err := db.Lock(context.Background(), locked.Container(), oidtest.ID(), []oid.ID{locked.Object()})
require.NoError(t, err)
var prm meta.InhumePrm
prm.SetAddresses(locked)
_, err = db.Inhume(prm)
_, err = db.Inhume(context.Background(), prm)
var e apistatus.ObjectLocked
require.ErrorAs(t, err, &e)
@ -117,6 +118,6 @@ func metaInhume(db *meta.DB, target, tomb oid.Address) error {
inhumePrm.SetAddresses(target)
inhumePrm.SetTombstoneAddress(tomb)
_, err := db.Inhume(inhumePrm)
_, err := db.Inhume(context.Background(), inhumePrm)
return err
}

View file

@ -1,6 +1,7 @@
package meta_test
import (
"context"
"strconv"
"testing"
@ -34,7 +35,7 @@ func TestDB_IterateExpired(t *testing.T) {
expiredLocked := putWithExpiration(t, db, object.TypeRegular, epoch-1)
require.NoError(t, db.Lock(expiredLocked.Container(), oidtest.ID(), []oid.ID{expiredLocked.Object()}))
require.NoError(t, db.Lock(context.Background(), expiredLocked.Container(), oidtest.ID(), []oid.ID{expiredLocked.Object()}))
err := db.IterateExpired(epoch, func(exp *meta.ExpiredObject) error {
if addr, ok := mAlive[exp.Type()]; ok {
@ -81,13 +82,13 @@ func TestDB_IterateCoveredByTombstones(t *testing.T) {
prm.SetAddresses(protected1, protected2, protectedLocked)
prm.SetTombstoneAddress(ts)
_, err = db.Inhume(prm)
_, err = db.Inhume(context.Background(), prm)
require.NoError(t, err)
prm.SetAddresses(garbage)
prm.SetGCMark()
_, err = db.Inhume(prm)
_, err = db.Inhume(context.Background(), prm)
require.NoError(t, err)
var handled []oid.Address
@ -107,7 +108,7 @@ func TestDB_IterateCoveredByTombstones(t *testing.T) {
require.Contains(t, handled, protected2)
require.Contains(t, handled, protectedLocked)
err = db.Lock(protectedLocked.Container(), oidtest.ID(), []oid.ID{protectedLocked.Object()})
err = db.Lock(context.Background(), protectedLocked.Container(), oidtest.ID(), []oid.ID{protectedLocked.Object()})
require.NoError(t, err)
handled = handled[:0]

View file

@ -2,14 +2,18 @@ package meta
import (
"bytes"
"context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
var bucketNameLocked = []byte{lockedPrefix}
@ -30,7 +34,15 @@ func bucketNameLockers(idCnr cid.ID, key []byte) []byte {
// Allows locking regular objects only (otherwise returns apistatus.LockNonRegularObject).
//
// Locked list should be unique. Panics if it is empty.
func (db *DB) Lock(cnr cid.ID, locker oid.ID, locked []oid.ID) error {
func (db *DB) Lock(ctx context.Context, cnr cid.ID, locker oid.ID, locked []oid.ID) error {
_, span := tracing.StartSpanFromContext(ctx, "metabase.Lock",
trace.WithAttributes(
attribute.String("container_id", cnr.EncodeToString()),
attribute.String("locker", locker.EncodeToString()),
attribute.Int("locked_count", len(locked)),
))
defer span.End()
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
@ -266,7 +278,13 @@ func (i IsLockedRes) Locked() bool {
// object is considered as non-locked.
//
// Returns only non-logical errors related to underlying database.
func (db *DB) IsLocked(prm IsLockedPrm) (res IsLockedRes, err error) {
func (db *DB) IsLocked(ctx context.Context, prm IsLockedPrm) (res IsLockedRes, err error) {
_, span := tracing.StartSpanFromContext(ctx, "metabase.IsLocked",
trace.WithAttributes(
attribute.String("address", prm.addr.EncodeToString()),
))
defer span.End()
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
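Note: compact usage sketch for the two traced locking entry points; cnr, lockerID, objID and addr are placeholders:

// Lock panics on an empty locked list, so pass at least one ID.
if err := db.Lock(ctx, cnr, lockerID, []oid.ID{objID}); err != nil {
	return err
}

var prm meta.IsLockedPrm
prm.SetAddress(addr)
res, err := db.IsLocked(ctx, prm)
if err == nil && res.Locked() {
	// the object cannot be inhumed until the lock is removed
}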

View file

@ -1,6 +1,7 @@
package meta_test
import (
"context"
"testing"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@ -20,8 +21,8 @@ func TestDB_Lock(t *testing.T) {
db := newDB(t)
t.Run("empty locked list", func(t *testing.T) {
require.Panics(t, func() { _ = db.Lock(cnr, oid.ID{}, nil) })
require.Panics(t, func() { _ = db.Lock(cnr, oid.ID{}, []oid.ID{}) })
require.Panics(t, func() { _ = db.Lock(context.Background(), cnr, oid.ID{}, nil) })
require.Panics(t, func() { _ = db.Lock(context.Background(), cnr, oid.ID{}, []oid.ID{}) })
})
t.Run("(ir)regular", func(t *testing.T) {
@ -44,7 +45,7 @@ func TestDB_Lock(t *testing.T) {
id, _ := obj.ID()
// try to lock it
err = db.Lock(cnr, oidtest.ID(), []oid.ID{id})
err = db.Lock(context.Background(), cnr, oidtest.ID(), []oid.ID{id})
if typ == object.TypeRegular {
require.NoError(t, err, typ)
} else {
@ -65,27 +66,27 @@ func TestDB_Lock(t *testing.T) {
// check locking relation
inhumePrm.SetAddresses(objAddr)
_, err := db.Inhume(inhumePrm)
_, err := db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, new(apistatus.ObjectLocked))
inhumePrm.SetTombstoneAddress(oidtest.Address())
_, err = db.Inhume(inhumePrm)
_, err = db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, new(apistatus.ObjectLocked))
// try to remove lock object
inhumePrm.SetAddresses(lockAddr)
_, err = db.Inhume(inhumePrm)
_, err = db.Inhume(context.Background(), inhumePrm)
require.Error(t, err)
// check that locking relation has not been
// dropped
inhumePrm.SetAddresses(objAddr)
_, err = db.Inhume(inhumePrm)
_, err = db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, new(apistatus.ObjectLocked))
inhumePrm.SetTombstoneAddress(oidtest.Address())
_, err = db.Inhume(inhumePrm)
_, err = db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, new(apistatus.ObjectLocked))
})
@ -105,7 +106,7 @@ func TestDB_Lock(t *testing.T) {
inhumePrm.SetForceGCMark()
inhumePrm.SetLockObjectHandling()
res, err := db.Inhume(inhumePrm)
res, err := db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
require.Len(t, res.DeletedLockObjects(), 1)
require.Equal(t, objectcore.AddressOf(lockObj), res.DeletedLockObjects()[0])
@ -117,7 +118,7 @@ func TestDB_Lock(t *testing.T) {
inhumePrm.SetGCMark()
// now we can inhume the object
_, err = db.Inhume(inhumePrm)
_, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
})
@ -134,7 +135,7 @@ func TestDB_Lock(t *testing.T) {
inhumePrm.SetAddresses(objectcore.AddressOf(lockObj))
inhumePrm.SetLockObjectHandling()
res, err := db.Inhume(inhumePrm)
res, err := db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
require.Len(t, res.DeletedLockObjects(), 1)
require.Equal(t, objectcore.AddressOf(lockObj), res.DeletedLockObjects()[0])
@ -151,7 +152,7 @@ func TestDB_Lock(t *testing.T) {
for i := 0; i < objsNum; i++ {
inhumePrm.SetAddresses(objectcore.AddressOf(objs[i]))
res, err = db.Inhume(inhumePrm)
res, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
require.Len(t, res.DeletedLockObjects(), 0)
}
@ -164,7 +165,7 @@ func TestDB_Lock(t *testing.T) {
inhumePrm.SetForceGCMark()
inhumePrm.SetAddresses(objectcore.AddressOf(lockObj))
res, err := db.Inhume(inhumePrm)
res, err := db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
require.Len(t, res.DeletedLockObjects(), 0)
})
@ -184,7 +185,7 @@ func TestDB_Lock_Expired(t *testing.T) {
require.ErrorIs(t, err, meta.ErrObjectIsExpired)
// lock the obj
require.NoError(t, db.Lock(addr.Container(), oidtest.ID(), []oid.ID{addr.Object()}))
require.NoError(t, db.Lock(context.Background(), addr.Container(), oidtest.ID(), []oid.ID{addr.Object()}))
// object is expired but locked, thus, must be available
_, err = metaGet(db, addr, false)
@ -202,7 +203,7 @@ func TestDB_IsLocked(t *testing.T) {
for _, obj := range objs {
prm.SetAddress(objectcore.AddressOf(obj))
res, err := db.IsLocked(prm)
res, err := db.IsLocked(context.Background(), prm)
require.NoError(t, err)
require.True(t, res.Locked())
@ -212,7 +213,7 @@ func TestDB_IsLocked(t *testing.T) {
prm.SetAddress(oidtest.Address())
res, err := db.IsLocked(prm)
res, err := db.IsLocked(context.Background(), prm)
require.NoError(t, err)
require.False(t, res.Locked())
@ -224,12 +225,12 @@ func TestDB_IsLocked(t *testing.T) {
var putPrm meta.PutPrm
putPrm.SetObject(obj)
_, err = db.Put(putPrm)
_, err = db.Put(context.Background(), putPrm)
require.NoError(t, err)
prm.SetAddress(objectcore.AddressOf(obj))
res, err = db.IsLocked(prm)
res, err = db.IsLocked(context.Background(), prm)
require.NoError(t, err)
require.False(t, res.Locked())
@ -260,7 +261,7 @@ func putAndLockObj(t *testing.T, db *meta.DB, numOfLockedObjs int) ([]*object.Ob
err := putBig(db, lockObj)
require.NoError(t, err)
err = db.Lock(cnr, lockID, lockedObjIDs)
err = db.Lock(context.Background(), cnr, lockID, lockedObjIDs)
require.NoError(t, err)
return lockedObjs, lockObj

View file

@ -1,10 +1,14 @@
package meta
import (
"context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// ToMoveItPrm groups the parameters of ToMoveIt operation.
@ -48,7 +52,13 @@ func (p MovableRes) AddressList() []oid.Address {
// ToMoveIt marks objects to be moved into another shard. This is useful
// for faster HRW fetching.
func (db *DB) ToMoveIt(prm ToMoveItPrm) (res ToMoveItRes, err error) {
func (db *DB) ToMoveIt(ctx context.Context, prm ToMoveItPrm) (res ToMoveItRes, err error) {
_, span := tracing.StartSpanFromContext(ctx, "metabase.ToMoveIt",
trace.WithAttributes(
attribute.String("address", prm.addr.EncodeToString()),
))
defer span.End()
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()

View file

@ -1,6 +1,7 @@
package meta_test
import (
"context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@ -61,7 +62,7 @@ func metaToMoveIt(db *meta.DB, addr oid.Address) error {
var toMovePrm meta.ToMoveItPrm
toMovePrm.SetAddress(addr)
_, err := db.ToMoveIt(toMovePrm)
_, err := db.ToMoveIt(context.Background(), toMovePrm)
return err
}

View file

@ -1,11 +1,13 @@
package meta
import (
"context"
"encoding/binary"
"errors"
"fmt"
gio "io"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util"
@ -14,6 +16,8 @@ import (
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/nspcc-dev/neo-go/pkg/io"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
type (
@ -52,7 +56,13 @@ var (
//
// Returns an error of type apistatus.ObjectAlreadyRemoved if object has been placed in graveyard.
// Returns the object.ErrObjectIsExpired if the object is presented but already expired.
func (db *DB) Put(prm PutPrm) (res PutRes, err error) {
func (db *DB) Put(ctx context.Context, prm PutPrm) (res PutRes, err error) {
_, span := tracing.StartSpanFromContext(ctx, "metabase.Put",
trace.WithAttributes(
attribute.String("address", objectCore.AddressOf(prm.obj).EncodeToString()),
))
defer span.End()
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()

View file

@ -1,6 +1,7 @@
package meta_test
import (
"context"
"runtime"
"strconv"
"testing"
@ -117,7 +118,7 @@ func metaPut(db *meta.DB, obj *objectSDK.Object, id []byte) error {
putPrm.SetObject(obj)
putPrm.SetStorageID(id)
_, err := db.Put(putPrm)
_, err := db.Put(context.Background(), putPrm)
return err
}

View file

@ -1,17 +1,21 @@
package meta
import (
"context"
"encoding/binary"
"errors"
"fmt"
"strings"
v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@ -56,7 +60,13 @@ func (r SelectRes) AddressList() []oid.Address {
}
// Select returns a list of addresses of objects that match the search filters.
func (db *DB) Select(prm SelectPrm) (res SelectRes, err error) {
func (db *DB) Select(ctx context.Context, prm SelectPrm) (res SelectRes, err error) {
_, span := tracing.StartSpanFromContext(ctx, "metabase.Select",
trace.WithAttributes(
attribute.String("container_id", prm.cnr.EncodeToString()),
))
defer span.End()
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()

View file

@ -1,6 +1,7 @@
package meta_test
import (
"context"
"encoding/hex"
"strconv"
"testing"
@ -829,7 +830,7 @@ func benchmarkSelect(b *testing.B, db *meta.DB, cid cidSDK.ID, fs objectSDK.Sear
prm.SetFilters(fs)
for i := 0; i < b.N; i++ {
res, err := db.Select(prm)
res, err := db.Select(context.Background(), prm)
if err != nil {
b.Fatal(err)
}
@ -844,6 +845,6 @@ func metaSelect(db *meta.DB, cnr cidSDK.ID, fs objectSDK.SearchFilters) ([]oid.A
prm.SetFilters(fs)
prm.SetContainerID(cnr)
res, err := db.Select(prm)
res, err := db.Select(context.Background(), prm)
return res.AddressList(), err
}

View file

@ -1,11 +1,15 @@
package meta
import (
"context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/nspcc-dev/neo-go/pkg/util/slice"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// StorageIDPrm groups the parameters of StorageID operation.
@ -30,7 +34,13 @@ func (r StorageIDRes) StorageID() []byte {
// StorageID returns the storage descriptor for objects from the blobstor.
// It is stored together with the object and can make get/delete operations faster.
func (db *DB) StorageID(prm StorageIDPrm) (res StorageIDRes, err error) {
func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (res StorageIDRes, err error) {
_, span := tracing.StartSpanFromContext(ctx, "metabase.StorageID",
trace.WithAttributes(
attribute.String("address", prm.addr.EncodeToString()),
))
defer span.End()
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()

View file

@ -1,6 +1,7 @@
package meta_test
import (
"context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@ -63,6 +64,6 @@ func metaStorageID(db *meta.DB, addr oid.Address) ([]byte, error) {
var sidPrm meta.StorageIDPrm
sidPrm.SetAddress(addr)
r, err := db.StorageID(sidPrm)
r, err := db.StorageID(context.Background(), sidPrm)
return r.StorageID(), err
}

View file

@ -2,6 +2,7 @@ package pilorama
import (
"bytes"
"context"
"encoding/binary"
"errors"
"fmt"
@ -11,12 +12,15 @@ import (
"sync"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/nspcc-dev/neo-go/pkg/io"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
type boltForest struct {
@ -144,7 +148,17 @@ func (t *boltForest) Close() error {
}
// TreeMove implements the Forest interface.
func (t *boltForest) TreeMove(d CIDDescriptor, treeID string, m *Move) (*Move, error) {
func (t *boltForest) TreeMove(ctx context.Context, d CIDDescriptor, treeID string, m *Move) (*Move, error) {
_, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeMove",
trace.WithAttributes(
attribute.String("container_id", d.CID.EncodeToString()),
attribute.Int("position", d.Position),
attribute.Int("size", d.Size),
attribute.String("tree_id", treeID),
),
)
defer span.End()
if !d.checkValid() {
return nil, ErrInvalidCIDDescriptor
}
@ -175,7 +189,15 @@ func (t *boltForest) TreeMove(d CIDDescriptor, treeID string, m *Move) (*Move, e
}
// TreeExists implements the Forest interface.
func (t *boltForest) TreeExists(cid cidSDK.ID, treeID string) (bool, error) {
func (t *boltForest) TreeExists(ctx context.Context, cid cidSDK.ID, treeID string) (bool, error) {
_, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeExists",
trace.WithAttributes(
attribute.String("container_id", cid.EncodeToString()),
attribute.String("tree_id", treeID),
),
)
defer span.End()
t.modeMtx.RLock()
defer t.modeMtx.RUnlock()
@ -197,7 +219,16 @@ func (t *boltForest) TreeExists(cid cidSDK.ID, treeID string) (bool, error) {
var syncHeightKey = []byte{'h'}
// TreeUpdateLastSyncHeight implements the pilorama.Forest interface.
func (t *boltForest) TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, height uint64) error {
func (t *boltForest) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) error {
_, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeUpdateLastSyncHeight",
trace.WithAttributes(
attribute.String("container_id", cid.EncodeToString()),
attribute.String("tree_id", treeID),
attribute.String("height", fmt.Sprintf("%d", height)),
),
)
defer span.End()
rawHeight := make([]byte, 8)
binary.LittleEndian.PutUint64(rawHeight, height)
@ -214,7 +245,15 @@ func (t *boltForest) TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, heig
}
// TreeLastSyncHeight implements the pilorama.Forest interface.
func (t *boltForest) TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64, error) {
func (t *boltForest) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string) (uint64, error) {
_, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeLastSyncHeight",
trace.WithAttributes(
attribute.String("container_id", cid.EncodeToString()),
attribute.String("tree_id", treeID),
),
)
defer span.End()
var height uint64
buck := bucketName(cid, treeID)
@ -235,7 +274,20 @@ func (t *boltForest) TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64, e
}
// TreeAddByPath implements the Forest interface.
func (t *boltForest) TreeAddByPath(d CIDDescriptor, treeID string, attr string, path []string, meta []KeyValue) ([]Move, error) {
func (t *boltForest) TreeAddByPath(ctx context.Context, d CIDDescriptor, treeID string, attr string, path []string, meta []KeyValue) ([]Move, error) {
_, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeAddByPath",
trace.WithAttributes(
attribute.String("container_id", d.CID.EncodeToString()),
attribute.Int("position", d.Position),
attribute.Int("size", d.Size),
attribute.String("tree_id", treeID),
attribute.String("attr", attr),
attribute.Int("path_count", len(path)),
attribute.Int("meta_count", len(meta)),
),
)
defer span.End()
if !d.checkValid() {
return nil, ErrInvalidCIDDescriptor
}
@ -329,7 +381,16 @@ func (t *boltForest) findSpareID(bTree *bbolt.Bucket) uint64 {
}
// TreeApply implements the Forest interface.
func (t *boltForest) TreeApply(cnr cidSDK.ID, treeID string, m *Move, backgroundSync bool) error {
func (t *boltForest) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *Move, backgroundSync bool) error {
_, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeApply",
trace.WithAttributes(
attribute.String("container_id", cnr.EncodeToString()),
attribute.String("tree_id", treeID),
attribute.Bool("background", backgroundSync),
),
)
defer span.End()
t.modeMtx.RLock()
defer t.modeMtx.RUnlock()
@ -627,7 +688,18 @@ func (t *boltForest) isAncestor(b *bbolt.Bucket, parent, child Node) bool {
}
// TreeGetByPath implements the Forest interface.
func (t *boltForest) TreeGetByPath(cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]Node, error) {
func (t *boltForest) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]Node, error) {
_, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeGetByPath",
trace.WithAttributes(
attribute.String("container_id", cid.EncodeToString()),
attribute.String("tree_id", treeID),
attribute.String("attr", attr),
attribute.Int("path_count", len(path)),
attribute.Bool("latest", latest),
),
)
defer span.End()
if !isAttributeInternal(attr) {
return nil, ErrNotPathAttribute
}
@ -686,7 +758,16 @@ func (t *boltForest) TreeGetByPath(cid cidSDK.ID, treeID string, attr string, pa
}
// TreeGetMeta implements the Forest interface.
func (t *boltForest) TreeGetMeta(cid cidSDK.ID, treeID string, nodeID Node) (Meta, Node, error) {
func (t *boltForest) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) (Meta, Node, error) {
_, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeGetMeta",
trace.WithAttributes(
attribute.String("container_id", cid.EncodeToString()),
attribute.String("tree_id", treeID),
attribute.String("node_id", fmt.Sprintf("%d", nodeID)),
),
)
defer span.End()
t.modeMtx.RLock()
defer t.modeMtx.RUnlock()
@ -717,7 +798,16 @@ func (t *boltForest) TreeGetMeta(cid cidSDK.ID, treeID string, nodeID Node) (Met
}
// TreeGetChildren implements the Forest interface.
func (t *boltForest) TreeGetChildren(cid cidSDK.ID, treeID string, nodeID Node) ([]uint64, error) {
func (t *boltForest) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) ([]uint64, error) {
_, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeGetChildren",
trace.WithAttributes(
attribute.String("container_id", cid.EncodeToString()),
attribute.String("tree_id", treeID),
attribute.String("node_id", fmt.Sprintf("%d", nodeID)),
),
)
defer span.End()
t.modeMtx.RLock()
defer t.modeMtx.RUnlock()
@ -749,7 +839,14 @@ func (t *boltForest) TreeGetChildren(cid cidSDK.ID, treeID string, nodeID Node)
}
// TreeList implements the Forest interface.
func (t *boltForest) TreeList(cid cidSDK.ID) ([]string, error) {
func (t *boltForest) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, error) {
_, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeList",
trace.WithAttributes(
attribute.String("container_id", cid.EncodeToString()),
),
)
defer span.End()
t.modeMtx.RLock()
defer t.modeMtx.RUnlock()
@ -783,7 +880,16 @@ func (t *boltForest) TreeList(cid cidSDK.ID) ([]string, error) {
}
// TreeGetOpLog implements the pilorama.Forest interface.
func (t *boltForest) TreeGetOpLog(cid cidSDK.ID, treeID string, height uint64) (Move, error) {
func (t *boltForest) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (Move, error) {
_, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeGetOpLog",
trace.WithAttributes(
attribute.String("container_id", cid.EncodeToString()),
attribute.String("tree_id", treeID),
attribute.String("height", fmt.Sprintf("%d", height)),
),
)
defer span.End()
t.modeMtx.RLock()
defer t.modeMtx.RUnlock()
@ -813,7 +919,15 @@ func (t *boltForest) TreeGetOpLog(cid cidSDK.ID, treeID string, height uint64) (
}
// TreeDrop implements the pilorama.Forest interface.
func (t *boltForest) TreeDrop(cid cidSDK.ID, treeID string) error {
func (t *boltForest) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string) error {
_, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeDrop",
trace.WithAttributes(
attribute.String("container_id", cid.EncodeToString()),
attribute.String("tree_id", treeID),
),
)
defer span.End()
t.modeMtx.RLock()
defer t.modeMtx.RUnlock()
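Note: the boltForest spans are all started with the blank-identifier form (_, span := ...), making them leaf spans: bbolt transactions accept no context.Context, so there is nothing left to propagate. The engine layer, by contrast, keeps the derived context; a fragment for comparison, with sh, cid and treeID as placeholders:

// Engine layer: keep the derived context and hand it to the shard,
// where boltForest will then open its own leaf span.
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeDrop",
	trace.WithAttributes(attribute.String("tree_id", treeID)))
defer span.End()
err := sh.TreeDrop(ctx, cid, treeID)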

View file

@ -1,6 +1,7 @@
package pilorama
import (
"context"
"sort"
"strings"
@ -25,7 +26,7 @@ func NewMemoryForest() ForestStorage {
}
// TreeMove implements the Forest interface.
func (f *memoryForest) TreeMove(d CIDDescriptor, treeID string, op *Move) (*Move, error) {
func (f *memoryForest) TreeMove(_ context.Context, d CIDDescriptor, treeID string, op *Move) (*Move, error) {
if !d.checkValid() {
return nil, ErrInvalidCIDDescriptor
}
@ -48,7 +49,7 @@ func (f *memoryForest) TreeMove(d CIDDescriptor, treeID string, op *Move) (*Move
}
// TreeAddByPath implements the Forest interface.
func (f *memoryForest) TreeAddByPath(d CIDDescriptor, treeID string, attr string, path []string, m []KeyValue) ([]Move, error) {
func (f *memoryForest) TreeAddByPath(_ context.Context, d CIDDescriptor, treeID string, attr string, path []string, m []KeyValue) ([]Move, error) {
if !d.checkValid() {
return nil, ErrInvalidCIDDescriptor
}
@ -93,7 +94,7 @@ func (f *memoryForest) TreeAddByPath(d CIDDescriptor, treeID string, attr string
}
// TreeApply implements the Forest interface.
func (f *memoryForest) TreeApply(cnr cid.ID, treeID string, op *Move, _ bool) error {
func (f *memoryForest) TreeApply(_ context.Context, cnr cid.ID, treeID string, op *Move, _ bool) error {
fullID := cnr.String() + "/" + treeID
s, ok := f.treeMap[fullID]
if !ok {
@ -119,7 +120,7 @@ func (f *memoryForest) Close() error {
}
// TreeGetByPath implements the Forest interface.
func (f *memoryForest) TreeGetByPath(cid cid.ID, treeID string, attr string, path []string, latest bool) ([]Node, error) {
func (f *memoryForest) TreeGetByPath(_ context.Context, cid cid.ID, treeID string, attr string, path []string, latest bool) ([]Node, error) {
if !isAttributeInternal(attr) {
return nil, ErrNotPathAttribute
}
@ -134,7 +135,7 @@ func (f *memoryForest) TreeGetByPath(cid cid.ID, treeID string, attr string, pat
}
// TreeGetMeta implements the Forest interface.
func (f *memoryForest) TreeGetMeta(cid cid.ID, treeID string, nodeID Node) (Meta, Node, error) {
func (f *memoryForest) TreeGetMeta(_ context.Context, cid cid.ID, treeID string, nodeID Node) (Meta, Node, error) {
fullID := cid.String() + "/" + treeID
s, ok := f.treeMap[fullID]
if !ok {
@ -145,7 +146,7 @@ func (f *memoryForest) TreeGetMeta(cid cid.ID, treeID string, nodeID Node) (Meta
}
// TreeGetChildren implements the Forest interface.
func (f *memoryForest) TreeGetChildren(cid cid.ID, treeID string, nodeID Node) ([]uint64, error) {
func (f *memoryForest) TreeGetChildren(_ context.Context, cid cid.ID, treeID string, nodeID Node) ([]uint64, error) {
fullID := cid.String() + "/" + treeID
s, ok := f.treeMap[fullID]
if !ok {
@ -163,7 +164,7 @@ func (f *memoryForest) TreeGetChildren(cid cid.ID, treeID string, nodeID Node) (
}
// TreeGetOpLog implements the pilorama.Forest interface.
func (f *memoryForest) TreeGetOpLog(cid cid.ID, treeID string, height uint64) (Move, error) {
func (f *memoryForest) TreeGetOpLog(_ context.Context, cid cid.ID, treeID string, height uint64) (Move, error) {
fullID := cid.String() + "/" + treeID
s, ok := f.treeMap[fullID]
if !ok {
@ -180,7 +181,7 @@ func (f *memoryForest) TreeGetOpLog(cid cid.ID, treeID string, height uint64) (M
}
// TreeDrop implements the pilorama.Forest interface.
func (f *memoryForest) TreeDrop(cid cid.ID, treeID string) error {
func (f *memoryForest) TreeDrop(_ context.Context, cid cid.ID, treeID string) error {
cidStr := cid.String()
if treeID == "" {
for k := range f.treeMap {
@ -200,7 +201,7 @@ func (f *memoryForest) TreeDrop(cid cid.ID, treeID string) error {
}
// TreeList implements the pilorama.Forest interface.
func (f *memoryForest) TreeList(cid cid.ID) ([]string, error) {
func (f *memoryForest) TreeList(_ context.Context, cid cid.ID) ([]string, error) {
var res []string
cidStr := cid.EncodeToString()
@ -217,14 +218,14 @@ func (f *memoryForest) TreeList(cid cid.ID) ([]string, error) {
}
// TreeExists implements the pilorama.Forest interface.
func (f *memoryForest) TreeExists(cid cid.ID, treeID string) (bool, error) {
func (f *memoryForest) TreeExists(_ context.Context, cid cid.ID, treeID string) (bool, error) {
fullID := cid.EncodeToString() + "/" + treeID
_, ok := f.treeMap[fullID]
return ok, nil
}
// TreeUpdateLastSyncHeight implements the pilorama.Forest interface.
func (f *memoryForest) TreeUpdateLastSyncHeight(cid cid.ID, treeID string, height uint64) error {
func (f *memoryForest) TreeUpdateLastSyncHeight(_ context.Context, cid cid.ID, treeID string, height uint64) error {
fullID := cid.EncodeToString() + "/" + treeID
t, ok := f.treeMap[fullID]
if !ok {
@ -235,7 +236,7 @@ func (f *memoryForest) TreeUpdateLastSyncHeight(cid cid.ID, treeID string, heigh
}
// TreeLastSyncHeight implements the pilorama.Forest interface.
func (f *memoryForest) TreeLastSyncHeight(cid cid.ID, treeID string) (uint64, error) {
func (f *memoryForest) TreeLastSyncHeight(_ context.Context, cid cid.ID, treeID string) (uint64, error) {
fullID := cid.EncodeToString() + "/" + treeID
t, ok := f.treeMap[fullID]
if !ok {

View file

@ -1,6 +1,7 @@
package pilorama
import (
"context"
"fmt"
"math/rand"
"os"
@ -49,7 +50,7 @@ var providers = []struct {
}
func testMeta(t *testing.T, f Forest, cid cidSDK.ID, treeID string, nodeID, parentID Node, expected Meta) {
actualMeta, actualParent, err := f.TreeGetMeta(cid, treeID, nodeID)
actualMeta, actualParent, err := f.TreeGetMeta(context.Background(), cid, treeID, nodeID)
require.NoError(t, err)
require.Equal(t, parentID, actualParent)
require.Equal(t, expected, actualMeta)
@ -71,13 +72,13 @@ func testForestTreeMove(t *testing.T, s Forest) {
meta := []KeyValue{
{Key: AttributeVersion, Value: []byte("XXX")},
{Key: AttributeFilename, Value: []byte("file.txt")}}
lm, err := s.TreeAddByPath(d, treeID, AttributeFilename, []string{"path", "to"}, meta)
lm, err := s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "to"}, meta)
require.NoError(t, err)
require.Equal(t, 3, len(lm))
nodeID := lm[2].Child
t.Run("invalid descriptor", func(t *testing.T) {
_, err = s.TreeMove(CIDDescriptor{cid, 0, 0}, treeID, &Move{
_, err = s.TreeMove(context.Background(), CIDDescriptor{cid, 0, 0}, treeID, &Move{
Parent: lm[1].Child,
Meta: Meta{Items: append(meta, KeyValue{Key: "NewKey", Value: []byte("NewValue")})},
Child: nodeID,
@ -85,7 +86,7 @@ func testForestTreeMove(t *testing.T, s Forest) {
require.ErrorIs(t, err, ErrInvalidCIDDescriptor)
})
t.Run("same parent, update meta", func(t *testing.T) {
res, err := s.TreeMove(d, treeID, &Move{
res, err := s.TreeMove(context.Background(), d, treeID, &Move{
Parent: lm[1].Child,
Meta: Meta{Items: append(meta, KeyValue{Key: "NewKey", Value: []byte("NewValue")})},
Child: nodeID,
@ -93,12 +94,12 @@ func testForestTreeMove(t *testing.T, s Forest) {
require.NoError(t, err)
require.Equal(t, res.Child, nodeID)
nodes, err := s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, false)
nodes, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, false)
require.NoError(t, err)
require.ElementsMatch(t, []Node{nodeID}, nodes)
})
t.Run("different parent", func(t *testing.T) {
res, err := s.TreeMove(d, treeID, &Move{
res, err := s.TreeMove(context.Background(), d, treeID, &Move{
Parent: RootID,
Meta: Meta{Items: append(meta, KeyValue{Key: "NewKey", Value: []byte("NewValue")})},
Child: nodeID,
@ -106,11 +107,11 @@ func testForestTreeMove(t *testing.T, s Forest) {
require.NoError(t, err)
require.Equal(t, res.Child, nodeID)
nodes, err := s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, false)
nodes, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, false)
require.NoError(t, err)
require.True(t, len(nodes) == 0)
nodes, err = s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"file.txt"}, false)
nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"file.txt"}, false)
require.NoError(t, err)
require.ElementsMatch(t, []Node{nodeID}, nodes)
})
@ -130,7 +131,7 @@ func testForestTreeGetChildren(t *testing.T, s Forest) {
treeID := "version"
treeAdd := func(t *testing.T, child, parent Node) {
_, err := s.TreeMove(d, treeID, &Move{
_, err := s.TreeMove(context.Background(), d, treeID, &Move{
Parent: parent,
Child: child,
})
@ -152,7 +153,7 @@ func testForestTreeGetChildren(t *testing.T, s Forest) {
treeAdd(t, 7, 0)
testGetChildren := func(t *testing.T, nodeID Node, expected []Node) {
actual, err := s.TreeGetChildren(cid, treeID, nodeID)
actual, err := s.TreeGetChildren(context.Background(), cid, treeID, nodeID)
require.NoError(t, err)
require.ElementsMatch(t, expected, actual)
}
@ -168,7 +169,7 @@ func testForestTreeGetChildren(t *testing.T, s Forest) {
testGetChildren(t, 42, nil)
})
t.Run("missing tree", func(t *testing.T) {
_, err := s.TreeGetChildren(cid, treeID+"123", 0)
_, err := s.TreeGetChildren(context.Background(), cid, treeID+"123", 0)
require.ErrorIs(t, err, ErrTreeNotFound)
})
}
@ -191,10 +192,10 @@ func testForestTreeDrop(t *testing.T, s Forest) {
cid := cids[0]
t.Run("return nil if not found", func(t *testing.T) {
require.ErrorIs(t, s.TreeDrop(cid, "123"), ErrTreeNotFound)
require.ErrorIs(t, s.TreeDrop(context.Background(), cid, "123"), ErrTreeNotFound)
})
require.NoError(t, s.TreeDrop(cid, ""))
require.NoError(t, s.TreeDrop(context.Background(), cid, ""))
trees := []string{"tree1", "tree2"}
var descs [cidsSize]CIDDescriptor
@ -203,39 +204,39 @@ func testForestTreeDrop(t *testing.T, s Forest) {
}
d := descs[0]
for i := range trees {
_, err := s.TreeAddByPath(d, trees[i], AttributeFilename, []string{"path"},
_, err := s.TreeAddByPath(context.Background(), d, trees[i], AttributeFilename, []string{"path"},
[]KeyValue{{Key: "TreeName", Value: []byte(trees[i])}})
require.NoError(t, err)
}
err := s.TreeDrop(cid, trees[0])
err := s.TreeDrop(context.Background(), cid, trees[0])
require.NoError(t, err)
_, err = s.TreeGetByPath(cid, trees[0], AttributeFilename, []string{"path"}, true)
_, err = s.TreeGetByPath(context.Background(), cid, trees[0], AttributeFilename, []string{"path"}, true)
require.ErrorIs(t, err, ErrTreeNotFound)
_, err = s.TreeGetByPath(cid, trees[1], AttributeFilename, []string{"path"}, true)
_, err = s.TreeGetByPath(context.Background(), cid, trees[1], AttributeFilename, []string{"path"}, true)
require.NoError(t, err)
for j := range descs {
for i := range trees {
_, err := s.TreeAddByPath(descs[j], trees[i], AttributeFilename, []string{"path"},
_, err := s.TreeAddByPath(context.Background(), descs[j], trees[i], AttributeFilename, []string{"path"},
[]KeyValue{{Key: "TreeName", Value: []byte(trees[i])}})
require.NoError(t, err)
}
}
list, err := s.TreeList(cid)
list, err := s.TreeList(context.Background(), cid)
require.NoError(t, err)
require.NotEmpty(t, list)
require.NoError(t, s.TreeDrop(cid, ""))
require.NoError(t, s.TreeDrop(context.Background(), cid, ""))
list, err = s.TreeList(cid)
list, err = s.TreeList(context.Background(), cid)
require.NoError(t, err)
require.Empty(t, list)
for j := 1; j < len(cids); j++ {
list, err = s.TreeList(cids[j])
list, err = s.TreeList(context.Background(), cids[j])
require.NoError(t, err)
require.Equal(t, len(list), len(trees))
}
@@ -264,24 +265,24 @@ func testForestTreeAdd(t *testing.T, s Forest) {
}
t.Run("invalid descriptor", func(t *testing.T) {
_, err := s.TreeMove(CIDDescriptor{cid, 0, 0}, treeID, m)
_, err := s.TreeMove(context.Background(), CIDDescriptor{cid, 0, 0}, treeID, m)
require.ErrorIs(t, err, ErrInvalidCIDDescriptor)
})
lm, err := s.TreeMove(d, treeID, m)
lm, err := s.TreeMove(context.Background(), d, treeID, m)
require.NoError(t, err)
testMeta(t, s, cid, treeID, lm.Child, lm.Parent, Meta{Time: lm.Time, Items: meta})
nodes, err := s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"file.txt"}, false)
nodes, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"file.txt"}, false)
require.NoError(t, err)
require.ElementsMatch(t, []Node{lm.Child}, nodes)
t.Run("other trees are unaffected", func(t *testing.T) {
_, err := s.TreeGetByPath(cid, treeID+"123", AttributeFilename, []string{"file.txt"}, false)
_, err := s.TreeGetByPath(context.Background(), cid, treeID+"123", AttributeFilename, []string{"file.txt"}, false)
require.ErrorIs(t, err, ErrTreeNotFound)
_, _, err = s.TreeGetMeta(cid, treeID+"123", 0)
_, _, err = s.TreeGetMeta(context.Background(), cid, treeID+"123", 0)
require.ErrorIs(t, err, ErrTreeNotFound)
})
}
@@ -304,15 +305,15 @@ func testForestTreeAddByPath(t *testing.T, s Forest) {
{Key: AttributeFilename, Value: []byte("file.txt")}}
t.Run("invalid descriptor", func(t *testing.T) {
_, err := s.TreeAddByPath(CIDDescriptor{cid, 0, 0}, treeID, AttributeFilename, []string{"yyy"}, meta)
_, err := s.TreeAddByPath(context.Background(), CIDDescriptor{cid, 0, 0}, treeID, AttributeFilename, []string{"yyy"}, meta)
require.ErrorIs(t, err, ErrInvalidCIDDescriptor)
})
t.Run("invalid attribute", func(t *testing.T) {
_, err := s.TreeAddByPath(d, treeID, AttributeVersion, []string{"yyy"}, meta)
_, err := s.TreeAddByPath(context.Background(), d, treeID, AttributeVersion, []string{"yyy"}, meta)
require.ErrorIs(t, err, ErrNotPathAttribute)
})
lm, err := s.TreeAddByPath(d, treeID, AttributeFilename, []string{"path", "to"}, meta)
lm, err := s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "to"}, meta)
require.NoError(t, err)
require.Equal(t, 3, len(lm))
testMeta(t, s, cid, treeID, lm[0].Child, lm[0].Parent, Meta{Time: lm[0].Time, Items: []KeyValue{{AttributeFilename, []byte("path")}}})
@@ -322,7 +323,7 @@ func testForestTreeAddByPath(t *testing.T, s Forest) {
testMeta(t, s, cid, treeID, firstID, lm[2].Parent, Meta{Time: lm[2].Time, Items: meta})
meta[0].Value = []byte("YYY")
lm, err = s.TreeAddByPath(d, treeID, AttributeFilename, []string{"path", "to"}, meta)
lm, err = s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "to"}, meta)
require.NoError(t, err)
require.Equal(t, 1, len(lm))
@@ -331,19 +332,19 @@ func testForestTreeAddByPath(t *testing.T, s Forest) {
t.Run("get versions", func(t *testing.T) {
// All versions.
nodes, err := s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, false)
nodes, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, false)
require.NoError(t, err)
require.ElementsMatch(t, []Node{firstID, secondID}, nodes)
// Latest version.
nodes, err = s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, true)
nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, true)
require.NoError(t, err)
require.Equal(t, []Node{secondID}, nodes)
})
meta[0].Value = []byte("ZZZ")
meta[1].Value = []byte("cat.jpg")
lm, err = s.TreeAddByPath(d, treeID, AttributeFilename, []string{"path", "dir"}, meta)
lm, err = s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "dir"}, meta)
require.NoError(t, err)
require.Equal(t, 2, len(lm))
testMeta(t, s, cid, treeID, lm[0].Child, lm[0].Parent, Meta{Time: lm[0].Time, Items: []KeyValue{{AttributeFilename, []byte("dir")}}})
@@ -352,7 +353,7 @@ func testForestTreeAddByPath(t *testing.T, s Forest) {
t.Run("create internal nodes", func(t *testing.T) {
meta[0].Value = []byte("SomeValue")
meta[1].Value = []byte("another")
lm, err = s.TreeAddByPath(d, treeID, AttributeFilename, []string{"path"}, meta)
lm, err = s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path"}, meta)
require.NoError(t, err)
require.Equal(t, 1, len(lm))
@@ -360,7 +361,7 @@ func testForestTreeAddByPath(t *testing.T, s Forest) {
meta[0].Value = []byte("Leaf")
meta[1].Value = []byte("file.txt")
lm, err = s.TreeAddByPath(d, treeID, AttributeFilename, []string{"path", "another"}, meta)
lm, err = s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "another"}, meta)
require.NoError(t, err)
require.Equal(t, 2, len(lm))
@@ -375,12 +376,12 @@ func testForestTreeAddByPath(t *testing.T, s Forest) {
{AttributeFilename, []byte("another")}}})
t.Run("get by path", func(t *testing.T) {
nodes, err := s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"path", "another"}, false)
nodes, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "another"}, false)
require.NoError(t, err)
require.Equal(t, 2, len(nodes))
require.ElementsMatch(t, []Node{lm[0].Child, oldMove.Child}, nodes)
nodes, err = s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"path", "another", "file.txt"}, false)
nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "another", "file.txt"}, false)
require.NoError(t, err)
require.Equal(t, 1, len(nodes))
require.Equal(t, lm[1].Child, nodes[0])
@@ -391,11 +392,11 @@ func testForestTreeAddByPath(t *testing.T, s Forest) {
meta := []KeyValue{
{Key: AttributeVersion, Value: []byte("XXX")},
{Key: AttributeFilename, Value: []byte{}}}
lm, err := s.TreeAddByPath(d, treeID, AttributeFilename, []string{"path", "to"}, meta)
lm, err := s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "to"}, meta)
require.NoError(t, err)
require.Equal(t, 1, len(lm))
nodes, err := s.TreeGetByPath(d.CID, treeID, AttributeFilename, []string{"path", "to", ""}, false)
nodes, err := s.TreeGetByPath(context.Background(), d.CID, treeID, AttributeFilename, []string{"path", "to", ""}, false)
require.NoError(t, err)
require.Equal(t, 1, len(nodes))
require.Equal(t, lm[0].Child, nodes[0])
@@ -415,7 +416,7 @@ func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Optio
treeID := "version"
testApply := func(t *testing.T, s Forest, child, parent Node, meta Meta) {
require.NoError(t, s.TreeApply(cid, treeID, &Move{
require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &Move{
Child: child,
Parent: parent,
Meta: meta,
@@ -475,16 +476,16 @@ func testForestTreeGetOpLog(t *testing.T, constructor func(t testing.TB, _ ...Op
s := constructor(t)
t.Run("empty log, no panic", func(t *testing.T) {
_, err := s.TreeGetOpLog(cid, treeID, 0)
_, err := s.TreeGetOpLog(context.Background(), cid, treeID, 0)
require.ErrorIs(t, err, ErrTreeNotFound)
})
for i := range logs {
require.NoError(t, s.TreeApply(cid, treeID, &logs[i], false))
require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &logs[i], false))
}
testGetOpLog := func(t *testing.T, height uint64, m Move) {
lm, err := s.TreeGetOpLog(cid, treeID, height)
lm, err := s.TreeGetOpLog(context.Background(), cid, treeID, height)
require.NoError(t, err)
require.Equal(t, m, lm)
}
@@ -498,7 +499,7 @@ func testForestTreeGetOpLog(t *testing.T, constructor func(t testing.TB, _ ...Op
testGetOpLog(t, 261, Move{})
})
t.Run("missing tree", func(t *testing.T) {
_, err := s.TreeGetOpLog(cid, treeID+"123", 4)
_, err := s.TreeGetOpLog(context.Background(), cid, treeID+"123", 4)
require.ErrorIs(t, err, ErrTreeNotFound)
})
}
@@ -515,7 +516,7 @@ func testForestTreeExists(t *testing.T, constructor func(t testing.TB, opts ...O
s := constructor(t)
checkExists := func(t *testing.T, expected bool, cid cidSDK.ID, treeID string) {
actual, err := s.TreeExists(cid, treeID)
actual, err := s.TreeExists(context.Background(), cid, treeID)
require.NoError(t, err)
require.Equal(t, expected, actual)
}
@@ -527,13 +528,13 @@ func testForestTreeExists(t *testing.T, constructor func(t testing.TB, opts ...O
checkExists(t, false, cid, treeID)
})
require.NoError(t, s.TreeApply(cid, treeID, &Move{Parent: 0, Child: 1}, false))
require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &Move{Parent: 0, Child: 1}, false))
checkExists(t, true, cid, treeID)
checkExists(t, false, cidtest.ID(), treeID) // different CID, same tree
checkExists(t, false, cid, "another tree") // same CID, different tree
t.Run("can be removed", func(t *testing.T) {
require.NoError(t, s.TreeDrop(cid, treeID))
require.NoError(t, s.TreeDrop(context.Background(), cid, treeID))
checkExists(t, false, cid, treeID)
})
}
@@ -563,11 +564,11 @@ func TestApplyTricky1(t *testing.T) {
t.Run(providers[i].name, func(t *testing.T) {
s := providers[i].construct(t)
for i := range ops {
require.NoError(t, s.TreeApply(cid, treeID, &ops[i], false))
require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false))
}
for i := range expected {
_, parent, err := s.TreeGetMeta(cid, treeID, expected[i].child)
_, parent, err := s.TreeGetMeta(context.Background(), cid, treeID, expected[i].child)
require.NoError(t, err)
require.Equal(t, expected[i].parent, parent)
}
@@ -624,11 +625,11 @@ func TestApplyTricky2(t *testing.T) {
t.Run(providers[i].name, func(t *testing.T) {
s := providers[i].construct(t)
for i := range ops {
require.NoError(t, s.TreeApply(cid, treeID, &ops[i], false))
require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false))
}
for i := range expected {
_, parent, err := s.TreeGetMeta(cid, treeID, expected[i].child)
_, parent, err := s.TreeGetMeta(context.Background(), cid, treeID, expected[i].child)
require.NoError(t, err)
require.Equal(t, expected[i].parent, parent)
}
@@ -697,9 +698,9 @@ func prepareRandomTree(nodeCount, opCount int) []Move {
func compareForests(t *testing.T, expected, actual Forest, cid cidSDK.ID, treeID string, nodeCount int) {
for i := uint64(0); i < uint64(nodeCount); i++ {
expectedMeta, expectedParent, err := expected.TreeGetMeta(cid, treeID, i)
expectedMeta, expectedParent, err := expected.TreeGetMeta(context.Background(), cid, treeID, i)
require.NoError(t, err)
actualMeta, actualParent, err := actual.TreeGetMeta(cid, treeID, i)
actualMeta, actualParent, err := actual.TreeGetMeta(context.Background(), cid, treeID, i)
require.NoError(t, err)
require.Equal(t, expectedParent, actualParent, "node id: %d", i)
require.Equal(t, expectedMeta, actualMeta, "node id: %d", i)
@@ -738,7 +739,7 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _
expected := constructor(t)
for i := range ops {
require.NoError(t, expected.TreeApply(cid, treeID, &ops[i], false))
require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false))
}
for i := 0; i < iterCount; i++ {
@@ -753,7 +754,7 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _
go func() {
defer wg.Done()
for op := range ch {
require.NoError(t, actual.TreeApply(cid, treeID, op, false))
require.NoError(t, actual.TreeApply(context.Background(), cid, treeID, op, false))
}
}()
}
@@ -783,7 +784,7 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ..
expected := constructor(t)
for i := range ops {
require.NoError(t, expected.TreeApply(cid, treeID, &ops[i], false))
require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false))
}
const iterCount = 200
@@ -793,7 +794,7 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ..
actual := constructor(t)
for i := range ops {
require.NoError(t, actual.TreeApply(cid, treeID, &ops[i], false))
require.NoError(t, actual.TreeApply(context.Background(), cid, treeID, &ops[i], false))
}
compareForests(t, expected, actual, cid, treeID, nodeCount)
}
@@ -886,7 +887,7 @@ func benchmarkApply(b *testing.B, s Forest, genFunc func(int) []Move) {
b.SetParallelism(10)
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
if err := s.TreeApply(cid, treeID, &ops[<-ch], false); err != nil {
if err := s.TreeApply(context.Background(), cid, treeID, &ops[<-ch], false); err != nil {
b.Fatalf("error in `Apply`: %v", err)
}
}
@@ -929,27 +930,27 @@ func testTreeGetByPath(t *testing.T, s Forest) {
}
t.Run("invalid attribute", func(t *testing.T) {
_, err := s.TreeGetByPath(cid, treeID, AttributeVersion, []string{"", "TTT"}, false)
_, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeVersion, []string{"", "TTT"}, false)
require.ErrorIs(t, err, ErrNotPathAttribute)
})
nodes, err := s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"b", "cat1.jpg"}, false)
nodes, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"b", "cat1.jpg"}, false)
require.NoError(t, err)
require.Equal(t, []Node{4, 5}, nodes)
nodes, err = s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"a", "cat1.jpg"}, false)
nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"a", "cat1.jpg"}, false)
require.Equal(t, []Node{3}, nodes)
t.Run("missing child", func(t *testing.T) {
nodes, err = s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"a", "cat3.jpg"}, false)
nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"a", "cat3.jpg"}, false)
require.True(t, len(nodes) == 0)
})
t.Run("missing parent", func(t *testing.T) {
nodes, err = s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"xyz", "cat1.jpg"}, false)
nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"xyz", "cat1.jpg"}, false)
require.True(t, len(nodes) == 0)
})
t.Run("empty path", func(t *testing.T) {
nodes, err = s.TreeGetByPath(cid, treeID, AttributeFilename, nil, false)
nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, nil, false)
require.True(t, len(nodes) == 0)
})
}
@@ -961,7 +962,7 @@ func testMove(t *testing.T, s Forest, ts int, node, parent Node, cid cidSDK.ID,
items = append(items, KeyValue{AttributeVersion, []byte(version)})
}
require.NoError(t, s.TreeApply(cid, treeID, &Move{
require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &Move{
Parent: parent,
Child: node,
Meta: Meta{
@@ -1000,7 +1001,7 @@ func testTreeGetTrees(t *testing.T, s Forest) {
d.CID = cid
for _, treeID := range treeIDs[cid] {
_, err := s.TreeAddByPath(d, treeID, objectSDK.AttributeFileName, []string{"path"}, nil)
_, err := s.TreeAddByPath(context.Background(), d, treeID, objectSDK.AttributeFileName, []string{"path"}, nil)
require.NoError(t, err)
}
}
@@ -1008,7 +1009,7 @@ func testTreeGetTrees(t *testing.T, s Forest) {
for _, cid := range cids {
d.CID = cid
trees, err := s.TreeList(cid)
trees, err := s.TreeList(context.Background(), cid)
require.NoError(t, err)
require.ElementsMatch(t, treeIDs[cid], trees)
@@ -1028,38 +1029,38 @@ func testTreeLastSyncHeight(t *testing.T, f Forest) {
treeID := "someTree"
t.Run("ErrNotFound if no log operations are stored for a tree", func(t *testing.T) {
_, err := f.TreeLastSyncHeight(cnr, treeID)
_, err := f.TreeLastSyncHeight(context.Background(), cnr, treeID)
require.ErrorIs(t, err, ErrTreeNotFound)
err = f.TreeUpdateLastSyncHeight(cnr, treeID, 1)
err = f.TreeUpdateLastSyncHeight(context.Background(), cnr, treeID, 1)
require.ErrorIs(t, err, ErrTreeNotFound)
})
_, err := f.TreeMove(CIDDescriptor{CID: cnr, Size: 1}, treeID, &Move{
_, err := f.TreeMove(context.Background(), CIDDescriptor{CID: cnr, Size: 1}, treeID, &Move{
Parent: RootID,
Child: 1,
})
require.NoError(t, err)
h, err := f.TreeLastSyncHeight(cnr, treeID)
h, err := f.TreeLastSyncHeight(context.Background(), cnr, treeID)
require.NoError(t, err)
require.EqualValues(t, 0, h)
t.Run("separate storages for separate containers", func(t *testing.T) {
_, err := f.TreeLastSyncHeight(cidtest.ID(), treeID)
_, err := f.TreeLastSyncHeight(context.Background(), cidtest.ID(), treeID)
require.ErrorIs(t, err, ErrTreeNotFound)
})
require.NoError(t, f.TreeUpdateLastSyncHeight(cnr, treeID, 10))
require.NoError(t, f.TreeUpdateLastSyncHeight(context.Background(), cnr, treeID, 10))
h, err = f.TreeLastSyncHeight(cnr, treeID)
h, err = f.TreeLastSyncHeight(context.Background(), cnr, treeID)
require.NoError(t, err)
require.EqualValues(t, 10, h)
t.Run("removed correctly", func(t *testing.T) {
require.NoError(t, f.TreeDrop(cnr, treeID))
require.NoError(t, f.TreeDrop(context.Background(), cnr, treeID))
_, err := f.TreeLastSyncHeight(cnr, treeID)
_, err := f.TreeLastSyncHeight(context.Background(), cnr, treeID)
require.ErrorIs(t, err, ErrTreeNotFound)
})
}

View file

@@ -1,6 +1,8 @@
package pilorama
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -11,43 +13,43 @@ type Forest interface {
// TreeMove moves node in the tree.
// If the parent of the move operation is TrashID, the node is removed.
// If the child of the move operation is RootID, new ID is generated and added to a tree.
TreeMove(d CIDDescriptor, treeID string, m *Move) (*Move, error)
TreeMove(ctx context.Context, d CIDDescriptor, treeID string, m *Move) (*Move, error)
// TreeAddByPath adds new node in the tree using provided path.
// The path is constructed by descending from the root using the values of the attr in meta.
// Internal nodes in path should have exactly one attribute, otherwise a new node is created.
TreeAddByPath(d CIDDescriptor, treeID string, attr string, path []string, meta []KeyValue) ([]Move, error)
TreeAddByPath(ctx context.Context, d CIDDescriptor, treeID string, attr string, path []string, meta []KeyValue) ([]Move, error)
// TreeApply applies replicated operation from another node.
// If background is true, TreeApply will first check whether an operation exists.
TreeApply(cnr cidSDK.ID, treeID string, m *Move, backgroundSync bool) error
TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *Move, backgroundSync bool) error
// TreeGetByPath returns all nodes corresponding to the path.
// The path is constructed by descending from the root using the values of the
// AttributeFilename in meta.
// The last argument determines whether only the node with the latest timestamp is returned.
// Should return ErrTreeNotFound if the tree is not found, and empty result if the path is not in the tree.
TreeGetByPath(cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]Node, error)
TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]Node, error)
// TreeGetMeta returns meta information of the node with the specified ID.
// Should return ErrTreeNotFound if the tree is not found, and empty result if the node is not in the tree.
TreeGetMeta(cid cidSDK.ID, treeID string, nodeID Node) (Meta, Node, error)
TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) (Meta, Node, error)
// TreeGetChildren returns children of the node with the specified ID. The order is arbitrary.
// Should return ErrTreeNotFound if the tree is not found, and empty result if the node is not in the tree.
TreeGetChildren(cid cidSDK.ID, treeID string, nodeID Node) ([]uint64, error)
TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) ([]uint64, error)
// TreeGetOpLog returns first log operation stored at or above the height.
// In case no such operation is found, empty Move and nil error should be returned.
TreeGetOpLog(cid cidSDK.ID, treeID string, height uint64) (Move, error)
TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (Move, error)
// TreeDrop drops a tree from the database.
// If the tree is not found, ErrTreeNotFound should be returned.
// In case of empty treeID drops all trees related to container.
TreeDrop(cid cidSDK.ID, treeID string) error
TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string) error
// TreeList returns all the tree IDs that have been added to the
// passed container ID. Nil slice should be returned if no tree found.
TreeList(cid cidSDK.ID) ([]string, error)
TreeList(ctx context.Context, cid cidSDK.ID) ([]string, error)
// TreeExists checks if a tree exists locally.
// If the tree is not found, false and a nil error should be returned.
TreeExists(cid cidSDK.ID, treeID string) (bool, error)
TreeExists(ctx context.Context, cid cidSDK.ID, treeID string) (bool, error)
// TreeUpdateLastSyncHeight updates last log height synchronized with _all_ container nodes.
TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, height uint64) error
TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) error
// TreeLastSyncHeight returns last log height synchronized with _all_ container nodes.
TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64, error)
TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string) (uint64, error)
}
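For orientation, a minimal sketch of how a caller drives the reworked, context-aware Forest interface above. The helper name, the tree ID and the pilorama import path are assumptions for illustration, not part of this change:

```go
package example

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
	cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)

// addAndResolve creates a single node under the root and resolves it back by
// path, threading one ctx through every Forest call so nested spans share a trace.
func addAndResolve(ctx context.Context, f pilorama.Forest, cnr cidSDK.ID) ([]pilorama.Node, error) {
	d := pilorama.CIDDescriptor{CID: cnr, Size: 1}
	// Child == RootID asks the implementation to generate a fresh node ID.
	_, err := f.TreeMove(ctx, d, "version", &pilorama.Move{
		Parent: pilorama.RootID,
		Child:  pilorama.RootID,
		Meta: pilorama.Meta{Items: []pilorama.KeyValue{
			{Key: pilorama.AttributeFilename, Value: []byte("file.txt")},
		}},
	})
	if err != nil {
		return nil, err
	}
	// latest == true returns only the node with the newest timestamp.
	return f.TreeGetByPath(ctx, cnr, "version", pilorama.AttributeFilename, []string{"file.txt"}, true)
}
```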
type ForestStorage interface {

View file

@@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
@@ -80,7 +81,10 @@ func (s *Shard) Open() error {
type metabaseSynchronizer Shard
func (x *metabaseSynchronizer) Init() error {
return (*Shard)(x).refillMetabase()
ctx, span := tracing.StartSpanFromContext(context.TODO(), "metabaseSynchronizer.Init")
defer span.End()
return (*Shard)(x).refillMetabase(ctx)
}
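Note the root context here: where no caller context is available yet, the change uses context.TODO() (the same placeholder appears in removeGarbage and List below), while genuine entry points such as tests use context.Background(). A hedged sketch of the same shape, with an invented operation name:

```go
package example

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
)

// initLike mirrors metabaseSynchronizer.Init above: context.TODO() serves as
// an explicit placeholder root until the call chain passes a real context.
func initLike(doInit func(context.Context) error) error {
	ctx, span := tracing.StartSpanFromContext(context.TODO(), "example.Init")
	defer span.End()

	return doInit(ctx)
}
```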
// Init initializes all Shard's components.
@@ -158,7 +162,7 @@ func (s *Shard) Init(ctx context.Context) error {
return nil
}
func (s *Shard) refillMetabase() error {
func (s *Shard) refillMetabase(ctx context.Context) error {
err := s.metaBase.Reset()
if err != nil {
return fmt.Errorf("could not reset metabase: %w", err)
@@ -177,9 +181,9 @@ func (s *Shard) refillMetabase() error {
var err error
switch obj.Type() {
case objectSDK.TypeTombstone:
err = s.refillTombstoneObject(obj)
err = s.refillTombstoneObject(ctx, obj)
case objectSDK.TypeLock:
err = s.refillLockObject(obj)
err = s.refillLockObject(ctx, obj)
default:
}
if err != nil {
@@ -190,7 +194,7 @@ func (s *Shard) refillMetabase() error {
mPrm.SetObject(obj)
mPrm.SetStorageID(descriptor)
_, err = s.metaBase.Put(mPrm)
_, err = s.metaBase.Put(ctx, mPrm)
if err != nil && !meta.IsErrRemoved(err) && !errors.Is(err, meta.ErrObjectIsExpired) {
return err
}
@@ -209,7 +213,7 @@ func (s *Shard) refillMetabase() error {
return nil
}
func (s *Shard) refillLockObject(obj *objectSDK.Object) error {
func (s *Shard) refillLockObject(ctx context.Context, obj *objectSDK.Object) error {
var lock objectSDK.Lock
if err := lock.Unmarshal(obj.Payload()); err != nil {
return fmt.Errorf("could not unmarshal lock content: %w", err)
@@ -220,14 +224,14 @@ func (s *Shard) refillLockObject(obj *objectSDK.Object) error {
cnr, _ := obj.ContainerID()
id, _ := obj.ID()
err := s.metaBase.Lock(cnr, id, locked)
err := s.metaBase.Lock(ctx, cnr, id, locked)
if err != nil {
return fmt.Errorf("could not lock objects: %w", err)
}
return nil
}
func (s *Shard) refillTombstoneObject(obj *objectSDK.Object) error {
func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object) error {
tombstone := objectSDK.NewTombstone()
if err := tombstone.Unmarshal(obj.Payload()); err != nil {
@@ -250,7 +254,7 @@ func (s *Shard) refillTombstoneObject(obj *objectSDK.Object) error {
inhumePrm.SetTombstoneAddress(tombAddr)
inhumePrm.SetAddresses(tombMembers...)
_, err := s.metaBase.Inhume(inhumePrm)
_, err := s.metaBase.Inhume(ctx, inhumePrm)
if err != nil {
return fmt.Errorf("could not inhume objects: %w", err)
}
@@ -290,7 +294,10 @@ func (s *Shard) Close() error {
// Reload reloads configuration portions that are necessary.
// If a config option is invalid, it logs an error and returns nil.
// If there was a problem with applying new configuration, an error is returned.
func (s *Shard) Reload(opts ...Option) error {
func (s *Shard) Reload(ctx context.Context, opts ...Option) error {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Reload")
defer span.End()
// Do not use defaultCfg here: missing options need not be reloaded.
var c cfg
for i := range opts {
@@ -314,7 +321,7 @@ func (s *Shard) Reload(opts ...Option) error {
// Here we refill metabase only if a new instance was opened. This is a feature,
// we don't want to hang for some time just because we forgot to change
// config after the node was updated.
err = s.refillMetabase()
err = s.refillMetabase(ctx)
} else {
err = s.metaBase.Init()
}

View file

@@ -126,6 +126,7 @@ func TestRefillMetabaseCorrupted(t *testing.T) {
}
sh := New(
WithID(NewIDFromBytes([]byte{})),
WithBlobStorOptions(blobOpts...),
WithPiloramaOptions(pilorama.WithPath(filepath.Join(dir, "pilorama"))),
WithMetaBaseOptions(meta.WithPath(filepath.Join(dir, "meta")), meta.WithEpochState(epochState{})))
@@ -138,12 +139,12 @@ func TestRefillMetabaseCorrupted(t *testing.T) {
var putPrm PutPrm
putPrm.SetObject(obj)
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
require.NoError(t, sh.Close())
addr := object.AddressOf(obj)
_, err = fsTree.Put(common.PutPrm{Address: addr, RawData: []byte("not an object")})
_, err = fsTree.Put(context.Background(), common.PutPrm{Address: addr, RawData: []byte("not an object")})
require.NoError(t, err)
sh = New(
@@ -245,13 +246,13 @@ func TestRefillMetabase(t *testing.T) {
for _, v := range mObjs {
putPrm.SetObject(v.obj)
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
}
putPrm.SetObject(tombObj)
_, err = sh.Put(putPrm)
_, err = sh.Put(context.Background(), putPrm)
require.NoError(t, err)
// LOCK object handling
@@ -263,11 +264,11 @@ func TestRefillMetabase(t *testing.T) {
objectSDK.WriteLock(lockObj, lock)
putPrm.SetObject(lockObj)
_, err = sh.Put(putPrm)
_, err = sh.Put(context.Background(), putPrm)
require.NoError(t, err)
lockID, _ := lockObj.ID()
require.NoError(t, sh.Lock(cnrLocked, lockID, locked))
require.NoError(t, sh.Lock(context.Background(), cnrLocked, lockID, locked))
var inhumePrm InhumePrm
inhumePrm.SetTarget(object.AddressOf(tombObj), tombMembers...)
@@ -368,7 +369,7 @@ func TestRefillMetabase(t *testing.T) {
checkObj(object.AddressOf(tombObj), nil)
checkTombMembers(false)
err = sh.refillMetabase()
err = sh.refillMetabase(context.Background())
require.NoError(t, err)
c, err = sh.metaBase.ObjectCounters()

View file

@@ -1,13 +1,17 @@
package shard
import (
"context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -28,14 +32,21 @@ func (p *DeletePrm) SetAddresses(addr ...oid.Address) {
// Delete removes data from the shard's writeCache, metaBase and
// blobStor.
func (s *Shard) Delete(prm DeletePrm) (DeleteRes, error) {
func (s *Shard) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Delete",
trace.WithAttributes(
attribute.String("shard_id", s.ID().String()),
attribute.Int("addr_count", len(prm.addr)),
))
defer span.End()
s.m.RLock()
defer s.m.RUnlock()
return s.delete(prm)
return s.delete(ctx, prm)
}
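The wrapper above is the instrumentation shape repeated for Exists, Inhume, Lock, ToMoveIt, Put and Restore below. A generic sketch of the pattern; the operation name, attributes and callback are illustrative:

```go
package example

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

// tracedOp derives a child span from the caller's context, attaches
// low-cardinality attributes for filtering, and guarantees the span is
// closed on every return path via defer.
func tracedOp(ctx context.Context, shardID string, addrCount int, op func(context.Context) error) error {
	ctx, span := tracing.StartSpanFromContext(ctx, "Shard.ExampleOp",
		trace.WithAttributes(
			attribute.String("shard_id", shardID),
			attribute.Int("addr_count", addrCount),
		))
	defer span.End()

	// The derived ctx flows into nested writecache/metabase/blobstor calls,
	// so their spans parent correctly under this one.
	return op(ctx)
}
```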
func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) {
func (s *Shard) delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
if s.info.Mode.ReadOnly() {
return DeleteRes{}, ErrReadOnlyMode
} else if s.info.Mode.NoMetabase() {
@@ -48,7 +59,7 @@ func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) {
for i := range prm.addr {
if s.hasWriteCache() {
err := s.writeCache.Delete(prm.addr[i])
err := s.writeCache.Delete(ctx, prm.addr[i])
if err != nil && !IsErrNotFound(err) && !errors.Is(err, writecache.ErrReadOnly) {
s.log.Warn(logs.ShardCantDeleteObjectFromWriteCache, zap.String("error", err.Error()))
}
@@ -57,7 +68,7 @@ func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) {
var sPrm meta.StorageIDPrm
sPrm.SetAddress(prm.addr[i])
res, err := s.metaBase.StorageID(sPrm)
res, err := s.metaBase.StorageID(ctx, sPrm)
if err != nil {
s.log.Debug(logs.ShardCantGetStorageIDFromMetabase,
zap.Stringer("object", prm.addr[i]),
@@ -74,7 +85,7 @@ func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) {
var delPrm meta.DeletePrm
delPrm.SetAddresses(prm.addr...)
res, err := s.metaBase.Delete(delPrm)
res, err := s.metaBase.Delete(ctx, delPrm)
if err != nil {
return DeleteRes{}, err // stop on metabase error ?
}
@@ -99,7 +110,7 @@ func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) {
id := smalls[prm.addr[i]]
delPrm.StorageID = id
_, err = s.blobStor.Delete(delPrm)
_, err = s.blobStor.Delete(ctx, delPrm)
if err != nil {
s.log.Debug(logs.ShardCantRemoveObjectFromBlobStor,
zap.Stringer("object_address", prm.addr[i]),

View file

@@ -43,13 +43,13 @@ func testShardDelete(t *testing.T, hasWriteCache bool) {
var delPrm shard.DeletePrm
delPrm.SetAddresses(object.AddressOf(obj))
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
_, err = testGet(t, sh, getPrm, hasWriteCache)
require.NoError(t, err)
_, err = sh.Delete(delPrm)
_, err = sh.Delete(context.TODO(), delPrm)
require.NoError(t, err)
_, err = sh.Get(context.Background(), getPrm)
@@ -67,13 +67,13 @@ func testShardDelete(t *testing.T, hasWriteCache bool) {
var delPrm shard.DeletePrm
delPrm.SetAddresses(object.AddressOf(obj))
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
_, err = sh.Get(context.Background(), getPrm)
require.NoError(t, err)
_, err = sh.Delete(delPrm)
_, err = sh.Delete(context.Background(), delPrm)
require.NoError(t, err)
_, err = sh.Get(context.Background(), getPrm)

View file

@@ -104,7 +104,7 @@ func testDump(t *testing.T, objCount int, hasWriteCache bool) {
var prm shard.PutPrm
prm.SetObject(objects[i])
_, err := sh.Put(prm)
_, err := sh.Put(context.Background(), prm)
require.NoError(t, err)
}
@@ -129,13 +129,13 @@ func testDump(t *testing.T, objCount int, hasWriteCache bool) {
t.Run("empty dump", func(t *testing.T) {
var restorePrm shard.RestorePrm
restorePrm.WithPath(outEmpty)
res, err := sh.Restore(restorePrm)
res, err := sh.Restore(context.Background(), restorePrm)
require.NoError(t, err)
require.Equal(t, 0, res.Count())
})
t.Run("invalid path", func(t *testing.T) {
_, err := sh.Restore(*new(shard.RestorePrm))
_, err := sh.Restore(context.Background(), *new(shard.RestorePrm))
require.ErrorIs(t, err, os.ErrNotExist)
})
@@ -147,7 +147,7 @@ func testDump(t *testing.T, objCount int, hasWriteCache bool) {
var restorePrm shard.RestorePrm
restorePrm.WithPath(out)
_, err := sh.Restore(restorePrm)
_, err := sh.Restore(context.Background(), restorePrm)
require.ErrorIs(t, err, shard.ErrInvalidMagic)
})
@@ -162,7 +162,7 @@ func testDump(t *testing.T, objCount int, hasWriteCache bool) {
var restorePrm shard.RestorePrm
restorePrm.WithPath(out)
_, err := sh.Restore(restorePrm)
_, err := sh.Restore(context.Background(), restorePrm)
require.ErrorIs(t, err, io.ErrUnexpectedEOF)
})
t.Run("incomplete object data", func(t *testing.T) {
@@ -173,7 +173,7 @@ func testDump(t *testing.T, objCount int, hasWriteCache bool) {
var restorePrm shard.RestorePrm
restorePrm.WithPath(out)
_, err := sh.Restore(restorePrm)
_, err := sh.Restore(context.Background(), restorePrm)
require.ErrorIs(t, err, io.EOF)
})
t.Run("invalid object", func(t *testing.T) {
@@ -184,7 +184,7 @@ func testDump(t *testing.T, objCount int, hasWriteCache bool) {
var restorePrm shard.RestorePrm
restorePrm.WithPath(out)
_, err := sh.Restore(restorePrm)
_, err := sh.Restore(context.Background(), restorePrm)
require.Error(t, err)
t.Run("skip errors", func(t *testing.T) {
@@ -195,7 +195,7 @@ func testDump(t *testing.T, objCount int, hasWriteCache bool) {
restorePrm.WithPath(out)
restorePrm.WithIgnoreErrors(true)
res, err := sh.Restore(restorePrm)
res, err := sh.Restore(context.Background(), restorePrm)
require.NoError(t, err)
require.Equal(t, objCount, res.Count())
require.Equal(t, 2, res.FailCount())
@@ -208,7 +208,7 @@ func testDump(t *testing.T, objCount int, hasWriteCache bool) {
t.Run("must allow write", func(t *testing.T) {
require.NoError(t, sh.SetMode(mode.ReadOnly))
_, err := sh.Restore(prm)
_, err := sh.Restore(context.Background(), prm)
require.ErrorIs(t, err, shard.ErrReadOnlyMode)
})
@@ -234,7 +234,7 @@ func TestStream(t *testing.T) {
var prm shard.PutPrm
prm.SetObject(objects[i])
_, err := sh1.Put(prm)
_, err := sh1.Put(context.Background(), prm)
require.NoError(t, err)
}
@@ -269,7 +269,7 @@ func TestStream(t *testing.T) {
}
func checkRestore(t *testing.T, sh *shard.Shard, prm shard.RestorePrm, objects []*objectSDK.Object) {
res, err := sh.Restore(prm)
res, err := sh.Restore(context.Background(), prm)
require.NoError(t, err)
require.Equal(t, len(objects), res.Count())
@@ -333,7 +333,7 @@ func TestDumpIgnoreErrors(t *testing.T) {
var prm shard.PutPrm
prm.SetObject(objects[i])
_, err := sh.Put(prm)
_, err := sh.Put(context.Background(), prm)
require.NoError(t, err)
}

View file

@@ -3,9 +3,12 @@ package shard
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// ExistsPrm groups the parameters of Exists operation.
@@ -36,6 +39,13 @@ func (p ExistsRes) Exists() bool {
// Returns an error of type apistatus.ObjectAlreadyRemoved if object has been marked as removed.
// Returns the object.ErrObjectIsExpired if the object is presented but already expired.
func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Exists",
trace.WithAttributes(
attribute.String("shard_id", s.ID().String()),
attribute.String("address", prm.addr.EncodeToString()),
))
defer span.End()
var exists bool
var err error
@@ -54,7 +64,7 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) {
existsPrm.SetAddress(prm.addr)
var res meta.ExistsRes
res, err = s.metaBase.Exists(existsPrm)
res, err = s.metaBase.Exists(ctx, existsPrm)
exists = res.Exists()
}

View file

@@ -234,7 +234,7 @@ func (s *Shard) removeGarbage() {
deletePrm.SetAddresses(buf...)
// delete accumulated objects
_, err = s.delete(deletePrm)
_, err = s.delete(context.TODO(), deletePrm)
if err != nil {
s.log.Warn(logs.ShardCouldNotDeleteTheObjects,
zap.String("error", err.Error()),
@@ -320,7 +320,7 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address)
inhumePrm.SetGCMark()
// inhume the collected objects
res, err := s.metaBase.Inhume(inhumePrm)
res, err := s.metaBase.Inhume(ctx, inhumePrm)
if err != nil {
s.log.Warn(logs.ShardCouldNotInhumeTheObjects,
zap.String("error", err.Error()),
@@ -485,7 +485,7 @@ func (s *Shard) selectExpired(ctx context.Context, epoch uint64, addresses []oid
// and clears up corresponding graveyard records.
//
// Does not modify tss.
func (s *Shard) HandleExpiredTombstones(tss []meta.TombstonedObject) {
func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.TombstonedObject) {
if s.GetMode().NoMetabase() {
return
}
@@ -502,7 +502,7 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.TombstonedObject) {
pInhume.SetAddresses(tsAddrs...)
// inhume tombstones
res, err := s.metaBase.Inhume(pInhume)
res, err := s.metaBase.Inhume(ctx, pInhume)
if err != nil {
s.log.Warn(logs.ShardCouldNotMarkTombstonesAsGarbage,
zap.String("error", err.Error()),
@@ -547,7 +547,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
pInhume.SetAddresses(lockers...)
pInhume.SetForceGCMark()
res, err := s.metaBase.Inhume(pInhume)
res, err := s.metaBase.Inhume(ctx, pInhume)
if err != nil {
s.log.Warn(logs.ShardFailureToMarkLockersAsGarbage,
zap.String("error", err.Error()),

View file

@@ -100,14 +100,14 @@ func Test_GCDropsLockedExpiredObject(t *testing.T) {
var putPrm shard.PutPrm
putPrm.SetObject(obj)
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
err = sh.Lock(cnr, lockID, []oid.ID{objID})
err = sh.Lock(context.Background(), cnr, lockID, []oid.ID{objID})
require.NoError(t, err)
putPrm.SetObject(lock)
_, err = sh.Put(putPrm)
_, err = sh.Put(context.Background(), putPrm)
require.NoError(t, err)
epoch.Value = 105

View file

@@ -96,7 +96,7 @@ func (s *Shard) Get(ctx context.Context, prm GetPrm) (GetRes, error) {
}
skipMeta := prm.skipMeta || s.info.Mode.NoMetabase()
obj, hasMeta, err := s.fetchObjectData(prm.addr, skipMeta, cb, wc)
obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc)
return GetRes{
obj: obj,
@@ -109,7 +109,7 @@ func (s *Shard) Get(ctx context.Context, prm GetPrm) (GetRes, error) {
var emptyStorageID = make([]byte, 0)
// fetchObjectData looks through writeCache and blobStor to find object.
func (s *Shard) fetchObjectData(addr oid.Address, skipMeta bool, cb storFetcher, wc func(w writecache.Cache) (*objectSDK.Object, error)) (*objectSDK.Object, bool, error) {
func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta bool, cb storFetcher, wc func(w writecache.Cache) (*objectSDK.Object, error)) (*objectSDK.Object, bool, error) {
var (
mErr error
mRes meta.ExistsRes
@@ -118,7 +118,7 @@ func (s *Shard) fetchObjectData(addr oid.Address, skipMeta bool, cb storFetcher,
if !skipMeta {
var mPrm meta.ExistsPrm
mPrm.SetAddress(addr)
mRes, mErr = s.metaBase.Exists(mPrm)
mRes, mErr = s.metaBase.Exists(ctx, mPrm)
if mErr != nil && !s.info.Mode.NoMetabase() {
return nil, false, mErr
}
@@ -154,7 +154,7 @@ func (s *Shard) fetchObjectData(addr oid.Address, skipMeta bool, cb storFetcher,
var mPrm meta.StorageIDPrm
mPrm.SetAddress(addr)
mExRes, err := s.metaBase.StorageID(mPrm)
mExRes, err := s.metaBase.StorageID(ctx, mPrm)
if err != nil {
return nil, true, fmt.Errorf("can't fetch blobovnicza id from metabase: %w", err)
}

View file

@@ -40,7 +40,7 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
putPrm.SetObject(obj)
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
getPrm.SetAddress(object.AddressOf(obj))
@@ -58,7 +58,7 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
putPrm.SetObject(obj)
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
getPrm.SetAddress(object.AddressOf(obj))
@@ -86,7 +86,7 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
putPrm.SetObject(child)
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
getPrm.SetAddress(object.AddressOf(child))

View file

@@ -73,7 +73,7 @@ func (s *Shard) Head(ctx context.Context, prm HeadPrm) (HeadRes, error) {
headParams.SetRaw(prm.raw)
var res meta.GetRes
res, err = s.metaBase.Get(headParams)
res, err = s.metaBase.Get(ctx, headParams)
obj = res.Header()
}

View file

@@ -37,7 +37,7 @@ func testShardHead(t *testing.T, hasWriteCache bool) {
putPrm.SetObject(obj)
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
headPrm.SetAddress(object.AddressOf(obj))
@@ -62,7 +62,7 @@ func testShardHead(t *testing.T, hasWriteCache bool) {
putPrm.SetObject(child)
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
headPrm.SetAddress(object.AddressOf(parent))

View file

@@ -5,9 +5,12 @@ import (
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -62,6 +65,12 @@ var ErrLockObjectRemoval = meta.ErrLockObjectRemoval
//
// Returns ErrReadOnlyMode error if shard is in "read-only" mode.
func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Inhume",
trace.WithAttributes(
attribute.String("shard_id", s.ID().String()),
))
defer span.End()
s.m.RLock()
if s.info.Mode.ReadOnly() {
@@ -74,7 +83,7 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
if s.hasWriteCache() {
for i := range prm.target {
_ = s.writeCache.Delete(prm.target[i])
_ = s.writeCache.Delete(ctx, prm.target[i])
}
}
@@ -92,7 +101,7 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
metaPrm.SetForceGCMark()
}
res, err := s.metaBase.Inhume(metaPrm)
res, err := s.metaBase.Inhume(ctx, metaPrm)
if err != nil {
if errors.Is(err, meta.ErrLockObjectRemoval) {
s.m.RUnlock()

View file

@@ -42,7 +42,7 @@ func testShardInhume(t *testing.T, hasWriteCache bool) {
var getPrm shard.GetPrm
getPrm.SetAddress(object.AddressOf(obj))
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
_, err = testGet(t, sh, getPrm, hasWriteCache)

View file

@@ -1,6 +1,7 @@
package shard
import (
"context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -85,7 +86,7 @@ func (s *Shard) List() (res SelectRes, err error) {
sPrm.SetContainerID(lst[i])
sPrm.SetFilters(filters)
sRes, err := s.metaBase.Select(sPrm) // consider making List in metabase
sRes, err := s.metaBase.Select(context.TODO(), sPrm) // consider making List in metabase
if err != nil {
s.log.Debug(logs.ShardCantSelectAllObjects,
zap.Stringer("cid", lst[i]),

View file

@@ -1,6 +1,7 @@
package shard_test
import (
"context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@@ -52,7 +53,7 @@ func testShardList(t *testing.T, sh *shard.Shard) {
putPrm.SetObject(obj)
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
}
}

View file

@@ -1,11 +1,15 @@
package shard
import (
"context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// Lock marks objects as locked with another object. All objects from the
@@ -14,7 +18,16 @@ import (
// Allows locking regular objects only (otherwise returns apistatus.LockNonRegularObject).
//
// Locked list should be unique. Panics if it is empty.
func (s *Shard) Lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
func (s *Shard) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Lock",
trace.WithAttributes(
attribute.String("shard_id", s.ID().String()),
attribute.String("container_id", idCnr.EncodeToString()),
attribute.String("locker", locker.EncodeToString()),
attribute.Int("locked_count", len(locked)),
))
defer span.End()
s.m.RLock()
defer s.m.RUnlock()
@@ -25,7 +38,7 @@ func (s *Shard) Lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
return ErrDegradedMode
}
err := s.metaBase.Lock(idCnr, locker, locked)
err := s.metaBase.Lock(ctx, idCnr, locker, locked)
if err != nil {
return fmt.Errorf("metabase lock: %w", err)
}
@@ -35,7 +48,14 @@ func (s *Shard) Lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
// IsLocked checks object locking relation of the provided object. Not found object is
// considered as not locked. Requires healthy metabase, returns ErrDegradedMode otherwise.
func (s *Shard) IsLocked(addr oid.Address) (bool, error) {
func (s *Shard) IsLocked(ctx context.Context, addr oid.Address) (bool, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.IsLocked",
trace.WithAttributes(
attribute.String("shard_id", s.ID().String()),
attribute.String("address", addr.EncodeToString()),
))
defer span.End()
m := s.GetMode()
if m.NoMetabase() {
return false, ErrDegradedMode
@@ -44,7 +64,7 @@ func (s *Shard) IsLocked(addr oid.Address) (bool, error) {
var prm meta.IsLockedPrm
prm.SetAddress(addr)
res, err := s.metaBase.IsLocked(prm)
res, err := s.metaBase.IsLocked(ctx, prm)
if err != nil {
return false, err
}
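Lock and IsLocked now form a small context-aware pair: lock first, then query the relation. A minimal usage sketch; the interface and helper are invented for illustration:

```go
package example

import (
	"context"
	"fmt"

	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// lockChecker captures just the two context-aware methods shown above.
type lockChecker interface {
	Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []oid.ID) error
	IsLocked(ctx context.Context, addr oid.Address) (bool, error)
}

// lockAndVerify locks a batch of objects and confirms the relation for one
// address, reusing a single ctx so both calls land in the same trace.
func lockAndVerify(ctx context.Context, s lockChecker, cnr cid.ID, lockID oid.ID, addr oid.Address, ids []oid.ID) error {
	if err := s.Lock(ctx, cnr, lockID, ids); err != nil {
		return fmt.Errorf("lock: %w", err)
	}
	locked, err := s.IsLocked(ctx, addr)
	if err != nil {
		return err
	}
	if !locked {
		return fmt.Errorf("object %s is not locked", addr)
	}
	return nil
}
```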

View file

@@ -76,16 +76,16 @@ func TestShard_Lock(t *testing.T) {
var putPrm shard.PutPrm
putPrm.SetObject(obj)
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
// lock the object
err = sh.Lock(cnr, lockID, []oid.ID{objID})
err = sh.Lock(context.Background(), cnr, lockID, []oid.ID{objID})
require.NoError(t, err)
putPrm.SetObject(lock)
_, err = sh.Put(putPrm)
_, err = sh.Put(context.Background(), putPrm)
require.NoError(t, err)
t.Run("inhuming locked objects", func(t *testing.T) {
@@ -158,21 +158,21 @@ func TestShard_IsLocked(t *testing.T) {
var putPrm shard.PutPrm
putPrm.SetObject(obj)
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
// not locked object is not locked
locked, err := sh.IsLocked(objectcore.AddressOf(obj))
locked, err := sh.IsLocked(context.Background(), objectcore.AddressOf(obj))
require.NoError(t, err)
require.False(t, locked)
// locked object is locked
require.NoError(t, sh.Lock(cnrID, lockID, []oid.ID{objID}))
require.NoError(t, sh.Lock(context.Background(), cnrID, lockID, []oid.ID{objID}))
locked, err = sh.IsLocked(objectcore.AddressOf(obj))
locked, err = sh.IsLocked(context.Background(), objectcore.AddressOf(obj))
require.NoError(t, err)
require.True(t, locked)

View file

@@ -109,7 +109,7 @@ func TestCounters(t *testing.T) {
for i := 0; i < objNumber; i++ {
prm.SetObject(oo[i])
_, err := sh.Put(prm)
_, err := sh.Put(context.Background(), prm)
require.NoError(t, err)
}
@@ -168,7 +168,7 @@ func TestCounters(t *testing.T) {
deletedNumber := int(phy / 4)
prm.SetAddresses(addrFromObjs(oo[:deletedNumber])...)
_, err := sh.Delete(prm)
_, err := sh.Delete(context.Background(), prm)
require.NoError(t, err)
require.Equal(t, phy-uint64(deletedNumber), mm.objCounters[physical])
@@ -207,6 +207,7 @@ func shardWithMetrics(t *testing.T, path string) (*shard.Shard, *metricsStore) {
}
sh := shard.New(
shard.WithID(shard.NewIDFromBytes([]byte{})),
shard.WithBlobStorOptions(blobOpts...),
shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(path, "pilorama"))),
shard.WithMetaBaseOptions(

View file

@@ -1,9 +1,14 @@
package shard
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -23,7 +28,14 @@ func (p *ToMoveItPrm) SetAddress(addr oid.Address) {
// ToMoveIt calls metabase.ToMoveIt method to mark object as relocatable to
// another shard.
func (s *Shard) ToMoveIt(prm ToMoveItPrm) (ToMoveItRes, error) {
func (s *Shard) ToMoveIt(ctx context.Context, prm ToMoveItPrm) (ToMoveItRes, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.ToMoveIt",
trace.WithAttributes(
attribute.String("shard_id", s.ID().String()),
attribute.String("address", prm.addr.EncodeToString()),
))
defer span.End()
s.m.RLock()
defer s.m.RUnlock()
@@ -37,7 +49,7 @@ func (s *Shard) ToMoveIt(prm ToMoveItPrm) (ToMoveItRes, error) {
var toMovePrm meta.ToMoveItPrm
toMovePrm.SetAddress(prm.addr)
_, err := s.metaBase.ToMoveIt(toMovePrm)
_, err := s.metaBase.ToMoveIt(ctx, toMovePrm)
if err != nil {
s.log.Debug(logs.ShardCouldNotMarkObjectForShardRelocationInMetabase,
zap.String("error", err.Error()),

View file

@@ -1,13 +1,17 @@
package shard
import (
"context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -30,7 +34,14 @@ func (p *PutPrm) SetObject(obj *object.Object) {
// did not allow to completely save the object.
//
// Returns ErrReadOnlyMode error if shard is in "read-only" mode.
func (s *Shard) Put(prm PutPrm) (PutRes, error) {
func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Put",
trace.WithAttributes(
attribute.String("shard_id", s.ID().String()),
attribute.String("address", objectCore.AddressOf(prm.obj).EncodeToString()),
))
defer span.End()
s.m.RLock()
defer s.m.RUnlock()
@@ -55,7 +66,7 @@ func (s *Shard) Put(prm PutPrm) (PutRes, error) {
// ahead of `Put` by storage engine
tryCache := s.hasWriteCache() && !m.NoMetabase()
if tryCache {
res, err = s.writeCache.Put(putPrm)
res, err = s.writeCache.Put(ctx, putPrm)
}
if err != nil || !tryCache {
if err != nil {
@@ -63,7 +74,7 @@ func (s *Shard) Put(prm PutPrm) (PutRes, error) {
zap.String("err", err.Error()))
}
res, err = s.blobStor.Put(putPrm)
res, err = s.blobStor.Put(ctx, putPrm)
if err != nil {
return PutRes{}, fmt.Errorf("could not put object to BLOB storage: %w", err)
}
@@ -73,7 +84,7 @@ func (s *Shard) Put(prm PutPrm) (PutRes, error) {
var pPrm meta.PutPrm
pPrm.SetObject(prm.obj)
pPrm.SetStorageID(res.StorageID)
if _, err := s.metaBase.Put(pPrm); err != nil {
if _, err := s.metaBase.Put(ctx, pPrm); err != nil {
// may we need to handle this case in a special way
// since the object has been successfully written to BlobStor
return PutRes{}, fmt.Errorf("could not put object to metabase: %w", err)

View file

@@ -123,7 +123,7 @@ func (s *Shard) GetRange(ctx context.Context, prm RngPrm) (RngRes, error) {
}
skipMeta := prm.skipMeta || s.info.Mode.NoMetabase()
obj, hasMeta, err := s.fetchObjectData(prm.addr, skipMeta, cb, wc)
obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc)
return RngRes{
obj: obj,

View file

@@ -99,7 +99,7 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) {
var putPrm shard.PutPrm
putPrm.SetObject(obj)
_, err := sh.Put(putPrm)
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
var rngPrm shard.RngPrm

View file

@@ -44,6 +44,7 @@ func TestShardReload(t *testing.T) {
meta.WithEpochState(epochState{})}
opts := []Option{
WithID(NewIDFromBytes([]byte{})),
WithLogger(l),
WithBlobStorOptions(blobOpts...),
WithMetaBaseOptions(metaOpts...),
@@ -75,7 +76,7 @@ func TestShardReload(t *testing.T) {
checkHasObjects(t, true)
t.Run("same config, no-op", func(t *testing.T) {
require.NoError(t, sh.Reload(opts...))
require.NoError(t, sh.Reload(context.Background(), opts...))
checkHasObjects(t, true)
})
@@ -86,7 +87,7 @@ func TestShardReload(t *testing.T) {
}
newOpts := newShardOpts(filepath.Join(p, "meta1"), false)
require.NoError(t, sh.Reload(newOpts...))
require.NoError(t, sh.Reload(context.Background(), newOpts...))
checkHasObjects(t, false) // new path, but no resync
@@ -97,7 +98,7 @@ func TestShardReload(t *testing.T) {
})
newOpts = newShardOpts(filepath.Join(p, "meta2"), true)
require.NoError(t, sh.Reload(newOpts...))
require.NoError(t, sh.Reload(context.Background(), newOpts...))
checkHasObjects(t, true) // all objects are restored, including the new one
@@ -106,7 +107,7 @@ func TestShardReload(t *testing.T) {
require.NoError(t, os.WriteFile(badPath, []byte{1}, 0))
newOpts = newShardOpts(badPath, true)
require.Error(t, sh.Reload(newOpts...))
require.Error(t, sh.Reload(context.Background(), newOpts...))
// Cleanup is done, no panic.
obj := newObject()
@@ -117,7 +118,7 @@ func TestShardReload(t *testing.T) {
// Successive reload produces no undesired effects.
require.NoError(t, os.RemoveAll(badPath))
require.NoError(t, sh.Reload(newOpts...))
require.NoError(t, sh.Reload(context.Background(), newOpts...))
obj = newObject()
require.NoError(t, putObject(sh, obj))
@@ -132,7 +133,7 @@ func putObject(sh *Shard, obj *objectSDK.Object) error {
var prm PutPrm
prm.SetObject(obj)
_, err := sh.Put(prm)
_, err := sh.Put(context.Background(), prm)
return err
}

View file

@@ -2,13 +2,17 @@ package shard
import (
"bytes"
"context"
"encoding/binary"
"errors"
"io"
"os"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// ErrInvalidMagic is returned when dump format is invalid.
@@ -57,8 +61,15 @@ func (r RestoreRes) FailCount() int {
// Restore restores objects from the dump prepared by Dump.
//
// Returns any error encountered.
func (s *Shard) Restore(prm RestorePrm) (RestoreRes, error) {
// Disallow changing mode during restore.
func (s *Shard) Restore(ctx context.Context, prm RestorePrm) (RestoreRes, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Restore",
trace.WithAttributes(
attribute.String("shard_id", s.ID().String()),
attribute.String("path", prm.path),
attribute.Bool("ignore_errors", prm.ignoreErrors),
))
defer span.End()
s.m.RLock()
defer s.m.RUnlock()
@@ -122,7 +133,7 @@ func (s *Shard) Restore(prm RestorePrm) (RestoreRes, error) {
}
putPrm.SetObject(obj)
_, err = s.Put(putPrm)
_, err = s.Put(ctx, putPrm)
if err != nil && !IsErrObjectExpired(err) && !IsErrRemoved(err) {
return RestoreRes{}, err
}

Some files were not shown because too many files have changed in this diff.