frostfs-node/pkg/local_object_storage/engine/lock.go
Dmitrii Stepanov 3a441f072f
[#1709] shard: Check if context canceled for shard iteration
If the context has already been canceled, there is no need to check the
remaining shards. At the same time, handling cancellation separately in each
handler should be avoided, so the context check has been moved to the shard
iteration method, which now returns an error.

Change-Id: I70030ace36593ce7d2b8376bee39fe82e9dbf88f
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2025-04-21 15:20:50 +03:00
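
The pattern the commit describes, as a minimal sketch (names such as sortedShards are assumed for illustration and are not the actual frostfs-node implementation): the iteration method checks the context once per shard and returns the context error, so per-shard handlers no longer handle cancellation themselves.

func (e *StorageEngine) iterateOverSortedShards(ctx context.Context, addr oid.Address, handler func(int, hashedShard) (stop bool)) error {
	for i, sh := range e.sortedShards(addr) { // sortedShards: hypothetical helper returning shards in placement order
		if err := ctx.Err(); err != nil {
			return err // context canceled: skip the remaining shards
		}
		if handler(i, sh) {
			break
		}
	}
	return nil
}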


package engine

import (
	"context"
	"errors"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
	"go.uber.org/zap"
)

var errLockFailed = errors.New("lock operation failed")

// Lock marks objects as locked with another object. All objects must belong
// to the specified container.
//
// Allows locking regular objects only (otherwise returns apistatus.LockNonRegularObject).
//
// The locked list must contain unique IDs. Panics if it is empty.
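//
// A usage sketch (identifiers are hypothetical, for illustration only):
//
//	if err := e.Lock(ctx, cnr, lockerID, lockedIDs); err != nil {
//		// handle apistatus.LockNonRegularObject or a failed lock
//	}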
func (e *StorageEngine) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
	ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Lock",
		trace.WithAttributes(
			attribute.String("container_id", idCnr.EncodeToString()),
			attribute.String("locker", locker.EncodeToString()),
			attribute.Int("locked_count", len(locked)),
		))
	defer span.End()
	defer elapsed("Lock", e.metrics.AddMethodDuration)()

	return e.execIfNotBlocked(func() error {
		return e.lock(ctx, idCnr, locker, locked)
	})
}

func (e *StorageEngine) lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
	for i := range locked {
		st, err := e.lockSingle(ctx, idCnr, locker, locked[i], true)
		if err != nil {
			return err
		}
		switch st {
		case 1:
			return logicerr.Wrap(new(apistatus.LockNonRegularObject))
		case 0:
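			// No shard reported the object as present; retry without the
			// existence check so the lock record is stored anyway.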
			st, err = e.lockSingle(ctx, idCnr, locker, locked[i], false)
			if err != nil {
				return err
			}
			switch st {
			case 1:
				return logicerr.Wrap(new(apistatus.LockNonRegularObject))
			case 0:
				return logicerr.Wrap(errLockFailed)
			}
		}
	}
	return nil
}

// Returns:
// - 0: fail
// - 1: locking irregular object
// - 2: ok
func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8, retErr error) {
	// code is pretty similar to inhumeAddr, maybe unify?
	root := false
	var addrLocked oid.Address
	addrLocked.SetContainer(idCnr)
	addrLocked.SetObject(locked)
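	// Shards are visited in placement-sorted order; per the commit above, the
	// iterator itself checks for context cancellation and returns the context
	// error instead of visiting the remaining shards.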
	retErr = e.iterateOverSortedShards(ctx, addrLocked, func(_ int, sh hashedShard) (stop bool) {
		defer func() {
			// if object is root we continue since information about it
			// can be presented in other shards
			if checkExists && root {
				stop = false
			}
		}()

		if checkExists {
			var existsPrm shard.ExistsPrm
			existsPrm.Address = addrLocked

			exRes, err := sh.Exists(ctx, existsPrm)
			if err != nil {
				var siErr *objectSDK.SplitInfoError
				var eiErr *objectSDK.ECInfoError
				if errors.As(err, &eiErr) {
					eclocked, ok := e.checkECLocked(ctx, sh, idCnr, locker, locked, eiErr)
					if !ok {
						return false
					}

					err = sh.Lock(ctx, idCnr, locker, eclocked)
					if err != nil {
						e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr),
							zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked))
						return false
					}
					root = true
					return false
				} else if !errors.As(err, &siErr) {
					if shard.IsErrObjectExpired(err) {
						// object is already expired =>
						// do not lock it
						return true
					}

					e.reportShardError(ctx, sh, "could not check locked object for presence in shard", err, zap.Stringer("container_id", idCnr),
						zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked))
					return
				}
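				// A SplitInfoError means the object is a root object, so its
				// parts may reside on other shards; lock it here and let the
				// deferred handler keep the iteration going.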
				root = true
			} else if !exRes.Exists() {
				return
			}
		}

		err := sh.Lock(ctx, idCnr, locker, []oid.ID{locked})
		if err != nil {
			e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr),
				zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked))

			var errIrregular *apistatus.LockNonRegularObject
			if errors.As(err, &errIrregular) {
				status = 1
				return true
			}
			return false
		}
		status = 2
		return true
	})
	return
}

func (e *StorageEngine) checkECLocked(ctx context.Context, sh hashedShard, idCnr cid.ID, locker, locked oid.ID, eiErr *objectSDK.ECInfoError) ([]oid.ID, bool) {
	eclocked := []oid.ID{locked}
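	// Collect the IDs of all EC chunks so they are locked together with the
	// requested object.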
	for _, chunk := range eiErr.ECInfo().Chunks {
		var objID oid.ID
		err := objID.ReadFromV2(chunk.ID)
		if err != nil {
			e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr),
				zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked))
			return nil, false
		}
		eclocked = append(eclocked, objID)
	}
	return eclocked, true
}