package engine

import (
	"context"
	"errors"

	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
	tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
	"go.uber.org/zap"
)

// PutPrm groups the parameters of the Put operation.
type PutPrm struct {
	obj *objectSDK.Object
}

var errPutShard = errors.New("could not put object to any shard")

// putToShardStatus describes the outcome of a single putToShard attempt.
type putToShardStatus byte

const (
	// putToShardUnknown means no decision was reached and the next shard should be tried.
	putToShardUnknown putToShardStatus = iota
	// putToShardSuccess means the object has been saved to the shard.
	putToShardSuccess
	// putToShardExists means the object is already stored in the shard.
	putToShardExists
	// putToShardRemoved means the object has been marked as removed and must not be saved.
	putToShardRemoved
)

type putToShardRes struct {
	status putToShardStatus
	err    error
}

// WithObject is a Put option to set the object to save.
//
// Option is required.
func (p *PutPrm) WithObject(obj *objectSDK.Object) {
	p.obj = obj
}

// Put saves the object to local storage.
//
// Returns any error encountered that prevented
// the object from being completely saved.
//
// Returns an error if executions are blocked (see BlockExecution).
//
// Returns an error of type apistatus.ObjectAlreadyRemoved if the object has been marked as removed.
func (e *StorageEngine) Put(ctx context.Context, prm PutPrm) (err error) {
	ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Put",
		trace.WithAttributes(
			attribute.String("address", object.AddressOf(prm.obj).EncodeToString()),
		))
	defer span.End()

	err = e.execIfNotBlocked(func() error {
		err = e.put(ctx, prm)
		return err
	})

	return
}

func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error {
	if e.metrics != nil {
		defer elapsed("Put", e.metrics.AddMethodDuration)()
	}

	addr := object.AddressOf(prm.obj)

	// In #1146 this check was parallelized, however, it became
	// much slower on fast machines for 4 shards.

	// For erasure-coded chunks, track the parent address as well:
	// existence and lock checks below also take it into account.
	var parent oid.Address
	if prm.obj.ECHeader() != nil {
		parent.SetObject(prm.obj.ECHeader().Parent())
		parent.SetContainer(addr.Container())
	}

	var shPrm shard.ExistsPrm
	shPrm.Address = addr
	shPrm.ParentAddress = parent
	existed, locked, err := e.exists(ctx, shPrm)
	if err != nil {
		return err
	}

	if !existed && locked {
		// The object is new but its parent is locked:
		// inherit the parent's locks so the stored object is protected as well.
		lockers, err := e.GetLocked(ctx, parent)
		if err != nil {
			return err
		}
		for _, locker := range lockers {
			err = e.lock(ctx, addr.Container(), locker, []oid.ID{addr.Object()})
			if err != nil {
				return err
			}
		}
	}

	var shRes putToShardRes
	e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
		e.mtx.RLock()
		pool, ok := e.shardPools[sh.ID().String()]
		e.mtx.RUnlock()
		if !ok {
			// Shard was concurrently removed, skip.
			return false
		}
		shRes = e.putToShard(ctx, sh, pool, addr, prm.obj)
		return shRes.status != putToShardUnknown
	})

	switch shRes.status {
	case putToShardUnknown:
		return errPutShard
	case putToShardRemoved:
		return shRes.err
	case putToShardExists, putToShardSuccess:
		return nil
	default:
		return errPutShard
	}
}

// putToShard puts the object to sh.
// It returns the resulting status and, when the error must be propagated to the caller, the error itself.
func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool util.WorkerPool, addr oid.Address, obj *objectSDK.Object,
) (res putToShardRes) {
	// exitCh signals that the submitted task has finished (or was never started).
	exitCh := make(chan struct{})

	if err := pool.Submit(func() {
		defer close(exitCh)

		var existPrm shard.ExistsPrm
		existPrm.Address = addr

		exists, err := sh.Exists(ctx, existPrm)
		if err != nil {
			if shard.IsErrObjectExpired(err) {
				// the object was found but has expired => do nothing with it
				res.status = putToShardExists
			} else {
				e.log.Warn(logs.EngineCouldNotCheckObjectExistence,
					zap.Stringer("shard_id", sh.ID()),
					zap.String("error", err.Error()),
					zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
			}

			return // this is not an ErrAlreadyRemoved error, so we can try the next shard
		}

		if exists.Exists() {
			res.status = putToShardExists
			return
		}

		var putPrm shard.PutPrm
		putPrm.SetObject(obj)

		_, err = sh.Put(ctx, putPrm)
		if err != nil {
			if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) ||
				errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) {
				// The shard cannot accept the object right now; leave the status
				// unknown so the caller tries the next shard.
				e.log.Warn(logs.EngineCouldNotPutObjectToShard,
					zap.Stringer("shard_id", sh.ID()),
					zap.String("error", err.Error()),
					zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
				return
			}
			if client.IsErrObjectAlreadyRemoved(err) {
				// The object has been marked as removed: stop iterating over shards
				// and propagate the error to the caller.
				e.log.Warn(logs.EngineCouldNotPutObjectToShard,
					zap.Stringer("shard_id", sh.ID()),
					zap.String("error", err.Error()),
					zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
				res.status = putToShardRemoved
				res.err = err
				return
			}

			e.reportShardError(sh, "could not put object to shard", err, zap.Stringer("address", addr))
			return
		}

		res.status = putToShardSuccess
	}); err != nil {
		e.log.Warn(logs.EngineCouldNotPutObjectToShard, zap.Error(err))
		close(exitCh)
	}

	<-exitCh

	return
}

// Put writes the provided object to local storage.
func Put(ctx context.Context, storage *StorageEngine, obj *objectSDK.Object) error {
	var putPrm PutPrm
	putPrm.WithObject(obj)

	return storage.Put(ctx, putPrm)
}
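
// putUsageSketch is an illustrative, non-authoritative sketch of how a caller
// holding an initialized *StorageEngine might store an object and distinguish
// the "already removed" outcome documented on StorageEngine.Put. The function
// name and the handling policy are assumptions for demonstration only; it is
// not used by the engine itself and only relies on identifiers defined above.
func putUsageSketch(ctx context.Context, e *StorageEngine, obj *objectSDK.Object) error {
	var prm PutPrm
	prm.WithObject(obj)

	err := e.Put(ctx, prm)
	if err != nil && client.IsErrObjectAlreadyRemoved(err) {
		// The object has a tombstone: storing it again is pointless,
		// so a caller may treat this as a terminal condition.
		return err
	}
	return err
}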