Anton Nikiforov
112a7c690f
Signed-off-by: Anton Nikiforov <an.nikiforov@yadro.com>
224 lines
6 KiB
Go
package engine

import (
	"context"
	"errors"

	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
	tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
	"go.uber.org/zap"
)

// GetPrm groups the parameters of the Get operation.
type GetPrm struct {
	addr oid.Address
}

// GetRes groups the resulting values of the Get operation.
type GetRes struct {
	obj *objectSDK.Object
}

// WithAddress is a Get option to set the address of the requested object.
//
// Option is required.
func (p *GetPrm) WithAddress(addr oid.Address) {
	p.addr = addr
}

// Object returns the requested object.
func (r GetRes) Object() *objectSDK.Object {
	return r.obj
}

// Get reads an object from local storage.
//
// Returns any error encountered that
// prevented the object part from being read completely.
//
// Returns an error of type apistatus.ObjectNotFound if the requested object is missing in local storage.
// Returns an error of type apistatus.ObjectAlreadyRemoved if the object has been marked as removed.
//
// Returns an error if executions are blocked (see BlockExecution).
func (e *StorageEngine) Get(ctx context.Context, prm GetPrm) (res GetRes, err error) {
	ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Get",
		trace.WithAttributes(
			attribute.String("address", prm.addr.EncodeToString()),
		))
	defer span.End()

	err = e.execIfNotBlocked(func() error {
		res, err = e.get(ctx, prm)
		return err
	})

	return
}

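// Usage sketch (illustrative only, not part of the original file; ctx, e and
// addr are assumed to come from the caller's scope):
//
//	var prm GetPrm
//	prm.WithAddress(addr)
//
//	res, err := e.Get(ctx, prm)
//	switch {
//	case err == nil:
//		obj := res.Object() // requested object header and payload
//		_ = obj
//	case client.IsErrObjectNotFound(err):
//		// object is missing in local storage
//	case client.IsErrObjectAlreadyRemoved(err):
//		// object has been marked as removed
//	}
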
func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) {
	if e.metrics != nil {
		defer elapsed("Get", e.metrics.AddMethodDuration)()
	}

	errNotFound := new(apistatus.ObjectNotFound)

	var shPrm shard.GetPrm
	shPrm.SetAddress(prm.addr)

	it := &getShardIterator{
		OutError: errNotFound,
		ShardPrm: shPrm,
		Address:  prm.addr,
		Engine:   e,
	}

	it.tryGetWithMeta(ctx)

	if it.SplitInfo != nil {
		return GetRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(it.SplitInfo))
	}

	if it.ECInfo != nil {
		return GetRes{}, logicerr.Wrap(objectSDK.NewECInfoError(it.ECInfo))
	}

	if it.ObjectExpired {
		return GetRes{}, errNotFound
	}

	if it.Object == nil {
		if !it.HasDegraded && it.ShardWithMeta.Shard == nil || !client.IsErrObjectNotFound(it.OutError) {
			return GetRes{}, it.OutError
		}

		it.tryGetFromBlobstore(ctx)

		if it.Object == nil {
			return GetRes{}, it.OutError
		}
		if it.ShardWithMeta.Shard != nil && it.MetaError != nil {
			e.log.Warn(logs.ShardMetaInfoPresentButObjectNotFound,
				zap.Stringer("shard_id", it.ShardWithMeta.ID()),
				zap.String("error", it.MetaError.Error()),
				zap.Stringer("address", prm.addr),
				zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
		}
	}

	return GetRes{
		obj: it.Object,
	}, nil
}

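// Note added for illustration (not in the original file): when get() reports a
// split object, the returned error wraps *objectSDK.SplitInfoError, so callers
// typically recover the merged SplitInfo with errors.As:
//
//	var siErr *objectSDK.SplitInfoError
//	if errors.As(err, &siErr) {
//		si := siErr.SplitInfo() // link / last-part identifiers collected across shards
//		_ = si
//	}
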
// getShardIterator accumulates the state of a shard traversal performed by
// get(): the object found so far, split/EC info merged across shards, and the
// error to report if nothing is found.
type getShardIterator struct {
	Object        *objectSDK.Object
	SplitInfo     *objectSDK.SplitInfo
	ECInfo        *objectSDK.ECInfo
	OutError      error
	ShardWithMeta hashedShard
	MetaError     error
	HasDegraded   bool
	ObjectExpired bool

	ShardPrm shard.GetPrm
	Address  oid.Address
	Engine   *StorageEngine

	splitInfoErr *objectSDK.SplitInfoError
	ecInfoErr    *objectSDK.ECInfoError
}

func (i *getShardIterator) tryGetWithMeta(ctx context.Context) {
	i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) {
		noMeta := sh.GetMode().NoMetabase()
		i.ShardPrm.SetIgnoreMeta(noMeta)

		i.HasDegraded = i.HasDegraded || noMeta

		res, err := sh.Get(ctx, i.ShardPrm)
		if err == nil {
			i.Object = res.Object()
			return true
		}

		if res.HasMeta() {
			i.ShardWithMeta = sh
			i.MetaError = err
		}
		switch {
		case client.IsErrObjectNotFound(err):
			return false // ignore, go to next shard
		case errors.As(err, &i.splitInfoErr):
			if i.SplitInfo == nil {
				i.SplitInfo = objectSDK.NewSplitInfo()
			}

			util.MergeSplitInfo(i.splitInfoErr.SplitInfo(), i.SplitInfo)

			_, withLink := i.SplitInfo.Link()
			_, withLast := i.SplitInfo.LastPart()

			// stop iterating over shards if SplitInfo structure is complete
			return withLink && withLast
		case errors.As(err, &i.ecInfoErr):
			if i.ECInfo == nil {
				i.ECInfo = objectSDK.NewECInfo()
			}

			util.MergeECInfo(i.ecInfoErr.ECInfo(), i.ECInfo)
			// stop iterating over shards if ECInfo structure is complete
			return len(i.ECInfo.Chunks) == int(i.ECInfo.Chunks[0].Total)
		case client.IsErrObjectAlreadyRemoved(err):
			i.OutError = err
			return true // stop, return it back
		case shard.IsErrObjectExpired(err):
			// object is found but should not be returned
			i.ObjectExpired = true
			return true
		default:
			i.Engine.reportShardError(sh, "could not get object from shard", err)
			return false
		}
	})
}

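// Clarifying note (added, not part of the original file): iterateOverSortedShards
// stops as soon as the callback returns true. The switch above therefore keeps
// walking shards on plain "not found" errors, stops early once the merged
// SplitInfo carries both a link and a last part (or the ECInfo chunk list is
// complete), and stops immediately for "already removed" and "expired" objects.
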
func (i *getShardIterator) tryGetFromBlobstore(ctx context.Context) {
	// If the object is not found but is present in metabase,
	// try to fetch it from blobstor directly. If it is found in any
	// blobstor, increase the error counter for the shard which contains the meta.
	i.ShardPrm.SetIgnoreMeta(true)

	i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) {
		if sh.GetMode().NoMetabase() {
			// Already visited.
			return false
		}

		res, err := sh.Get(ctx, i.ShardPrm)
		i.Object = res.Object()
		return err == nil
	})
}

// Get reads an object from local storage by the provided address.
func Get(ctx context.Context, storage *StorageEngine, addr oid.Address) (*objectSDK.Object, error) {
	var getPrm GetPrm
	getPrm.WithAddress(addr)

	res, err := storage.Get(ctx, getPrm)
	if err != nil {
		return nil, err
	}

	return res.Object(), nil
}
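
// Usage sketch for the helper above (illustrative only; `e` is an assumed,
// already-initialized *StorageEngine and `addr` comes from the caller's scope):
//
//	obj, err := Get(ctx, e, addr)
//	if err != nil {
//		return err // not-found, already-removed and split-info errors propagate as-is
//	}
//	_ = obj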