Evgenii Stratonikov
c6af4a3ec8
It was introduced in 69e1e6ca
to help the node determine faulty shards.
However, the following situation is possible in a real-life scenario:
1. Object O is evacuated from shard A to shard B.
2. Shard A is unmounted because of lower-level errors.
3. We now have the object in the metabase on A and in the blobstor on B.
   Technically we have it in the metabase on shard B too, but we would
   still get the error if B went into degraded mode.
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
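
To make the fallback condition concrete, below is a minimal, self-contained Go sketch. needBlobstorFallback is a hypothetical helper, not the actual frostfs-node API; it only mirrors the boolean decision get() makes after the metabase-aware pass returns nothing:

package main

import "fmt"

// needBlobstorFallback mirrors the engine's decision: when no shard
// returned the object, scan blobstors directly if some shard has a meta
// record for it (metaFound) or some shard ran without a metabase at all
// (hasDegraded), and the accumulated error is still "object not found".
func needBlobstorFallback(hasDegraded, metaFound, stillNotFound bool) bool {
	return (hasDegraded || metaFound) && stillNotFound
}

func main() {
	// Scenario from the message above: shard B holds the object only in
	// its blobstor and runs in degraded mode, so hasDegraded is true and
	// the direct scan rescues the object instead of returning an error.
	fmt.Println(needBlobstorFallback(true, false, true)) // true
}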
package engine

import (
	"context"
	"errors"

	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
	"go.uber.org/zap"
)

// GetPrm groups the parameters of Get operation.
type GetPrm struct {
	addr oid.Address
}

// GetRes groups the resulting values of Get operation.
type GetRes struct {
	obj *objectSDK.Object
}

// WithAddress is a Get option to set the address of the requested object.
//
// Option is required.
func (p *GetPrm) WithAddress(addr oid.Address) {
	p.addr = addr
}

// Object returns the requested object.
func (r GetRes) Object() *objectSDK.Object {
	return r.obj
}

// Get reads an object from local storage.
//
// Returns any error encountered that prevented
// the object part from being read completely.
//
// Returns an error of type apistatus.ObjectNotFound if the requested object is missing in local storage.
// Returns an error of type apistatus.ObjectAlreadyRemoved if the object has been marked as removed.
//
// Returns an error if executions are blocked (see BlockExecution).
func (e *StorageEngine) Get(ctx context.Context, prm GetPrm) (res GetRes, err error) {
	ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Get",
		trace.WithAttributes(
			attribute.String("address", prm.addr.EncodeToString()),
		))
	defer span.End()

	err = e.execIfNotBlocked(func() error {
		res, err = e.get(ctx, prm)
		return err
	})

	return
}

func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) {
	if e.metrics != nil {
		defer elapsed("Get", e.metrics.AddMethodDuration)()
	}

	errNotFound := new(apistatus.ObjectNotFound)

	var shPrm shard.GetPrm
	shPrm.SetAddress(prm.addr)

	it := &getShardIterator{
		OutError: errNotFound,
		ShardPrm: shPrm,
		Address:  prm.addr,
		Engine:   e,
	}

	it.tryGetWithMeta(ctx)

	if it.SplitInfo != nil {
		return GetRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(it.SplitInfo))
	}

	if it.ObjectExpired {
		return GetRes{}, errNotFound
	}

	if it.Object == nil {
		// Fall back to a direct blobstor scan only while the error is still
		// "object not found" and there is a reason to look further: either
		// some shard has a meta record for the object, or some shard was
		// queried without a metabase (degraded mode), in which case the
		// meta record may live on an unavailable shard.
		if !it.HasDegraded && it.ShardWithMeta.Shard == nil || !client.IsErrObjectNotFound(it.OutError) {
			return GetRes{}, it.OutError
		}

		it.tryGetFromBlobstore(ctx)

		if it.Object == nil {
			return GetRes{}, it.OutError
		}
		if it.ShardWithMeta.Shard != nil && it.MetaError != nil {
			e.log.Warn(logs.ShardMetaInfoPresentButObjectNotFound,
				zap.Stringer("shard_id", it.ShardWithMeta.ID()),
				zap.String("error", it.MetaError.Error()),
				zap.Stringer("address", prm.addr))
		}
	}

	return GetRes{
		obj: it.Object,
	}, nil
}

type getShardIterator struct {
	Object        *objectSDK.Object
	SplitInfo     *objectSDK.SplitInfo
	OutError      error
	ShardWithMeta hashedShard
	MetaError     error
	HasDegraded   bool
	ObjectExpired bool

	ShardPrm shard.GetPrm
	Address  oid.Address
	Engine   *StorageEngine

	splitInfoErr *objectSDK.SplitInfoError
}

func (i *getShardIterator) tryGetWithMeta(ctx context.Context) {
	i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) {
		noMeta := sh.GetMode().NoMetabase()
		i.ShardPrm.SetIgnoreMeta(noMeta)

		i.HasDegraded = i.HasDegraded || noMeta

		res, err := sh.Get(ctx, i.ShardPrm)
		if err == nil {
			i.Object = res.Object()
			return true
		}

		if res.HasMeta() {
			i.ShardWithMeta = sh
			i.MetaError = err
		}
		switch {
		case client.IsErrObjectNotFound(err):
			return false // ignore, go to next shard
		case errors.As(err, &i.splitInfoErr):
			if i.SplitInfo == nil {
				i.SplitInfo = objectSDK.NewSplitInfo()
			}

			util.MergeSplitInfo(i.splitInfoErr.SplitInfo(), i.SplitInfo)

			_, withLink := i.SplitInfo.Link()
			_, withLast := i.SplitInfo.LastPart()

			// stop iterating over shards if SplitInfo structure is complete
			return withLink && withLast
		case client.IsErrObjectAlreadyRemoved(err):
			i.OutError = err
			return true // stop, return it back
		case shard.IsErrObjectExpired(err):
			// object is found but should not be returned
			i.ObjectExpired = true
			return true
		default:
			i.Engine.reportShardError(sh, "could not get object from shard", err)
			return false
		}
	})
}

func (i *getShardIterator) tryGetFromBlobstore(ctx context.Context) {
	// If the object is not found but is present in metabase,
	// try to fetch it from blobstor directly. If it is found in any
	// blobstor, increase the error counter for the shard which contains the meta.
	i.ShardPrm.SetIgnoreMeta(true)

	i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) {
		if sh.GetMode().NoMetabase() {
			// Already visited.
			return false
		}

		res, err := sh.Get(ctx, i.ShardPrm)
		i.Object = res.Object()
		return err == nil
	})
}

// Get reads object from local storage by provided address.
func Get(ctx context.Context, storage *StorageEngine, addr oid.Address) (*objectSDK.Object, error) {
	var getPrm GetPrm
	getPrm.WithAddress(addr)

	res, err := storage.Get(ctx, getPrm)
	if err != nil {
		return nil, err
	}

	return res.Object(), nil
}
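
For reference, a hypothetical caller-side wrapper over the package-level Get could look like the sketch below. getByAddress is illustrative and not part of the package, and it assumes the frostfs-sdk-go oid.Address.DecodeString method for parsing a "<container_id>/<object_id>" string:

// getByAddress is a hypothetical wrapper: it decodes an address string
// and reads the object through the engine, treating "object not found"
// (the status that survives even the blobstor fallback) separately.
func getByAddress(ctx context.Context, eng *StorageEngine, s string) (*objectSDK.Object, error) {
	var addr oid.Address
	if err := addr.DecodeString(s); err != nil {
		return nil, err // malformed address string
	}

	obj, err := Get(ctx, eng, addr)
	if err != nil && client.IsErrObjectNotFound(err) {
		// Missing on every shard, even after the direct blobstor scan.
		return nil, err
	}
	return obj, err
}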