package engine

import (
	"context"
	"errors"
	"fmt"
	"sync/atomic"

	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
	tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"git.frostfs.info/TrueCloudLab/hrw"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
	"go.uber.org/zap"
)

var (
	ErrMustBeReadOnly = logicerr.New("shard must be in read-only mode")

	evacuationOperationLogField = zap.String("operation", "evacuation")
)

// EvacuateShardPrm represents parameters for the EvacuateShard operation.
type EvacuateShardPrm struct {
	shardID      []*shard.ID
	handler      func(context.Context, oid.Address, *objectSDK.Object) error
	ignoreErrors bool
	async        bool
}

// EvacuateShardRes represents result of the EvacuateShard operation.
type EvacuateShardRes struct {
	evacuated *atomic.Uint64
	total     *atomic.Uint64
	failed    *atomic.Uint64
}

// NewEvacuateShardRes creates a new EvacuateShardRes instance.
func NewEvacuateShardRes() *EvacuateShardRes {
	return &EvacuateShardRes{
		evacuated: new(atomic.Uint64),
		total:     new(atomic.Uint64),
		failed:    new(atomic.Uint64),
	}
}

// WithShardIDList sets the list of shard IDs to evacuate.
func (p *EvacuateShardPrm) WithShardIDList(id []*shard.ID) {
	p.shardID = id
}

// WithIgnoreErrors sets the flag to ignore errors.
func (p *EvacuateShardPrm) WithIgnoreErrors(ignore bool) {
	p.ignoreErrors = ignore
}

// WithFaultHandler sets the handler to call for objects which cannot be saved on other shards.
func (p *EvacuateShardPrm) WithFaultHandler(f func(context.Context, oid.Address, *objectSDK.Object) error) {
	p.handler = f
}

// WithAsync sets the flag to run evacuation asynchronously.
func (p *EvacuateShardPrm) WithAsync(async bool) {
	p.async = async
}

// Evacuated returns the number of evacuated objects.
// Objects for which the handler returned no error are also assumed evacuated.
func (p *EvacuateShardRes) Evacuated() uint64 {
	if p == nil {
		return 0
	}
	return p.evacuated.Load()
}

// Total returns the total number of objects to evacuate.
func (p *EvacuateShardRes) Total() uint64 {
	if p == nil {
		return 0
	}
	return p.total.Load()
}

// Failed returns the number of objects that failed to evacuate.
func (p *EvacuateShardRes) Failed() uint64 {
	if p == nil {
		return 0
	}
	return p.failed.Load()
}

// DeepCopy returns a deep copy of the result instance.
func (p *EvacuateShardRes) DeepCopy() *EvacuateShardRes {
	if p == nil {
		return nil
	}

	res := &EvacuateShardRes{
		evacuated: new(atomic.Uint64),
		total:     new(atomic.Uint64),
		failed:    new(atomic.Uint64),
	}

	res.evacuated.Store(p.evacuated.Load())
	res.total.Store(p.total.Load())
	res.failed.Store(p.failed.Load())
	return res
}
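
// defaultEvacuateBatchSize is the number of objects requested from the shard
// per ListWithCursor call during evacuation.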
const defaultEvacuateBatchSize = 100
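
// pooledShard couples a shard with the worker pool used to put objects into it.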
type pooledShard struct {
	hashedShard
	pool util.WorkerPool
}
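
// errMustHaveTwoShards is returned when evacuation is requested without a
// fault handler and no spare shard would remain to receive the objects.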
var errMustHaveTwoShards = errors.New("must have at least 1 spare shard")

// Evacuate moves data from one shard to the others.
// The shard being moved must be in read-only mode.
func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (*EvacuateShardRes, error) {
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	default:
	}

	shardIDs := make([]string, len(prm.shardID))
	for i := range prm.shardID {
		shardIDs[i] = prm.shardID[i].String()
	}

	ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Evacuate",
		trace.WithAttributes(
			attribute.StringSlice("shardIDs", shardIDs),
			attribute.Bool("async", prm.async),
			attribute.Bool("ignoreErrors", prm.ignoreErrors),
		))
	defer span.End()

	shards, weights, err := e.getActualShards(shardIDs, prm.handler != nil)
	if err != nil {
		return nil, err
	}

	shardsToEvacuate := make(map[string]*shard.Shard)
	for i := range shardIDs {
		for j := range shards {
			if shards[j].ID().String() == shardIDs[i] {
				shardsToEvacuate[shardIDs[i]] = shards[j].Shard
			}
		}
	}

	res := NewEvacuateShardRes()
	ctx = ctxOrBackground(ctx, prm.async)
	eg, egCtx, err := e.evacuateLimiter.TryStart(ctx, shardIDs, res)

	if err != nil {
		return nil, err
	}

	eg.Go(func() error {
		return e.evacuateShards(egCtx, shardIDs, prm, res, shards, weights, shardsToEvacuate)
	})

	if prm.async {
		return nil, nil
	}

	return res, eg.Wait()
}
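
// ctxOrBackground returns context.Background() if background is true,
// otherwise it returns the passed context unchanged.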
func ctxOrBackground(ctx context.Context, background bool) context.Context {
	if background {
		return context.Background()
	}
	return ctx
}
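
// evacuateShards counts the objects to evacuate and then evacuates the given
// shards one by one, reporting completion to the evacuation limiter.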
func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, prm EvacuateShardPrm, res *EvacuateShardRes,
	shards []pooledShard, weights []float64, shardsToEvacuate map[string]*shard.Shard) error {
	var err error
	ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShards",
		trace.WithAttributes(
			attribute.StringSlice("shardIDs", shardIDs),
			attribute.Bool("async", prm.async),
			attribute.Bool("ignoreErrors", prm.ignoreErrors),
		))

	defer func() {
		span.End()
		e.evacuateLimiter.Complete(err)
	}()

	e.log.Info(logs.EngineStartedShardsEvacuation, zap.Strings("shard_ids", shardIDs), evacuationOperationLogField,
		zap.String("trace_id", tracingPkg.GetTraceID(ctx)))

	err = e.getTotalObjectsCount(ctx, shardsToEvacuate, res)
	if err != nil {
		e.log.Error(logs.EngineShardsEvacuationFailedToCount, zap.Strings("shard_ids", shardIDs), zap.Error(err), evacuationOperationLogField,
			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
		return err
	}

	for _, shardID := range shardIDs {
		if err = e.evacuateShard(ctx, shardID, prm, res, shards, weights, shardsToEvacuate); err != nil {
			e.log.Error(logs.EngineFinishedWithErrorShardsEvacuation, zap.Error(err), zap.Strings("shard_ids", shardIDs), evacuationOperationLogField)
			return err
		}
	}

	e.log.Info(logs.EngineFinishedSuccessfullyShardsEvacuation,
		zap.Strings("shard_ids", shardIDs),
		evacuationOperationLogField,
		zap.Uint64("total", res.Total()),
		zap.Uint64("evacuated", res.Evacuated()),
		zap.Uint64("failed", res.Failed()),
	)
	return nil
}
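
// getTotalObjectsCount sums the logical object counts of the shards being
// evacuated into res.total, skipping shards in degraded mode.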
func (e *StorageEngine) getTotalObjectsCount(ctx context.Context, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes) error {
	ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.getTotalObjectsCount")
	defer span.End()

	for _, sh := range shardsToEvacuate {
		cnt, err := sh.LogicalObjectsCount(ctx)
		if err != nil {
			if errors.Is(err, shard.ErrDegradedMode) {
				continue
			}
			return err
		}
		res.total.Add(cnt)
	}
	return nil
}
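
// evacuateShard iterates over the shard's objects in batches of
// defaultEvacuateBatchSize via ListWithCursor and evacuates each batch.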
func (e *StorageEngine) evacuateShard(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
	shards []pooledShard, weights []float64, shardsToEvacuate map[string]*shard.Shard) error {
	ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShard",
		trace.WithAttributes(
			attribute.String("shardID", shardID),
		))
	defer span.End()

	var listPrm shard.ListWithCursorPrm
	listPrm.WithCount(defaultEvacuateBatchSize)

	sh := shardsToEvacuate[shardID]

	var c *meta.Cursor
	for {
		listPrm.WithCursor(c)

		// TODO (@fyrchik): #1731 this approach doesn't work in degraded modes
		// because ListWithCursor works only with the metabase.
		listRes, err := sh.ListWithCursor(ctx, listPrm)
		if err != nil {
			if errors.Is(err, meta.ErrEndOfListing) || errors.Is(err, shard.ErrDegradedMode) {
				break
			}
			e.log.Error(logs.EngineShardsEvacuationFailedToListObjects, zap.String("shard_id", shardID), zap.Error(err), evacuationOperationLogField,
				zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
			return err
		}

		if err = e.evacuateObjects(ctx, sh, listRes.AddressList(), prm, res, shards, weights, shardsToEvacuate); err != nil {
			return err
		}

		c = listRes.Cursor()
	}
	return nil
}
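
// getActualShards checks that every shard to evacuate exists and is in
// read-only mode, and returns all shards of the engine together with their
// weights for HRW sorting.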
func (e *StorageEngine) getActualShards(shardIDs []string, handlerDefined bool) ([]pooledShard, []float64, error) {
	e.mtx.RLock()
	defer e.mtx.RUnlock()

	for i := range shardIDs {
		sh, ok := e.shards[shardIDs[i]]
		if !ok {
			return nil, nil, errShardNotFound
		}

		if !sh.GetMode().ReadOnly() {
			return nil, nil, ErrMustBeReadOnly
		}
	}

	if len(e.shards)-len(shardIDs) < 1 && !handlerDefined {
		return nil, nil, errMustHaveTwoShards
	}

	// We must have all shards, to have correct information about their
	// indexes in a sorted slice and set appropriate marks in the metabase.
	// Evacuated shard is skipped during put.
	shards := make([]pooledShard, 0, len(e.shards))
	for id := range e.shards {
		shards = append(shards, pooledShard{
			hashedShard: hashedShard(e.shards[id]),
			pool:        e.shardPools[id],
		})
	}

	weights := make([]float64, 0, len(shards))
	for i := range shards {
		weights = append(weights, e.shardWeight(shards[i].Shard))
	}

	return shards, weights, nil
}
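
// evacuateObjects reads each listed object from the source shard and tries to
// put it on another local shard; if the object cannot be placed locally, it is
// passed to the fault handler.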
func (e *StorageEngine) evacuateObjects(ctx context.Context, sh *shard.Shard, toEvacuate []object.AddressWithType, prm EvacuateShardPrm, res *EvacuateShardRes,
	shards []pooledShard, weights []float64, shardsToEvacuate map[string]*shard.Shard) error {
	ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateObjects",
		trace.WithAttributes(
			attribute.Int("objects_count", len(toEvacuate)),
		))
	defer span.End()

	for i := range toEvacuate {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		addr := toEvacuate[i].Address

		var getPrm shard.GetPrm
		getPrm.SetAddress(addr)

		getRes, err := sh.Get(ctx, getPrm)
		if err != nil {
			if prm.ignoreErrors {
				res.failed.Add(1)
				continue
			}
			e.log.Error(logs.EngineShardsEvacuationFailedToReadObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
				zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
			return err
		}

		evacuatedLocal, err := e.tryEvacuateObjectLocal(ctx, addr, getRes.Object(), sh, shards, weights, shardsToEvacuate, res)
		if err != nil {
			return err
		}

		if evacuatedLocal {
			continue
		}

		if prm.handler == nil {
			// Do not check the ignoreErrors flag here because
			// ignoring errors on put makes this command kinda useless.
			return fmt.Errorf("%w: %s", errPutShard, toEvacuate[i])
		}

		err = prm.handler(ctx, addr, getRes.Object())
		if err != nil {
			e.log.Error(logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
				zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
			return err
		}
		res.evacuated.Add(1)
	}
	return nil
}
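
// tryEvacuateObjectLocal tries to put the object to one of the other local
// shards, iterating them in HRW order of the object address and skipping the
// shards under evacuation. It reports whether the object was stored or
// already exists on another shard.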
func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Address, object *objectSDK.Object, sh *shard.Shard,
	shards []pooledShard, weights []float64, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes) (bool, error) {
	hrw.SortHasherSliceByWeightValue(shards, weights, hrw.StringHash(addr.EncodeToString()))
	for j := range shards {
		select {
		case <-ctx.Done():
			return false, ctx.Err()
		default:
		}

		if _, ok := shardsToEvacuate[shards[j].ID().String()]; ok {
			continue
		}
		putDone, exists := e.putToShard(ctx, shards[j].hashedShard, j, shards[j].pool, addr, object)
		if putDone || exists {
			if putDone {
				res.evacuated.Add(1)
				e.log.Debug(logs.EngineObjectIsMovedToAnotherShard,
					zap.Stringer("from", sh.ID()),
					zap.Stringer("to", shards[j].ID()),
					zap.Stringer("addr", addr),
					evacuationOperationLogField,
					zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
			}
			return true, nil
		}
	}

	return false, nil
}
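
// GetEvacuationState returns the current shard evacuation state.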
func (e *StorageEngine) GetEvacuationState(ctx context.Context) (*EvacuationState, error) {
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	default:
	}

	return e.evacuateLimiter.GetState(), nil
}
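
// EnqueRunningEvacuationStop requests cancellation of the shard evacuation
// that is currently running, if any.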
func (e *StorageEngine) EnqueRunningEvacuationStop(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}

	return e.evacuateLimiter.CancelIfRunning()
}