forked from TrueCloudLab/frostfs-node
[#1005] engine: Drop shards weights
Unused.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
parent abea258b65
commit 2ad433dbcb
8 changed files with 65 additions and 77 deletions
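
The hunks below drop the `weights []float64` slice that `getActualShards` used to build and that every evacuation helper threaded down to the HRW sort calls, which switch from `hrw.SortHasherSliceByWeightValue(shards, weights, ...)` to `hrw.SortHasherSliceByValue(shards, ...)`. As a rough illustration of what unweighted rendezvous (HRW) ranking over shards looks like, here is a minimal, self-contained Go sketch; it is not the hrw package used by the real code, and the `score` and `rankShards` helpers are hypothetical names introduced only for this example.

```go
// Minimal sketch of unweighted rendezvous (HRW) ranking.
// Hypothetical example code, not the hrw package API used in frostfs-node.
package main

import (
    "fmt"
    "hash/fnv"
    "sort"
)

// score mixes a shard ID with the object key; the highest score wins.
func score(shardID, key string) uint64 {
    h := fnv.New64a()
    h.Write([]byte(shardID))
    h.Write([]byte(key))
    return h.Sum64()
}

// rankShards orders shard IDs by descending HRW score for the given key.
// No per-shard weights are involved, mirroring the unweighted sort the patch switches to.
func rankShards(shardIDs []string, key string) []string {
    ranked := append([]string(nil), shardIDs...)
    sort.Slice(ranked, func(i, j int) bool {
        return score(ranked[i], key) > score(ranked[j], key)
    })
    return ranked
}

func main() {
    shards := []string{"shard-1", "shard-2", "shard-3"}
    // An evacuation-style caller would try the best-ranked shard first
    // and fall through to the next candidate on failure.
    fmt.Println(rankShards(shards, "container/object-address"))
}
```

In the patch itself the sorted slice is then scanned in order; as the retained comment on `findShardToEvacuateTree` puts it, the first shard according to HRW (or the first shard that already has the tree) is chosen.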
@@ -221,7 +221,7 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (*Ev
     ))
     defer span.End()
 
-    shards, weights, err := e.getActualShards(shardIDs, prm)
+    shards, err := e.getActualShards(shardIDs, prm)
     if err != nil {
         return nil, err
     }
@@ -243,7 +243,7 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (*Ev
     }
 
     eg.Go(func() error {
-        return e.evacuateShards(egCtx, shardIDs, prm, res, shards, weights, shardsToEvacuate)
+        return e.evacuateShards(egCtx, shardIDs, prm, res, shards, shardsToEvacuate)
     })
 
     if prm.Async {
@@ -261,7 +261,7 @@ func ctxOrBackground(ctx context.Context, background bool) context.Context {
 }
 
 func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, prm EvacuateShardPrm, res *EvacuateShardRes,
-    shards []pooledShard, weights []float64, shardsToEvacuate map[string]*shard.Shard,
+    shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
 ) error {
     var err error
     ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShards",
@@ -288,7 +288,7 @@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p
     }
 
     for _, shardID := range shardIDs {
-        if err = e.evacuateShard(ctx, shardID, prm, res, shards, weights, shardsToEvacuate); err != nil {
+        if err = e.evacuateShard(ctx, shardID, prm, res, shards, shardsToEvacuate); err != nil {
             e.log.Error(logs.EngineFinishedWithErrorShardsEvacuation, zap.Error(err), zap.Strings("shard_ids", shardIDs), evacuationOperationLogField,
                 zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope))
             return err
@@ -336,7 +336,7 @@ func (e *StorageEngine) getTotals(ctx context.Context, prm EvacuateShardPrm, sha
 }
 
 func (e *StorageEngine) evacuateShard(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
-    shards []pooledShard, weights []float64, shardsToEvacuate map[string]*shard.Shard,
+    shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
 ) error {
     ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShard",
         trace.WithAttributes(
@@ -345,13 +345,13 @@ func (e *StorageEngine) evacuateShard(ctx context.Context, shardID string, prm E
     defer span.End()
 
     if prm.Scope.WithObjects() {
-        if err := e.evacuateShardObjects(ctx, shardID, prm, res, shards, weights, shardsToEvacuate); err != nil {
+        if err := e.evacuateShardObjects(ctx, shardID, prm, res, shards, shardsToEvacuate); err != nil {
             return err
         }
     }
 
     if prm.Scope.WithTrees() && shardsToEvacuate[shardID].PiloramaEnabled() {
-        if err := e.evacuateShardTrees(ctx, shardID, prm, res, shards, weights, shardsToEvacuate); err != nil {
+        if err := e.evacuateShardTrees(ctx, shardID, prm, res, shards, shardsToEvacuate); err != nil {
             return err
         }
     }
@@ -360,7 +360,7 @@ func (e *StorageEngine) evacuateShard(ctx context.Context, shardID string, prm E
 }
 
 func (e *StorageEngine) evacuateShardObjects(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
-    shards []pooledShard, weights []float64, shardsToEvacuate map[string]*shard.Shard,
+    shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
 ) error {
     var listPrm shard.ListWithCursorPrm
     listPrm.WithCount(defaultEvacuateBatchSize)
@@ -383,7 +383,7 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, shardID string
             return err
         }
 
-        if err = e.evacuateObjects(ctx, sh, listRes.AddressList(), prm, res, shards, weights, shardsToEvacuate); err != nil {
+        if err = e.evacuateObjects(ctx, sh, listRes.AddressList(), prm, res, shards, shardsToEvacuate); err != nil {
            return err
         }
 
@@ -393,7 +393,7 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, shardID string
 }
 
 func (e *StorageEngine) evacuateShardTrees(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
-    shards []pooledShard, weights []float64, shardsToEvacuate map[string]*shard.Shard,
+    shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
 ) error {
     sh := shardsToEvacuate[shardID]
 
@@ -414,7 +414,7 @@ func (e *StorageEngine) evacuateShardTrees(ctx context.Context, shardID string,
             return err
         }
         listPrm.NextPageToken = listRes.NextPageToken
-        if err := e.evacuateTrees(ctx, sh, listRes.Items, prm, res, shards, weights, shardsToEvacuate); err != nil {
+        if err := e.evacuateTrees(ctx, sh, listRes.Items, prm, res, shards, shardsToEvacuate); err != nil {
            return err
         }
     }
@@ -422,8 +422,7 @@ func (e *StorageEngine) evacuateShardTrees(ctx context.Context, shardID string,
 }
 
 func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, trees []pilorama.ContainerIDTreeID,
-    prm EvacuateShardPrm, res *EvacuateShardRes, shards []pooledShard, weights []float64,
-    shardsToEvacuate map[string]*shard.Shard,
+    prm EvacuateShardPrm, res *EvacuateShardRes, shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
 ) error {
     ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateTrees",
         trace.WithAttributes(
@@ -438,7 +437,7 @@ func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, tree
         default:
         }
 
-        success, shardID, err := e.tryEvacuateTreeLocal(ctx, sh, contTree, prm, shards, weights, shardsToEvacuate)
+        success, shardID, err := e.tryEvacuateTreeLocal(ctx, sh, contTree, prm, shards, shardsToEvacuate)
         if err != nil {
             return err
         }
@@ -477,9 +476,9 @@ func (e *StorageEngine) evacuateTreeToOtherNode(ctx context.Context, sh *shard.S
 }
 
 func (e *StorageEngine) tryEvacuateTreeLocal(ctx context.Context, sh *shard.Shard, tree pilorama.ContainerIDTreeID,
-    prm EvacuateShardPrm, shards []pooledShard, weights []float64, shardsToEvacuate map[string]*shard.Shard,
+    prm EvacuateShardPrm, shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
 ) (bool, string, error) {
-    target, found, err := e.findShardToEvacuateTree(ctx, tree, shards, weights, shardsToEvacuate)
+    target, found, err := e.findShardToEvacuateTree(ctx, tree, shards, shardsToEvacuate)
     if err != nil {
         return false, "", err
     }
@@ -547,9 +546,9 @@ func (e *StorageEngine) tryEvacuateTreeLocal(ctx context.Context, sh *shard.Shar
 
 // findShardToEvacuateTree returns first shard according HRW or first shard with tree exists.
 func (e *StorageEngine) findShardToEvacuateTree(ctx context.Context, tree pilorama.ContainerIDTreeID,
-    shards []pooledShard, weights []float64, shardsToEvacuate map[string]*shard.Shard,
+    shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
 ) (pooledShard, bool, error) {
-    hrw.SortHasherSliceByWeightValue(shards, weights, hrw.StringHash(tree.CID.EncodeToString()))
+    hrw.SortHasherSliceByValue(shards, hrw.StringHash(tree.CID.EncodeToString()))
     var result pooledShard
     var found bool
     for _, target := range shards {
@@ -583,31 +582,31 @@ func (e *StorageEngine) findShardToEvacuateTree(ctx context.Context, tree pilora
     return result, found, nil
 }
 
-func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm) ([]pooledShard, []float64, error) {
+func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm) ([]pooledShard, error) {
     e.mtx.RLock()
     defer e.mtx.RUnlock()
 
     for i := range shardIDs {
         sh, ok := e.shards[shardIDs[i]]
         if !ok {
-            return nil, nil, errShardNotFound
+            return nil, errShardNotFound
         }
 
         if !sh.GetMode().ReadOnly() {
-            return nil, nil, ErrMustBeReadOnly
+            return nil, ErrMustBeReadOnly
         }
 
         if prm.Scope.TreesOnly() && !sh.PiloramaEnabled() {
-            return nil, nil, fmt.Errorf("shard %s doesn't have pilorama enabled", sh.ID())
+            return nil, fmt.Errorf("shard %s doesn't have pilorama enabled", sh.ID())
         }
     }
 
     if len(e.shards)-len(shardIDs) < 1 && prm.ObjectsHandler == nil && prm.Scope.WithObjects() {
-        return nil, nil, errMustHaveTwoShards
+        return nil, errMustHaveTwoShards
     }
 
     if len(e.shards)-len(shardIDs) < 1 && prm.TreeHandler == nil && prm.Scope.WithTrees() {
-        return nil, nil, errMustHaveTwoShards
+        return nil, errMustHaveTwoShards
     }
 
     // We must have all shards, to have correct information about their
@@ -620,17 +619,11 @@ func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm)
             pool:        e.shardPools[id],
         })
     }
-
-    weights := make([]float64, 0, len(shards))
-    for i := range shards {
-        weights = append(weights, e.shardWeight(shards[i].Shard))
-    }
-
-    return shards, weights, nil
+    return shards, nil
 }
 
 func (e *StorageEngine) evacuateObjects(ctx context.Context, sh *shard.Shard, toEvacuate []object.AddressWithType, prm EvacuateShardPrm, res *EvacuateShardRes,
-    shards []pooledShard, weights []float64, shardsToEvacuate map[string]*shard.Shard,
+    shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
 ) error {
     ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateObjects",
         trace.WithAttributes(
@@ -660,7 +653,7 @@ func (e *StorageEngine) evacuateObjects(ctx context.Context, sh *shard.Shard, to
             return err
         }
 
-        evacuatedLocal, err := e.tryEvacuateObjectLocal(ctx, addr, getRes.Object(), sh, shards, weights, shardsToEvacuate, res)
+        evacuatedLocal, err := e.tryEvacuateObjectLocal(ctx, addr, getRes.Object(), sh, shards, shardsToEvacuate, res)
         if err != nil {
             return err
         }
@@ -687,9 +680,9 @@ func (e *StorageEngine) evacuateObjects(ctx context.Context, sh *shard.Shard, to
 }
 
 func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Address, object *objectSDK.Object, sh *shard.Shard,
-    shards []pooledShard, weights []float64, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes,
+    shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes,
 ) (bool, error) {
-    hrw.SortHasherSliceByWeightValue(shards, weights, hrw.StringHash(addr.EncodeToString()))
+    hrw.SortHasherSliceByValue(shards, hrw.StringHash(addr.EncodeToString()))
     for j := range shards {
         select {
         case <-ctx.Done():