package policer

import (
	"context"
	"encoding/hex"
	"errors"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
	objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
	containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"go.uber.org/zap"
	"golang.org/x/sync/errgroup"
)

var errNoECInfoReturned = errors.New("no EC info returned")
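
// ecChunkProcessResult is the verdict for a single locally stored EC chunk: whether
// its placement is already valid and whether the local copy may be removed.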
type ecChunkProcessResult struct {
	validPlacement bool
	removeLocal    bool
}

var errInvalidECPlacement = errors.New("invalid EC placement: EC placement must have one placement vector with at least one node")
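
// processECContainerObject routes an object stored in an EC container: objects
// without EC info (tombstones, locks, linking objects) take the REP-like path,
// while EC chunks go through the chunk-specific placement checks.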
func (p *Policer) processECContainerObject(ctx context.Context, objInfo objectcore.Info, cnr containerSDK.Container) error {
	if objInfo.ECInfo == nil {
		return p.processECContainerRepObject(ctx, objInfo, cnr.PlacementPolicy())
	}
	return p.processECContainerECObject(ctx, objInfo, cnr)
}

// processECContainerRepObject processes non-erasure-coded objects in an EC container: tombstones, locks and linking objects.
// All of them must be stored on all of the container nodes.
func (p *Policer) processECContainerRepObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error {
	objID := objInfo.Address.Object()
	nn, err := p.placementBuilder.BuildPlacement(objInfo.Address.Container(), &objID, policy)
	if err != nil {
		return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err)
	}
	if len(nn) != 1 || len(nn[0]) == 0 {
		return errInvalidECPlacement
	}

	c := &placementRequirements{}
	checkedNodes := newNodeCache()

	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}

	p.processRepNodes(ctx, c, objInfo, nn[0], uint32(len(nn[0])), checkedNodes)

	if !c.needLocalCopy && c.removeLocalCopy {
		p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected,
			zap.Stringer("object", objInfo.Address),
		)

		p.cbRedundantCopy(ctx, objInfo.Address)
	}
	return nil
}
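
// processECContainerECObject verifies the placement of a single EC chunk: the chunk
// is checked against its required node, missing locally required chunks are pulled
// in, and the chunk placement of the whole parent object is adjusted if needed.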
func (p *Policer) processECContainerECObject(ctx context.Context, objInfo objectcore.Info, cnr containerSDK.Container) error {
	nn, err := p.placementBuilder.BuildPlacement(objInfo.Address.Container(), &objInfo.ECInfo.ParentID, cnr.PlacementPolicy())
	if err != nil {
		return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err)
	}
	if len(nn) != 1 || len(nn[0]) == 0 {
		return errInvalidECPlacement
	}

	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}

	res := p.processECChunk(ctx, objInfo, nn[0])
	if !res.validPlacement {
		// drop the local chunk only if all required chunks are in place
		res.removeLocal = res.removeLocal && p.pullRequiredECChunks(ctx, objInfo, nn[0], cnr)
	}
	p.adjustECPlacement(ctx, objInfo, nn[0], cnr)

	if res.removeLocal {
		p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected, zap.Stringer("object", objInfo.Address))
		p.cbRedundantCopy(ctx, objInfo.Address)
	}
	return nil
}

// processECChunk replicates the EC chunk to its required node, nodes[Index%len(nodes)],
// if it is missing there.
func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo) ecChunkProcessResult {
	var removeLocalChunk bool
	requiredNode := nodes[int(objInfo.ECInfo.Index)%len(nodes)]
	if p.cfg.netmapKeys.IsLocalKey(requiredNode.PublicKey()) {
		// the current node is the required node, nothing else to check
		return ecChunkProcessResult{
			validPlacement: true,
		}
	}
	if requiredNode.Status().IsMaintenance() {
		// consider a node under maintenance to have the object, but do not drop the local copy
		p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
		return ecChunkProcessResult{}
	}

	callCtx, cancel := context.WithTimeout(ctx, p.headTimeout)
	_, err := p.remoteHeader(callCtx, requiredNode, objInfo.Address, false)
	cancel()

	if err == nil {
		removeLocalChunk = true
	} else if client.IsErrObjectNotFound(err) {
		p.log.Debug(ctx, logs.PolicerShortageOfObjectCopiesDetected, zap.Stringer("object", objInfo.Address), zap.Uint32("shortage", 1))
		task := replicator.Task{
			NumCopies: 1,
			Addr:      objInfo.Address,
			Nodes:     []netmap.NodeInfo{requiredNode},
		}
		p.replicator.HandleReplicationTask(ctx, task, newNodeCache())
	} else if client.IsErrNodeUnderMaintenance(err) {
		// consider a node under maintenance to have the object, but do not drop the local copy
		p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
	} else {
		p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, zap.Stringer("object", objInfo.Address), zap.String("error", err.Error()))
	}

	return ecChunkProcessResult{
		removeLocal: removeLocalChunk,
	}
}
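
// pullRequiredECChunks schedules pull tasks for chunks that must reside on the
// local node but are stored elsewhere. It returns true only when nothing had to be
// pulled: either the local node is not a placement node for this object, or all
// locally required chunks are already stored.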
func (p *Policer) pullRequiredECChunks(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo, cnr containerSDK.Container) bool {
	var parentAddress oid.Address
	parentAddress.SetContainer(objInfo.Address.Container())
	parentAddress.SetObject(objInfo.ECInfo.ParentID)

	requiredChunkIndexes := p.collectRequiredECChunks(nodes, objInfo)
	if len(requiredChunkIndexes) == 0 {
		p.log.Info(ctx, logs.PolicerNodeIsNotECObjectNode, zap.Stringer("object", objInfo.ECInfo.ParentID))
		return true
	}

	err := p.resolveLocalECChunks(ctx, parentAddress, requiredChunkIndexes)
	if err != nil {
		p.log.Error(ctx, logs.PolicerFailedToGetLocalECChunks, zap.Error(err), zap.Stringer("object", parentAddress))
		return false
	}
	if len(requiredChunkIndexes) == 0 {
		return true
	}

	indexToObjectID := make(map[uint32]oid.ID)
	success := p.resolveRemoteECChunks(ctx, parentAddress, nodes, requiredChunkIndexes, indexToObjectID)
	if !success {
		return false
	}

	for index, candidates := range requiredChunkIndexes {
		var addr oid.Address
		addr.SetContainer(objInfo.Address.Container())
		addr.SetObject(indexToObjectID[index])
		p.replicator.HandlePullTask(ctx, replicator.Task{
			Addr:      addr,
			Nodes:     candidates,
			Container: cnr,
		})
	}
	// some chunks were missing, so the placement is not valid yet
	return false
}
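
// collectRequiredECChunks returns the chunk indexes the local node is responsible
// for: chunk i is expected on nodes[i] for i < ECInfo.Total. Each required index
// maps to an initially empty list of candidate holders. As an illustrative sketch:
// with four container nodes, ECInfo.Total = 3 and the local node at position 1,
// the result is {1: []}.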
func (p *Policer) collectRequiredECChunks(nodes []netmap.NodeInfo, objInfo objectcore.Info) map[uint32][]netmap.NodeInfo {
	requiredChunkIndexes := make(map[uint32][]netmap.NodeInfo)
	for i, n := range nodes {
		if uint32(i) == objInfo.ECInfo.Total {
			break
		}
		if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) {
			requiredChunkIndexes[uint32(i)] = []netmap.NodeInfo{}
		}
	}
	return requiredChunkIndexes
}
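
// resolveLocalECChunks removes from required the indexes of chunks that are already
// stored locally, as reported by the ECInfoError of a local HEAD on the parent
// address.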
func (p *Policer) resolveLocalECChunks(ctx context.Context, parentAddress oid.Address, required map[uint32][]netmap.NodeInfo) error {
	_, err := p.localHeader(ctx, parentAddress)
	var eiErr *objectSDK.ECInfoError
	if err == nil { // should not happen
		return errNoECInfoReturned
	}
	if !errors.As(err, &eiErr) {
		return err
	}
	for _, ch := range eiErr.ECInfo().Chunks {
		delete(required, ch.Index)
	}
	return nil
}
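
// resolveRemoteECChunks queries the remote container nodes for EC info of the
// parent object, filling required with the candidate holders of each chunk and
// indexToObjectID with the chunk object IDs. It returns false if some required
// chunk has no holder at all or if nodes disagree on the ID of the same chunk.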
func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.Address, nodes []netmap.NodeInfo, required map[uint32][]netmap.NodeInfo, indexToObjectID map[uint32]oid.ID) bool {
	var eiErr *objectSDK.ECInfoError
	for _, n := range nodes {
		if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) {
			continue
		}
		_, err := p.remoteHeader(ctx, n, parentAddress, true)
		if !errors.As(err, &eiErr) {
			continue
		}
		for _, ch := range eiErr.ECInfo().Chunks {
			if candidates, ok := required[ch.Index]; ok {
				candidates = append(candidates, n)
				required[ch.Index] = candidates

				var chunkID oid.ID
				if err := chunkID.ReadFromV2(ch.ID); err != nil {
					p.log.Error(ctx, logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress))
					return false
				}
				if existed, ok := indexToObjectID[ch.Index]; ok && existed != chunkID {
					p.log.Error(ctx, logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", existed),
						zap.Stringer("second", chunkID), zap.Stringer("object", parentAddress), zap.Uint32("index", ch.Index))
					return false
				}
				indexToObjectID[ch.Index] = chunkID
			}
		}
	}

	for index, candidates := range required {
		if len(candidates) == 0 {
			p.log.Error(ctx, logs.PolicerMissingECChunk, zap.Stringer("object", parentAddress), zap.Uint32("index", index))
			return false
		}
	}

	return true
}
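
// adjustECPlacement walks the placement vector, pushes the local chunk to placement
// nodes that could not be queried and, when some chunks are lost but the losses do
// not exceed the parity count, triggers restoration of the missing chunks.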
func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo, cnr containerSDK.Container) {
	var parentAddress oid.Address
	parentAddress.SetContainer(objInfo.Address.Container())
	parentAddress.SetObject(objInfo.ECInfo.ParentID)
	var eiErr *objectSDK.ECInfoError
	resolved := make(map[uint32][]netmap.NodeInfo)
	chunkIDs := make(map[uint32]oid.ID)
	restore := true // do not restore EC chunks if some node returned an error
	for idx, n := range nodes {
		if uint32(idx) >= objInfo.ECInfo.Total && uint32(len(resolved)) == objInfo.ECInfo.Total {
			return
		}
		var err error
		if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) {
			_, err = p.localHeader(ctx, parentAddress)
		} else {
			_, err = p.remoteHeader(ctx, n, parentAddress, true)
		}

		if errors.As(err, &eiErr) {
			for _, ch := range eiErr.ECInfo().Chunks {
				resolved[ch.Index] = append(resolved[ch.Index], n)
				var ecInfoChunkID oid.ID
				if err := ecInfoChunkID.ReadFromV2(ch.ID); err != nil {
					p.log.Error(ctx, logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress))
					return
				}
				if chunkID, exist := chunkIDs[ch.Index]; exist && chunkID != ecInfoChunkID {
					p.log.Error(ctx, logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", chunkID),
						zap.Stringer("second", ecInfoChunkID), zap.Stringer("object", parentAddress), zap.Uint32("index", ch.Index))
					return
				}
				chunkIDs[ch.Index] = ecInfoChunkID
			}
		} else if client.IsErrObjectAlreadyRemoved(err) {
			restore = false
		} else if !p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < objInfo.ECInfo.Total {
			p.log.Warn(ctx, logs.PolicerCouldNotGetObjectFromNodeMoving, zap.String("node", hex.EncodeToString(n.PublicKey())), zap.Stringer("object", parentAddress), zap.Error(err))
			p.replicator.HandleReplicationTask(ctx, replicator.Task{
				NumCopies: 1,
				Addr:      objInfo.Address,
				Nodes:     []netmap.NodeInfo{n},
			}, newNodeCache())
			restore = false
		}
	}
	if !restore || uint32(len(resolved)) == objInfo.ECInfo.Total {
		return
	}
	if objInfo.ECInfo.Total-uint32(len(resolved)) > cnr.PlacementPolicy().ReplicaDescriptor(0).GetECParityCount() {
		var found []uint32
		for i := range resolved {
			found = append(found, i)
		}
		p.log.Error(ctx, logs.PolicerCouldNotRestoreObjectNotEnoughChunks, zap.Stringer("object", parentAddress), zap.Uint32s("found_chunks", found))
		return
	}
	p.restoreECObject(ctx, objInfo, parentAddress, nodes, resolved, chunkIDs, cnr)
}
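
// restoreECObject reconstructs the missing EC chunks from the surviving ones and
// hands each rebuilt chunk to the replicator, storing it locally or pushing it to
// its target node.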
func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info, parentAddress oid.Address, nodes []netmap.NodeInfo, existedChunks map[uint32][]netmap.NodeInfo, chunkIDs map[uint32]oid.ID,
	cnr containerSDK.Container,
) {
	c, err := erasurecode.NewConstructor(int(cnr.PlacementPolicy().ReplicaDescriptor(0).GetECDataCount()), int(cnr.PlacementPolicy().ReplicaDescriptor(0).GetECParityCount()))
	if err != nil {
		p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
		return
	}
	parts := p.collectExistedChunks(ctx, objInfo, existedChunks, parentAddress, chunkIDs)
	if parts == nil {
		return
	}
	key, err := p.keyStorage.GetKey(nil)
	if err != nil {
		p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
		return
	}
	required := make([]bool, len(parts))
	for i, p := range parts {
		if p == nil {
			required[i] = true
		}
	}
	if err := c.ReconstructParts(parts, required, key); err != nil {
		p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
		return
	}
	for idx, part := range parts {
		if _, exists := existedChunks[uint32(idx)]; exists {
			continue
		}
		var addr oid.Address
		addr.SetContainer(parentAddress.Container())
		pID, _ := part.ID()
		addr.SetObject(pID)
		targetNode := nodes[idx%len(nodes)]
		if p.cfg.netmapKeys.IsLocalKey(targetNode.PublicKey()) {
			p.replicator.HandleLocalPutTask(ctx, replicator.Task{
				Addr:      addr,
				Obj:       part,
				Container: cnr,
			})
		} else {
			p.replicator.HandleReplicationTask(ctx, replicator.Task{
				NumCopies: 1,
				Addr:      addr,
				Nodes:     []netmap.NodeInfo{targetNode},
				Obj:       part,
			}, newNodeCache())
		}
	}
}
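
// collectExistedChunks concurrently fetches the surviving chunks from their known
// holders. The returned slice is indexed by chunk index; entries of chunks that
// could not be fetched stay nil and are reconstructed later. It returns nil if the
// fetch group reports an error.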
func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.Info, existedChunks map[uint32][]netmap.NodeInfo, parentAddress oid.Address, chunkIDs map[uint32]oid.ID) []*objectSDK.Object {
	parts := make([]*objectSDK.Object, objInfo.ECInfo.Total)
	errGroup, egCtx := errgroup.WithContext(ctx)
	for idx, nodes := range existedChunks {
		errGroup.Go(func() error {
			var objID oid.Address
			objID.SetContainer(parentAddress.Container())
			objID.SetObject(chunkIDs[idx])
			var obj *objectSDK.Object
			var err error
			for _, node := range nodes {
				if p.cfg.netmapKeys.IsLocalKey(node.PublicKey()) {
					obj, err = p.localObject(egCtx, objID)
				} else {
					obj, err = p.remoteObject(egCtx, node, objID)
				}
				if err == nil {
					break
				}
				p.log.Warn(ctx, logs.PolicerCouldNotGetChunk, zap.Stringer("object", parentAddress), zap.Stringer("chunkID", objID), zap.Error(err), zap.String("node", hex.EncodeToString(node.PublicKey())))
			}
			if obj != nil {
				parts[idx] = obj
			}
			return nil
		})
	}
	if err := errGroup.Wait(); err != nil {
		p.log.Error(ctx, logs.PolicerCouldNotGetChunks, zap.Stringer("object", parentAddress), zap.Error(err))
		return nil
	}
	return parts
}