package control

import (
	"bytes"
	"context"
	"crypto/sha256"
	"encoding/hex"
	"errors"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

var errFailedToBuildListOfContainerNodes = errors.New("can't build a list of container nodes")

// EvacuateShard verifies the request, starts evacuation of objects from the
// requested shards and returns the number of evacuated objects in a signed response.
func (s *Server) EvacuateShard(ctx context.Context, req *control.EvacuateShardRequest) (*control.EvacuateShardResponse, error) {
	err := s.isValidRequest(req)
	if err != nil {
		return nil, status.Error(codes.PermissionDenied, err.Error())
	}

	prm := engine.EvacuateShardPrm{
		ShardID:        s.getShardIDList(req.GetBody().GetShard_ID()),
		IgnoreErrors:   req.GetBody().GetIgnoreErrors(),
		ObjectsHandler: s.replicateObject,
		Scope:          engine.EvacuateScopeObjects,
	}

	res, err := s.s.Evacuate(ctx, prm)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	resp := &control.EvacuateShardResponse{
		Body: &control.EvacuateShardResponse_Body{
			Count: uint32(res.ObjectsEvacuated()),
		},
	}

	err = SignMessage(s.key, resp)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	return resp, nil
}

// replicateObject replicates a single evacuated object to other nodes of its container.
func (s *Server) replicateObject(ctx context.Context, addr oid.Address, obj *objectSDK.Object) error {
	contID, ok := obj.ContainerID()
	if !ok {
		// Return nil to prevent situations where a shard can't be evacuated
		// because of a single bad/corrupted object.
		return nil
	}

	nodes, err := s.getContainerNodes(contID)
	if err != nil {
		return err
	}

	var res replicatorResult
	task := replicator.Task{
		NumCopies: 1,
		Addr:      addr,
		Obj:       obj,
		Nodes:     nodes,
	}
	s.replicator.HandleTask(ctx, task, &res)

	if res.count == 0 {
		return errors.New("object was not replicated")
	}
	return nil
}

// replicateTree replicates the operation log of the given tree to the first
// container node that accepts it and returns that node's public key in hex.
func (s *Server) replicateTree(ctx context.Context, contID cid.ID, treeID string, forest pilorama.Forest) (string, error) {
	nodes, err := s.getContainerNodes(contID)
	if err != nil {
		return "", err
	}
	if len(nodes) == 0 {
		return "", fmt.Errorf("no remote nodes available to replicate tree '%s' of container %s", treeID, contID)
	}

	for _, node := range nodes {
		err = s.replicateTreeToNode(ctx, forest, contID, treeID, node)
		if err == nil {
			return hex.EncodeToString(node.PublicKey()), nil
		}
	}
	return "", err
}

// replicateTreeToNode sends the tree's operation log to a single node,
// one signed Apply request per operation, in ascending time order.
func (s *Server) replicateTreeToNode(ctx context.Context, forest pilorama.Forest, contID cid.ID, treeID string, node netmap.NodeInfo) error {
	rawCID := make([]byte, sha256.Size)
	contID.Encode(rawCID)

	var height uint64
	for {
		op, err := forest.TreeGetOpLog(ctx, contID, treeID, height)
		if err != nil {
			return err
		}

		// Zero time means the log is exhausted.
		if op.Time == 0 {
			return nil
		}

		req := &tree.ApplyRequest{
			Body: &tree.ApplyRequest_Body{
				ContainerId: rawCID,
				TreeId:      treeID,
				Operation: &tree.LogMove{
					ParentId: op.Parent,
					Meta:     op.Meta.Bytes(),
					ChildId:  op.Child,
				},
			},
		}

		err = tree.SignMessage(req, s.key)
		if err != nil {
			return fmt.Errorf("can't sign apply request: %w", err)
		}

		err = s.treeService.ReplicateTreeOp(ctx, node, req)
		if err != nil {
			return err
		}

		height = op.Time + 1
	}
}

// getContainerNodes returns the placement nodes of the container, excluding the local node.
func (s *Server) getContainerNodes(contID cid.ID) ([]netmap.NodeInfo, error) {
	nm, err := s.netMapSrc.GetNetMap(0)
	if err != nil {
		return nil, err
	}

	c, err := s.cnrSrc.Get(contID)
	if err != nil {
		return nil, err
	}

	binCnr := make([]byte, sha256.Size)
	contID.Encode(binCnr)

	ns, err := nm.ContainerNodes(c.Value.PlacementPolicy(), binCnr)
	if err != nil {
		return nil, errFailedToBuildListOfContainerNodes
	}

	nodes := placement.FlattenNodes(ns)
	bs := (*keys.PublicKey)(&s.key.PublicKey).Bytes()
	for i := 0; i < len(nodes); i++ {
		if bytes.Equal(nodes[i].PublicKey(), bs) {
			copy(nodes[i:], nodes[i+1:])
			nodes = nodes[:len(nodes)-1]
			i-- // re-check the element shifted into position i
		}
	}
	return nodes, nil
}

// replicatorResult counts successful replications reported by the replicator.
type replicatorResult struct {
	count int
}

// SubmitSuccessfulReplication implements the replicator.TaskResult interface.
func (r *replicatorResult) SubmitSuccessfulReplication(_ netmap.NodeInfo) {
	r.count++
}