package tree

import (
	"context"
	"crypto/sha256"
	"errors"
	"fmt"
	"io"
	"math"
	"math/rand"
	"sync"
	"sync/atomic"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
	containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
	metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
	tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
	tracing_grpc "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
	"github.com/panjf2000/ants/v2"
	"go.uber.org/zap"
	"golang.org/x/sync/errgroup"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// ErrNotInContainer is returned when an operation cannot be performed
// because the node is not included in the container.
var ErrNotInContainer = errors.New("node is not in container")

const defaultSyncWorkerCount = 20

// synchronizeAllTrees synchronizes all the trees of the container. It fetches
// tree IDs from the other container nodes. Returns ErrNotInContainer if the node
// is not included in the container.
func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
	nodes, pos, err := s.getContainerNodes(cid)
	if err != nil {
		return fmt.Errorf("can't get container nodes: %w", err)
	}

	if pos < 0 {
		return ErrNotInContainer
	}

	nodes = randomizeNodeOrder(nodes, pos)
	if len(nodes) == 0 {
		return nil
	}

	rawCID := make([]byte, sha256.Size)
	cid.Encode(rawCID)

	req := &TreeListRequest{
		Body: &TreeListRequest_Body{
			ContainerId: rawCID,
		},
	}

	err = SignMessage(req, s.key)
	if err != nil {
		return fmt.Errorf("could not sign request: %w", err)
	}

	var resp *TreeListResponse
	var treesToSync []string
	var outErr error

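	// Ask the container nodes one by one; the callback returns true on the
	// first successful TreeList response, which stops the iteration.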
	err = s.forEachNode(ctx, nodes, func(c TreeServiceClient) bool {
		resp, outErr = c.TreeList(ctx, req)
		if outErr != nil {
			return false
		}

		treesToSync = resp.GetBody().GetIds()

		return true
	})
	if err != nil {
		outErr = err
	}

	if outErr != nil {
		return fmt.Errorf("could not fetch tree ID list: %w", outErr)
	}

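	// Resume every tree from its last synchronized height; a missing record
	// (pilorama.ErrTreeNotFound) simply means the tree is synced from zero.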
	for _, tid := range treesToSync {
		h, err := s.forest.TreeLastSyncHeight(ctx, cid, tid)
		if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
			s.log.Warn(ctx, logs.TreeCouldNotGetLastSynchronizedHeightForATree,
				zap.Stringer("cid", cid),
				zap.String("tree", tid))
			continue
		}
		newHeight := s.synchronizeTree(ctx, cid, h, tid, nodes)
		if h < newHeight {
			if err := s.forest.TreeUpdateLastSyncHeight(ctx, cid, tid, newHeight); err != nil {
				s.log.Warn(ctx, logs.TreeCouldNotUpdateLastSynchronizedHeightForATree,
					zap.Stringer("cid", cid),
					zap.String("tree", tid))
			}
		}
	}

	return nil
}

// SynchronizeTree synchronizes the log of a single tree from scratch
// (starting from height 0) with the other container nodes.
func (s *Service) SynchronizeTree(ctx context.Context, cid cid.ID, treeID string) error {
	nodes, pos, err := s.getContainerNodes(cid)
	if err != nil {
		return fmt.Errorf("can't get container nodes: %w", err)
	}

	if pos < 0 {
		return ErrNotInContainer
	}

	nodes = randomizeNodeOrder(nodes, pos)
	if len(nodes) == 0 {
		return nil
	}

	s.synchronizeTree(ctx, cid, 0, treeID, nodes)
	return nil
}

// mergeOperationStreams merges per-node operation streams into a single
// stream ordered by operation time (a k-way merge). It returns the height
// from which it is safe to resume synchronization next time (see the
// comment inside).
func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *pilorama.Move) uint64 {
	defer close(merged)
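
	// ms holds the current head of every stream; a nil entry means
	// that the corresponding stream has been fully drained.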
	ms := make([]*pilorama.Move, len(streams))
	for i := range streams {
		ms[i] = <-streams[i]
	}

	// Merging different node streams shuffles incoming operations like this:
	//
	// x - operation from the stream A
	// o - operation from the stream B
	//
	// --o---o--x--x--x--o---x--x------> t
	//                   ^
	// If all ops have been successfully applied, we must start from the last
	// operation height from the stream B. This height is stored in minStreamedLastHeight.
	var minStreamedLastHeight uint64 = math.MaxUint64

	for {
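		// Pick the stream whose head has the smallest logical time;
		// this is the classic selection step of a k-way merge.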
		var minTimeMoveTime uint64 = math.MaxUint64
		minTimeMoveIndex := -1
		for i, m := range ms {
			if m != nil && minTimeMoveTime > m.Time {
				minTimeMoveTime = m.Time
				minTimeMoveIndex = i
			}
		}

		if minTimeMoveIndex == -1 {
			break
		}

		merged <- ms[minTimeMoveIndex]
		height := ms[minTimeMoveIndex].Time
		if ms[minTimeMoveIndex] = <-streams[minTimeMoveIndex]; ms[minTimeMoveIndex] == nil {
			minStreamedLastHeight = min(minStreamedLastHeight, height)
		}
	}

	return minStreamedLastHeight
}
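
// applyOperationStream consumes the merged operation stream, skips ops that
// repeat the previous logical time, and applies the rest in batches of
// s.syncBatchSize. It returns the height of the first operation that failed
// to apply, or math.MaxUint64 if every operation was applied successfully.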
func (s *Service) applyOperationStream(ctx context.Context, cid cid.ID, treeID string,
	operationStream <-chan *pilorama.Move,
) uint64 {
	var prev *pilorama.Move
	var batch []*pilorama.Move
	for m := range operationStream {
		// skip already applied op
		if prev != nil && prev.Time == m.Time {
			continue
		}
		prev = m
		batch = append(batch, m)

		if len(batch) == s.syncBatchSize {
			if err := s.forest.TreeApplyBatch(ctx, cid, treeID, batch); err != nil {
				return batch[0].Time
			}
			batch = batch[:0]
		}
	}
	if len(batch) > 0 {
		if err := s.forest.TreeApplyBatch(ctx, cid, treeID, batch); err != nil {
			return batch[0].Time
		}
	}
	return math.MaxUint64
}
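
// startStream requests the tree operation log from the given connection,
// starting at the provided height, and forwards every received operation
// to opsCh. It returns nil once the server closes the stream with io.EOF.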
func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string,
	height uint64, cc *grpc.ClientConn, opsCh chan<- *pilorama.Move,
) error {
	treeClient := NewTreeServiceClient(cc)

	rawCID := make([]byte, sha256.Size)
	cid.Encode(rawCID)

	req := &GetOpLogRequest{
		Body: &GetOpLogRequest_Body{
			ContainerId: rawCID,
			TreeId:      treeID,
			Height:      height,
		},
	}
	if err := SignMessage(req, s.key); err != nil {
		return err
	}

	c, err := treeClient.GetOpLog(ctx, req)
	if err != nil {
		return fmt.Errorf("can't initialize client: %w", err)
	}
	res, err := c.Recv()
	for ; err == nil; res, err = c.Recv() {
		lm := res.GetBody().GetOperation()
		m := &pilorama.Move{
			Parent: lm.GetParentId(),
			Child:  lm.GetChildId(),
		}
		if err := m.Meta.FromBytes(lm.GetMeta()); err != nil {
			return err
		}
		opsCh <- m
	}
	if !errors.Is(err, io.EOF) {
		return err
	}
	return nil
}

// synchronizeTree synchronizes operations for a single tree, fetching them from
// the given nodes. Every available node streams its operations into a separate
// channel; these streams are merged into one stream ordered by operation time,
// which makes it possible to skip already applied operations and to keep
// batches dense.
// The method returns the height from which the service should start the next
// synchronization.
func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
	treeID string, nodes []netmapSDK.NodeInfo,
) uint64 {
	s.log.Debug(ctx, logs.TreeSynchronizeTree, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.Uint64("from", from))

	errGroup, egCtx := errgroup.WithContext(ctx)
	const workersCount = 1024
	errGroup.SetLimit(workersCount)

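	// Pipeline: every node goroutine streams operations into its own channel,
	// mergeOperationStreams merges those channels into `merged` ordered by
	// operation time, and applyOperationStream consumes `merged`.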
	nodeOperationStreams := make([]chan *pilorama.Move, len(nodes))
	for i := range nodeOperationStreams {
		nodeOperationStreams[i] = make(chan *pilorama.Move)
	}
	merged := make(chan *pilorama.Move)
	var minStreamedLastHeight uint64
	errGroup.Go(func() error {
		minStreamedLastHeight = mergeOperationStreams(nodeOperationStreams, merged)
		return nil
	})
	var minUnappliedHeight uint64
	errGroup.Go(func() error {
		minUnappliedHeight = s.applyOperationStream(ctx, cid, treeID, merged)
		return nil
	})

	var allNodesSynced atomic.Bool
	allNodesSynced.Store(true)

	for i, n := range nodes {
		errGroup.Go(func() error {
			var nodeSynced bool
			n.IterateNetworkEndpoints(func(addr string) bool {
				var a network.Address
				if err := a.FromString(addr); err != nil {
					s.log.Warn(ctx, logs.TreeFailedToParseAddressForTreeSynchronization, zap.Error(err), zap.String("address", addr))
					return false
				}

				cc, err := s.createConnection(a)
				if err != nil {
					s.log.Warn(ctx, logs.TreeFailedToConnectForTreeSynchronization, zap.Error(err), zap.String("address", addr))
					return false
				}
				defer cc.Close()

				err = s.startStream(egCtx, cid, treeID, from, cc, nodeOperationStreams[i])
				if err != nil {
					s.log.Warn(ctx, logs.TreeFailedToRunTreeSynchronizationForSpecificNode, zap.Error(err), zap.String("address", addr))
				}
				nodeSynced = err == nil
				return true
			})
			close(nodeOperationStreams[i])
			if !nodeSynced {
				allNodesSynced.Store(false)
			}
			return nil
		})
	}

	if err := errGroup.Wait(); err != nil {
		allNodesSynced.Store(false)
		s.log.Warn(ctx, logs.TreeFailedToRunTreeSynchronizationOverAllNodes, zap.Error(err))
	}

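	// Pick the height to resume from: if some operation failed to apply,
	// resume from that operation; otherwise resume right after the lowest
	// height that every stream has fully delivered.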
	newHeight := minStreamedLastHeight
	if newHeight > minUnappliedHeight {
		newHeight = minUnappliedHeight
	} else {
		newHeight++
	}
	if allNodesSynced.Load() {
		return newHeight
	}
	return from
}
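
// createConnection opens a gRPC client connection to the given address with
// metrics and tracing interceptors attached. The transport is plaintext, and
// WaitForReady makes calls block until the connection is ready instead of
// failing fast.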
func (*Service) createConnection(a network.Address) (*grpc.ClientConn, error) {
	return grpc.NewClient(a.URIAddr(),
		grpc.WithChainUnaryInterceptor(
			metrics.NewUnaryClientInterceptor(),
			tracing_grpc.NewUnaryClientInteceptor(),
		),
		grpc.WithChainStreamInterceptor(
			metrics.NewStreamClientInterceptor(),
			tracing_grpc.NewStreamClientInterceptor(),
		),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
	)
}

// ErrAlreadySyncing is returned when a service synchronization has already
// been started.
var ErrAlreadySyncing = errors.New("service is being synchronized")

// ErrShuttingDown is returned when the service is shutting down and cannot
// accept any calls.
var ErrShuttingDown = errors.New("service is shutting down")

// SynchronizeAll forces the tree service to synchronize all the trees according to
// netmap information. Must not be called before Service.Start.
// Returns ErrAlreadySyncing if a synchronization has already been started by
// another routine.
// Note: non-blocking operation.
func (s *Service) SynchronizeAll() error {
	select {
	case <-s.closeCh:
		return ErrShuttingDown
	default:
	}

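	// Non-blocking send: if the sync loop is not ready to accept a new
	// request, report that a synchronization is already in progress.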
	select {
	case s.syncChan <- struct{}{}:
		return nil
	default:
		return ErrAlreadySyncing
	}
}
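
// syncLoop runs until the service is closed or the context is canceled. Each
// signal on syncChan triggers a full synchronization pass traced as
// "TreeService.sync"; initialSyncDone is set after the first pass completes,
// whether it succeeded or not.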
func (s *Service) syncLoop(ctx context.Context) {
	for {
		select {
		case <-s.closeCh:
			return
		case <-ctx.Done():
			return
		case <-s.syncChan:
			ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.sync")
			s.log.Info(ctx, logs.TreeSyncingTrees)

			start := time.Now()

			cnrs, err := s.cfg.cnrSource.List()
			if err != nil {
				s.log.Error(ctx, logs.TreeCouldNotFetchContainers, zap.Error(err))
				s.metrics.AddSyncDuration(time.Since(start), false)
				span.End()
				break
			}

			newMap, cnrsToSync := s.containersToSync(cnrs)

			s.syncContainers(ctx, cnrsToSync)

			s.removeContainers(ctx, newMap)

			s.log.Info(ctx, logs.TreeTreesHaveBeenSynchronized)

			s.metrics.AddSyncDuration(time.Since(start), true)
			span.End()
		}
		s.initialSyncDone.Store(true)
	}
}
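
// syncContainers synchronizes the trees of the given containers concurrently
// through the sync worker pool and waits for all submitted tasks to finish.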
func (s *Service) syncContainers(ctx context.Context, cnrs []cid.ID) {
	ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.syncContainers")
	defer span.End()

	// sync new containers
	var wg sync.WaitGroup
	for _, cnr := range cnrs {
		wg.Add(1)

		err := s.syncPool.Submit(func() {
			defer wg.Done()
			s.log.Debug(ctx, logs.TreeSyncingContainerTrees, zap.Stringer("cid", cnr))

			err := s.synchronizeAllTrees(ctx, cnr)
			if err != nil {
				s.log.Error(ctx, logs.TreeCouldNotSyncTrees, zap.Stringer("cid", cnr), zap.Error(err))
				return
			}

			s.log.Debug(ctx, logs.TreeContainerTreesHaveBeenSynced, zap.Stringer("cid", cnr))
		})
		if err != nil {
			wg.Done()
			s.log.Error(ctx, logs.TreeCouldNotQueryTreesForSynchronization,
				zap.Stringer("cid", cnr),
				zap.Error(err))
			if errors.Is(err, ants.ErrPoolClosed) {
				return
			}
		}
	}
	wg.Wait()
}
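
// removeContainers drops the trees of containers that this node no longer
// stores and that have been confirmed as removed from the network.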
func (s *Service) removeContainers(ctx context.Context, newContainers map[cid.ID]struct{}) {
	ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.removeContainers")
	defer span.End()

	s.cnrMapMtx.Lock()
	defer s.cnrMapMtx.Unlock()

	var removed []cid.ID
	for cnr := range s.cnrMap {
		if _, ok := newContainers[cnr]; ok {
			continue
		}

		wasRemoved, err := containerCore.WasRemoved(s.cnrSource, cnr)
		if err != nil {
			s.log.Error(ctx, logs.TreeCouldNotCheckIfContainerExisted,
				zap.Stringer("cid", cnr),
				zap.Error(err))
		} else if wasRemoved {
			removed = append(removed, cnr)
		}
	}
	for i := range removed {
		delete(s.cnrMap, removed[i])
	}

	for _, cnr := range removed {
		s.log.Debug(ctx, logs.TreeRemovingRedundantTrees, zap.Stringer("cid", cnr))

		err := s.DropTree(ctx, cnr, "")
		if err != nil {
			s.log.Error(ctx, logs.TreeCouldNotRemoveRedundantTree,
				zap.Stringer("cid", cnr),
				zap.Error(err))
		}
	}
}
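
// containersToSync filters the given containers down to those this node
// belongs to. It returns them both as a set (for fast membership checks) and
// as a slice of containers to synchronize.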
func (s *Service) containersToSync(cnrs []cid.ID) (map[cid.ID]struct{}, []cid.ID) {
	newMap := make(map[cid.ID]struct{}, len(s.cnrMap))
	cnrsToSync := make([]cid.ID, 0, len(cnrs))

	for _, cnr := range cnrs {
		_, pos, err := s.getContainerNodes(cnr)
		if err != nil {
			s.log.Error(context.Background(), logs.TreeCouldNotCalculateContainerNodes,
				zap.Stringer("cid", cnr),
				zap.Error(err))
			continue
		}

		if pos < 0 {
			// node is not included in the container.
			continue
		}

		newMap[cnr] = struct{}{}
		cnrsToSync = append(cnrsToSync, cnr)
	}
	return newMap, cnrsToSync
}

// randomizeNodeOrder shuffles the container nodes and removes the node at
// index `pos` (this node itself), so the node never synchronizes from itself.
// It is assumed that 0 <= pos < len(nodes).
func randomizeNodeOrder(cnrNodes []netmap.NodeInfo, pos int) []netmap.NodeInfo {
	if len(cnrNodes) == 1 {
		return nil
	}

	nodes := make([]netmap.NodeInfo, len(cnrNodes)-1)
	n := copy(nodes, cnrNodes[:pos])
	copy(nodes[n:], cnrNodes[pos+1:])

	rand.Shuffle(len(nodes), func(i, j int) {
		nodes[i], nodes[j] = nodes[j], nodes[i]
	})
	return nodes
}