Store tree service sync height in the metabase #82

Merged
fyrchik merged 2 commits from fyrchik/tree-service-store-sync-height into master 2023-03-13 11:25:45 +00:00
3 changed files with 18 additions and 26 deletions
Showing only changes of commit f6f5611375


@@ -13,6 +13,7 @@ Changelog for FrostFS Node
 - Reload config for pprof and metrics on SIGHUP in `neofs-node` (#1868)
 - Multiple configs support (#44)
 - Parameters `nns-name` and `nns-zone` for command `frostfs-cli container create` (#37)
+- Tree service now saves the last synchronization height which persists across restarts (#82)
Review

"82"? we will have conflicts some day then
Review

What do you mean? This PR is #82.
Review

what are we gonna do when PR `#1868` is opened in that repo?
Review

We will discuss, but it is unrelated to this PR.
Review

> it is unrelated to this PR

Sure, but I would discuss it as early as possible, since we are already filling the CHANGELOG with conflicting lines (adjacent rows belong to different lists; what is that file for, then?)
 ### Changed
 - Change `frostfs_node_engine_container_size` to counting sizes of logical objects


@@ -31,10 +31,8 @@ type Service struct {
 	syncChan chan struct{}
 	syncPool *ants.Pool
-	// cnrMap maps contrainer and tree ID to the minimum height which was fetched from _each_ client.
-	// This allows us to better handle split-brain scenario, because we always synchronize
-	// from the last seen height. The inner map is read-only and should not be modified in-place.
-	cnrMap map[cidSDK.ID]map[string]uint64
+	// cnrMap contains existing (used) container IDs.
+	cnrMap map[cidSDK.ID]struct{}
 	// cnrMapMtx protects cnrMap
 	cnrMapMtx sync.Mutex
 }
@@ -63,7 +61,7 @@ func New(opts ...Option) *Service {
 	s.replicateLocalCh = make(chan applyOp)
 	s.replicationTasks = make(chan replicationTask, s.replicatorWorkerCount)
 	s.containerCache.init(s.containerCacheSize)
-	s.cnrMap = make(map[cidSDK.ID]map[string]uint64)
+	s.cnrMap = make(map[cidSDK.ID]struct{})
 	s.syncChan = make(chan struct{})
 	s.syncPool, _ = ants.NewPool(defaultSyncWorkerCount)
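
With the per-tree heights gone from memory, `cnrMap` shrinks to a plain set of container IDs, and `map[cidSDK.ID]struct{}` is the usual zero-size-value way to spell a set in Go. The sketch below only illustrates that idiom; `markContainerUsed` is a hypothetical helper, not a method from this diff.

// Illustrative only: the simplified cnrMap acts as a set of container IDs,
// with every access guarded by cnrMapMtx, as the struct comment requires.
func (s *Service) markContainerUsed(cid cidSDK.ID) {
	s.cnrMapMtx.Lock()
	defer s.cnrMapMtx.Unlock()
	s.cnrMap[cid] = struct{}{} // struct{} carries no data; only the key matters
}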


@@ -86,31 +86,24 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
 		return fmt.Errorf("could not fetch tree ID list: %w", outErr)
 	}
-	s.cnrMapMtx.Lock()
-	oldStatus := s.cnrMap[cid]
-	s.cnrMapMtx.Unlock()
-	syncStatus := map[string]uint64{}
-	for i := range treesToSync {
-		syncStatus[treesToSync[i]] = 0
-	}
-	for tid := range oldStatus {
-		if _, ok := syncStatus[tid]; ok {
-			syncStatus[tid] = oldStatus[tid]
-		}
-	}
 	for _, tid := range treesToSync {
-		h := s.synchronizeTree(ctx, d, syncStatus[tid], tid, nodes)
-		if syncStatus[tid] < h {
-			syncStatus[tid] = h
+		h, err := s.forest.TreeLastSyncHeight(d.CID, tid)
+		if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
+			s.log.Warn("could not get last synchronized height for a tree",
+				zap.Stringer("cid", d.CID),
+				zap.String("tree", tid))
+			continue
+		}
+		newHeight := s.synchronizeTree(ctx, d, h, tid, nodes)
+		if h < newHeight {
+			if err := s.forest.TreeUpdateLastSyncHeight(d.CID, tid, newHeight); err != nil {
+				s.log.Warn("could not update last synchronized height for a tree",
+					zap.Stringer("cid", d.CID),
+					zap.String("tree", tid))
+			}
 		}
 	}
-	s.cnrMapMtx.Lock()
-	s.cnrMap[cid] = syncStatus
-	s.cnrMapMtx.Unlock()
 	return nil
 }
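
For context on the new flow: before synchronizing a tree, the service now reads the last synchronized height from its persistent forest storage and, if synchronization advanced past it, writes the new height back, so a restarted node resumes from the stored height instead of replaying every tree from 0. A missing record, signalled by `pilorama.ErrTreeNotFound`, simply leaves `h` at its zero value. The sketch below spells out the storage contract these calls imply; the method names match the calls above, but the signatures and comments are inferred from this hunk rather than quoted from the pilorama package, and `cidSDK` is the container-ID alias already used in this file.

// Assumed shape of the forest methods used above (a sketch, not the real API).
type syncHeightStorage interface {
	// TreeLastSyncHeight returns the height up to which (cid, treeID) has
	// already been synchronized; when nothing was stored yet it fails with an
	// ErrTreeNotFound-style error, which the caller above treats as height 0.
	TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64, error)

	// TreeUpdateLastSyncHeight persists a higher synchronization height for
	// (cid, treeID) so that it survives a node restart.
	TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, height uint64) error
}

Because the update is guarded by `h < newHeight`, this code path only ever advances the stored height and never overwrites it with a lower value.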