package pilorama

import (
	"encoding/binary"
	"sort"
	"sync"
	"time"

	cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"go.etcd.io/bbolt"
)

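// batch accumulates operations for a single (container, tree) pair
// so that they can be applied together in one bbolt transaction.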
type batch struct {
	forest *boltForest
	timer  *time.Timer
	// mtx protects timer and operations fields.
	// Because mtx can be taken inside a transaction,
	// transactions MUST NOT be executed with the mutex taken to avoid a deadlock.
	mtx        sync.Mutex
	start      sync.Once
	cid        cidSDK.ID
	treeID     string
	results    []chan<- error
	operations []*Move
}

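// trigger stops the pending timer, if any, and starts the batch exactly once.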
func (b *batch) trigger() {
	b.mtx.Lock()
	if b.timer != nil {
		b.timer.Stop()
	}
	b.mtx.Unlock()
	b.start.Do(b.run)
}

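// run applies all accumulated operations in a single bbolt transaction
// and reports the result to every waiting caller.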
func (b *batch) run() {
	fullID := bucketName(b.cid, b.treeID)
	err := b.forest.db.Update(func(tx *bbolt.Tx) error {
		bLog, bTree, err := b.forest.getTreeBuckets(tx, fullID)
		if err != nil {
			return err
		}

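		// Setting timer to nil marks the batch as started:
		// (*boltForest).addBatch appends new operations only while timer is non-nil.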
		b.mtx.Lock()
		b.timer = nil
		b.mtx.Unlock()

		// Sorting without a mutex is ok, because we append to this slice only if the timer is non-nil.
		// See (*boltForest).addBatch for details.
		sort.Slice(b.operations, func(i, j int) bool {
			return b.operations[i].Time < b.operations[j].Time
		})
		b.operations = removeDuplicatesInPlace(b.operations)

		// Our main use-case is the addition of new items. In this case,
		// we do not need to perform undo()/redo(), just do().
		// https://github.com/trvedata/move-op/blob/6c23447c12a7862ff31b7fc2205f6c90fbdb9dc0/proof/Move_Create.thy#L259
		//
		// For this optimization to work we need to ensure three things:
		// 1. The node itself is not yet in the tree.
		// 2. The node is not a parent. This case is not mentioned in the article, because
		//    they consider a "static order" (perform all CREATE operations before MOVE).
		//    We need this because if the node _is_ a parent, we could violate (3) for some later operation.
		//    See TestForest_ApplySameOperation for details.
		// 3. The parent of each operation is already in the tree.
		var parents map[uint64]struct{}
		var cKey [maxKeySize]byte
		var slow bool
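		// Check the preconditions above for every operation;
		// fall back to the slow path on the first violation.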
		for i := range b.operations {
			_, _, _, inTree := b.forest.getState(bTree, stateKey(cKey[:], b.operations[i].Child))
			if inTree {
				slow = true
				break
			}

			key := childrenKey(cKey[:], b.operations[i].Child, 0)
			k, _ := bTree.Cursor().Seek(key)
			if len(k) == childrenKeySize && binary.LittleEndian.Uint64(k[1:]) == b.operations[i].Child {
				slow = true
				break
			}

			if b.operations[i].Parent == RootID {
				continue
			} else if parents == nil {
				// Attaching a node only to the root is the frequent case,
				// so the parents map is allocated lazily.
				parents = make(map[uint64]struct{})
			} else if _, ok := parents[b.operations[i].Parent]; ok {
				continue
			}

			p := b.operations[i].Parent
			_, ts, _, inTree := b.forest.getState(bTree, stateKey(cKey[:], p))
			if !inTree || b.operations[0].Time < ts {
				slow = true
				break
			}
			parents[b.operations[i].Parent] = struct{}{}
		}

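		// Slow path: at least one precondition does not hold,
		// so apply the batch via the generic algorithm with undo()/redo().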
		if slow {
			var lm Move
			return b.forest.applyOperation(bLog, bTree, b.operations, &lm)
		}

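		// Fast path: every operation creates a new node, so do() alone is sufficient.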
		for i := range b.operations {
			if err := b.forest.do(bLog, bTree, cKey[:], b.operations[i]); err != nil {
				return err
			}
		}
		return nil
	})
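	// Deliver the transaction result to every waiter.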
	for i := range b.results {
		b.results[i] <- err
	}
}

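// removeDuplicatesInPlace compacts a slice of operations sorted by Time,
// keeping only the first operation of each run with equal timestamps.
// For example, timestamps [1, 1, 2, 3, 3] become [1, 2, 3].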
func removeDuplicatesInPlace(a []*Move) []*Move {
	equalCount := 0
	for i := 1; i < len(a); i++ {
		if a[i].Time == a[i-1].Time {
			equalCount++
		} else {
			a[i-equalCount] = a[i]
		}
	}
	return a[:len(a)-equalCount]
}