[#232] netmap: Allow to configure snapshot history size

Signed-off-by: Evgenii Stratonikov <evgeniy@nspcc.ru>

parent 5fc7474447
commit 4a0f0d7408

2 changed files with 220 additions and 15 deletions
@@ -35,9 +35,10 @@ const (
 	notaryDisabledKey = "notary"
 	innerRingKey = "innerring"

-	// SnapshotCount contains the number of previous snapshots stored by this contract.
+	// DefaultSnapshotCount contains the number of previous snapshots stored by this contract.
 	// Must be less than 255.
-	SnapshotCount = 10
+	DefaultSnapshotCount = 10
+	snapshotCountKey     = "snapshotCount"
 	snapshotKeyPrefix    = "snapshot_"
 	snapshotCurrentIDKey = "snapshotCurrent"
 	snapshotEpoch        = "snapshotEpoch"
@@ -88,6 +89,7 @@ func _deploy(data interface{}, isUpdate bool) {

 	if isUpdate {
 		common.CheckVersion(args.version)
+		storage.Put(ctx, snapshotCountKey, DefaultSnapshotCount)
 		return
 	}

@@ -96,14 +98,15 @@ func _deploy(data interface{}, isUpdate bool) {
 	}

 	// epoch number is a little endian int, it doesn't need to be serialized
+	storage.Put(ctx, snapshotCountKey, DefaultSnapshotCount)
 	storage.Put(ctx, snapshotEpoch, 0)
 	storage.Put(ctx, snapshotBlockKey, 0)

 	prefix := []byte(snapshotKeyPrefix)
-	for i := 0; i < SnapshotCount; i++ {
+	for i := 0; i < DefaultSnapshotCount; i++ {
 		common.SetSerialized(ctx, append(prefix, byte(i)), []storageNode{})
 	}
-	common.SetSerialized(ctx, snapshotCurrentIDKey, 0)
+	storage.Put(ctx, snapshotCurrentIDKey, 0)

 	storage.Put(ctx, balanceContractKey, args.addrBalance)
 	storage.Put(ctx, containerContractKey, args.addrContainer)
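Both the update path and the fresh-deploy path now seed snapshotCountKey with DefaultSnapshotCount, so the getSnapshotCount helper added further down can simply type-assert the stored value. For a contract migrated some other way, a defensive read with a fallback might look like the sketch below. getSnapshotCountSafe is a hypothetical name and is not part of this commit; it relies on the contract's own constants and the neo-go interop storage package:

import "github.com/nspcc-dev/neo-go/pkg/interop/storage"

// getSnapshotCountSafe is a hypothetical variant of getSnapshotCount (not in
// this commit): it falls back to DefaultSnapshotCount when snapshotCountKey
// has never been written, e.g. on a contract updated without this migration.
// snapshotCountKey and DefaultSnapshotCount come from the contract's const block.
func getSnapshotCountSafe(ctx storage.Context) int {
	v := storage.Get(ctx, snapshotCountKey)
	if v == nil {
		return DefaultSnapshotCount
	}
	return v.(int)
}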
@@ -389,7 +392,7 @@ func NewEpoch(epochNum int) {
 	storage.Put(ctx, snapshotBlockKey, ledger.CurrentIndex())

 	id := storage.Get(ctx, snapshotCurrentIDKey).(int)
-	id = (id + 1) % SnapshotCount
+	id = (id + 1) % getSnapshotCount(ctx)
 	storage.Put(ctx, snapshotCurrentIDKey, id)

 	// put netmap into actual snapshot
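The snapshot history is a ring of storage keys snapshot_0 .. snapshot_(count-1): NewEpoch now advances the current index modulo the configured count rather than the compile-time constant, and Snapshot(diff) steps backwards from that index. A minimal stand-alone sketch of this index arithmetic, with illustrative helper names (advance, lookup) that are not contract code:

package main

import "fmt"

// advance mirrors the index update in NewEpoch; lookup mirrors the key
// selection in Snapshot. Both take the configured snapshot count.
func advance(id, count int) int { return (id + 1) % count }

func lookup(id, diff, count int) int { return (id - diff + count) % count }

func main() {
	const count = 10 // DefaultSnapshotCount
	id := 0
	for epoch := 1; epoch <= 12; epoch++ {
		id = advance(id, count)
	}
	fmt.Println("current epoch stored under index", id)                  // 2
	fmt.Println("snapshot from 3 epochs ago under index", lookup(id, 3, count)) // 9
}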
@@ -437,17 +440,104 @@ func NetmapCandidates() []netmapNode {
 // Netmap contract contains only two recent network map snapshot: current and
 // previous epoch. For diff bigger than 1 or less than 0 method throws panic.
 func Snapshot(diff int) []storageNode {
-	if diff < 0 || SnapshotCount <= diff {
+	ctx := storage.GetReadOnlyContext()
+	count := getSnapshotCount(ctx)
+	if diff < 0 || count <= diff {
 		panic("incorrect diff")
 	}

-	ctx := storage.GetReadOnlyContext()
 	id := storage.Get(ctx, snapshotCurrentIDKey).(int)
-	needID := (id - diff + SnapshotCount) % SnapshotCount
+	needID := (id - diff + count) % count
 	key := snapshotKeyPrefix + string([]byte{byte(needID)})
 	return getSnapshot(ctx, key)
 }

+func getSnapshotCount(ctx storage.Context) int {
+	return storage.Get(ctx, snapshotCountKey).(int)
+}
+
+// UpdateSnapshotCount updates the number of stored snapshots.
+// If the new number is less than the old one, old snapshots are removed.
+// Otherwise, the history is extended with empty snapshots, so the
+// `Snapshot` method can return invalid results for `diff = new-old` epochs
+// until `diff` epochs have passed.
+func UpdateSnapshotCount(count int) {
+	common.CheckAlphabetWitness(common.AlphabetAddress())
+	if count < 0 {
+		panic("count must be positive")
+	}
+	ctx := storage.GetContext()
+	curr := getSnapshotCount(ctx)
+	if curr == count {
+		panic("count has not changed")
+	}
+	storage.Put(ctx, snapshotCountKey, count)
+
+	id := storage.Get(ctx, snapshotCurrentIDKey).(int)
+	var delStart, delFinish int
+	if curr < count {
+		// Increase history size.
+		//
+		// Old state (N = count, K = curr, E = current index, C = current epoch):
+		// KEY INDEX: 0   | 1     | ... | E | E+1   | ... | K-1   | ... | N-1
+		// EPOCH    : C-E | C-E+1 | ... | C | C-K+1 | ... | C-E-1 |
+		//
+		// New state:
+		// KEY INDEX: 0   | 1     | ... | E | E+1 | ... | K-1 | ... | N-1
+		// EPOCH    : C-E | C-E+1 | ... | C | nil | ... | .   | ... | C-E-1
+		//
+		// So we need to move tail snapshots N-K keys forward,
+		// i.e. from E+1 .. K to N-K+E+1 .. N.
+		diff := count - curr
+		lower := diff + id + 1
+		for k := count - 1; k >= lower; k-- {
+			moveSnapshot(ctx, k-diff, k)
+		}
+		delStart, delFinish = id+1, id+1+diff
+		if curr < delFinish {
+			delFinish = curr
+		}
+	} else {
+		// Decrease history size.
+		//
+		// Old state (N = curr, K = count):
+		// KEY INDEX: 0   | 1     | ... K1 ... | E | E+1   | ... K2-1 ... | N-1
+		// EPOCH    : C-E | C-E+1 | ... ..  ... | C | C-N+1 | ... ...  ... | C-E-1
+		var step, start int
+		if id < count {
+			// K2 case, move snapshots from E+1+N-K .. N-1 range to E+1 .. K-1.
+			// New state:
+			// KEY INDEX: 0   | 1     | ... | E | E+1   | ... | K-1
+			// EPOCH    : C-E | C-E+1 | ... | C | C-K+1 | ... | C-E-1
+			step = curr - count
+			start = id + 1
+		} else {
+			// K1 case, move snapshots from E-K+1 .. E range to 0 .. K-1
+			// AND replace current id with K-1.
+			// New state:
+			// KEY INDEX: 0     | 1     | ... | K-1
+			// EPOCH    : C-K+1 | C-K+2 | ... | C
+			step = id - count + 1
+			storage.Put(ctx, snapshotCurrentIDKey, count-1)
+		}
+		for k := start; k < count; k++ {
+			moveSnapshot(ctx, k+step, k)
+		}
+		delStart, delFinish = count, curr
+	}
+	for k := delStart; k < delFinish; k++ {
+		key := snapshotKeyPrefix + string([]byte{byte(k)})
+		storage.Delete(ctx, key)
+	}
+}
+
+func moveSnapshot(ctx storage.Context, from, to int) {
+	keyFrom := snapshotKeyPrefix + string([]byte{byte(from)})
+	keyTo := snapshotKeyPrefix + string([]byte{byte(to)})
+	data := storage.Get(ctx, keyFrom)
+	storage.Put(ctx, keyTo, data)
+}
+
 // SnapshotByEpoch method returns list of structures that contain node state
 // (online: 1) and byte array of stable marshalled netmap.NodeInfo structure.
 // These structure contain Storage nodes of specified epoch.
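UpdateSnapshotCount treats the snapshots as a ring of keys 0..count-1 with the current epoch at index id: growing shifts the tail (the oldest entries) towards the new end and leaves empty slots just after the current index, while shrinking copies only the most recent entries back into range and drops the rest. A minimal off-chain sketch of the same key moves, assuming a plain int slice stands in for contract storage (resize and the epoch-number encoding are illustrative, not part of the contract):

package main

import "fmt"

// Off-chain model of the snapshot ring: snaps[i] is the epoch stored under key
// index i (0 marks an empty slot) and id is the index of the current epoch.
// resize replays the key moves performed by UpdateSnapshotCount.
func resize(snaps []int, id, count int) ([]int, int) {
	curr := len(snaps)
	if curr < count {
		// Grow: shift the tail (oldest snapshots) towards the new end and
		// clear the slots right after the current index.
		out := make([]int, count)
		copy(out, snaps)
		diff := count - curr
		for k := count - 1; k >= diff+id+1; k-- {
			out[k] = out[k-diff]
		}
		delFinish := id + 1 + diff
		if curr < delFinish {
			delFinish = curr
		}
		for k := id + 1; k < delFinish; k++ {
			out[k] = 0
		}
		return out, id
	}
	// Shrink: keep only the count most recent snapshots.
	out := make([]int, count)
	copy(out, snaps)
	step, start, newID := 0, 0, id
	if id < count {
		step, start = curr-count, id+1
	} else {
		step, newID = id-count+1, count-1
	}
	for k := start; k < count; k++ {
		out[k] = snaps[k+step]
	}
	return out, newID
}

func main() {
	// Ten snapshots for epochs 91..100, epoch 100 sits under key index 4.
	snaps := []int{96, 97, 98, 99, 100, 91, 92, 93, 94, 95}

	grown, id := resize(snaps, 4, 13)
	fmt.Println(grown, "current index:", id)
	// [96 97 98 99 100 0 0 0 91 92 93 94 95] current index: 4

	shrunk, id := resize(snaps, 4, 7)
	fmt.Println(shrunk, "current index:", id)
	// [96 97 98 99 100 94 95] current index: 4
}

Running the sketch on ten snapshots for epochs 91..100 shows that growing to 13 keeps all ten epochs reachable and adds three empty slots, while shrinking to 7 retains only epochs 94..100, which matches the cases exercised by TestUpdateSnapshotCount below.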
@@ -109,7 +109,7 @@ func TestAddPeer(t *testing.T) {
 func TestNewEpoch(t *testing.T) {
 	rand.Seed(42)

-	const epochCount = netmap.SnapshotCount * 2
+	const epochCount = netmap.DefaultSnapshotCount * 2

 	cNm := newNetmapInvoker(t)
 	nodes := make([][]testNodeInfo, epochCount)
@@ -151,15 +151,12 @@ func TestNewEpoch(t *testing.T) {
 		require.Equal(t, 1, s.Len())
 		checkSnapshot(t, s, nodes[i])

-		for j := 0; j <= i && j < netmap.SnapshotCount; j++ {
+		for j := 0; j <= i && j < netmap.DefaultSnapshotCount; j++ {
 			t.Logf("Epoch: %d, diff: %d", i, j)
-			s, err := cNm.TestInvoke(t, "snapshot", int64(j))
-			require.NoError(t, err)
-			require.Equal(t, 1, s.Len())
-			checkSnapshot(t, s, nodes[i-j])
+			checkSnapshotAt(t, j, cNm, nodes[i-j])
 		}

-		_, err = cNm.TestInvoke(t, "snapshot", netmap.SnapshotCount)
+		_, err = cNm.TestInvoke(t, "snapshot", netmap.DefaultSnapshotCount)
 		require.Error(t, err)
 		require.True(t, strings.Contains(err.Error(), "incorrect diff"))

@@ -169,6 +166,124 @@ func TestNewEpoch(t *testing.T) {
 	}
 }

+func TestUpdateSnapshotCount(t *testing.T) {
+	rand.Seed(42)
+
+	require.True(t, netmap.DefaultSnapshotCount > 5) // sanity check, adjust tests if false.
+
+	prepare := func(t *testing.T, cNm *neotest.ContractInvoker, epochCount int) [][]testNodeInfo {
+		nodes := make([][]testNodeInfo, epochCount)
+		nodes[0] = []testNodeInfo{newStorageNode(t, cNm)}
+		cNm.Invoke(t, stackitem.Null{}, "addPeerIR", nodes[0][0].raw)
+		cNm.Invoke(t, stackitem.Null{}, "newEpoch", 1)
+		for i := 1; i < len(nodes); i++ {
+			sn := newStorageNode(t, cNm)
+			nodes[i] = append(nodes[i-1], sn)
+			cNm.Invoke(t, stackitem.Null{}, "addPeerIR", sn.raw)
+			cNm.Invoke(t, stackitem.Null{}, "newEpoch", i+1)
+		}
+		return nodes
+	}
+
+	t.Run("increase size, extend with nil", func(t *testing.T) {
+		// Before: S-old .. S
+		// After : S-old .. S nil nil ...
+		const epochCount = netmap.DefaultSnapshotCount / 2
+
+		cNm := newNetmapInvoker(t)
+		nodes := prepare(t, cNm, epochCount)
+
+		const newCount = netmap.DefaultSnapshotCount + 3
+		cNm.Invoke(t, stackitem.Null{}, "updateSnapshotCount", newCount)
+
+		s, err := cNm.TestInvoke(t, "netmap")
+		require.NoError(t, err)
+		require.Equal(t, 1, s.Len())
+		checkSnapshot(t, s, nodes[epochCount-1])
+		for i := 0; i < epochCount; i++ {
+			checkSnapshotAt(t, i, cNm, nodes[epochCount-i-1])
+		}
+		for i := epochCount; i < newCount; i++ {
+			checkSnapshotAt(t, i, cNm, nil)
+		}
+		_, err = cNm.TestInvoke(t, "snapshot", int64(newCount))
+		require.Error(t, err)
+	})
+	t.Run("increase size, copy old snapshots", func(t *testing.T) {
+		// Before: S-x .. S S-old ...
+		// After : S-x .. S nil nil S-old ...
+		const epochCount = netmap.DefaultSnapshotCount + netmap.DefaultSnapshotCount/2
+
+		cNm := newNetmapInvoker(t)
+		nodes := prepare(t, cNm, epochCount)
+
+		const newCount = netmap.DefaultSnapshotCount + 3
+		cNm.Invoke(t, stackitem.Null{}, "updateSnapshotCount", newCount)
+
+		s, err := cNm.TestInvoke(t, "netmap")
+		require.NoError(t, err)
+		require.Equal(t, 1, s.Len())
+		checkSnapshot(t, s, nodes[epochCount-1])
+		for i := 0; i < newCount-3; i++ {
+			checkSnapshotAt(t, i, cNm, nodes[epochCount-i-1])
+		}
+		for i := newCount - 3; i < newCount; i++ {
+			checkSnapshotAt(t, i, cNm, nil)
+		}
+		_, err = cNm.TestInvoke(t, "snapshot", int64(newCount))
+		require.Error(t, err)
+	})
+	t.Run("decrease size, small decrease", func(t *testing.T) {
+		// Before: S-x .. S S-old ... ...
+		// After : S-x .. S S-new ...
+		const epochCount = netmap.DefaultSnapshotCount + netmap.DefaultSnapshotCount/2
+
+		cNm := newNetmapInvoker(t)
+		nodes := prepare(t, cNm, epochCount)
+
+		const newCount = netmap.DefaultSnapshotCount/2 + 2
+		cNm.Invoke(t, stackitem.Null{}, "updateSnapshotCount", newCount)
+
+		s, err := cNm.TestInvoke(t, "netmap")
+		require.NoError(t, err)
+		require.Equal(t, 1, s.Len())
+		checkSnapshot(t, s, nodes[epochCount-1])
+		for i := 0; i < newCount; i++ {
+			checkSnapshotAt(t, i, cNm, nodes[epochCount-i-1])
+		}
+		_, err = cNm.TestInvoke(t, "snapshot", int64(newCount))
+		require.Error(t, err)
+	})
+	t.Run("decrease size, big decrease", func(t *testing.T) {
+		// Before: S-x ... ... S S-old ... ...
+		// After : S-new ... S
+		const epochCount = netmap.DefaultSnapshotCount + netmap.DefaultSnapshotCount/2
+
+		cNm := newNetmapInvoker(t)
+		nodes := prepare(t, cNm, epochCount)
+
+		const newCount = netmap.DefaultSnapshotCount/2 - 2
+		cNm.Invoke(t, stackitem.Null{}, "updateSnapshotCount", newCount)
+
+		s, err := cNm.TestInvoke(t, "netmap")
+		require.NoError(t, err)
+		require.Equal(t, 1, s.Len())
+		checkSnapshot(t, s, nodes[epochCount-1])
+		for i := 0; i < newCount; i++ {
+			checkSnapshotAt(t, i, cNm, nodes[epochCount-i-1])
+		}
+		_, err = cNm.TestInvoke(t, "snapshot", int64(newCount))
+		require.Error(t, err)
+	})
+}
+
+func checkSnapshotAt(t *testing.T, epoch int, cNm *neotest.ContractInvoker, nodes []testNodeInfo) {
+	s, err := cNm.TestInvoke(t, "snapshot", int64(epoch))
+	require.NoError(t, err)
+	require.Equal(t, 1, s.Len())
+	checkSnapshot(t, s, nodes)
+}
+
 func checkSnapshot(t *testing.T, s *vm.Stack, nodes []testNodeInfo) {
 	arr, ok := s.Pop().Value().([]stackitem.Item)
 	require.True(t, ok, "expected array")