storage: expose private storage map for more efficient MPT batch

This couldn't be done previously, when the changes were spread over two maps with
mixed storage, but now all of the storage changes are located in a single map, so
it's trivial to do exact slice allocations and to avoid string->[]byte conversions.
Roman Khimov 2022-02-17 12:11:59 +03:00
parent 7dc8fc443f
commit f80680187e
6 changed files with 44 additions and 48 deletions
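
The shape of the new conversion, as a self-contained sketch (simplified stand-in types that mirror the diff below, not actual neo-go code): all per-block changes sit in one map under string keys that still carry the one-byte storage prefix, so the batch can be built with one exactly-sized allocation, one string-to-nibbles conversion per key and a single sort, instead of a prefix scan feeding a per-key sorted insert.

package main

import (
	"bytes"
	"fmt"
	"sort"
)

type kv struct{ key, value []byte }

// strToNibbles mirrors the helper added below: skip the one-byte storage
// prefix and split every remaining byte into its high and low nibble.
func strToNibbles(path string) []byte {
	result := make([]byte, (len(path)-1)*2)
	for i := 0; i < len(path)-1; i++ {
		result[i*2] = path[i+1] >> 4
		result[i*2+1] = path[i+1] & 0x0F
	}
	return result
}

// mapToBatch mirrors MapToMPTBatch: exact allocation, then a single sort
// instead of keeping the slice ordered on every insertion.
func mapToBatch(m map[string][]byte) []kv {
	res := make([]kv, 0, len(m)) // the map size is known up front
	for k, v := range m {
		res = append(res, kv{strToNibbles(k), v}) // the string key is used directly
	}
	sort.Slice(res, func(i, j int) bool { return bytes.Compare(res[i].key, res[j].key) < 0 })
	return res
}

func main() {
	changes := map[string][]byte{ // 'a' plays the role of the storage prefix
		"a\x02\x10": {3},
		"a\x00\x01": {5},
	}
	for _, p := range mapToBatch(changes) {
		fmt.Println(p.key, p.value) // [0 0 0 1] [5], then [0 2 1 0] [3]
	}
}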

View file

@@ -20,6 +20,7 @@ import (
"github.com/nspcc-dev/neo-go/pkg/core/interop"
"github.com/nspcc-dev/neo-go/pkg/core/interop/contract"
"github.com/nspcc-dev/neo-go/pkg/core/mempool"
"github.com/nspcc-dev/neo-go/pkg/core/mpt"
"github.com/nspcc-dev/neo-go/pkg/core/native"
"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
"github.com/nspcc-dev/neo-go/pkg/core/state"
@@ -1138,7 +1139,7 @@ func (bc *Blockchain) storeBlock(block *block.Block, txpool *mempool.Pool) error
appExecResults = append(appExecResults, aer)
aerchan <- aer
close(aerchan)
b := cache.GetMPTBatch()
b := mpt.MapToMPTBatch(cache.Store.GetStorageChanges())
mpt, sr, err := bc.stateRoot.AddMPTBatch(block.Index, b, cache.Store)
if err != nil {
// Release goroutines, don't care about errors, we already have one.

View file

@@ -9,7 +9,6 @@ import (
"sort"
"github.com/nspcc-dev/neo-go/pkg/core/block"
"github.com/nspcc-dev/neo-go/pkg/core/mpt"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/core/storage"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
@@ -771,12 +770,3 @@ func (dao *Simple) Persist() (int, error) {
func (dao *Simple) PersistSync() (int, error) {
return dao.Store.PersistSync()
}
// GetMPTBatch storage changes to be applied to MPT.
func (dao *Simple) GetMPTBatch() mpt.Batch {
var b mpt.Batch
dao.Store.SeekAll([]byte{byte(dao.Version.StoragePrefix)}, func(k, v []byte) {
b.Add(k[1:], v)
})
return b
}
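
For contrast, the removed path above did this (a restatement of the deleted code with its overhead spelled out in comments; see also the old Batch.Add in the next file):

var b mpt.Batch
dao.Store.SeekAll([]byte{byte(dao.Version.StoragePrefix)}, func(k, v []byte) {
	// k was converted from the cache's internal string key just to be passed here;
	// Add keeps b.kv sorted, copying the slice tail on every out-of-order insert,
	// and b.kv grows with no capacity known up front.
	b.Add(k[1:], v) // strip the storage prefix byte
})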

View file

@@ -16,23 +16,19 @@ type keyValue struct {
value []byte
}
// Add adds key-value pair to batch.
// If there is an item with the specified key, it is replaced.
func (b *Batch) Add(key []byte, value []byte) {
path := toNibbles(key)
i := sort.Search(len(b.kv), func(i int) bool {
return bytes.Compare(path, b.kv[i].key) <= 0
})
if i == len(b.kv) {
b.kv = append(b.kv, keyValue{path, value})
} else if bytes.Equal(b.kv[i].key, path) {
b.kv[i].value = value
} else {
b.kv = append(b.kv, keyValue{})
copy(b.kv[i+1:], b.kv[i:])
b.kv[i].key = path
b.kv[i].value = value
// MapToMPTBatch makes a Batch from unordered set of storage changes.
func MapToMPTBatch(m map[string][]byte) Batch {
var b Batch
b.kv = make([]keyValue, 0, len(m))
for k, v := range m {
b.kv = append(b.kv, keyValue{strToNibbles(k), v}) // Strip storage prefix.
}
sort.Slice(b.kv, func(i, j int) bool {
return bytes.Compare(b.kv[i].key, b.kv[j].key) < 0
})
return b
}
// PutBatch puts batch to trie.
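
One behavioural detail worth noting (illustrated by a hypothetical helper assumed to live in package mpt, not part of the commit): the old Add replaced the value when the same key was added twice, while with the new constructor that de-duplication happens before MapToMPTBatch is even called, because a Go map keeps a single value per key; the sort.Slice call then restores the ordered kv slice that PutBatch expects.

func exampleDedup() Batch { // illustrative only, not commit code
	m := map[string][]byte{}
	m["a\x02\x00"] = []byte{4} // first write...
	m["a\x02\x00"] = []byte{6} // ...overwritten: a Go map keeps one value per key
	return MapToMPTBatch(m)
	// result: kv == []keyValue{{key: []byte{0, 2, 0, 0}, value: []byte{6}}}
}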

View file

@@ -10,12 +10,13 @@ import (
)
func TestBatchAdd(t *testing.T) {
b := new(Batch)
b.Add([]byte{1}, []byte{2})
b.Add([]byte{2, 16}, []byte{3})
b.Add([]byte{2, 0}, []byte{4})
b.Add([]byte{0, 1}, []byte{5})
b.Add([]byte{2, 0}, []byte{6})
b := MapToMPTBatch(map[string][]byte{
"a\x01": {2},
"a\x02\x10": {3},
"a\x00\x01": {5},
"a\x02\x00": {6},
})
expected := []keyValue{
{[]byte{0, 0, 0, 1}, []byte{5}},
{[]byte{0, 1}, []byte{2}},
@@ -28,7 +29,7 @@ func TestBatchAdd(t *testing.T) {
type pairs = [][2][]byte
func testIncompletePut(t *testing.T, ps pairs, n int, tr1, tr2 *Trie) {
var b Batch
var m = make(map[string][]byte)
for i, p := range ps {
if i < n {
if p[1] == nil {
@@ -43,9 +44,10 @@ func testIncompletePut(t *testing.T, ps pairs, n int, tr1, tr2 *Trie) {
require.Error(t, tr1.Put(p[0], p[1]), "item %d", i)
}
}
b.Add(p[0], p[1])
m["a"+string(p[0])] = p[1]
}
b := MapToMPTBatch(m)
num, err := tr2.PutBatch(b)
if n == len(ps) {
require.NoError(t, err)
@@ -308,8 +310,10 @@ func TestTrie_PutBatchEmpty(t *testing.T) {
// For the sake of coverage.
func TestTrie_InvalidNodeType(t *testing.T) {
tr := NewTrie(EmptyNode{}, ModeAll, newTestStore())
var b Batch
b.Add([]byte{1}, []byte("value"))
var b = Batch{kv: []keyValue{{
key: []byte{0, 1},
value: []byte("value"),
}}}
tr.root = Node(nil)
require.Panics(t, func() { _, _ = tr.PutBatch(b) })
}

View file

@@ -43,6 +43,17 @@ func toNibbles(path []byte) []byte {
return result
}
// strToNibbles mangles path by splitting every byte into 2 containing low- and high- 4-byte part,
// ignoring the first byte (prefix).
func strToNibbles(path string) []byte {
result := make([]byte, (len(path)-1)*2)
for i := 0; i < len(path)-1; i++ {
result[i*2] = path[i+1] >> 4
result[i*2+1] = path[i+1] & 0x0F
}
return result
}
// fromNibbles performs operation opposite to toNibbles and does no path validity checks.
func fromNibbles(path []byte) []byte {
result := make([]byte, len(path)/2)
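
A hand-worked example of the new helper (a hypothetical function for illustration, not commit code; bytes is assumed to be imported): strToNibbles yields exactly the nibble path that toNibbles would yield for the unprefixed []byte key, it just reads the string directly and skips the prefix byte, so no intermediate []byte copy of the key is needed.

func nibbleExample() bool { // hypothetical, for illustration only
	a := toNibbles([]byte{0xAB, 0x01}) // {0x0A, 0x0B, 0x00, 0x01}
	b := strToNibbles("p\xAB\x01")     // the same nibbles: 'p' (the prefix byte) is skipped
	return bytes.Equal(a, b)           // true
}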

View file

@@ -156,19 +156,13 @@ func (s *MemCachedStore) Seek(rng SeekRange, f func(k, v []byte) bool) {
s.seek(context.Background(), rng, false, f)
}
// SeekAll is like seek but also iterates over deleted items.
func (s *MemCachedStore) SeekAll(key []byte, f func(k, v []byte)) {
// GetStorageChanges returns all current storage changes. It can only be done for private
// MemCachedStore.
func (s *MemCachedStore) GetStorageChanges() map[string][]byte {
if !s.private {
s.mut.RLock()
defer s.mut.RUnlock()
}
sk := string(key)
m := s.chooseMap(key)
for k, v := range m {
if strings.HasPrefix(k, sk) {
f([]byte(k), v)
}
panic("GetStorageChanges called on shared MemCachedStore")
}
return s.stor
}
// SeekAsync returns non-buffered channel with matching KeyValue pairs. Key and
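
How the pieces are meant to fit together after this change, as a minimal sketch (the private-store constructor name and the persistent placeholder are assumptions, only the s.private flag is visible in this diff; the real call site in Blockchain.storeBlock goes through the DAO as cache.Store.GetStorageChanges()):

private := storage.NewPrivateMemCachedStore(persistent) // assumed constructor; persistent is whatever Store backs it
// ... block execution writes every storage change into private ...
b := mpt.MapToMPTBatch(private.GetStorageChanges()) // the internal map is handed out as-is: no copy, no locking

The panic branch replaces the old locking: a private MemCachedStore has a single owner, so returning its map directly is safe, whereas on a shared store other goroutines could still be mutating that map while the MPT batch is being built.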