Mirror of https://github.com/nspcc-dev/neo-go.git
Merge pull request #2108 from nspcc-dev/optimize-mpt

Some allocation optimizations

Commit: b989504d74
18 changed files with 224 additions and 130 deletions

@@ -1,6 +1,7 @@
 package mpt
 
 import (
+    "bytes"
     "fmt"
 
     "github.com/nspcc-dev/neo-go/pkg/crypto/hash"

@@ -23,7 +24,6 @@ type BaseNodeIface interface {
     Hash() util.Uint256
     Type() NodeType
     Bytes() []byte
-    EncodeBinaryAsChild(w *io.BinWriter)
 }
 
 type flushedNode interface {

@@ -55,8 +55,8 @@ func (b *BaseNode) getBytes(n Node) []byte {
 
 // updateHash updates hash field for this BaseNode.
 func (b *BaseNode) updateHash(n Node) {
-    if n.Type() == HashT {
-        panic("can't update hash for hash node")
+    if n.Type() == HashT || n.Type() == EmptyT {
+        panic("can't update hash for empty or hash node")
     }
     b.hash = hash.DoubleSha256(b.getBytes(n))
     b.hashValid = true

@@ -64,8 +64,9 @@ func (b *BaseNode) updateHash(n Node) {
 
 // updateCache updates hash and bytes fields for this BaseNode.
 func (b *BaseNode) updateBytes(n Node) {
-    buf := io.NewBufBinWriter()
-    encodeNodeWithType(n, buf.BinWriter)
+    buf := bytes.NewBuffer(make([]byte, 0, 1+n.Size()))
+    bw := io.NewBinWriterFromIO(buf)
+    encodeNodeWithType(n, bw)
     b.bytes = buf.Bytes()
     b.bytesValid = true
 }

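The hunk above replaces io.NewBufBinWriter with a bytes.Buffer whose capacity is preallocated to 1+n.Size() bytes (one byte for the node type plus the serialized payload), so encoding a node never has to grow and copy the buffer. A minimal, self-contained sketch of the same idea, assuming nothing from the neo-go io package (record, size and encode are illustrative names only):

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
    )

    // record stands in for an MPT node: it can report its serialized size up front.
    type record struct {
        payload []byte
    }

    // size returns exactly the number of bytes encode will write.
    func (r record) size() int { return 4 + len(r.payload) }

    // encode writes a length-prefixed payload into w.
    func (r record) encode(w *bytes.Buffer) {
        var prefix [4]byte
        binary.LittleEndian.PutUint32(prefix[:], uint32(len(r.payload)))
        w.Write(prefix[:])
        w.Write(r.payload)
    }

    func main() {
        r := record{payload: make([]byte, 100)}
        // Sizing the buffer from size() means bytes.Buffer never reallocates
        // while encode appends to it, which is the point of the change above.
        buf := bytes.NewBuffer(make([]byte, 0, 1+r.size()))
        buf.WriteByte(0x01) // type tag, mirroring encodeNodeWithType
        r.encode(buf)
        fmt.Println(len(buf.Bytes()), cap(buf.Bytes())) // 105 105
    }
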
@@ -76,19 +77,18 @@ func (b *BaseNode) invalidateCache() {
     b.hashValid = false
 }
 
+func encodeBinaryAsChild(n Node, w *io.BinWriter) {
+    if isEmpty(n) {
+        w.WriteB(byte(EmptyT))
+        return
+    }
+    w.WriteB(byte(HashT))
+    w.WriteBytes(n.Hash().BytesBE())
+}
+
 // encodeNodeWithType encodes node together with it's type.
 func encodeNodeWithType(n Node, w *io.BinWriter) {
-    switch t := n.Type(); t {
-    case HashT:
-        hn := n.(*HashNode)
-        if !hn.hashValid {
-            w.WriteB(byte(EmptyT))
-            break
-        }
-        fallthrough
-    default:
-        w.WriteB(byte(t))
-    }
+    w.WriteB(byte(n.Type()))
     n.EncodeBinary(w)
 }
 
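With this change a child reference is always encoded by one free function instead of per-type EncodeBinaryAsChild methods: an empty child becomes a single EmptyT byte, anything else becomes a HashT byte followed by the child's 32-byte hash. A standalone sketch of that child-reference layout, assuming illustrative tag values rather than the real NodeType constants:

    package main

    import (
        "crypto/sha256"
        "fmt"
    )

    const (
        tagHash  byte = 0x03 // stand-ins for the mpt node type tags
        tagEmpty byte = 0x04
    )

    // child is either absent or referenced by its 32-byte hash.
    type child struct {
        present bool
        hash    [sha256.Size]byte
    }

    // appendChild mirrors the shape of encodeBinaryAsChild: one tag byte,
    // plus the hash only for non-empty children.
    func appendChild(dst []byte, c child) []byte {
        if !c.present {
            return append(dst, tagEmpty)
        }
        dst = append(dst, tagHash)
        return append(dst, c.hash[:]...)
    }

    func main() {
        var out []byte
        out = appendChild(out, child{})                                        // 1 byte
        out = appendChild(out, child{present: true, hash: sha256.Sum256(nil)}) // 33 bytes
        fmt.Println(len(out)) // 34
    }
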
@@ -112,11 +112,7 @@ func DecodeNodeWithType(r *io.BinReader) Node {
     case LeafT:
         n = new(LeafNode)
     case EmptyT:
-        n = &HashNode{
-            BaseNode: BaseNode{
-                hashValid: false,
-            },
-        }
+        n = EmptyNode{}
     default:
         r.Err = fmt.Errorf("invalid node type: %x", typ)
         return nil

@@ -62,6 +62,8 @@ func (t *Trie) putBatchIntoNode(curr Node, kv []keyValue) (Node, int, error) {
         return t.putBatchIntoExtension(n, kv)
     case *HashNode:
         return t.putBatchIntoHash(n, kv)
+    case EmptyNode:
+        return t.putBatchIntoEmpty(kv)
     default:
         panic("invalid MPT node type")
     }

@@ -84,11 +86,9 @@ func (t *Trie) mergeExtension(prefix []byte, sub Node) (Node, error) {
         sn.invalidateCache()
         t.addRef(sn.Hash(), sn.bytes)
         return sn, nil
-    case *HashNode:
-        if sn.IsEmpty() {
-            return sn, nil
-        }
-
+    case EmptyNode:
+        return sn, nil
+    case *HashNode:
         n, err := t.getFromStore(sn.Hash())
         if err != nil {
             return sn, err

@@ -141,8 +141,8 @@ func (t *Trie) putBatchIntoExtensionNoPrefix(key []byte, next Node, kv []keyValu
 }
 
 func isEmpty(n Node) bool {
-    hn, ok := n.(*HashNode)
-    return ok && hn.IsEmpty()
+    _, ok := n.(EmptyNode)
+    return ok
 }
 
 // addToBranch puts items into the branch node assuming b is not yet in trie.

@@ -190,7 +190,7 @@ func (t *Trie) stripBranch(b *BranchNode) (Node, error) {
     }
     switch {
     case n == 0:
-        return new(HashNode), nil
+        return EmptyNode{}, nil
     case n == 1:
         if lastIndex != lastChild {
             return t.mergeExtension([]byte{lastIndex}, b.Children[lastIndex])

@@ -219,12 +219,13 @@ func (t *Trie) iterateBatch(kv []keyValue, f func(c byte, kv []keyValue) (int, e
     return n, nil
 }
 
-func (t *Trie) putBatchIntoHash(curr *HashNode, kv []keyValue) (Node, int, error) {
-    if curr.IsEmpty() {
-        common := lcpMany(kv)
-        stripPrefix(len(common), kv)
-        return t.newSubTrieMany(common, kv, nil)
-    }
+func (t *Trie) putBatchIntoEmpty(kv []keyValue) (Node, int, error) {
+    common := lcpMany(kv)
+    stripPrefix(len(common), kv)
+    return t.newSubTrieMany(common, kv, nil)
+}
+
+func (t *Trie) putBatchIntoHash(curr *HashNode, kv []keyValue) (Node, int, error) {
     result, err := t.getFromStore(curr.hash)
     if err != nil {
         return curr, 0, err

@@ -242,7 +243,7 @@ func (t *Trie) newSubTrieMany(prefix []byte, kv []keyValue, value []byte) (Node,
     if len(kv[0].key) == 0 {
         if len(kv[0].value) == 0 {
             if len(kv) == 1 {
-                return new(HashNode), 1, nil
+                return EmptyNode{}, 1, nil
             }
             node, n, err := t.newSubTrieMany(prefix, kv[1:], nil)
             return node, n + 1, err

@@ -68,8 +68,8 @@ func testPut(t *testing.T, ps pairs, tr1, tr2 *Trie) {
 
 func TestTrie_PutBatchLeaf(t *testing.T) {
     prepareLeaf := func(t *testing.T) (*Trie, *Trie) {
-        tr1 := NewTrie(new(HashNode), false, newTestStore())
-        tr2 := NewTrie(new(HashNode), false, newTestStore())
+        tr1 := NewTrie(EmptyNode{}, false, newTestStore())
+        tr2 := NewTrie(EmptyNode{}, false, newTestStore())
         require.NoError(t, tr1.Put([]byte{0}, []byte("value")))
         require.NoError(t, tr2.Put([]byte{0}, []byte("value")))
         return tr1, tr2

@@ -97,8 +97,8 @@ func TestTrie_PutBatchLeaf(t *testing.T) {
 
 func TestTrie_PutBatchExtension(t *testing.T) {
     prepareExtension := func(t *testing.T) (*Trie, *Trie) {
-        tr1 := NewTrie(new(HashNode), false, newTestStore())
-        tr2 := NewTrie(new(HashNode), false, newTestStore())
+        tr1 := NewTrie(EmptyNode{}, false, newTestStore())
+        tr2 := NewTrie(EmptyNode{}, false, newTestStore())
         require.NoError(t, tr1.Put([]byte{1, 2}, []byte("value1")))
         require.NoError(t, tr2.Put([]byte{1, 2}, []byte("value1")))
         return tr1, tr2

@@ -144,8 +144,8 @@ func TestTrie_PutBatchExtension(t *testing.T) {
 
 func TestTrie_PutBatchBranch(t *testing.T) {
     prepareBranch := func(t *testing.T) (*Trie, *Trie) {
-        tr1 := NewTrie(new(HashNode), false, newTestStore())
-        tr2 := NewTrie(new(HashNode), false, newTestStore())
+        tr1 := NewTrie(EmptyNode{}, false, newTestStore())
+        tr2 := NewTrie(EmptyNode{}, false, newTestStore())
         require.NoError(t, tr1.Put([]byte{0x00, 2}, []byte("value1")))
         require.NoError(t, tr2.Put([]byte{0x00, 2}, []byte("value1")))
         require.NoError(t, tr1.Put([]byte{0x10, 3}, []byte("value2")))

@@ -175,8 +175,8 @@ func TestTrie_PutBatchBranch(t *testing.T) {
         require.IsType(t, (*ExtensionNode)(nil), tr1.root)
     })
     t.Run("non-empty child is last node", func(t *testing.T) {
-        tr1 := NewTrie(new(HashNode), false, newTestStore())
-        tr2 := NewTrie(new(HashNode), false, newTestStore())
+        tr1 := NewTrie(EmptyNode{}, false, newTestStore())
+        tr2 := NewTrie(EmptyNode{}, false, newTestStore())
         require.NoError(t, tr1.Put([]byte{0x00, 2}, []byte("value1")))
         require.NoError(t, tr2.Put([]byte{0x00, 2}, []byte("value1")))
         require.NoError(t, tr1.Put([]byte{0x00}, []byte("value2")))

@@ -222,8 +222,8 @@ func TestTrie_PutBatchBranch(t *testing.T) {
 
 func TestTrie_PutBatchHash(t *testing.T) {
     prepareHash := func(t *testing.T) (*Trie, *Trie) {
-        tr1 := NewTrie(new(HashNode), false, newTestStore())
-        tr2 := NewTrie(new(HashNode), false, newTestStore())
+        tr1 := NewTrie(EmptyNode{}, false, newTestStore())
+        tr2 := NewTrie(EmptyNode{}, false, newTestStore())
         require.NoError(t, tr1.Put([]byte{0x10}, []byte("value1")))
         require.NoError(t, tr2.Put([]byte{0x10}, []byte("value1")))
         require.NoError(t, tr1.Put([]byte{0x20}, []byte("value2")))

@@ -257,8 +257,8 @@ func TestTrie_PutBatchHash(t *testing.T) {
 
 func TestTrie_PutBatchEmpty(t *testing.T) {
     t.Run("good", func(t *testing.T) {
-        tr1 := NewTrie(new(HashNode), false, newTestStore())
-        tr2 := NewTrie(new(HashNode), false, newTestStore())
+        tr1 := NewTrie(EmptyNode{}, false, newTestStore())
+        tr2 := NewTrie(EmptyNode{}, false, newTestStore())
         var ps = pairs{
             {[]byte{0}, []byte("value0")},
             {[]byte{1}, []byte("value1")},

@@ -273,15 +273,15 @@ func TestTrie_PutBatchEmpty(t *testing.T) {
             {[]byte{2}, nil},
            {[]byte{3}, []byte("replace3")},
         }
-        tr1 := NewTrie(new(HashNode), false, newTestStore())
-        tr2 := NewTrie(new(HashNode), false, newTestStore())
+        tr1 := NewTrie(EmptyNode{}, false, newTestStore())
+        tr2 := NewTrie(EmptyNode{}, false, newTestStore())
         testIncompletePut(t, ps, 4, tr1, tr2)
     })
 }
 
 // For the sake of coverage.
 func TestTrie_InvalidNodeType(t *testing.T) {
-    tr := NewTrie(new(HashNode), false, newTestStore())
+    tr := NewTrie(EmptyNode{}, false, newTestStore())
     var b Batch
     b.Add([]byte{1}, []byte("value"))
     tr.root = Node(nil)

@@ -289,8 +289,8 @@ func TestTrie_InvalidNodeType(t *testing.T) {
 }
 
 func TestTrie_PutBatch(t *testing.T) {
-    tr1 := NewTrie(new(HashNode), false, newTestStore())
-    tr2 := NewTrie(new(HashNode), false, newTestStore())
+    tr1 := NewTrie(EmptyNode{}, false, newTestStore())
+    tr2 := NewTrie(EmptyNode{}, false, newTestStore())
     var ps = pairs{
         {[]byte{1}, []byte{1}},
         {[]byte{2}, []byte{3}},

@@ -312,11 +312,10 @@ var _ = printNode
 // `spew.Dump()`.
 func printNode(prefix string, n Node) {
     switch tn := n.(type) {
-    case *HashNode:
-        if tn.IsEmpty() {
-            fmt.Printf("%s empty\n", prefix)
-            return
-        }
+    case EmptyNode:
+        fmt.Printf("%s empty\n", prefix)
+        return
+    case *HashNode:
         fmt.Printf("%s %s\n", prefix, tn.Hash().StringLE())
     case *BranchNode:
         for i, c := range tn.Children {

@@ -27,7 +27,7 @@ var _ Node = (*BranchNode)(nil)
 func NewBranchNode() *BranchNode {
     b := new(BranchNode)
     for i := 0; i < childrenCount; i++ {
-        b.Children[i] = new(HashNode)
+        b.Children[i] = EmptyNode{}
     }
     return b
 }

@@ -45,19 +45,24 @@ func (b *BranchNode) Bytes() []byte {
     return b.getBytes(b)
 }
 
+// Size implements Node interface.
+func (b *BranchNode) Size() int {
+    sz := childrenCount
+    for i := range b.Children {
+        if !isEmpty(b.Children[i]) {
+            sz += util.Uint256Size
+        }
+    }
+    return sz
+}
+
 // EncodeBinary implements io.Serializable.
 func (b *BranchNode) EncodeBinary(w *io.BinWriter) {
     for i := 0; i < childrenCount; i++ {
-        b.Children[i].EncodeBinaryAsChild(w)
+        encodeBinaryAsChild(b.Children[i], w)
     }
 }
 
-// EncodeBinaryAsChild implements BaseNode interface.
-func (b *BranchNode) EncodeBinaryAsChild(w *io.BinWriter) {
-    n := &NodeObject{Node: NewHashNode(b.Hash())} // with type
-    n.EncodeBinary(w)
-}
-
 // DecodeBinary implements io.Serializable.
 func (b *BranchNode) DecodeBinary(r *io.BinReader) {
     for i := 0; i < childrenCount; i++ {

pkg/core/mpt/empty.go (new file, 56 lines)
@@ -0,0 +1,56 @@
+package mpt
+
+import (
+    "encoding/json"
+    "errors"
+
+    "github.com/nspcc-dev/neo-go/pkg/io"
+    "github.com/nspcc-dev/neo-go/pkg/util"
+)
+
+// EmptyNode represents empty node.
+type EmptyNode struct{}
+
+// DecodeBinary implements io.Serializable interface.
+func (e EmptyNode) DecodeBinary(*io.BinReader) {
+}
+
+// EncodeBinary implements io.Serializable interface.
+func (e EmptyNode) EncodeBinary(*io.BinWriter) {
+}
+
+// Size implements Node interface.
+func (EmptyNode) Size() int { return 0 }
+
+// MarshalJSON implements Node interface.
+func (e EmptyNode) MarshalJSON() ([]byte, error) {
+    return []byte(`{}`), nil
+}
+
+// UnmarshalJSON implements Node interface.
+func (e EmptyNode) UnmarshalJSON(bytes []byte) error {
+    var m map[string]interface{}
+    err := json.Unmarshal(bytes, &m)
+    if err != nil {
+        return err
+    }
+    if len(m) != 0 {
+        return errors.New("expected empty node")
+    }
+    return nil
+}
+
+// Hash implements Node interface.
+func (e EmptyNode) Hash() util.Uint256 {
+    panic("can't get hash of an EmptyNode")
+}
+
+// Type implements Node interface.
+func (e EmptyNode) Type() NodeType {
+    return EmptyT
+}
+
+// Bytes implements Node interface.
+func (e EmptyNode) Bytes() []byte {
+    return nil
+}

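EmptyNode is a zero-size value type, so an absent child can be stored as EmptyNode{} without a heap allocation and recognized with a plain type assertion, where the old code allocated a new(HashNode) sentinel and had to call IsEmpty at every use site. A standalone sketch of the pattern, with an illustrative node interface rather than the real mpt one:

    package main

    import "fmt"

    // node is a tiny stand-in for the mpt Node interface.
    type node interface{ kind() string }

    // emptyNode is a zero-size sentinel: storing it in an interface value
    // does not require a per-value heap allocation.
    type emptyNode struct{}

    // hashNode is a "real" node that is allocated only when actually needed.
    type hashNode struct{ h [32]byte }

    func (emptyNode) kind() string { return "empty" }
    func (hashNode) kind() string  { return "hash" }

    // isEmpty mirrors the new mpt.isEmpty: a single type assertion,
    // with no extra flag or IsEmpty method call.
    func isEmpty(n node) bool {
        _, ok := n.(emptyNode)
        return ok
    }

    func main() {
        children := []node{emptyNode{}, hashNode{}, emptyNode{}}
        nonEmpty := 0
        for _, c := range children {
            if !isEmpty(c) {
                nonEmpty++
            }
        }
        fmt.Println(nonEmpty) // 1
    }
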
@@ -69,13 +69,13 @@ func (e *ExtensionNode) DecodeBinary(r *io.BinReader) {
 // EncodeBinary implements io.Serializable.
 func (e ExtensionNode) EncodeBinary(w *io.BinWriter) {
     w.WriteVarBytes(e.key)
-    e.next.EncodeBinaryAsChild(w)
+    encodeBinaryAsChild(e.next, w)
 }
 
-// EncodeBinaryAsChild implements BaseNode interface.
-func (e *ExtensionNode) EncodeBinaryAsChild(w *io.BinWriter) {
-    n := &NodeObject{Node: NewHashNode(e.Hash())} // with type
-    n.EncodeBinary(w)
+// Size implements Node interface.
+func (e *ExtensionNode) Size() int {
+    return io.GetVarSize(len(e.key)) + len(e.key) +
+        1 + util.Uint256Size // e.next is never empty
 }
 
 // MarshalJSON implements json.Marshaler.

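The new Size methods exist so that updateBytes can compute the exact serialized length before encoding: for example a var-int length prefix plus the value for a leaf, and one tag byte per child slot plus 32 bytes per non-empty child for a branch. A standalone sketch of that size calculation; varIntSize is an illustrative helper written to the NEO/Bitcoin-style var-int rules and is not the neo-go io.GetVarSize function itself:

    package main

    import "fmt"

    // varIntSize returns the encoded size of a variable-length integer
    // (CompactSize-style): 1, 3, 5 or 9 bytes depending on the value.
    func varIntSize(n uint64) int {
        switch {
        case n < 0xFD:
            return 1
        case n <= 0xFFFF:
            return 3
        case n <= 0xFFFFFFFF:
            return 5
        default:
            return 9
        }
    }

    // leafSize mirrors the shape of LeafNode.Size: a var-int length prefix
    // followed by the value bytes themselves.
    func leafSize(value []byte) int {
        return varIntSize(uint64(len(value))) + len(value)
    }

    func main() {
        fmt.Println(leafSize(make([]byte, 10)))  // 11
        fmt.Println(leafSize(make([]byte, 300))) // 303
    }
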
@@ -27,6 +27,11 @@ func NewHashNode(h util.Uint256) *HashNode {
 // Type implements Node interface.
 func (h *HashNode) Type() NodeType { return HashT }
 
+// Size implements Node interface.
+func (h *HashNode) Size() int {
+    return util.Uint256Size
+}
+
 // Hash implements Node interface.
 func (h *HashNode) Hash() util.Uint256 {
     if !h.hashValid {

@@ -35,9 +40,6 @@ func (h *HashNode) Hash() util.Uint256 {
     return h.hash
 }
 
-// IsEmpty returns true if h is an empty node i.e. contains no hash.
-func (h *HashNode) IsEmpty() bool { return !h.hashValid }
-
 // Bytes returns serialized HashNode.
 func (h *HashNode) Bytes() []byte {
     return h.getBytes(h)

@@ -58,17 +60,8 @@ func (h HashNode) EncodeBinary(w *io.BinWriter) {
     w.WriteBytes(h.hash[:])
 }
 
-// EncodeBinaryAsChild implements BaseNode interface.
-func (h *HashNode) EncodeBinaryAsChild(w *io.BinWriter) {
-    no := &NodeObject{Node: h} // with type
-    no.EncodeBinary(w)
-}
-
 // MarshalJSON implements json.Marshaler.
 func (h *HashNode) MarshalJSON() ([]byte, error) {
-    if !h.hashValid {
-        return []byte(`{}`), nil
-    }
     return []byte(`{"hash":"` + h.hash.StringLE() + `"}`), nil
 }
 
@@ -56,10 +56,9 @@ func (n LeafNode) EncodeBinary(w *io.BinWriter) {
     w.WriteVarBytes(n.value)
 }
 
-// EncodeBinaryAsChild implements BaseNode interface.
-func (n *LeafNode) EncodeBinaryAsChild(w *io.BinWriter) {
-    no := &NodeObject{Node: NewHashNode(n.Hash())} // with type
-    no.EncodeBinary(w)
+// Size implements Node interface.
+func (n *LeafNode) Size() int {
+    return io.GetVarSize(len(n.value)) + len(n.value)
 }
 
 // MarshalJSON implements json.Marshaler.

@@ -33,6 +33,7 @@ type Node interface {
     io.Serializable
     json.Marshaler
     json.Unmarshaler
+    Size() int
     BaseNodeIface
 }
 

@@ -68,7 +69,7 @@ func (n *NodeObject) UnmarshalJSON(data []byte) error {
 
     switch len(m) {
     case 0:
-        n.Node = new(HashNode)
+        n.Node = EmptyNode{}
     case 1:
         if v, ok := m["hash"]; ok {
             var h util.Uint256

@@ -27,6 +27,7 @@ func getTestFuncEncode(ok bool, expected, actual Node) func(t *testing.T) {
         require.NoError(t, err)
         require.Equal(t, expected.Type(), actual.Type())
         require.Equal(t, expected.Hash(), actual.Hash())
+        require.Equal(t, 1+expected.Size(), len(expected.Bytes()))
     })
     t.Run("JSON", func(t *testing.T) {
         bs, err := json.Marshal(expected)

@@ -78,9 +79,6 @@ func TestNode_Serializable(t *testing.T) {
         t.Run("Raw", getTestFuncEncode(true, h, new(HashNode)))
         t.Run("WithType", getTestFuncEncode(true, &NodeObject{h}, new(NodeObject)))
     })
-    t.Run("Empty", func(t *testing.T) { // compare nodes, not hashes
-        testserdes.EncodeDecodeBinary(t, new(HashNode), new(HashNode))
-    })
     t.Run("InvalidSize", func(t *testing.T) {
         buf := io.NewBufBinWriter()
         buf.BinWriter.WriteBytes(make([]byte, 13))

@@ -111,7 +109,7 @@ func TestInvalidJSON(t *testing.T) {
     t.Run("InvalidChildrenCount", func(t *testing.T) {
         var cs [childrenCount + 1]Node
         for i := range cs {
-            cs[i] = new(HashNode)
+            cs[i] = EmptyNode{}
         }
         data, err := json.Marshal(cs)
         require.NoError(t, err)

@@ -49,14 +49,12 @@ func (t *Trie) getProof(curr Node, path []byte, proofs *[][]byte) (Node, error)
             return n, nil
         }
     case *HashNode:
-        if !n.IsEmpty() {
-            r, err := t.getFromStore(n.Hash())
-            if err != nil {
-                return nil, err
-            }
-            return t.getProof(r, path, proofs)
-        }
+        r, err := t.getFromStore(n.Hash())
+        if err != nil {
+            return nil, err
+        }
+        return t.getProof(r, path, proofs)
     }
     return nil, ErrNotFound
 }
 
@@ -35,7 +35,7 @@ var ErrNotFound = errors.New("item not found")
 // This also has the benefit, that every `Put` can be considered an atomic operation.
 func NewTrie(root Node, enableRefCount bool, store *storage.MemCachedStore) *Trie {
     if root == nil {
-        root = new(HashNode)
+        root = EmptyNode{}
     }
 
     return &Trie{

@@ -75,12 +75,11 @@ func (t *Trie) getWithPath(curr Node, path []byte) (Node, []byte, error) {
         }
         n.Children[i] = r
         return n, bs, nil
+    case EmptyNode:
     case *HashNode:
-        if !n.IsEmpty() {
-            if r, err := t.getFromStore(n.hash); err == nil {
-                return t.getWithPath(r, path)
-            }
+        if r, err := t.getFromStore(n.hash); err == nil {
+            return t.getWithPath(r, path)
         }
     case *ExtensionNode:
         if bytes.HasPrefix(path, n.key) {
             r, bs, err := t.getWithPath(n.next, path[len(n.key):])

@@ -187,14 +186,13 @@ func (t *Trie) putIntoExtension(curr *ExtensionNode, path []byte, val Node) (Nod
     return b, nil
 }
 
+func (t *Trie) putIntoEmpty(path []byte, val Node) (Node, error) {
+    return t.newSubTrie(path, val, true), nil
+}
+
 // putIntoHash puts val to trie if current node is a HashNode.
 // It returns Node if curr needs to be replaced and error if any.
 func (t *Trie) putIntoHash(curr *HashNode, path []byte, val Node) (Node, error) {
-    if curr.IsEmpty() {
-        hn := t.newSubTrie(path, val, true)
-        return hn, nil
-    }
-
     result, err := t.getFromStore(curr.hash)
     if err != nil {
         return nil, err

@@ -227,6 +225,8 @@ func (t *Trie) putIntoNode(curr Node, path []byte, val Node) (Node, error) {
         return t.putIntoExtension(n, path, val)
     case *HashNode:
         return t.putIntoHash(n, path, val)
+    case EmptyNode:
+        return t.putIntoEmpty(path, val)
     default:
         panic("invalid MPT node type")
     }

@@ -257,8 +257,7 @@ func (t *Trie) deleteFromBranch(b *BranchNode, path []byte) (Node, error) {
     b.invalidateCache()
     var count, index int
     for i := range b.Children {
-        h, ok := b.Children[i].(*HashNode)
-        if !ok || !h.IsEmpty() {
+        if !isEmpty(b.Children[i]) {
             index = i
             count++
         }

@@ -307,10 +306,9 @@ func (t *Trie) deleteFromExtension(n *ExtensionNode, path []byte) (Node, error)
         t.removeRef(nxt.Hash(), nxt.bytes)
         n.key = append(n.key, nxt.key...)
         n.next = nxt.next
-    case *HashNode:
-        if nxt.IsEmpty() {
-            return nxt, nil
-        }
+    case EmptyNode:
+        return nxt, nil
+    case *HashNode:
         n.next = nxt
     default:
         n.next = r

@@ -327,17 +325,16 @@ func (t *Trie) deleteFromNode(curr Node, path []byte) (Node, error) {
     case *LeafNode:
         if len(path) == 0 {
             t.removeRef(curr.Hash(), curr.Bytes())
-            return new(HashNode), nil
+            return EmptyNode{}, nil
         }
         return curr, nil
     case *BranchNode:
         return t.deleteFromBranch(n, path)
     case *ExtensionNode:
         return t.deleteFromExtension(n, path)
-    case *HashNode:
-        if n.IsEmpty() {
-            return n, nil
-        }
+    case EmptyNode:
+        return n, nil
+    case *HashNode:
         newNode, err := t.getFromStore(n.Hash())
         if err != nil {
             return nil, err

@@ -350,7 +347,7 @@ func (t *Trie) deleteFromNode(curr Node, path []byte) (Node, error) {
 
 // StateRoot returns root hash of t.
 func (t *Trie) StateRoot() util.Uint256 {
-    if hn, ok := t.root.(*HashNode); ok && hn.IsEmpty() {
+    if isEmpty(t.root) {
         return util.Uint256{}
     }
     return t.root.Hash()

@@ -486,9 +483,11 @@ func (t *Trie) Collapse(depth int) {
 }
 
 func collapse(depth int, node Node) Node {
-    if _, ok := node.(*HashNode); ok {
+    switch node.(type) {
+    case *HashNode, EmptyNode:
         return node
-    } else if depth == 0 {
+    }
+    if depth == 0 {
         return NewHashNode(node.Hash())
     }
 
@@ -239,8 +239,7 @@ func isValid(curr Node) bool {
             if !isValid(n.Children[i]) {
                 return false
             }
-            hn, ok := n.Children[i].(*HashNode)
-            if !ok || !hn.IsEmpty() {
+            if !isEmpty(n.Children[i]) {
                 count++
             }
         }

@@ -342,7 +341,7 @@ func testTrieDelete(t *testing.T, enableGC bool) {
         })
 
         require.NoError(t, tr.Delete([]byte{0xAB}))
-        require.True(t, tr.root.(*HashNode).IsEmpty())
+        require.IsType(t, EmptyNode{}, tr.root)
     })
 
     t.Run("MultipleKeys", func(t *testing.T) {

@@ -505,12 +504,11 @@ func TestTrie_Collapse(t *testing.T) {
         require.Equal(t, NewLeafNode([]byte("value")), tr.root)
     })
     t.Run("Hash", func(t *testing.T) {
-        t.Run("Empty", func(t *testing.T) {
-            tr := NewTrie(new(HashNode), false, newTestStore())
+        t.Run("EmptyNode", func(t *testing.T) {
+            tr := NewTrie(EmptyNode{}, false, newTestStore())
             require.NotPanics(t, func() { tr.Collapse(1) })
-            hn, ok := tr.root.(*HashNode)
+            _, ok := tr.root.(EmptyNode)
             require.True(t, ok)
-            require.True(t, hn.IsEmpty())
         })
 
         h := random.Uint256()

@@ -61,7 +61,10 @@ func (aer *AppExecResult) EncodeBinary(w *io.BinWriter) {
     for _, it := range aer.Stack {
         stackitem.EncodeBinaryProtected(it, w)
     }
-    w.WriteArray(aer.Events)
+    w.WriteVarUint(uint64(len(aer.Events)))
+    for i := range aer.Events {
+        aer.Events[i].EncodeBinary(w)
+    }
     w.WriteVarBytes([]byte(aer.FaultException))
 }
 
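The generic WriteArray helper takes its argument as an interface{} and, as far as I can tell, iterates it through reflection; writing the count as a var-uint and calling EncodeBinary in an explicitly typed loop removes that per-element reflection and boxing, which is presumably where the saved allocations come from. A self-contained sketch contrasting the two styles (the serializable interface and helper names are illustrative, not the neo-go io API):

    package main

    import (
        "bytes"
        "fmt"
        "reflect"
    )

    type serializable interface{ encodeTo(*bytes.Buffer) }

    type event struct{ id byte }

    func (e event) encodeTo(b *bytes.Buffer) { b.WriteByte(e.id) }

    // writeArrayReflect only knows the slice as interface{} and walks it
    // with reflect, similar in spirit to a generic WriteArray helper.
    func writeArrayReflect(b *bytes.Buffer, arr interface{}) {
        v := reflect.ValueOf(arr)
        b.WriteByte(byte(v.Len())) // length prefix, kept to one byte for brevity
        for i := 0; i < v.Len(); i++ {
            v.Index(i).Interface().(serializable).encodeTo(b)
        }
    }

    // writeArrayTyped is the hand-rolled equivalent: the element type is known,
    // so there is no reflect.Value traffic and no per-element interface boxing.
    func writeArrayTyped(b *bytes.Buffer, arr []event) {
        b.WriteByte(byte(len(arr)))
        for i := range arr {
            arr[i].encodeTo(b)
        }
    }

    func main() {
        events := []event{{1}, {2}, {3}}
        var b1, b2 bytes.Buffer
        writeArrayReflect(&b1, events)
        writeArrayTyped(&b2, events)
        fmt.Println(bytes.Equal(b1.Bytes(), b2.Bytes())) // true
    }
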
@@ -13,6 +13,31 @@ import (
     "github.com/stretchr/testify/require"
 )
 
+func BenchmarkAppExecResult_EncodeBinary(b *testing.B) {
+    aer := &AppExecResult{
+        Container: random.Uint256(),
+        Execution: Execution{
+            Trigger: trigger.Application,
+            VMState: vm.HaltState,
+            GasConsumed: 12345,
+            Stack: []stackitem.Item{},
+            Events: []NotificationEvent{{
+                ScriptHash: random.Uint160(),
+                Name: "Event",
+                Item: stackitem.NewArray([]stackitem.Item{stackitem.NewBool(true)}),
+            }},
+        },
+    }
+
+    w := io.NewBufBinWriter()
+    b.ReportAllocs()
+    b.ResetTimer()
+    for i := 0; i < b.N; i++ {
+        w.Reset()
+        aer.EncodeBinary(w.BinWriter)
+    }
+}
+
 func TestEncodeDecodeNotificationEvent(t *testing.T) {
     event := &NotificationEvent{
         ScriptHash: random.Uint160(),

@@ -53,3 +53,14 @@ func BenchmarkDecodeFromBytes(t *testing.B) {
         require.NoError(t, err)
     }
 }
+
+func BenchmarkTransaction_Bytes(b *testing.B) {
+    tx, err := NewTransactionFromBytes(benchTx)
+    require.NoError(b, err)
+
+    b.ReportAllocs()
+    b.ResetTimer()
+    for i := 0; i < b.N; i++ {
+        _ = tx.Bytes()
+    }
+}

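Both new benchmarks follow the usual testing.B recipe for allocation-sensitive code: build the fixture once, call b.ReportAllocs and b.ResetTimer, then run the measured call inside the b.N loop, reusing a writer instead of creating one per iteration. A generic, self-contained sketch of that structure (the payload and encode function are placeholders; save it as a _test.go file and run `go test -bench=. -benchmem`):

    package encbench

    import (
        "bytes"
        "testing"
    )

    // encodePayload stands in for the EncodeBinary call being measured.
    func encodePayload(w *bytes.Buffer, payload []byte) {
        w.WriteByte(byte(len(payload)))
        w.Write(payload)
    }

    func BenchmarkEncodePayload(b *testing.B) {
        payload := bytes.Repeat([]byte{0xAB}, 64)             // fixture built once
        w := bytes.NewBuffer(make([]byte, 0, 1+len(payload))) // reused writer

        b.ReportAllocs() // include allocs/op in the benchmark output
        b.ResetTimer()   // exclude the setup above from the measurement
        for i := 0; i < b.N; i++ {
            w.Reset()
            encodePayload(w, payload)
        }
    }
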
@@ -203,7 +203,10 @@ func (t *Transaction) DecodeBinary(br *io.BinReader) {
 // EncodeBinary implements Serializable interface.
 func (t *Transaction) EncodeBinary(bw *io.BinWriter) {
     t.encodeHashableFields(bw)
-    bw.WriteArray(t.Scripts)
+    bw.WriteVarUint(uint64(len(t.Scripts)))
+    for i := range t.Scripts {
+        t.Scripts[i].EncodeBinary(bw)
+    }
 }
 
 // encodeHashableFields encodes the fields that are not used for

@@ -218,8 +221,14 @@ func (t *Transaction) encodeHashableFields(bw *io.BinWriter) {
     bw.WriteU64LE(uint64(t.SystemFee))
     bw.WriteU64LE(uint64(t.NetworkFee))
     bw.WriteU32LE(t.ValidUntilBlock)
-    bw.WriteArray(t.Signers)
-    bw.WriteArray(t.Attributes)
+    bw.WriteVarUint(uint64(len(t.Signers)))
+    for i := range t.Signers {
+        t.Signers[i].EncodeBinary(bw)
+    }
+    bw.WriteVarUint(uint64(len(t.Attributes)))
+    for i := range t.Attributes {
+        t.Attributes[i].EncodeBinary(bw)
+    }
     bw.WriteVarBytes(t.Script)
 }
 
@@ -112,8 +112,11 @@ func (w *serContext) serialize(item Item) error {
         }
     case *BigInteger:
         w.data = append(w.data, byte(IntegerT))
-        data := bigint.ToBytes(t.Value().(*big.Int))
-        w.appendVarUint(uint64(len(data)))
+        v := t.Value().(*big.Int)
+        ln := len(w.data)
+        w.data = append(w.data, 0)
+        data := bigint.ToPreallocatedBytes(v, w.data[len(w.data):])
+        w.data[ln] = byte(len(data))
        w.data = append(w.data, data...)
     case *Interop:
         if w.allowInvalid {

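In the old code bigint.ToBytes allocated a fresh slice for every integer item, which was then appended to w.data; the new code appears to reserve a single length byte in w.data and let bigint.ToPreallocatedBytes serialize the value directly into the spare capacity of that same slice, writing the real length back afterwards (a one-byte length works here because VM integers are limited in size). A standalone sketch of serializing a big.Int into a caller-provided buffer's spare capacity; appendBigInt is an illustrative helper and uses plain big-endian magnitude bytes, not the bigint package's own encoding:

    package main

    import (
        "fmt"
        "math/big"
    )

    // appendBigInt appends a one-byte length prefix and the bytes of v to dst,
    // reusing dst's spare capacity instead of allocating a temporary slice.
    func appendBigInt(dst []byte, v *big.Int) []byte {
        size := (v.BitLen() + 7) / 8
        dst = append(dst, byte(size)) // length prefix
        if n := len(dst) + size; n <= cap(dst) {
            dst = dst[:n] // extend into existing capacity: no allocation
        } else {
            dst = append(dst, make([]byte, size)...) // fall back to growing
        }
        v.FillBytes(dst[len(dst)-size:])
        return dst
    }

    func main() {
        buf := make([]byte, 0, 64) // buffer preallocated once by the caller
        buf = append(buf, 0x01)    // placeholder type tag, as the serializer writes one
        buf = appendBigInt(buf, big.NewInt(1<<40))
        fmt.Printf("% x\n", buf) // 01 06 01 00 00 00 00 00
    }
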