Merge pull request #2108 from nspcc-dev/optimize-mpt

Some allocation optimizations
Roman Khimov 2021-08-06 14:51:10 +03:00 committed by GitHub
commit b989504d74
18 changed files with 224 additions and 130 deletions


@ -1,6 +1,7 @@
package mpt
import (
"bytes"
"fmt"
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
@ -23,7 +24,6 @@ type BaseNodeIface interface {
Hash() util.Uint256
Type() NodeType
Bytes() []byte
EncodeBinaryAsChild(w *io.BinWriter)
}
type flushedNode interface {
@ -55,8 +55,8 @@ func (b *BaseNode) getBytes(n Node) []byte {
// updateHash updates hash field for this BaseNode.
func (b *BaseNode) updateHash(n Node) {
if n.Type() == HashT {
panic("can't update hash for hash node")
if n.Type() == HashT || n.Type() == EmptyT {
panic("can't update hash for empty or hash node")
}
b.hash = hash.DoubleSha256(b.getBytes(n))
b.hashValid = true
@ -64,8 +64,9 @@ func (b *BaseNode) updateHash(n Node) {
// updateCache updates hash and bytes fields for this BaseNode.
func (b *BaseNode) updateBytes(n Node) {
buf := io.NewBufBinWriter()
encodeNodeWithType(n, buf.BinWriter)
buf := bytes.NewBuffer(make([]byte, 0, 1+n.Size()))
bw := io.NewBinWriterFromIO(buf)
encodeNodeWithType(n, bw)
b.bytes = buf.Bytes()
b.bytesValid = true
}
@ -76,19 +77,18 @@ func (b *BaseNode) invalidateCache() {
b.hashValid = false
}
func encodeBinaryAsChild(n Node, w *io.BinWriter) {
if isEmpty(n) {
w.WriteB(byte(EmptyT))
return
}
w.WriteB(byte(HashT))
w.WriteBytes(n.Hash().BytesBE())
}
// encodeNodeWithType encodes node together with it's type.
func encodeNodeWithType(n Node, w *io.BinWriter) {
switch t := n.Type(); t {
case HashT:
hn := n.(*HashNode)
if !hn.hashValid {
w.WriteB(byte(EmptyT))
break
}
fallthrough
default:
w.WriteB(byte(t))
}
w.WriteB(byte(n.Type()))
n.EncodeBinary(w)
}
@ -112,11 +112,7 @@ func DecodeNodeWithType(r *io.BinReader) Node {
case LeafT:
n = new(LeafNode)
case EmptyT:
n = &HashNode{
BaseNode: BaseNode{
hashValid: false,
},
}
n = EmptyNode{}
default:
r.Err = fmt.Errorf("invalid node type: %x", typ)
return nil
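The updateBytes change above is the core of the allocation win for MPT nodes: instead of an io.NewBufBinWriter with a growing internal buffer, the node is serialized into a bytes.Buffer preallocated to exactly 1+n.Size() bytes (one type byte plus the payload). A minimal standalone sketch of the pattern, with a hypothetical sizeOf helper standing in for Node.Size():

package main

import (
	"bytes"
	"fmt"
)

// sizeOf stands in for Node.Size(): it reports the exact serialized payload
// length so the buffer below can be allocated once with the right capacity.
func sizeOf(payload []byte) int { return len(payload) }

// encodeWithType mirrors the shape of updateBytes/encodeNodeWithType: one
// type byte followed by the payload, written into a preallocated buffer.
func encodeWithType(typ byte, payload []byte) []byte {
	buf := bytes.NewBuffer(make([]byte, 0, 1+sizeOf(payload)))
	buf.WriteByte(typ) // node type marker
	buf.Write(payload) // fits within the preallocated capacity, no regrowth
	return buf.Bytes()
}

func main() {
	fmt.Printf("%x\n", encodeWithType(0x02, []byte("abc")))
}

The node_test.go change further down checks the same invariant: len(Bytes()) must equal 1+Size().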


@ -62,6 +62,8 @@ func (t *Trie) putBatchIntoNode(curr Node, kv []keyValue) (Node, int, error) {
return t.putBatchIntoExtension(n, kv)
case *HashNode:
return t.putBatchIntoHash(n, kv)
case EmptyNode:
return t.putBatchIntoEmpty(kv)
default:
panic("invalid MPT node type")
}
@ -84,11 +86,9 @@ func (t *Trie) mergeExtension(prefix []byte, sub Node) (Node, error) {
sn.invalidateCache()
t.addRef(sn.Hash(), sn.bytes)
return sn, nil
case *HashNode:
if sn.IsEmpty() {
case EmptyNode:
return sn, nil
}
case *HashNode:
n, err := t.getFromStore(sn.Hash())
if err != nil {
return sn, err
@ -141,8 +141,8 @@ func (t *Trie) putBatchIntoExtensionNoPrefix(key []byte, next Node, kv []keyValu
}
func isEmpty(n Node) bool {
hn, ok := n.(*HashNode)
return ok && hn.IsEmpty()
_, ok := n.(EmptyNode)
return ok
}
// addToBranch puts items into the branch node assuming b is not yet in trie.
@ -190,7 +190,7 @@ func (t *Trie) stripBranch(b *BranchNode) (Node, error) {
}
switch {
case n == 0:
return new(HashNode), nil
return EmptyNode{}, nil
case n == 1:
if lastIndex != lastChild {
return t.mergeExtension([]byte{lastIndex}, b.Children[lastIndex])
@ -219,12 +219,13 @@ func (t *Trie) iterateBatch(kv []keyValue, f func(c byte, kv []keyValue) (int, e
return n, nil
}
func (t *Trie) putBatchIntoHash(curr *HashNode, kv []keyValue) (Node, int, error) {
if curr.IsEmpty() {
func (t *Trie) putBatchIntoEmpty(kv []keyValue) (Node, int, error) {
common := lcpMany(kv)
stripPrefix(len(common), kv)
return t.newSubTrieMany(common, kv, nil)
}
func (t *Trie) putBatchIntoHash(curr *HashNode, kv []keyValue) (Node, int, error) {
result, err := t.getFromStore(curr.hash)
if err != nil {
return curr, 0, err
@ -242,7 +243,7 @@ func (t *Trie) newSubTrieMany(prefix []byte, kv []keyValue, value []byte) (Node,
if len(kv[0].key) == 0 {
if len(kv[0].value) == 0 {
if len(kv) == 1 {
return new(HashNode), 1, nil
return EmptyNode{}, 1, nil
}
node, n, err := t.newSubTrieMany(prefix, kv[1:], nil)
return node, n + 1, err


@ -68,8 +68,8 @@ func testPut(t *testing.T, ps pairs, tr1, tr2 *Trie) {
func TestTrie_PutBatchLeaf(t *testing.T) {
prepareLeaf := func(t *testing.T) (*Trie, *Trie) {
tr1 := NewTrie(new(HashNode), false, newTestStore())
tr2 := NewTrie(new(HashNode), false, newTestStore())
tr1 := NewTrie(EmptyNode{}, false, newTestStore())
tr2 := NewTrie(EmptyNode{}, false, newTestStore())
require.NoError(t, tr1.Put([]byte{0}, []byte("value")))
require.NoError(t, tr2.Put([]byte{0}, []byte("value")))
return tr1, tr2
@ -97,8 +97,8 @@ func TestTrie_PutBatchLeaf(t *testing.T) {
func TestTrie_PutBatchExtension(t *testing.T) {
prepareExtension := func(t *testing.T) (*Trie, *Trie) {
tr1 := NewTrie(new(HashNode), false, newTestStore())
tr2 := NewTrie(new(HashNode), false, newTestStore())
tr1 := NewTrie(EmptyNode{}, false, newTestStore())
tr2 := NewTrie(EmptyNode{}, false, newTestStore())
require.NoError(t, tr1.Put([]byte{1, 2}, []byte("value1")))
require.NoError(t, tr2.Put([]byte{1, 2}, []byte("value1")))
return tr1, tr2
@ -144,8 +144,8 @@ func TestTrie_PutBatchExtension(t *testing.T) {
func TestTrie_PutBatchBranch(t *testing.T) {
prepareBranch := func(t *testing.T) (*Trie, *Trie) {
tr1 := NewTrie(new(HashNode), false, newTestStore())
tr2 := NewTrie(new(HashNode), false, newTestStore())
tr1 := NewTrie(EmptyNode{}, false, newTestStore())
tr2 := NewTrie(EmptyNode{}, false, newTestStore())
require.NoError(t, tr1.Put([]byte{0x00, 2}, []byte("value1")))
require.NoError(t, tr2.Put([]byte{0x00, 2}, []byte("value1")))
require.NoError(t, tr1.Put([]byte{0x10, 3}, []byte("value2")))
@ -175,8 +175,8 @@ func TestTrie_PutBatchBranch(t *testing.T) {
require.IsType(t, (*ExtensionNode)(nil), tr1.root)
})
t.Run("non-empty child is last node", func(t *testing.T) {
tr1 := NewTrie(new(HashNode), false, newTestStore())
tr2 := NewTrie(new(HashNode), false, newTestStore())
tr1 := NewTrie(EmptyNode{}, false, newTestStore())
tr2 := NewTrie(EmptyNode{}, false, newTestStore())
require.NoError(t, tr1.Put([]byte{0x00, 2}, []byte("value1")))
require.NoError(t, tr2.Put([]byte{0x00, 2}, []byte("value1")))
require.NoError(t, tr1.Put([]byte{0x00}, []byte("value2")))
@ -222,8 +222,8 @@ func TestTrie_PutBatchBranch(t *testing.T) {
func TestTrie_PutBatchHash(t *testing.T) {
prepareHash := func(t *testing.T) (*Trie, *Trie) {
tr1 := NewTrie(new(HashNode), false, newTestStore())
tr2 := NewTrie(new(HashNode), false, newTestStore())
tr1 := NewTrie(EmptyNode{}, false, newTestStore())
tr2 := NewTrie(EmptyNode{}, false, newTestStore())
require.NoError(t, tr1.Put([]byte{0x10}, []byte("value1")))
require.NoError(t, tr2.Put([]byte{0x10}, []byte("value1")))
require.NoError(t, tr1.Put([]byte{0x20}, []byte("value2")))
@ -257,8 +257,8 @@ func TestTrie_PutBatchHash(t *testing.T) {
func TestTrie_PutBatchEmpty(t *testing.T) {
t.Run("good", func(t *testing.T) {
tr1 := NewTrie(new(HashNode), false, newTestStore())
tr2 := NewTrie(new(HashNode), false, newTestStore())
tr1 := NewTrie(EmptyNode{}, false, newTestStore())
tr2 := NewTrie(EmptyNode{}, false, newTestStore())
var ps = pairs{
{[]byte{0}, []byte("value0")},
{[]byte{1}, []byte("value1")},
@ -273,15 +273,15 @@ func TestTrie_PutBatchEmpty(t *testing.T) {
{[]byte{2}, nil},
{[]byte{3}, []byte("replace3")},
}
tr1 := NewTrie(new(HashNode), false, newTestStore())
tr2 := NewTrie(new(HashNode), false, newTestStore())
tr1 := NewTrie(EmptyNode{}, false, newTestStore())
tr2 := NewTrie(EmptyNode{}, false, newTestStore())
testIncompletePut(t, ps, 4, tr1, tr2)
})
}
// For the sake of coverage.
func TestTrie_InvalidNodeType(t *testing.T) {
tr := NewTrie(new(HashNode), false, newTestStore())
tr := NewTrie(EmptyNode{}, false, newTestStore())
var b Batch
b.Add([]byte{1}, []byte("value"))
tr.root = Node(nil)
@ -289,8 +289,8 @@ func TestTrie_InvalidNodeType(t *testing.T) {
}
func TestTrie_PutBatch(t *testing.T) {
tr1 := NewTrie(new(HashNode), false, newTestStore())
tr2 := NewTrie(new(HashNode), false, newTestStore())
tr1 := NewTrie(EmptyNode{}, false, newTestStore())
tr2 := NewTrie(EmptyNode{}, false, newTestStore())
var ps = pairs{
{[]byte{1}, []byte{1}},
{[]byte{2}, []byte{3}},
@ -312,11 +312,10 @@ var _ = printNode
// `spew.Dump()`.
func printNode(prefix string, n Node) {
switch tn := n.(type) {
case *HashNode:
if tn.IsEmpty() {
case EmptyNode:
fmt.Printf("%s empty\n", prefix)
return
}
case *HashNode:
fmt.Printf("%s %s\n", prefix, tn.Hash().StringLE())
case *BranchNode:
for i, c := range tn.Children {


@ -27,7 +27,7 @@ var _ Node = (*BranchNode)(nil)
func NewBranchNode() *BranchNode {
b := new(BranchNode)
for i := 0; i < childrenCount; i++ {
b.Children[i] = new(HashNode)
b.Children[i] = EmptyNode{}
}
return b
}
@ -45,19 +45,24 @@ func (b *BranchNode) Bytes() []byte {
return b.getBytes(b)
}
// Size implements Node interface.
func (b *BranchNode) Size() int {
sz := childrenCount
for i := range b.Children {
if !isEmpty(b.Children[i]) {
sz += util.Uint256Size
}
}
return sz
}
// EncodeBinary implements io.Serializable.
func (b *BranchNode) EncodeBinary(w *io.BinWriter) {
for i := 0; i < childrenCount; i++ {
b.Children[i].EncodeBinaryAsChild(w)
encodeBinaryAsChild(b.Children[i], w)
}
}
// EncodeBinaryAsChild implements BaseNode interface.
func (b *BranchNode) EncodeBinaryAsChild(w *io.BinWriter) {
n := &NodeObject{Node: NewHashNode(b.Hash())} // with type
n.EncodeBinary(w)
}
// DecodeBinary implements io.Serializable.
func (b *BranchNode) DecodeBinary(r *io.BinReader) {
for i := 0; i < childrenCount; i++ {
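BranchNode.Size() above is what makes exact preallocation possible for branches: every child costs one marker byte, and every non-empty child additionally costs a 32-byte hash. A small sketch of that arithmetic, assuming the package's childrenCount of 17 (16 nibble children plus the terminating value child) and util.Uint256Size of 32:

package main

import "fmt"

const (
	childrenCount = 17 // 16 nibble children plus the terminating value child
	uint256Size   = 32 // util.Uint256Size
)

// branchSize mirrors BranchNode.Size() from the diff: one marker byte per
// child plus a full hash for every non-empty child.
func branchSize(nonEmptyChildren int) int {
	return childrenCount + nonEmptyChildren*uint256Size
}

func main() {
	fmt.Println(branchSize(0), branchSize(2)) // 17 and 81 bytes of payload
}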

pkg/core/mpt/empty.go Normal file

@ -0,0 +1,56 @@
package mpt
import (
"encoding/json"
"errors"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/util"
)
// EmptyNode represents empty node.
type EmptyNode struct{}
// DecodeBinary implements io.Serializable interface.
func (e EmptyNode) DecodeBinary(*io.BinReader) {
}
// EncodeBinary implements io.Serializable interface.
func (e EmptyNode) EncodeBinary(*io.BinWriter) {
}
// Size implements Node interface.
func (EmptyNode) Size() int { return 0 }
// MarshalJSON implements Node interface.
func (e EmptyNode) MarshalJSON() ([]byte, error) {
return []byte(`{}`), nil
}
// UnmarshalJSON implements Node interface.
func (e EmptyNode) UnmarshalJSON(bytes []byte) error {
var m map[string]interface{}
err := json.Unmarshal(bytes, &m)
if err != nil {
return err
}
if len(m) != 0 {
return errors.New("expected empty node")
}
return nil
}
// Hash implements Node interface.
func (e EmptyNode) Hash() util.Uint256 {
panic("can't get hash of an EmptyNode")
}
// Type implements Node interface.
func (e EmptyNode) Type() NodeType {
return EmptyT
}
// Bytes implements Node interface.
func (e EmptyNode) Bytes() []byte {
return nil
}
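The new empty.go above splits the old "HashNode with no hash" sentinel into a dedicated zero-sized EmptyNode value, so emptiness checks become a plain type assertion instead of an IsEmpty() flag on HashNode. A rough standalone sketch of the idea (the Node interface and type byte values here are simplified placeholders, not the package's real definitions):

package main

import "fmt"

// Node is a trimmed-down stand-in for the mpt.Node interface.
type Node interface {
	Type() byte
}

// EmptyNode carries no state at all, so passing it around allocates nothing.
type EmptyNode struct{}

func (EmptyNode) Type() byte { return 0x04 } // illustrative value, not the real EmptyT

// HashNode now always holds a valid hash; the "empty" state is gone.
type HashNode struct{ hash [32]byte }

func (*HashNode) Type() byte { return 0x03 } // illustrative value, not the real HashT

// isEmpty is the same one-line type assertion as in batch.go above.
func isEmpty(n Node) bool {
	_, ok := n.(EmptyNode)
	return ok
}

func main() {
	fmt.Println(isEmpty(EmptyNode{}), isEmpty(&HashNode{})) // true false
}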


@ -69,13 +69,13 @@ func (e *ExtensionNode) DecodeBinary(r *io.BinReader) {
// EncodeBinary implements io.Serializable.
func (e ExtensionNode) EncodeBinary(w *io.BinWriter) {
w.WriteVarBytes(e.key)
e.next.EncodeBinaryAsChild(w)
encodeBinaryAsChild(e.next, w)
}
// EncodeBinaryAsChild implements BaseNode interface.
func (e *ExtensionNode) EncodeBinaryAsChild(w *io.BinWriter) {
n := &NodeObject{Node: NewHashNode(e.Hash())} // with type
n.EncodeBinary(w)
// Size implements Node interface.
func (e *ExtensionNode) Size() int {
return io.GetVarSize(len(e.key)) + len(e.key) +
1 + util.Uint256Size // e.next is never empty
}
// MarshalJSON implements json.Marshaler.


@ -27,6 +27,11 @@ func NewHashNode(h util.Uint256) *HashNode {
// Type implements Node interface.
func (h *HashNode) Type() NodeType { return HashT }
// Size implements Node interface.
func (h *HashNode) Size() int {
return util.Uint256Size
}
// Hash implements Node interface.
func (h *HashNode) Hash() util.Uint256 {
if !h.hashValid {
@ -35,9 +40,6 @@ func (h *HashNode) Hash() util.Uint256 {
return h.hash
}
// IsEmpty returns true if h is an empty node i.e. contains no hash.
func (h *HashNode) IsEmpty() bool { return !h.hashValid }
// Bytes returns serialized HashNode.
func (h *HashNode) Bytes() []byte {
return h.getBytes(h)
@ -58,17 +60,8 @@ func (h HashNode) EncodeBinary(w *io.BinWriter) {
w.WriteBytes(h.hash[:])
}
// EncodeBinaryAsChild implements BaseNode interface.
func (h *HashNode) EncodeBinaryAsChild(w *io.BinWriter) {
no := &NodeObject{Node: h} // with type
no.EncodeBinary(w)
}
// MarshalJSON implements json.Marshaler.
func (h *HashNode) MarshalJSON() ([]byte, error) {
if !h.hashValid {
return []byte(`{}`), nil
}
return []byte(`{"hash":"` + h.hash.StringLE() + `"}`), nil
}


@ -56,10 +56,9 @@ func (n LeafNode) EncodeBinary(w *io.BinWriter) {
w.WriteVarBytes(n.value)
}
// EncodeBinaryAsChild implements BaseNode interface.
func (n *LeafNode) EncodeBinaryAsChild(w *io.BinWriter) {
no := &NodeObject{Node: NewHashNode(n.Hash())} // with type
no.EncodeBinary(w)
// Size implements Node interface.
func (n *LeafNode) Size() int {
return io.GetVarSize(len(n.value)) + len(n.value)
}
// MarshalJSON implements json.Marshaler.


@ -33,6 +33,7 @@ type Node interface {
io.Serializable
json.Marshaler
json.Unmarshaler
Size() int
BaseNodeIface
}
@ -68,7 +69,7 @@ func (n *NodeObject) UnmarshalJSON(data []byte) error {
switch len(m) {
case 0:
n.Node = new(HashNode)
n.Node = EmptyNode{}
case 1:
if v, ok := m["hash"]; ok {
var h util.Uint256


@ -27,6 +27,7 @@ func getTestFuncEncode(ok bool, expected, actual Node) func(t *testing.T) {
require.NoError(t, err)
require.Equal(t, expected.Type(), actual.Type())
require.Equal(t, expected.Hash(), actual.Hash())
require.Equal(t, 1+expected.Size(), len(expected.Bytes()))
})
t.Run("JSON", func(t *testing.T) {
bs, err := json.Marshal(expected)
@ -78,9 +79,6 @@ func TestNode_Serializable(t *testing.T) {
t.Run("Raw", getTestFuncEncode(true, h, new(HashNode)))
t.Run("WithType", getTestFuncEncode(true, &NodeObject{h}, new(NodeObject)))
})
t.Run("Empty", func(t *testing.T) { // compare nodes, not hashes
testserdes.EncodeDecodeBinary(t, new(HashNode), new(HashNode))
})
t.Run("InvalidSize", func(t *testing.T) {
buf := io.NewBufBinWriter()
buf.BinWriter.WriteBytes(make([]byte, 13))
@ -111,7 +109,7 @@ func TestInvalidJSON(t *testing.T) {
t.Run("InvalidChildrenCount", func(t *testing.T) {
var cs [childrenCount + 1]Node
for i := range cs {
cs[i] = new(HashNode)
cs[i] = EmptyNode{}
}
data, err := json.Marshal(cs)
require.NoError(t, err)


@ -49,14 +49,12 @@ func (t *Trie) getProof(curr Node, path []byte, proofs *[][]byte) (Node, error)
return n, nil
}
case *HashNode:
if !n.IsEmpty() {
r, err := t.getFromStore(n.Hash())
if err != nil {
return nil, err
}
return t.getProof(r, path, proofs)
}
}
return nil, ErrNotFound
}


@ -35,7 +35,7 @@ var ErrNotFound = errors.New("item not found")
// This also has the benefit, that every `Put` can be considered an atomic operation.
func NewTrie(root Node, enableRefCount bool, store *storage.MemCachedStore) *Trie {
if root == nil {
root = new(HashNode)
root = EmptyNode{}
}
return &Trie{
@ -75,12 +75,11 @@ func (t *Trie) getWithPath(curr Node, path []byte) (Node, []byte, error) {
}
n.Children[i] = r
return n, bs, nil
case EmptyNode:
case *HashNode:
if !n.IsEmpty() {
if r, err := t.getFromStore(n.hash); err == nil {
return t.getWithPath(r, path)
}
}
case *ExtensionNode:
if bytes.HasPrefix(path, n.key) {
r, bs, err := t.getWithPath(n.next, path[len(n.key):])
@ -187,14 +186,13 @@ func (t *Trie) putIntoExtension(curr *ExtensionNode, path []byte, val Node) (Nod
return b, nil
}
func (t *Trie) putIntoEmpty(path []byte, val Node) (Node, error) {
return t.newSubTrie(path, val, true), nil
}
// putIntoHash puts val to trie if current node is a HashNode.
// It returns Node if curr needs to be replaced and error if any.
func (t *Trie) putIntoHash(curr *HashNode, path []byte, val Node) (Node, error) {
if curr.IsEmpty() {
hn := t.newSubTrie(path, val, true)
return hn, nil
}
result, err := t.getFromStore(curr.hash)
if err != nil {
return nil, err
@ -227,6 +225,8 @@ func (t *Trie) putIntoNode(curr Node, path []byte, val Node) (Node, error) {
return t.putIntoExtension(n, path, val)
case *HashNode:
return t.putIntoHash(n, path, val)
case EmptyNode:
return t.putIntoEmpty(path, val)
default:
panic("invalid MPT node type")
}
@ -257,8 +257,7 @@ func (t *Trie) deleteFromBranch(b *BranchNode, path []byte) (Node, error) {
b.invalidateCache()
var count, index int
for i := range b.Children {
h, ok := b.Children[i].(*HashNode)
if !ok || !h.IsEmpty() {
if !isEmpty(b.Children[i]) {
index = i
count++
}
@ -307,10 +306,9 @@ func (t *Trie) deleteFromExtension(n *ExtensionNode, path []byte) (Node, error)
t.removeRef(nxt.Hash(), nxt.bytes)
n.key = append(n.key, nxt.key...)
n.next = nxt.next
case *HashNode:
if nxt.IsEmpty() {
case EmptyNode:
return nxt, nil
}
case *HashNode:
n.next = nxt
default:
n.next = r
@ -327,17 +325,16 @@ func (t *Trie) deleteFromNode(curr Node, path []byte) (Node, error) {
case *LeafNode:
if len(path) == 0 {
t.removeRef(curr.Hash(), curr.Bytes())
return new(HashNode), nil
return EmptyNode{}, nil
}
return curr, nil
case *BranchNode:
return t.deleteFromBranch(n, path)
case *ExtensionNode:
return t.deleteFromExtension(n, path)
case *HashNode:
if n.IsEmpty() {
case EmptyNode:
return n, nil
}
case *HashNode:
newNode, err := t.getFromStore(n.Hash())
if err != nil {
return nil, err
@ -350,7 +347,7 @@ func (t *Trie) deleteFromNode(curr Node, path []byte) (Node, error) {
// StateRoot returns root hash of t.
func (t *Trie) StateRoot() util.Uint256 {
if hn, ok := t.root.(*HashNode); ok && hn.IsEmpty() {
if isEmpty(t.root) {
return util.Uint256{}
}
return t.root.Hash()
@ -486,9 +483,11 @@ func (t *Trie) Collapse(depth int) {
}
func collapse(depth int, node Node) Node {
if _, ok := node.(*HashNode); ok {
switch node.(type) {
case *HashNode, EmptyNode:
return node
} else if depth == 0 {
}
if depth == 0 {
return NewHashNode(node.Hash())
}


@ -239,8 +239,7 @@ func isValid(curr Node) bool {
if !isValid(n.Children[i]) {
return false
}
hn, ok := n.Children[i].(*HashNode)
if !ok || !hn.IsEmpty() {
if !isEmpty(n.Children[i]) {
count++
}
}
@ -342,7 +341,7 @@ func testTrieDelete(t *testing.T, enableGC bool) {
})
require.NoError(t, tr.Delete([]byte{0xAB}))
require.True(t, tr.root.(*HashNode).IsEmpty())
require.IsType(t, EmptyNode{}, tr.root)
})
t.Run("MultipleKeys", func(t *testing.T) {
@ -505,12 +504,11 @@ func TestTrie_Collapse(t *testing.T) {
require.Equal(t, NewLeafNode([]byte("value")), tr.root)
})
t.Run("Hash", func(t *testing.T) {
t.Run("Empty", func(t *testing.T) {
tr := NewTrie(new(HashNode), false, newTestStore())
t.Run("EmptyNode", func(t *testing.T) {
tr := NewTrie(EmptyNode{}, false, newTestStore())
require.NotPanics(t, func() { tr.Collapse(1) })
hn, ok := tr.root.(*HashNode)
_, ok := tr.root.(EmptyNode)
require.True(t, ok)
require.True(t, hn.IsEmpty())
})
h := random.Uint256()


@ -61,7 +61,10 @@ func (aer *AppExecResult) EncodeBinary(w *io.BinWriter) {
for _, it := range aer.Stack {
stackitem.EncodeBinaryProtected(it, w)
}
w.WriteArray(aer.Events)
w.WriteVarUint(uint64(len(aer.Events)))
for i := range aer.Events {
aer.Events[i].EncodeBinary(w)
}
w.WriteVarBytes([]byte(aer.FaultException))
}
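AppExecResult.EncodeBinary above replaces w.WriteArray(aer.Events) with a length prefix plus an explicit loop, presumably because the generic WriteArray helper takes interface{} and has to box and iterate the slice generically, allocating on every call. A simplified standalone sketch of the explicit-loop pattern (types and encoding are placeholders, not the real NotificationEvent wire format):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// event stands in for NotificationEvent; the real type writes itself through
// neo-go's io.BinWriter rather than encoding/binary.
type event struct{ id uint32 }

func (e event) encodeTo(buf *bytes.Buffer) {
	var b [4]byte
	binary.LittleEndian.PutUint32(b[:], e.id)
	buf.Write(b[:])
}

// encodeEvents writes a length prefix and then each concrete element directly,
// so nothing is boxed into interface{} the way a generic WriteArray would.
func encodeEvents(buf *bytes.Buffer, events []event) {
	buf.WriteByte(byte(len(events))) // the real code writes a var-uint here
	for i := range events {
		events[i].encodeTo(buf)
	}
}

func main() {
	var buf bytes.Buffer
	encodeEvents(&buf, []event{{1}, {2}})
	fmt.Printf("%x\n", buf.Bytes())
}

The Transaction encoder further down applies the same replacement to Signers, Attributes and Scripts.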


@ -13,6 +13,31 @@ import (
"github.com/stretchr/testify/require"
)
func BenchmarkAppExecResult_EncodeBinary(b *testing.B) {
aer := &AppExecResult{
Container: random.Uint256(),
Execution: Execution{
Trigger: trigger.Application,
VMState: vm.HaltState,
GasConsumed: 12345,
Stack: []stackitem.Item{},
Events: []NotificationEvent{{
ScriptHash: random.Uint160(),
Name: "Event",
Item: stackitem.NewArray([]stackitem.Item{stackitem.NewBool(true)}),
}},
},
}
w := io.NewBufBinWriter()
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
w.Reset()
aer.EncodeBinary(w.BinWriter)
}
}
func TestEncodeDecodeNotificationEvent(t *testing.T) {
event := &NotificationEvent{
ScriptHash: random.Uint160(),


@ -53,3 +53,14 @@ func BenchmarkDecodeFromBytes(t *testing.B) {
require.NoError(t, err)
}
}
func BenchmarkTransaction_Bytes(b *testing.B) {
tx, err := NewTransactionFromBytes(benchTx)
require.NoError(b, err)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = tx.Bytes()
}
}


@ -203,7 +203,10 @@ func (t *Transaction) DecodeBinary(br *io.BinReader) {
// EncodeBinary implements Serializable interface.
func (t *Transaction) EncodeBinary(bw *io.BinWriter) {
t.encodeHashableFields(bw)
bw.WriteArray(t.Scripts)
bw.WriteVarUint(uint64(len(t.Scripts)))
for i := range t.Scripts {
t.Scripts[i].EncodeBinary(bw)
}
}
// encodeHashableFields encodes the fields that are not used for
@ -218,8 +221,14 @@ func (t *Transaction) encodeHashableFields(bw *io.BinWriter) {
bw.WriteU64LE(uint64(t.SystemFee))
bw.WriteU64LE(uint64(t.NetworkFee))
bw.WriteU32LE(t.ValidUntilBlock)
bw.WriteArray(t.Signers)
bw.WriteArray(t.Attributes)
bw.WriteVarUint(uint64(len(t.Signers)))
for i := range t.Signers {
t.Signers[i].EncodeBinary(bw)
}
bw.WriteVarUint(uint64(len(t.Attributes)))
for i := range t.Attributes {
t.Attributes[i].EncodeBinary(bw)
}
bw.WriteVarBytes(t.Script)
}


@ -112,8 +112,11 @@ func (w *serContext) serialize(item Item) error {
}
case *BigInteger:
w.data = append(w.data, byte(IntegerT))
data := bigint.ToBytes(t.Value().(*big.Int))
w.appendVarUint(uint64(len(data)))
v := t.Value().(*big.Int)
ln := len(w.data)
w.data = append(w.data, 0)
data := bigint.ToPreallocatedBytes(v, w.data[len(w.data):])
w.data[ln] = byte(len(data))
w.data = append(w.data, data...)
case *Interop:
if w.allowInvalid {
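The BigInteger case above avoids the temporary slice that bigint.ToBytes used to return: a single length byte is reserved in w.data, bigint.ToPreallocatedBytes serializes the value straight into the slice's spare capacity, and the reserved byte is patched afterwards (the VM bounds integer size, so one length byte suffices). A simplified sketch of the reserve-then-backfill trick, using a plain payload instead of a big.Int:

package main

import "fmt"

// appendWithLen demonstrates the reserve-then-backfill trick from the diff:
// reserve one length byte, append the payload in place, then patch the length.
// No intermediate slice is allocated for the payload.
func appendWithLen(dst, payload []byte) []byte {
	ln := len(dst)
	dst = append(dst, 0) // placeholder for the length byte
	dst = append(dst, payload...)
	dst[ln] = byte(len(dst) - ln - 1)
	return dst
}

func main() {
	out := appendWithLen([]byte{0x21 /* hypothetical type marker */}, []byte{0x39, 0x05})
	fmt.Printf("%x\n", out) // 21023905
}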