Merge pull request #2117 from nspcc-dev/io-grow

Some io package improvements

Commit 1e0c70ecb0

6 changed files with 109 additions and 27 deletions
@@ -1,7 +1,6 @@
 package mpt
 
 import (
-	"bytes"
 	"fmt"
 
 	"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
@@ -64,10 +63,10 @@ func (b *BaseNode) updateHash(n Node) {
 
 // updateCache updates hash and bytes fields for this BaseNode.
 func (b *BaseNode) updateBytes(n Node) {
-	buf := bytes.NewBuffer(make([]byte, 0, 1+n.Size()))
-	bw := io.NewBinWriterFromIO(buf)
-	encodeNodeWithType(n, bw)
-	b.bytes = buf.Bytes()
+	bw := io.NewBufBinWriter()
+	bw.Grow(1 + n.Size())
+	encodeNodeWithType(n, bw.BinWriter)
+	b.bytes = bw.Bytes()
 	b.bytesValid = true
 }
 
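For context, the new updateBytes follows a simple pattern: create a buffer-backed writer, Grow it to the expected serialized size, write, then take the bytes. Below is a minimal sketch of that pattern, assuming only the pkg/io API visible in this diff (NewBufBinWriter, the new Grow, WriteB/WriteU64LE, Err and Bytes); the 1 + 8 size and the values written are purely illustrative.

package main

import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/io"
)

func main() {
	bw := io.NewBufBinWriter()
	bw.Grow(1 + 8)    // reserve the expected size up front so the buffer doesn't reallocate
	bw.WriteB(0x01)   // hypothetical 1-byte tag
	bw.WriteU64LE(42) // hypothetical 8-byte value
	if bw.Err != nil {
		panic(bw.Err)
	}
	fmt.Printf("% x\n", bw.Bytes())
}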
pkg/core/mpt/bench_test.go (new file, 39 additions)
@@ -0,0 +1,39 @@
+package mpt
+
+import (
+	"testing"
+
+	"github.com/nspcc-dev/neo-go/internal/random"
+)
+
+func benchmarkBytes(b *testing.B, n Node) {
+	inv := n.(interface{ invalidateCache() })
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		inv.invalidateCache()
+		_ = n.Bytes()
+	}
+}
+
+func BenchmarkBytes(b *testing.B) {
+	b.Run("extension", func(b *testing.B) {
+		n := NewExtensionNode(random.Bytes(10), NewLeafNode(random.Bytes(10)))
+		benchmarkBytes(b, n)
+	})
+	b.Run("leaf", func(b *testing.B) {
+		n := NewLeafNode(make([]byte, 15))
+		benchmarkBytes(b, n)
+	})
+	b.Run("hash", func(b *testing.B) {
+		n := NewHashNode(random.Uint256())
+		benchmarkBytes(b, n)
+	})
+	b.Run("branch", func(b *testing.B) {
+		n := NewBranchNode()
+		n.Children[0] = NewLeafNode(random.Bytes(10))
+		n.Children[4] = NewLeafNode(random.Bytes(10))
+		n.Children[7] = NewLeafNode(random.Bytes(10))
+		n.Children[8] = NewLeafNode(random.Bytes(10))
+	})
+}
@@ -64,3 +64,14 @@ func BenchmarkTransaction_Bytes(b *testing.B) {
 		_ = tx.Bytes()
 	}
 }
+
+func BenchmarkGetVarSize(b *testing.B) {
+	tx, err := NewTransactionFromBytes(benchTx)
+	require.NoError(b, err)
+
+	b.ResetTimer()
+	b.ReportAllocs()
+	for i := 0; i < b.N; i++ {
+		_ = io.GetVarSize(tx)
+	}
+}
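io.GetVarSize, benchmarked above, reports how many bytes a value would occupy once serialized. As a rough, hedged illustration of what it returns (assuming the usual NEO variable-length encoding where lengths below 0xfd take a single prefix byte), a five-byte string should come out as 1 + 5 = 6:

package main

import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/io"
)

func main() {
	// For a string, GetVarSize is assumed to count the varint length prefix
	// plus the raw bytes, i.e. 1 + 5 here.
	fmt.Println(io.GetVarSize("hello"))
}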
@@ -1,6 +1,7 @@
 package io
 
 import (
+	"bytes"
 	"encoding/binary"
 	"io"
 	"reflect"
@@ -11,56 +12,47 @@ import (
 // from a struct with many fields.
 type BinWriter struct {
 	w   io.Writer
-	uv  []byte
-	u64 []byte
-	u32 []byte
-	u16 []byte
-	u8  []byte
 	Err error
+	uv  [9]byte
 }
 
 // NewBinWriterFromIO makes a BinWriter from io.Writer.
 func NewBinWriterFromIO(iow io.Writer) *BinWriter {
-	uv := make([]byte, 9)
-	u64 := uv[:8]
-	u32 := u64[:4]
-	u16 := u64[:2]
-	u8 := u64[:1]
-	return &BinWriter{w: iow, uv: uv, u64: u64, u32: u32, u16: u16, u8: u8}
+	return &BinWriter{w: iow}
 }
 
 // WriteU64LE writes an uint64 value into the underlying io.Writer in
 // little-endian format.
 func (w *BinWriter) WriteU64LE(u64 uint64) {
-	binary.LittleEndian.PutUint64(w.u64, u64)
-	w.WriteBytes(w.u64)
+	binary.LittleEndian.PutUint64(w.uv[:8], u64)
+	w.WriteBytes(w.uv[:8])
 }
 
 // WriteU32LE writes an uint32 value into the underlying io.Writer in
 // little-endian format.
 func (w *BinWriter) WriteU32LE(u32 uint32) {
-	binary.LittleEndian.PutUint32(w.u32, u32)
-	w.WriteBytes(w.u32)
+	binary.LittleEndian.PutUint32(w.uv[:4], u32)
+	w.WriteBytes(w.uv[:4])
 }
 
 // WriteU16LE writes an uint16 value into the underlying io.Writer in
 // little-endian format.
 func (w *BinWriter) WriteU16LE(u16 uint16) {
-	binary.LittleEndian.PutUint16(w.u16, u16)
-	w.WriteBytes(w.u16)
+	binary.LittleEndian.PutUint16(w.uv[:2], u16)
+	w.WriteBytes(w.uv[:2])
 }
 
 // WriteU16BE writes an uint16 value into the underlying io.Writer in
 // big-endian format.
 func (w *BinWriter) WriteU16BE(u16 uint16) {
-	binary.BigEndian.PutUint16(w.u16, u16)
-	w.WriteBytes(w.u16)
+	binary.BigEndian.PutUint16(w.uv[:2], u16)
+	w.WriteBytes(w.uv[:2])
 }
 
 // WriteB writes a byte into the underlying io.Writer.
 func (w *BinWriter) WriteB(u8 byte) {
-	w.u8[0] = u8
-	w.WriteBytes(w.u8)
+	w.uv[0] = u8
+	w.WriteBytes(w.uv[:1])
 }
 
 // WriteBool writes a boolean value into the underlying io.Writer encoded as
@@ -108,7 +100,7 @@ func (w *BinWriter) WriteVarUint(val uint64) {
 		return
 	}
 
-	n := PutVarUint(w.uv, val)
+	n := PutVarUint(w.uv[:], val)
 	w.WriteBytes(w.uv[:n])
 }
 
@@ -153,3 +145,11 @@ func (w *BinWriter) WriteVarBytes(b []byte) {
 func (w *BinWriter) WriteString(s string) {
 	w.WriteVarBytes([]byte(s))
 }
+
+// Grow tries to increase underlying buffer capacity so that at least n bytes
+// can be written without reallocation. If the writer is not a buffer, this is a no-op.
+func (w *BinWriter) Grow(n int) {
+	if b, ok := w.w.(*bytes.Buffer); ok {
+		b.Grow(n)
+	}
+}
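Two ideas drive this rewrite: a single [9]byte scratch array replaces five pre-sliced byte slices (so constructing a BinWriter no longer allocates), and Grow only does anything when the underlying writer really is a *bytes.Buffer. The following standalone sketch uses nothing but the standard library and a made-up scratchWriter type (it is not the neo-go implementation) to show the same scratch-array technique in isolation:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// scratchWriter mimics the idea above: one fixed array covers every
// fixed-width write (1/2/4/8 bytes, plus a 9-byte worst-case varint)
// by re-slicing it per call, so no per-write allocations happen.
type scratchWriter struct {
	w  *bytes.Buffer
	uv [9]byte
}

func (s *scratchWriter) writeU16LE(v uint16) {
	binary.LittleEndian.PutUint16(s.uv[:2], v)
	s.w.Write(s.uv[:2])
}

func (s *scratchWriter) writeU64LE(v uint64) {
	binary.LittleEndian.PutUint64(s.uv[:8], v)
	s.w.Write(s.uv[:8])
}

func main() {
	buf := new(bytes.Buffer)
	buf.Grow(2 + 8) // preallocate, mirroring what BinWriter.Grow does for buffer-backed writers
	s := &scratchWriter{w: buf}
	s.writeU16LE(0xBEEF)
	s.writeU64LE(42)
	fmt.Printf("% x\n", buf.Bytes())
}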
@@ -182,6 +182,11 @@ func (m *Message) Encode(br *io.BinWriter) error {
 	if err := m.tryCompressPayload(); err != nil {
 		return err
 	}
+	growSize := 2 + 1 // header + empty payload
+	if m.compressedPayload != nil {
+		growSize += 8 + len(m.compressedPayload) // varint + byte-slice
+	}
+	br.Grow(growSize)
 	br.WriteB(byte(m.Flags))
 	br.WriteB(byte(m.Command))
 	if m.compressedPayload != nil {
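The 2 + 1 and 8 + len(...) numbers above are a conservative size estimate: flags and command take one byte each, and a NEO-style varint length prefix never exceeds 9 bytes, so the 8 added here plus the 1 already counted for an empty payload cover the worst case. An illustrative helper (not part of this PR, names are hypothetical) that computes the exact prefix size could look like this:

package main

import "fmt"

// varUintSize returns the encoded size of a NEO-style variable-length
// integer: 1, 3, 5 or 9 bytes depending on the value.
func varUintSize(value uint64) int {
	switch {
	case value < 0xfd:
		return 1
	case value <= 0xffff:
		return 3
	case value <= 0xffffffff:
		return 5
	default:
		return 9
	}
}

func main() {
	payloadLen := 300
	// flags + command + length prefix + payload bytes
	growSize := 2 + varUintSize(uint64(payloadLen)) + payloadLen
	fmt.Println(growSize)
}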
@@ -52,6 +52,34 @@ func TestEncodeDecodeVersion(t *testing.T) {
 	require.NotEqual(t, len(expected.compressedPayload), len(uncompressed))
 }
 
+func BenchmarkMessageBytes(b *testing.B) {
+	// shouldn't try to compress headers payload
+	ep := &payload.Extensible{
+		Category:        "consensus",
+		ValidBlockStart: rand.Uint32(),
+		ValidBlockEnd:   rand.Uint32(),
+		Sender:          util.Uint160{},
+		Data:            make([]byte, 300),
+		Witness: transaction.Witness{
+			InvocationScript:   make([]byte, 33),
+			VerificationScript: make([]byte, 40),
+		},
+	}
+	random.Fill(ep.Data)
+	random.Fill(ep.Witness.InvocationScript)
+	random.Fill(ep.Witness.VerificationScript)
+	msg := NewMessage(CMDExtensible, ep)
+
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		_, err := msg.Bytes()
+		if err != nil {
+			b.FailNow()
+		}
+	}
+}
+
 func TestEncodeDecodeHeaders(t *testing.T) {
 	// shouldn't try to compress headers payload
 	headers := &payload.Headers{Hdrs: make([]*block.Header, CompressionMinSize)}