From 26339c75dc00819e12b5e7677ba1b1fde8f264e4 Mon Sep 17 00:00:00 2001 From: Roman Khimov Date: Thu, 10 Sep 2020 14:43:24 +0300 Subject: [PATCH 1/6] vm/core: drop old key caching system Obsoleted by f5f58a7e91774fad2ce2c65d0f288478d4bb3a4b. --- pkg/core/blockchain.go | 41 +++++++------------------------------ pkg/core/blockchain_test.go | 16 +++++++-------- pkg/vm/vm.go | 39 ++++++++--------------------------- pkg/vm/vm_test.go | 20 ------------------ 4 files changed, 23 insertions(+), 93 deletions(-) diff --git a/pkg/core/blockchain.go b/pkg/core/blockchain.go index 6c577c0de..cb2cb67fc 100644 --- a/pkg/core/blockchain.go +++ b/pkg/core/blockchain.go @@ -115,11 +115,6 @@ type Blockchain struct { memPool *mempool.Pool - // This lock protects concurrent access to keyCache. - keyCacheLock sync.RWMutex - // cache for block verification keys. - keyCache map[util.Uint160]map[string]*keys.PublicKey - sbCommittee keys.PublicKeys log *zap.Logger @@ -169,7 +164,6 @@ func NewBlockchain(s storage.Store, cfg config.ProtocolConfiguration, log *zap.L stopCh: make(chan struct{}), runToExitCh: make(chan struct{}), memPool: mempool.New(cfg.MemPoolSize), - keyCache: make(map[util.Uint160]map[string]*keys.PublicKey), sbCommittee: committee, log: log, events: make(chan bcEvent), @@ -771,8 +765,8 @@ func (bc *Blockchain) processNEP5Transfer(cache *dao.Cached, h util.Uint256, b * if nativeContract != nil { id = nativeContract.Metadata().ContractID } else { - assetContract := bc.GetContractState(sc) - if assetContract == nil { + assetContract, err := cache.GetContractState(sc) + if err != nil { return } id = assetContract.ID @@ -1482,7 +1476,7 @@ var ( ) // initVerificationVM initializes VM for witness check. -func initVerificationVM(ic *interop.Context, hash util.Uint160, witness *transaction.Witness, keyCache map[string]*keys.PublicKey) error { +func initVerificationVM(ic *interop.Context, hash util.Uint160, witness *transaction.Witness) error { var offset int var initMD *manifest.Method verification := witness.VerificationScript @@ -1511,9 +1505,6 @@ func initVerificationVM(ic *interop.Context, hash util.Uint160, witness *transac v.Call(v.Context(), initMD.Offset) } v.LoadScript(witness.InvocationScript) - if keyCache != nil { - v.SetPublicKeys(keyCache) - } return nil } @@ -1521,11 +1512,11 @@ func initVerificationVM(ic *interop.Context, hash util.Uint160, witness *transac func (bc *Blockchain) VerifyWitness(h util.Uint160, c crypto.Verifiable, w *transaction.Witness, gas int64) error { ic := bc.newInteropContext(trigger.Verification, bc.dao, nil, nil) ic.Container = c - return bc.verifyHashAgainstScript(h, w, ic, true, gas) + return bc.verifyHashAgainstScript(h, w, ic, gas) } // verifyHashAgainstScript verifies given hash against the given witness. 
-func (bc *Blockchain) verifyHashAgainstScript(hash util.Uint160, witness *transaction.Witness, interopCtx *interop.Context, useKeys bool, gas int64) error { +func (bc *Blockchain) verifyHashAgainstScript(hash util.Uint160, witness *transaction.Witness, interopCtx *interop.Context, gas int64) error { gasPolicy := bc.contracts.Policy.GetMaxVerificationGas(interopCtx.DAO) if gas > gasPolicy { gas = gasPolicy @@ -1534,15 +1525,7 @@ func (bc *Blockchain) verifyHashAgainstScript(hash util.Uint160, witness *transa vm := interopCtx.SpawnVM() vm.SetPriceGetter(getPrice) vm.GasLimit = gas - var keyCache map[string]*keys.PublicKey - if useKeys { - bc.keyCacheLock.RLock() - if bc.keyCache[hash] != nil { - keyCache = bc.keyCache[hash] - } - bc.keyCacheLock.RUnlock() - } - if err := initVerificationVM(interopCtx, hash, witness, keyCache); err != nil { + if err := initVerificationVM(interopCtx, hash, witness); err != nil { return err } err := vm.Run() @@ -1561,16 +1544,6 @@ func (bc *Blockchain) verifyHashAgainstScript(hash util.Uint160, witness *transa if vm.Estack().Len() != 0 { return fmt.Errorf("%w: expected exactly one returned value", ErrVerificationFailed) } - if useKeys { - bc.keyCacheLock.RLock() - _, ok := bc.keyCache[hash] - bc.keyCacheLock.RUnlock() - if !ok { - bc.keyCacheLock.Lock() - bc.keyCache[hash] = vm.GetPublicKeys() - bc.keyCacheLock.Unlock() - } - } } else { return fmt.Errorf("%w: no result returned from the script", ErrVerificationFailed) } @@ -1589,7 +1562,7 @@ func (bc *Blockchain) verifyTxWitnesses(t *transaction.Transaction, block *block } interopCtx := bc.newInteropContext(trigger.Verification, bc.dao, block, t) for i := range t.Signers { - err := bc.verifyHashAgainstScript(t.Signers[i].Account, &t.Scripts[i], interopCtx, false, t.NetworkFee) + err := bc.verifyHashAgainstScript(t.Signers[i].Account, &t.Scripts[i], interopCtx, t.NetworkFee) if err != nil { return fmt.Errorf("witness #%d: %w", i, err) } diff --git a/pkg/core/blockchain_test.go b/pkg/core/blockchain_test.go index bf56275a6..bbf3c6351 100644 --- a/pkg/core/blockchain_test.go +++ b/pkg/core/blockchain_test.go @@ -394,22 +394,22 @@ func TestVerifyHashAgainstScript(t *testing.T) { newH := cs.ScriptHash() newH[0] = ^newH[0] w := &transaction.Witness{InvocationScript: []byte{byte(opcode.PUSH4)}} - err := bc.verifyHashAgainstScript(newH, w, ic, false, gas) + err := bc.verifyHashAgainstScript(newH, w, ic, gas) require.True(t, errors.Is(err, ErrUnknownVerificationContract)) }) t.Run("Invalid", func(t *testing.T) { w := &transaction.Witness{InvocationScript: []byte{byte(opcode.PUSH4)}} - err := bc.verifyHashAgainstScript(csInvalid.ScriptHash(), w, ic, false, gas) + err := bc.verifyHashAgainstScript(csInvalid.ScriptHash(), w, ic, gas) require.True(t, errors.Is(err, ErrInvalidVerificationContract)) }) t.Run("ValidSignature", func(t *testing.T) { w := &transaction.Witness{InvocationScript: []byte{byte(opcode.PUSH4)}} - err := bc.verifyHashAgainstScript(cs.ScriptHash(), w, ic, false, gas) + err := bc.verifyHashAgainstScript(cs.ScriptHash(), w, ic, gas) require.NoError(t, err) }) t.Run("InvalidSignature", func(t *testing.T) { w := &transaction.Witness{InvocationScript: []byte{byte(opcode.PUSH3)}} - err := bc.verifyHashAgainstScript(cs.ScriptHash(), w, ic, false, gas) + err := bc.verifyHashAgainstScript(cs.ScriptHash(), w, ic, gas) require.True(t, errors.Is(err, ErrVerificationFailed)) }) }) @@ -419,7 +419,7 @@ func TestVerifyHashAgainstScript(t *testing.T) { InvocationScript: []byte{byte(opcode.NOP)}, VerificationScript: 
verif, } - err := bc.verifyHashAgainstScript(hash.Hash160(verif), w, ic, false, 1) + err := bc.verifyHashAgainstScript(hash.Hash160(verif), w, ic, 1) require.True(t, errors.Is(err, ErrVerificationFailed)) }) t.Run("NoResult", func(t *testing.T) { @@ -428,7 +428,7 @@ func TestVerifyHashAgainstScript(t *testing.T) { InvocationScript: []byte{byte(opcode.PUSH1)}, VerificationScript: verif, } - err := bc.verifyHashAgainstScript(hash.Hash160(verif), w, ic, false, gas) + err := bc.verifyHashAgainstScript(hash.Hash160(verif), w, ic, gas) require.True(t, errors.Is(err, ErrVerificationFailed)) }) t.Run("BadResult", func(t *testing.T) { @@ -439,7 +439,7 @@ func TestVerifyHashAgainstScript(t *testing.T) { InvocationScript: []byte{byte(opcode.NOP)}, VerificationScript: verif, } - err := bc.verifyHashAgainstScript(hash.Hash160(verif), w, ic, false, gas) + err := bc.verifyHashAgainstScript(hash.Hash160(verif), w, ic, gas) require.True(t, errors.Is(err, ErrVerificationFailed)) }) t.Run("TooManyResults", func(t *testing.T) { @@ -448,7 +448,7 @@ func TestVerifyHashAgainstScript(t *testing.T) { InvocationScript: []byte{byte(opcode.PUSH1), byte(opcode.PUSH1)}, VerificationScript: verif, } - err := bc.verifyHashAgainstScript(hash.Hash160(verif), w, ic, false, gas) + err := bc.verifyHashAgainstScript(hash.Hash160(verif), w, ic, gas) require.True(t, errors.Is(err, ErrVerificationFailed)) }) } diff --git a/pkg/vm/vm.go b/pkg/vm/vm.go index 0a426fb6a..17d6521e6 100644 --- a/pkg/vm/vm.go +++ b/pkg/vm/vm.go @@ -80,9 +80,6 @@ type VM struct { SyscallHandler func(v *VM, id uint32) error trigger trigger.Type - - // Public keys cache. - keys map[string]*keys.PublicKey } // New returns a new VM object ready to load AVM bytecode scripts. @@ -96,7 +93,6 @@ func NewWithTrigger(t trigger.Type) *VM { state: NoneState, istack: NewStack("invocation"), refs: newRefCounter(), - keys: make(map[string]*keys.PublicKey), trigger: t, SyscallHandler: defaultSyscallHandler, @@ -140,17 +136,6 @@ func (v *VM) Istack() *Stack { return v.istack } -// SetPublicKeys sets internal key cache to the specified value (note -// that it doesn't copy them). -func (v *VM) SetPublicKeys(keys map[string]*keys.PublicKey) { - v.keys = keys -} - -// GetPublicKeys returns internal key cache (note that it doesn't copy it). -func (v *VM) GetPublicKeys() map[string]*keys.PublicKey { - return v.keys -} - // LoadArgs loads in the arguments used in the Mian entry point. 
func (v *VM) LoadArgs(method []byte, args []stackitem.Item) { if len(args) > 0 { @@ -1586,8 +1571,8 @@ func CheckMultisigPar(v *VM, curve elliptic.Curve, h []byte, pkeys [][]byte, sig go worker(tasks, results) } - tasks <- task{pub: v.bytesToPublicKey(pkeys[k1], curve), signum: s1} - tasks <- task{pub: v.bytesToPublicKey(pkeys[k2], curve), signum: s2} + tasks <- task{pub: bytesToPublicKey(pkeys[k1], curve), signum: s1} + tasks <- task{pub: bytesToPublicKey(pkeys[k2], curve), signum: s2} sigok := true taskCount := 2 @@ -1631,7 +1616,7 @@ loop: nextKey = k2 } taskCount++ - tasks <- task{pub: v.bytesToPublicKey(pkeys[nextKey], curve), signum: nextSig} + tasks <- task{pub: bytesToPublicKey(pkeys[nextKey], curve), signum: nextSig} } close(tasks) @@ -1641,7 +1626,7 @@ loop: func checkMultisig1(v *VM, curve elliptic.Curve, h []byte, pkeys [][]byte, sig []byte) bool { for i := range pkeys { - pkey := v.bytesToPublicKey(pkeys[i], curve) + pkey := bytesToPublicKey(pkeys[i], curve) if pkey.Verify(sig, h) { return true } @@ -1696,18 +1681,10 @@ func (v *VM) checkInvocationStackSize() { // bytesToPublicKey is a helper deserializing keys using cache and panicing on // error. -func (v *VM) bytesToPublicKey(b []byte, curve elliptic.Curve) *keys.PublicKey { - var pkey *keys.PublicKey - s := string(b) - if v.keys[s] != nil { - pkey = v.keys[s] - } else { - var err error - pkey, err = keys.NewPublicKeyFromBytes(b, curve) - if err != nil { - panic(err.Error()) - } - v.keys[s] = pkey +func bytesToPublicKey(b []byte, curve elliptic.Curve) *keys.PublicKey { + pkey, err := keys.NewPublicKeyFromBytes(b, curve) + if err != nil { + panic(err.Error()) } return pkey } diff --git a/pkg/vm/vm_test.go b/pkg/vm/vm_test.go index 198d2183e..c32424ec2 100644 --- a/pkg/vm/vm_test.go +++ b/pkg/vm/vm_test.go @@ -2,7 +2,6 @@ package vm import ( "bytes" - "crypto/elliptic" "encoding/binary" "encoding/hex" "errors" @@ -103,25 +102,6 @@ func TestAddGas(t *testing.T) { require.False(t, v.AddGas(5)) } -func TestBytesToPublicKey(t *testing.T) { - v := newTestVM() - cache := v.GetPublicKeys() - assert.Equal(t, 0, len(cache)) - keyHex := "03b209fd4f53a7170ea4444e0cb0a6bb6a53c2bd016926989cf85f9b0fba17a70c" - keyBytes, _ := hex.DecodeString(keyHex) - key := v.bytesToPublicKey(keyBytes, elliptic.P256()) - assert.NotNil(t, key) - key2 := v.bytesToPublicKey(keyBytes, elliptic.P256()) - assert.Equal(t, key, key2) - - cache = v.GetPublicKeys() - assert.Equal(t, 1, len(cache)) - assert.NotNil(t, cache[string(keyBytes)]) - - keyBytes[0] = 0xff - require.Panics(t, func() { v.bytesToPublicKey(keyBytes, elliptic.P256()) }) -} - func TestPushBytes1to75(t *testing.T) { buf := io.NewBufBinWriter() for i := 1; i <= 75; i++ { From 7310e748e3b05edf2901e597f6354aaa74c52035 Mon Sep 17 00:00:00 2001 From: Roman Khimov Date: Thu, 10 Sep 2020 15:02:03 +0300 Subject: [PATCH 2/6] core: reuse mempool from AddBlock() for bc.isTxStillRelevant() It's already there most of the time and creating another slice is just a waste of time. Checking for presence with map is also a little faster. 
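For illustration, the difference boils down to the following standalone toy sketch (string keys stand in for the real util.Uint256 hashes and mempool types, so this is not the actual code):

    package main

    import (
        "fmt"
        "sort"
    )

    func main() {
        hashes := []string{"c", "a", "b"}

        // Old approach: copy the hashes into an extra slice, sort it and
        // binary-search it for every transaction left in the mempool.
        sorted := append([]string(nil), hashes...)
        sort.Strings(sorted)
        i := sort.SearchStrings(sorted, "b")
        fmt.Println(i < len(sorted) && sorted[i] == "b") // true

        // New approach: the pool built in AddBlock() already keeps
        // transactions keyed by hash, so a ContainsKey-style map lookup
        // is enough and no extra slice is allocated.
        pool := map[string]bool{"a": true, "b": true, "c": true}
        fmt.Println(pool["b"]) // true
    }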
--- pkg/core/blockchain.go | 62 +++++++++++++++++++----------------------- 1 file changed, 28 insertions(+), 34 deletions(-) diff --git a/pkg/core/blockchain.go b/pkg/core/blockchain.go index cb2cb67fc..6a7b6526a 100644 --- a/pkg/core/blockchain.go +++ b/pkg/core/blockchain.go @@ -4,7 +4,6 @@ import ( "errors" "fmt" "math/big" - "sort" "sync" "sync/atomic" "time" @@ -200,7 +199,7 @@ func (bc *Blockchain) init() error { if err != nil { return err } - return bc.storeBlock(genesisBlock) + return bc.storeBlock(genesisBlock, nil) } if ver != version { return fmt.Errorf("storage version mismatch betweeen %s and %s", version, ver) @@ -413,6 +412,7 @@ func (bc *Blockchain) AddBlock(block *block.Block) error { bc.addLock.Lock() defer bc.addLock.Unlock() + var mp *mempool.Pool expectedHeight := bc.BlockHeight() + 1 if expectedHeight != block.Index { return fmt.Errorf("expected %d, got %d: %w", expectedHeight, block.Index, ErrInvalidBlockIndex) @@ -430,28 +430,26 @@ func (bc *Blockchain) AddBlock(block *block.Block) error { if err != nil { return fmt.Errorf("block %s is invalid: %w", block.Hash().StringLE(), err) } - if bc.config.VerifyTransactions { - var mp = mempool.New(len(block.Transactions)) - for _, tx := range block.Transactions { - var err error - // Transactions are verified before adding them - // into the pool, so there is no point in doing - // it again even if we're verifying in-block transactions. - if bc.memPool.ContainsKey(tx.Hash()) { - err = mp.Add(tx, bc) - if err == nil { - continue - } - } else { - err = bc.verifyAndPoolTx(tx, mp) - } - if err != nil { - return fmt.Errorf("transaction %s failed to verify: %w", tx.Hash().StringLE(), err) + mp = mempool.New(len(block.Transactions)) + for _, tx := range block.Transactions { + var err error + // Transactions are verified before adding them + // into the pool, so there is no point in doing + // it again even if we're verifying in-block transactions. + if bc.memPool.ContainsKey(tx.Hash()) { + err = mp.Add(tx, bc) + if err == nil { + continue } + } else { + err = bc.verifyAndPoolTx(tx, mp) + } + if err != nil && bc.config.VerifyTransactions { + return fmt.Errorf("transaction %s failed to verify: %w", tx.Hash().StringLE(), err) } } } - return bc.storeBlock(block) + return bc.storeBlock(block, mp) } // AddHeaders processes the given headers and add them to the @@ -569,7 +567,7 @@ func (bc *Blockchain) GetStateRoot(height uint32) (*state.MPTRootState, error) { // storeBlock performs chain update using the block given, it executes all // transactions with all appropriate side-effects and updates Blockchain state. // This is the only way to change Blockchain state. 
-func (bc *Blockchain) storeBlock(block *block.Block) error { +func (bc *Blockchain) storeBlock(block *block.Block, txpool *mempool.Pool) error { cache := dao.NewCached(bc.dao) writeBuf := io.NewBufBinWriter() appExecResults := make([]*state.AppExecResult, 0, 1+len(block.Transactions)) @@ -612,8 +610,7 @@ func (bc *Blockchain) storeBlock(block *block.Block) error { writeBuf.Reset() } - var txHashes = make([]util.Uint256, len(block.Transactions)) - for i, tx := range block.Transactions { + for _, tx := range block.Transactions { if err := cache.StoreAsTransaction(tx, block.Index, writeBuf); err != nil { return err } @@ -654,11 +651,7 @@ func (bc *Blockchain) storeBlock(block *block.Block) error { return fmt.Errorf("failed to store tx exec result: %w", err) } writeBuf.Reset() - txHashes[i] = tx.Hash() } - sort.Slice(txHashes, func(i, j int) bool { - return txHashes[i].CompareTo(txHashes[j]) < 0 - }) root := bc.dao.MPT.StateRoot() var prevHash util.Uint256 @@ -700,7 +693,7 @@ func (bc *Blockchain) storeBlock(block *block.Block) error { } bc.topBlock.Store(block) atomic.StoreUint32(&bc.blockHeight, block.Index) - bc.memPool.RemoveStale(func(tx *transaction.Transaction) bool { return bc.isTxStillRelevant(tx, txHashes) }, bc) + bc.memPool.RemoveStale(func(tx *transaction.Transaction) bool { return bc.isTxStillRelevant(tx, txpool) }, bc) bc.lock.Unlock() updateBlockHeightMetric(block.Index) @@ -1305,17 +1298,18 @@ func (bc *Blockchain) verifyTxAttributes(tx *transaction.Transaction) error { // isTxStillRelevant is a callback for mempool transaction filtering after the // new block addition. It returns false for transactions added by the new block -// (passed via txHashes) and does witness reverification for non-standard +// (passed via txpool) and does witness reverification for non-standard // contracts. It operates under the assumption that full transaction verification // was already done so we don't need to check basic things like size, input/output // correctness, presence in blocks before the new one, etc. -func (bc *Blockchain) isTxStillRelevant(t *transaction.Transaction, txHashes []util.Uint256) bool { +func (bc *Blockchain) isTxStillRelevant(t *transaction.Transaction, txpool *mempool.Pool) bool { var recheckWitness bool - index := sort.Search(len(txHashes), func(i int) bool { - return txHashes[i].CompareTo(t.Hash()) >= 0 - }) - if index < len(txHashes) && txHashes[index].Equals(t.Hash()) { + if txpool == nil { + if bc.dao.HasTransaction(t.Hash()) { + return false + } + } else if txpool.ContainsKey(t.Hash()) { return false } if err := bc.verifyTxAttributes(t); err != nil { From fc7ea6217dcad60c6ac3dbafa607e09c3976565b Mon Sep 17 00:00:00 2001 From: Roman Khimov Date: Thu, 10 Sep 2020 15:20:04 +0300 Subject: [PATCH 3/6] mempool: avoid reassigning utilityBalanceAndFees value It's not needed, we're either creating a new one and assigning it 6 lines above or we're changing already existing big.Int via a pointer, so no update is needed. 
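The gist, as a minimal standalone sketch (simplified names; in the real code the type is utilityBalanceAndFees and feeSum is a *big.Int):

    package main

    import (
        "fmt"
        "math/big"
    )

    type balanceAndFees struct {
        feeSum *big.Int // pointer, shared with any copy of the struct
    }

    func main() {
        fees := map[string]balanceAndFees{}
        fees["sender"] = balanceAndFees{feeSum: big.NewInt(0)}

        f := fees["sender"]                    // copies the struct, not the big.Int
        f.feeSum.Add(f.feeSum, big.NewInt(42)) // mutates the shared big.Int in place

        fmt.Println(fees["sender"].feeSum)     // 42, no `fees["sender"] = f` needed
    }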
--- pkg/core/mempool/mem_pool.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/core/mempool/mem_pool.go b/pkg/core/mempool/mem_pool.go index 9e82c6b25..31e34f4fb 100644 --- a/pkg/core/mempool/mem_pool.go +++ b/pkg/core/mempool/mem_pool.go @@ -121,7 +121,6 @@ func (mp *Pool) tryAddSendersFee(tx *transaction.Transaction, feer Feer, needChe return false } senderFee.feeSum.Add(senderFee.feeSum, big.NewInt(tx.SystemFee+tx.NetworkFee)) - mp.fees[tx.Sender()] = senderFee return true } From 83fc38ae3a3e63c8836b68b800e503c1b0a58c07 Mon Sep 17 00:00:00 2001 From: Roman Khimov Date: Thu, 10 Sep 2020 15:35:19 +0300 Subject: [PATCH 4/6] mempool: don't create new big.Int in tryAddSendersFee() if possible Do a little less allocations. --- pkg/core/mempool/mem_pool.go | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/pkg/core/mempool/mem_pool.go b/pkg/core/mempool/mem_pool.go index 31e34f4fb..a0800ccf3 100644 --- a/pkg/core/mempool/mem_pool.go +++ b/pkg/core/mempool/mem_pool.go @@ -117,25 +117,30 @@ func (mp *Pool) tryAddSendersFee(tx *transaction.Transaction, feer Feer, needChe senderFee.feeSum = big.NewInt(0) mp.fees[tx.Sender()] = senderFee } - if needCheck && checkBalance(tx, senderFee) != nil { - return false + if needCheck { + newFeeSum, err := checkBalance(tx, senderFee) + if err != nil { + return false + } + senderFee.feeSum.Set(newFeeSum) + } else { + senderFee.feeSum.Add(senderFee.feeSum, big.NewInt(tx.SystemFee+tx.NetworkFee)) } - senderFee.feeSum.Add(senderFee.feeSum, big.NewInt(tx.SystemFee+tx.NetworkFee)) return true } -// checkBalance returns nil in case when sender has enough GAS to pay for the -// transaction -func checkBalance(tx *transaction.Transaction, balance utilityBalanceAndFees) error { +// checkBalance returns new cumulative fee balance for account or an error in +// case sender doesn't have enough GAS to pay for the transaction. +func checkBalance(tx *transaction.Transaction, balance utilityBalanceAndFees) (*big.Int, error) { txFee := big.NewInt(tx.SystemFee + tx.NetworkFee) if balance.balance.Cmp(txFee) < 0 { - return ErrInsufficientFunds + return nil, ErrInsufficientFunds } - needFee := txFee.Add(txFee, balance.feeSum) - if balance.balance.Cmp(needFee) < 0 { - return ErrConflict + txFee.Add(txFee, balance.feeSum) + if balance.balance.Cmp(txFee) < 0 { + return nil, ErrConflict } - return nil + return txFee, nil } // Add tries to add given transaction to the Pool. 
@@ -299,7 +304,8 @@ func (mp *Pool) checkTxConflicts(tx *transaction.Transaction, fee Feer) error { senderFee.balance = fee.GetUtilityTokenBalance(tx.Sender()) senderFee.feeSum = big.NewInt(0) } - return checkBalance(tx, senderFee) + _, err := checkBalance(tx, senderFee) + return err } // Verify checks if a Sender of tx is able to pay for it (and all the other From a6a1df4e0d8b9ae576826fc259013dbb8ad10320 Mon Sep 17 00:00:00 2001 From: Roman Khimov Date: Thu, 10 Sep 2020 16:34:33 +0300 Subject: [PATCH 5/6] consensus: update dbft, use new timer --- go.mod | 2 +- go.sum | 4 ++-- pkg/consensus/consensus.go | 3 ++- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 423d53d12..94fb4c3ba 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/gorilla/websocket v1.4.2 github.com/hashicorp/golang-lru v0.5.4 github.com/mr-tron/base58 v1.1.2 - github.com/nspcc-dev/dbft v0.0.0-20200904131615-4443b3066b8b + github.com/nspcc-dev/dbft v0.0.0-20200911152629-be965ee4d449 github.com/nspcc-dev/rfc6979 v0.2.0 github.com/pierrec/lz4 v2.5.2+incompatible github.com/prometheus/client_golang v1.2.1 diff --git a/go.sum b/go.sum index 67e0a6025..321bc7d2b 100644 --- a/go.sum +++ b/go.sum @@ -165,8 +165,8 @@ github.com/nspcc-dev/dbft v0.0.0-20200117124306-478e5cfbf03a h1:ajvxgEe9qY4vvoSm github.com/nspcc-dev/dbft v0.0.0-20200117124306-478e5cfbf03a/go.mod h1:/YFK+XOxxg0Bfm6P92lY5eDSLYfp06XOdL8KAVgXjVk= github.com/nspcc-dev/dbft v0.0.0-20200219114139-199d286ed6c1 h1:yEx9WznS+rjE0jl0dLujCxuZSIb+UTjF+005TJu/nNI= github.com/nspcc-dev/dbft v0.0.0-20200219114139-199d286ed6c1/go.mod h1:O0qtn62prQSqizzoagHmuuKoz8QMkU3SzBoKdEvm3aQ= -github.com/nspcc-dev/dbft v0.0.0-20200904131615-4443b3066b8b h1:UgjsL0bhHWF/g7iK673brucYjwpB4MM0Sgg2iVaOYGc= -github.com/nspcc-dev/dbft v0.0.0-20200904131615-4443b3066b8b/go.mod h1:1FYQXSbb6/9HQIkoF8XO7W/S8N7AZRkBsgwbcXRvk0E= +github.com/nspcc-dev/dbft v0.0.0-20200911152629-be965ee4d449 h1:Nw9n2W8pGj3m/J1uY04LvzInU1FE2E+0HkdNSTj/FPg= +github.com/nspcc-dev/dbft v0.0.0-20200911152629-be965ee4d449/go.mod h1:1FYQXSbb6/9HQIkoF8XO7W/S8N7AZRkBsgwbcXRvk0E= github.com/nspcc-dev/neo-go v0.73.1-pre.0.20200303142215-f5a1b928ce09/go.mod h1:pPYwPZ2ks+uMnlRLUyXOpLieaDQSEaf4NM3zHVbRjmg= github.com/nspcc-dev/neofs-crypto v0.2.0 h1:ftN+59WqxSWz/RCgXYOfhmltOOqU+udsNQSvN6wkFck= github.com/nspcc-dev/neofs-crypto v0.2.0/go.mod h1:F/96fUzPM3wR+UGsPi3faVNmFlA9KAEAUQR7dMxZmNA= diff --git a/pkg/consensus/consensus.go b/pkg/consensus/consensus.go index f8d5bb12b..9b29cbde7 100644 --- a/pkg/consensus/consensus.go +++ b/pkg/consensus/consensus.go @@ -193,7 +193,8 @@ func (s *service) Start() { func (s *service) eventLoop() { for { select { - case hv := <-s.dbft.Timer.C(): + case <-s.dbft.Timer.C(): + hv := s.dbft.Timer.HV() s.log.Debug("timer fired", zap.Uint32("height", hv.Height), zap.Uint("view", uint(hv.View))) From 19c69618c5fb68b779e10591d727c5d8d6573bf2 Mon Sep 17 00:00:00 2001 From: Roman Khimov Date: Thu, 10 Sep 2020 19:28:16 +0300 Subject: [PATCH 6/6] transaction: cache tx size, don't serialize it over and over again --- pkg/core/blockchain.go | 4 ++-- pkg/core/blockchain_test.go | 2 +- pkg/core/transaction/transaction.go | 24 ++++++++++++++++-------- pkg/core/transaction/transaction_test.go | 1 + pkg/rpc/server/server_test.go | 1 + 5 files changed, 21 insertions(+), 11 deletions(-) diff --git a/pkg/core/blockchain.go b/pkg/core/blockchain.go index 6a7b6526a..13266c65c 100644 --- a/pkg/core/blockchain.go +++ b/pkg/core/blockchain.go @@ -1181,7 +1181,7 @@ func (bc 
*Blockchain) ApplyPolicyToTxSet(txes []*transaction.Transaction) []*tra ) blockSize = uint32(io.GetVarSize(new(block.Block)) + io.GetVarSize(len(txes)+1)) for i, tx := range txes { - blockSize += uint32(io.GetVarSize(tx)) + blockSize += uint32(tx.Size()) sysFee += tx.SystemFee if blockSize > maxBlockSize || sysFee > maxBlockSysFee { txes = txes[:i] @@ -1234,7 +1234,7 @@ func (bc *Blockchain) verifyAndPoolTx(t *transaction.Transaction, pool *mempool. // Only one %w can be used. return fmt.Errorf("%w: %v", ErrPolicy, err) } - size := io.GetVarSize(t) + size := t.Size() if size > transaction.MaxTransactionSize { return fmt.Errorf("%w: (%d > MaxTransactionSize %d)", ErrTxTooBig, size, transaction.MaxTransactionSize) } diff --git a/pkg/core/blockchain_test.go b/pkg/core/blockchain_test.go index bbf3c6351..7d2732ed1 100644 --- a/pkg/core/blockchain_test.go +++ b/pkg/core/blockchain_test.go @@ -524,8 +524,8 @@ func TestGetTransaction(t *testing.T) { tx, height, err := bc.GetTransaction(block.Transactions[0].Hash()) require.Nil(t, err) assert.Equal(t, block.Index, height) + assert.Equal(t, txSize, tx.Size()) assert.Equal(t, block.Transactions[0], tx) - assert.Equal(t, txSize, io.GetVarSize(tx)) assert.Equal(t, 1, io.GetVarSize(tx.Attributes)) assert.Equal(t, 1, io.GetVarSize(tx.Scripts)) assert.NoError(t, bc.persist()) diff --git a/pkg/core/transaction/transaction.go b/pkg/core/transaction/transaction.go index 021708d67..bcf4d2af8 100644 --- a/pkg/core/transaction/transaction.go +++ b/pkg/core/transaction/transaction.go @@ -62,8 +62,8 @@ type Transaction struct { // for correct signing/verification. Network netmode.Magic - // feePerByte is the ratio of NetworkFee and tx size, used for calculating tx priority. - feePerByte int64 + // size is transaction's serialized size. + size int // Hash of the transaction (double SHA256). hash util.Uint256 @@ -158,6 +158,7 @@ func (t *Transaction) DecodeBinary(br *io.BinReader) { // to do it anymore. if br.Err == nil { br.Err = t.createHash() + _ = t.Size() } } @@ -252,18 +253,22 @@ func NewTransactionFromBytes(network netmode.Magic, b []byte) (*Transaction, err if r.Err == nil { return nil, errors.New("additional data after the transaction") } - tx.feePerByte = tx.NetworkFee / int64(len(b)) + tx.size = len(b) return tx, nil } // FeePerByte returns NetworkFee of the transaction divided by // its size func (t *Transaction) FeePerByte() int64 { - if t.feePerByte != 0 { - return t.feePerByte + return t.NetworkFee / int64(t.Size()) +} + +// Size returns size of the serialized transaction. 
+func (t *Transaction) Size() int { + if t.size == 0 { + t.size = io.GetVarSize(t) } - t.feePerByte = t.NetworkFee / int64(io.GetVarSize(t)) - return t.feePerByte + return t.size } // Sender returns the sender of the transaction which is always on the first place @@ -296,7 +301,7 @@ type transactionJSON struct { func (t *Transaction) MarshalJSON() ([]byte, error) { tx := transactionJSON{ TxID: t.Hash(), - Size: io.GetVarSize(t), + Size: t.Size(), Version: t.Version, Nonce: t.Nonce, Sender: address.Uint160ToString(t.Sender()), @@ -329,6 +334,9 @@ func (t *Transaction) UnmarshalJSON(data []byte) error { if t.Hash() != tx.TxID { return errors.New("txid doesn't match transaction hash") } + if t.Size() != tx.Size { + return errors.New("'size' doesn't match transaction size") + } return t.isValid() } diff --git a/pkg/core/transaction/transaction_test.go b/pkg/core/transaction/transaction_test.go index 216e7ca27..fd7705477 100644 --- a/pkg/core/transaction/transaction_test.go +++ b/pkg/core/transaction/transaction_test.go @@ -73,6 +73,7 @@ func TestNew(t *testing.T) { assert.Equal(t, script, tx.Script) // Update hash fields to match tx2 that is gonna autoupdate them on decode. _ = tx.Hash() + _ = tx.Size() testserdes.EncodeDecodeBinary(t, tx, &Transaction{Network: netmode.UnitTestNet}) } diff --git a/pkg/rpc/server/server_test.go b/pkg/rpc/server/server_test.go index bf2ea7ed3..f7f5eb87c 100644 --- a/pkg/rpc/server/server_test.go +++ b/pkg/rpc/server/server_test.go @@ -811,6 +811,7 @@ func testRPCProtocol(t *testing.T, doRPCCall func(string, string, *testing.T) [] t.Run("getrawtransaction 2 arguments, verbose", func(t *testing.T) { block, _ := chain.GetBlock(chain.GetHeaderHash(0)) TXHash := block.Transactions[0].Hash() + _ = block.Transactions[0].Size() rpc := fmt.Sprintf(`{"jsonrpc": "2.0", "id": 1, "method": "getrawtransaction", "params": ["%s", 1]}"`, TXHash.StringLE()) body := doRPCCall(rpc, httpSrv.URL, t) txOut := checkErrGetResult(t, body, false)