forked from TrueCloudLab/neoneo-go
commit 3f27cf5901
11 changed files with 95 additions and 155 deletions

go.mod — 2 changed lines
@@ -10,7 +10,7 @@ require (
 	github.com/gorilla/websocket v1.4.2
 	github.com/hashicorp/golang-lru v0.5.4
 	github.com/mr-tron/base58 v1.1.2
-	github.com/nspcc-dev/dbft v0.0.0-20200904131615-4443b3066b8b
+	github.com/nspcc-dev/dbft v0.0.0-20200911152629-be965ee4d449
 	github.com/nspcc-dev/rfc6979 v0.2.0
 	github.com/pierrec/lz4 v2.5.2+incompatible
 	github.com/prometheus/client_golang v1.2.1
go.sum — 4 changed lines
@@ -165,8 +165,8 @@ github.com/nspcc-dev/dbft v0.0.0-20200117124306-478e5cfbf03a h1:ajvxgEe9qY4vvoSm
 github.com/nspcc-dev/dbft v0.0.0-20200117124306-478e5cfbf03a/go.mod h1:/YFK+XOxxg0Bfm6P92lY5eDSLYfp06XOdL8KAVgXjVk=
 github.com/nspcc-dev/dbft v0.0.0-20200219114139-199d286ed6c1 h1:yEx9WznS+rjE0jl0dLujCxuZSIb+UTjF+005TJu/nNI=
 github.com/nspcc-dev/dbft v0.0.0-20200219114139-199d286ed6c1/go.mod h1:O0qtn62prQSqizzoagHmuuKoz8QMkU3SzBoKdEvm3aQ=
-github.com/nspcc-dev/dbft v0.0.0-20200904131615-4443b3066b8b h1:UgjsL0bhHWF/g7iK673brucYjwpB4MM0Sgg2iVaOYGc=
-github.com/nspcc-dev/dbft v0.0.0-20200904131615-4443b3066b8b/go.mod h1:1FYQXSbb6/9HQIkoF8XO7W/S8N7AZRkBsgwbcXRvk0E=
+github.com/nspcc-dev/dbft v0.0.0-20200911152629-be965ee4d449 h1:Nw9n2W8pGj3m/J1uY04LvzInU1FE2E+0HkdNSTj/FPg=
+github.com/nspcc-dev/dbft v0.0.0-20200911152629-be965ee4d449/go.mod h1:1FYQXSbb6/9HQIkoF8XO7W/S8N7AZRkBsgwbcXRvk0E=
 github.com/nspcc-dev/neo-go v0.73.1-pre.0.20200303142215-f5a1b928ce09/go.mod h1:pPYwPZ2ks+uMnlRLUyXOpLieaDQSEaf4NM3zHVbRjmg=
 github.com/nspcc-dev/neofs-crypto v0.2.0 h1:ftN+59WqxSWz/RCgXYOfhmltOOqU+udsNQSvN6wkFck=
 github.com/nspcc-dev/neofs-crypto v0.2.0/go.mod h1:F/96fUzPM3wR+UGsPi3faVNmFlA9KAEAUQR7dMxZmNA=
@@ -193,7 +193,8 @@ func (s *service) Start() {
 func (s *service) eventLoop() {
 	for {
 		select {
-		case hv := <-s.dbft.Timer.C():
+		case <-s.dbft.Timer.C():
+			hv := s.dbft.Timer.HV()
 			s.log.Debug("timer fired",
 				zap.Uint32("height", hv.Height),
 				zap.Uint("view", uint(hv.View)))
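Editor's note: the hunk above tracks the dbft bump in go.mod — the timer channel no longer carries the height/view pair, it is read back via Timer.HV() after the tick fires. The sketch below shows that pattern in isolation; the C()/HV() names come from the hunk, everything else (types, fields) is invented for illustration and is not the dbft API.

package main

import (
	"fmt"
	"time"
)

// HV mirrors the height/view pair the timer reports; illustrative only.
type HV struct {
	Height uint32
	View   byte
}

// timer is a stand-in for the dbft timer from the hunk above: the channel now
// delivers a bare tick and the current height/view is fetched via HV().
type timer struct {
	c  chan time.Time
	hv HV
}

func (t *timer) C() <-chan time.Time { return t.c }
func (t *timer) HV() HV              { return t.hv }

func main() {
	tm := &timer{c: make(chan time.Time, 1), hv: HV{Height: 1, View: 0}}
	tm.c <- time.Now()

	select {
	case <-tm.C(): // the tick itself carries no payload any more
		hv := tm.HV() // fetch height/view separately, as in the diff
		fmt.Printf("timer fired at height %d, view %d\n", hv.Height, hv.View)
	}
}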
@@ -4,7 +4,6 @@ import (
 	"errors"
 	"fmt"
 	"math/big"
-	"sort"
 	"sync"
 	"sync/atomic"
 	"time"
@@ -115,11 +114,6 @@ type Blockchain struct {

 	memPool *mempool.Pool

-	// This lock protects concurrent access to keyCache.
-	keyCacheLock sync.RWMutex
-	// cache for block verification keys.
-	keyCache map[util.Uint160]map[string]*keys.PublicKey
-
 	sbCommittee keys.PublicKeys

 	log *zap.Logger
@@ -169,7 +163,6 @@ func NewBlockchain(s storage.Store, cfg config.ProtocolConfiguration, log *zap.L
 		stopCh:      make(chan struct{}),
 		runToExitCh: make(chan struct{}),
 		memPool:     mempool.New(cfg.MemPoolSize),
-		keyCache:    make(map[util.Uint160]map[string]*keys.PublicKey),
 		sbCommittee: committee,
 		log:         log,
 		events:      make(chan bcEvent),
@@ -206,7 +199,7 @@ func (bc *Blockchain) init() error {
 		if err != nil {
 			return err
 		}
-		return bc.storeBlock(genesisBlock)
+		return bc.storeBlock(genesisBlock, nil)
 	}
 	if ver != version {
 		return fmt.Errorf("storage version mismatch betweeen %s and %s", version, ver)
@@ -419,6 +412,7 @@ func (bc *Blockchain) AddBlock(block *block.Block) error {
 	bc.addLock.Lock()
 	defer bc.addLock.Unlock()

+	var mp *mempool.Pool
 	expectedHeight := bc.BlockHeight() + 1
 	if expectedHeight != block.Index {
 		return fmt.Errorf("expected %d, got %d: %w", expectedHeight, block.Index, ErrInvalidBlockIndex)
@@ -436,8 +430,7 @@ func (bc *Blockchain) AddBlock(block *block.Block) error {
 		if err != nil {
 			return fmt.Errorf("block %s is invalid: %w", block.Hash().StringLE(), err)
 		}
-		if bc.config.VerifyTransactions {
-			var mp = mempool.New(len(block.Transactions))
+		mp = mempool.New(len(block.Transactions))
 		for _, tx := range block.Transactions {
 			var err error
 			// Transactions are verified before adding them
@@ -451,13 +444,12 @@ func (bc *Blockchain) AddBlock(block *block.Block) error {
 			} else {
 				err = bc.verifyAndPoolTx(tx, mp)
 			}
-			if err != nil {
+			if err != nil && bc.config.VerifyTransactions {
 				return fmt.Errorf("transaction %s failed to verify: %w", tx.Hash().StringLE(), err)
 			}
 		}
-		}
 	}
-	return bc.storeBlock(block)
+	return bc.storeBlock(block, mp)
 }

 // AddHeaders processes the given headers and add them to the
@@ -575,7 +567,7 @@ func (bc *Blockchain) GetStateRoot(height uint32) (*state.MPTRootState, error) {
 // storeBlock performs chain update using the block given, it executes all
 // transactions with all appropriate side-effects and updates Blockchain state.
 // This is the only way to change Blockchain state.
-func (bc *Blockchain) storeBlock(block *block.Block) error {
+func (bc *Blockchain) storeBlock(block *block.Block, txpool *mempool.Pool) error {
 	cache := dao.NewCached(bc.dao)
 	writeBuf := io.NewBufBinWriter()
 	appExecResults := make([]*state.AppExecResult, 0, 1+len(block.Transactions))
@@ -618,8 +610,7 @@ func (bc *Blockchain) storeBlock(block *block.Block) error {
 		writeBuf.Reset()
 	}

-	var txHashes = make([]util.Uint256, len(block.Transactions))
-	for i, tx := range block.Transactions {
+	for _, tx := range block.Transactions {
 		if err := cache.StoreAsTransaction(tx, block.Index, writeBuf); err != nil {
 			return err
 		}
@@ -660,11 +651,7 @@ func (bc *Blockchain) storeBlock(block *block.Block) error {
 			return fmt.Errorf("failed to store tx exec result: %w", err)
 		}
 		writeBuf.Reset()
-		txHashes[i] = tx.Hash()
 	}
-	sort.Slice(txHashes, func(i, j int) bool {
-		return txHashes[i].CompareTo(txHashes[j]) < 0
-	})

 	root := bc.dao.MPT.StateRoot()
 	var prevHash util.Uint256
@@ -706,7 +693,7 @@ func (bc *Blockchain) storeBlock(block *block.Block) error {
 	}
 	bc.topBlock.Store(block)
 	atomic.StoreUint32(&bc.blockHeight, block.Index)
-	bc.memPool.RemoveStale(func(tx *transaction.Transaction) bool { return bc.isTxStillRelevant(tx, txHashes) }, bc)
+	bc.memPool.RemoveStale(func(tx *transaction.Transaction) bool { return bc.isTxStillRelevant(tx, txpool) }, bc)
 	bc.lock.Unlock()

 	updateBlockHeightMetric(block.Index)
@@ -771,8 +758,8 @@ func (bc *Blockchain) processNEP5Transfer(cache *dao.Cached, h util.Uint256, b *
 	if nativeContract != nil {
 		id = nativeContract.Metadata().ContractID
 	} else {
-		assetContract := bc.GetContractState(sc)
-		if assetContract == nil {
+		assetContract, err := cache.GetContractState(sc)
+		if err != nil {
 			return
 		}
 		id = assetContract.ID
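Editor's note: this hunk switches the NEP5 transfer tracking from bc.GetContractState (persisted state only) to cache.GetContractState, i.e. the lookup now goes through the block's DAO cache, so a contract deployed earlier in the same block is visible. A small self-contained sketch of that layered lookup; all types here are invented for illustration.

package main

import (
	"errors"
	"fmt"
)

// contract is a minimal stand-in for the contract state looked up in the hunk.
type contract struct{ ID int32 }

// store imitates the persisted DAO layer.
type store struct{ data map[string]*contract }

func (s *store) GetContractState(h string) (*contract, error) {
	if c, ok := s.data[h]; ok {
		return c, nil
	}
	return nil, errors.New("not found")
}

// cached imitates dao.Cached: uncommitted writes are consulted first, then the
// lower layer.
type cached struct {
	store   *store
	pending map[string]*contract
}

func (c *cached) GetContractState(h string) (*contract, error) {
	if ct, ok := c.pending[h]; ok {
		return ct, nil // deployed earlier in the same block, not yet persisted
	}
	return c.store.GetContractState(h)
}

func main() {
	base := &store{data: map[string]*contract{}}
	dao := &cached{store: base, pending: map[string]*contract{"0xabc": {ID: 7}}}
	ct, err := dao.GetContractState("0xabc")
	fmt.Println(ct, err) // found through the cache even though the store has nothing
}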
@@ -1194,7 +1181,7 @@ func (bc *Blockchain) ApplyPolicyToTxSet(txes []*transaction.Transaction) []*tra
 	)
 	blockSize = uint32(io.GetVarSize(new(block.Block)) + io.GetVarSize(len(txes)+1))
 	for i, tx := range txes {
-		blockSize += uint32(io.GetVarSize(tx))
+		blockSize += uint32(tx.Size())
 		sysFee += tx.SystemFee
 		if blockSize > maxBlockSize || sysFee > maxBlockSysFee {
 			txes = txes[:i]
@@ -1247,7 +1234,7 @@ func (bc *Blockchain) verifyAndPoolTx(t *transaction.Transaction, pool *mempool.
 		// Only one %w can be used.
 		return fmt.Errorf("%w: %v", ErrPolicy, err)
 	}
-	size := io.GetVarSize(t)
+	size := t.Size()
 	if size > transaction.MaxTransactionSize {
 		return fmt.Errorf("%w: (%d > MaxTransactionSize %d)", ErrTxTooBig, size, transaction.MaxTransactionSize)
 	}
@@ -1311,17 +1298,18 @@ func (bc *Blockchain) verifyTxAttributes(tx *transaction.Transaction) error {

 // isTxStillRelevant is a callback for mempool transaction filtering after the
 // new block addition. It returns false for transactions added by the new block
-// (passed via txHashes) and does witness reverification for non-standard
+// (passed via txpool) and does witness reverification for non-standard
 // contracts. It operates under the assumption that full transaction verification
 // was already done so we don't need to check basic things like size, input/output
 // correctness, presence in blocks before the new one, etc.
-func (bc *Blockchain) isTxStillRelevant(t *transaction.Transaction, txHashes []util.Uint256) bool {
+func (bc *Blockchain) isTxStillRelevant(t *transaction.Transaction, txpool *mempool.Pool) bool {
 	var recheckWitness bool

-	index := sort.Search(len(txHashes), func(i int) bool {
-		return txHashes[i].CompareTo(t.Hash()) >= 0
-	})
-	if index < len(txHashes) && txHashes[index].Equals(t.Hash()) {
+	if txpool == nil {
+		if bc.dao.HasTransaction(t.Hash()) {
+			return false
+		}
+	} else if txpool.ContainsKey(t.Hash()) {
 		return false
 	}
 	if err := bc.verifyTxAttributes(t); err != nil {
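Editor's note: with this change isTxStillRelevant no longer binary-searches a sorted hash slice; it asks the per-block pool built in AddBlock and falls back to the DAO when that pool was never created (storeBlock called with nil). A minimal, self-contained sketch of that selection logic — the pool and ledger types are stand-ins invented for illustration, only the nil-pool fallback mirrors the hunk.

package main

import "fmt"

// Hash is a stand-in for util.Uint256.
type Hash [32]byte

// pool imitates the per-block mempool handed to storeBlock in the diff.
type pool struct{ txs map[Hash]bool }

func (p *pool) ContainsKey(h Hash) bool { return p.txs[h] }

// ledger imitates the dao.HasTransaction fallback used when no pool was built.
type ledger struct{ stored map[Hash]bool }

func (l *ledger) HasTransaction(h Hash) bool { return l.stored[h] }

// inNewBlock mirrors the first half of isTxStillRelevant: a transaction that
// the new block already contains is no longer relevant for the mempool.
func inNewBlock(l *ledger, p *pool, h Hash) bool {
	if p == nil {
		return l.HasTransaction(h) // block was stored without a verification pool
	}
	return p.ContainsKey(h)
}

func main() {
	h := Hash{1}
	l := &ledger{stored: map[Hash]bool{h: true}}
	fmt.Println(inNewBlock(l, nil, h))                         // true: found via the DAO
	fmt.Println(inNewBlock(l, &pool{txs: map[Hash]bool{}}, h)) // false: not in the block's pool
}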
@@ -1482,7 +1470,7 @@ var (
 )

 // initVerificationVM initializes VM for witness check.
-func initVerificationVM(ic *interop.Context, hash util.Uint160, witness *transaction.Witness, keyCache map[string]*keys.PublicKey) error {
+func initVerificationVM(ic *interop.Context, hash util.Uint160, witness *transaction.Witness) error {
 	var offset int
 	var initMD *manifest.Method
 	verification := witness.VerificationScript
@@ -1511,9 +1499,6 @@ func initVerificationVM(ic *interop.Context, hash util.Uint160, witness *transac
 		v.Call(v.Context(), initMD.Offset)
 	}
 	v.LoadScript(witness.InvocationScript)
-	if keyCache != nil {
-		v.SetPublicKeys(keyCache)
-	}
 	return nil
 }

@@ -1521,11 +1506,11 @@ func initVerificationVM(ic *interop.Context, hash util.Uint160, witness *transac
 func (bc *Blockchain) VerifyWitness(h util.Uint160, c crypto.Verifiable, w *transaction.Witness, gas int64) error {
 	ic := bc.newInteropContext(trigger.Verification, bc.dao, nil, nil)
 	ic.Container = c
-	return bc.verifyHashAgainstScript(h, w, ic, true, gas)
+	return bc.verifyHashAgainstScript(h, w, ic, gas)
 }

 // verifyHashAgainstScript verifies given hash against the given witness.
-func (bc *Blockchain) verifyHashAgainstScript(hash util.Uint160, witness *transaction.Witness, interopCtx *interop.Context, useKeys bool, gas int64) error {
+func (bc *Blockchain) verifyHashAgainstScript(hash util.Uint160, witness *transaction.Witness, interopCtx *interop.Context, gas int64) error {
 	gasPolicy := bc.contracts.Policy.GetMaxVerificationGas(interopCtx.DAO)
 	if gas > gasPolicy {
 		gas = gasPolicy
@@ -1534,15 +1519,7 @@ func (bc *Blockchain) verifyHashAgainstScript(hash util.Uint160, witness *transa
 	vm := interopCtx.SpawnVM()
 	vm.SetPriceGetter(getPrice)
 	vm.GasLimit = gas
-	var keyCache map[string]*keys.PublicKey
-	if useKeys {
-		bc.keyCacheLock.RLock()
-		if bc.keyCache[hash] != nil {
-			keyCache = bc.keyCache[hash]
-		}
-		bc.keyCacheLock.RUnlock()
-	}
-	if err := initVerificationVM(interopCtx, hash, witness, keyCache); err != nil {
+	if err := initVerificationVM(interopCtx, hash, witness); err != nil {
 		return err
 	}
 	err := vm.Run()
@@ -1561,16 +1538,6 @@ func (bc *Blockchain) verifyHashAgainstScript(hash util.Uint160, witness *transa
 		if vm.Estack().Len() != 0 {
 			return fmt.Errorf("%w: expected exactly one returned value", ErrVerificationFailed)
 		}
-		if useKeys {
-			bc.keyCacheLock.RLock()
-			_, ok := bc.keyCache[hash]
-			bc.keyCacheLock.RUnlock()
-			if !ok {
-				bc.keyCacheLock.Lock()
-				bc.keyCache[hash] = vm.GetPublicKeys()
-				bc.keyCacheLock.Unlock()
-			}
-		}
 	} else {
 		return fmt.Errorf("%w: no result returned from the script", ErrVerificationFailed)
 	}
@@ -1589,7 +1556,7 @@ func (bc *Blockchain) verifyTxWitnesses(t *transaction.Transaction, block *block
 	}
 	interopCtx := bc.newInteropContext(trigger.Verification, bc.dao, block, t)
 	for i := range t.Signers {
-		err := bc.verifyHashAgainstScript(t.Signers[i].Account, &t.Scripts[i], interopCtx, false, t.NetworkFee)
+		err := bc.verifyHashAgainstScript(t.Signers[i].Account, &t.Scripts[i], interopCtx, t.NetworkFee)
 		if err != nil {
 			return fmt.Errorf("witness #%d: %w", i, err)
 		}
@@ -394,22 +394,22 @@ func TestVerifyHashAgainstScript(t *testing.T) {
 		newH := cs.ScriptHash()
 		newH[0] = ^newH[0]
 		w := &transaction.Witness{InvocationScript: []byte{byte(opcode.PUSH4)}}
-		err := bc.verifyHashAgainstScript(newH, w, ic, false, gas)
+		err := bc.verifyHashAgainstScript(newH, w, ic, gas)
 		require.True(t, errors.Is(err, ErrUnknownVerificationContract))
 	})
 	t.Run("Invalid", func(t *testing.T) {
 		w := &transaction.Witness{InvocationScript: []byte{byte(opcode.PUSH4)}}
-		err := bc.verifyHashAgainstScript(csInvalid.ScriptHash(), w, ic, false, gas)
+		err := bc.verifyHashAgainstScript(csInvalid.ScriptHash(), w, ic, gas)
 		require.True(t, errors.Is(err, ErrInvalidVerificationContract))
 	})
 	t.Run("ValidSignature", func(t *testing.T) {
 		w := &transaction.Witness{InvocationScript: []byte{byte(opcode.PUSH4)}}
-		err := bc.verifyHashAgainstScript(cs.ScriptHash(), w, ic, false, gas)
+		err := bc.verifyHashAgainstScript(cs.ScriptHash(), w, ic, gas)
 		require.NoError(t, err)
 	})
 	t.Run("InvalidSignature", func(t *testing.T) {
 		w := &transaction.Witness{InvocationScript: []byte{byte(opcode.PUSH3)}}
-		err := bc.verifyHashAgainstScript(cs.ScriptHash(), w, ic, false, gas)
+		err := bc.verifyHashAgainstScript(cs.ScriptHash(), w, ic, gas)
 		require.True(t, errors.Is(err, ErrVerificationFailed))
 	})
 })
@@ -419,7 +419,7 @@ func TestVerifyHashAgainstScript(t *testing.T) {
 			InvocationScript:   []byte{byte(opcode.NOP)},
 			VerificationScript: verif,
 		}
-		err := bc.verifyHashAgainstScript(hash.Hash160(verif), w, ic, false, 1)
+		err := bc.verifyHashAgainstScript(hash.Hash160(verif), w, ic, 1)
 		require.True(t, errors.Is(err, ErrVerificationFailed))
 	})
 	t.Run("NoResult", func(t *testing.T) {
@@ -428,7 +428,7 @@ func TestVerifyHashAgainstScript(t *testing.T) {
 			InvocationScript:   []byte{byte(opcode.PUSH1)},
 			VerificationScript: verif,
 		}
-		err := bc.verifyHashAgainstScript(hash.Hash160(verif), w, ic, false, gas)
+		err := bc.verifyHashAgainstScript(hash.Hash160(verif), w, ic, gas)
 		require.True(t, errors.Is(err, ErrVerificationFailed))
 	})
 	t.Run("BadResult", func(t *testing.T) {
@@ -439,7 +439,7 @@ func TestVerifyHashAgainstScript(t *testing.T) {
 			InvocationScript:   []byte{byte(opcode.NOP)},
 			VerificationScript: verif,
 		}
-		err := bc.verifyHashAgainstScript(hash.Hash160(verif), w, ic, false, gas)
+		err := bc.verifyHashAgainstScript(hash.Hash160(verif), w, ic, gas)
 		require.True(t, errors.Is(err, ErrVerificationFailed))
 	})
 	t.Run("TooManyResults", func(t *testing.T) {
@@ -448,7 +448,7 @@ func TestVerifyHashAgainstScript(t *testing.T) {
 			InvocationScript:   []byte{byte(opcode.PUSH1), byte(opcode.PUSH1)},
 			VerificationScript: verif,
 		}
-		err := bc.verifyHashAgainstScript(hash.Hash160(verif), w, ic, false, gas)
+		err := bc.verifyHashAgainstScript(hash.Hash160(verif), w, ic, gas)
 		require.True(t, errors.Is(err, ErrVerificationFailed))
 	})
 }
@@ -524,8 +524,8 @@ func TestGetTransaction(t *testing.T) {
 		tx, height, err := bc.GetTransaction(block.Transactions[0].Hash())
 		require.Nil(t, err)
 		assert.Equal(t, block.Index, height)
+		assert.Equal(t, txSize, tx.Size())
 		assert.Equal(t, block.Transactions[0], tx)
-		assert.Equal(t, txSize, io.GetVarSize(tx))
 		assert.Equal(t, 1, io.GetVarSize(tx.Attributes))
 		assert.Equal(t, 1, io.GetVarSize(tx.Scripts))
 		assert.NoError(t, bc.persist())
@@ -117,26 +117,30 @@ func (mp *Pool) tryAddSendersFee(tx *transaction.Transaction, feer Feer, needChe
 		senderFee.feeSum = big.NewInt(0)
 		mp.fees[tx.Sender()] = senderFee
 	}
-	if needCheck && checkBalance(tx, senderFee) != nil {
+	if needCheck {
+		newFeeSum, err := checkBalance(tx, senderFee)
+		if err != nil {
 			return false
 		}
+		senderFee.feeSum.Set(newFeeSum)
+	} else {
 		senderFee.feeSum.Add(senderFee.feeSum, big.NewInt(tx.SystemFee+tx.NetworkFee))
-	mp.fees[tx.Sender()] = senderFee
+	}
 	return true
 }

-// checkBalance returns nil in case when sender has enough GAS to pay for the
-// transaction
-func checkBalance(tx *transaction.Transaction, balance utilityBalanceAndFees) error {
+// checkBalance returns new cumulative fee balance for account or an error in
+// case sender doesn't have enough GAS to pay for the transaction.
+func checkBalance(tx *transaction.Transaction, balance utilityBalanceAndFees) (*big.Int, error) {
 	txFee := big.NewInt(tx.SystemFee + tx.NetworkFee)
 	if balance.balance.Cmp(txFee) < 0 {
-		return ErrInsufficientFunds
+		return nil, ErrInsufficientFunds
 	}
-	needFee := txFee.Add(txFee, balance.feeSum)
-	if balance.balance.Cmp(needFee) < 0 {
-		return ErrConflict
+	txFee.Add(txFee, balance.feeSum)
+	if balance.balance.Cmp(txFee) < 0 {
+		return nil, ErrConflict
 	}
-	return nil
+	return txFee, nil
 }

 // Add tries to add given transaction to the Pool.
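Editor's note: checkBalance now reports the would-be cumulative fee sum instead of just signalling an error, and tryAddSendersFee stores that value with big.Int.Set on the checked path. A self-contained sketch of the new contract; error values and field names below are simplified stand-ins, not the mempool package API.

package main

import (
	"errors"
	"fmt"
	"math/big"
)

var (
	errInsufficientFunds = errors.New("insufficient funds")
	errConflict          = errors.New("conflicting fees")
)

type balanceAndFees struct {
	balance *big.Int // sender's GAS balance
	feeSum  *big.Int // fees of transactions already pooled for this sender
}

// checkBalance mirrors the new signature from the hunk: it returns the would-be
// cumulative fee sum, or an error if the sender cannot cover it.
func checkBalance(sysFee, netFee int64, b balanceAndFees) (*big.Int, error) {
	txFee := big.NewInt(sysFee + netFee)
	if b.balance.Cmp(txFee) < 0 {
		return nil, errInsufficientFunds
	}
	txFee.Add(txFee, b.feeSum)
	if b.balance.Cmp(txFee) < 0 {
		return nil, errConflict
	}
	return txFee, nil
}

func main() {
	b := balanceAndFees{balance: big.NewInt(100), feeSum: big.NewInt(30)}
	newSum, err := checkBalance(40, 20, b)
	if err != nil {
		fmt.Println("rejected:", err)
		return
	}
	b.feeSum.Set(newSum) // as tryAddSendersFee now does on the needCheck path
	fmt.Println("pooled, fee sum is now", b.feeSum)
}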
@@ -300,7 +304,8 @@ func (mp *Pool) checkTxConflicts(tx *transaction.Transaction, fee Feer) error {
 		senderFee.balance = fee.GetUtilityTokenBalance(tx.Sender())
 		senderFee.feeSum = big.NewInt(0)
 	}
-	return checkBalance(tx, senderFee)
+	_, err := checkBalance(tx, senderFee)
+	return err
 }

 // Verify checks if a Sender of tx is able to pay for it (and all the other
@@ -62,8 +62,8 @@ type Transaction struct {
 	// for correct signing/verification.
 	Network netmode.Magic

-	// feePerByte is the ratio of NetworkFee and tx size, used for calculating tx priority.
-	feePerByte int64
+	// size is transaction's serialized size.
+	size int

 	// Hash of the transaction (double SHA256).
 	hash util.Uint256
@@ -158,6 +158,7 @@ func (t *Transaction) DecodeBinary(br *io.BinReader) {
 	// to do it anymore.
 	if br.Err == nil {
 		br.Err = t.createHash()
+		_ = t.Size()
 	}
 }

@@ -252,18 +253,22 @@ func NewTransactionFromBytes(network netmode.Magic, b []byte) (*Transaction, err
 	if r.Err == nil {
 		return nil, errors.New("additional data after the transaction")
 	}
-	tx.feePerByte = tx.NetworkFee / int64(len(b))
+	tx.size = len(b)
 	return tx, nil
 }

 // FeePerByte returns NetworkFee of the transaction divided by
 // its size
 func (t *Transaction) FeePerByte() int64 {
-	if t.feePerByte != 0 {
-		return t.feePerByte
+	return t.NetworkFee / int64(t.Size())
+}
+
+// Size returns size of the serialized transaction.
+func (t *Transaction) Size() int {
+	if t.size == 0 {
+		t.size = io.GetVarSize(t)
 	}
-	t.feePerByte = t.NetworkFee / int64(io.GetVarSize(t))
-	return t.feePerByte
+	return t.size
 }

 // Sender returns the sender of the transaction which is always on the first place
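Editor's note: the transaction.go hunks replace the cached feePerByte field with a cached serialized size — Size() computes io.GetVarSize(t) once, FeePerByte() is derived from it on demand. A minimal sketch of that caching pattern under the same assumption; a dummy function stands in for the real wire-size calculation.

package main

import "fmt"

type tx struct {
	networkFee int64
	size       int // cached serialized size, filled lazily
}

// serializedSize stands in for io.GetVarSize(t) in the real code.
func (t *tx) serializedSize() int { return 250 }

// Size caches the serialized size on first use, as the new Transaction.Size does.
func (t *tx) Size() int {
	if t.size == 0 {
		t.size = t.serializedSize()
	}
	return t.size
}

// FeePerByte is now derived from Size instead of keeping its own cached field.
func (t *tx) FeePerByte() int64 {
	return t.networkFee / int64(t.Size())
}

func main() {
	t := &tx{networkFee: 1_000_000}
	fmt.Println(t.Size(), t.FeePerByte()) // 250 4000
}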
func (t *Transaction) MarshalJSON() ([]byte, error) {
|
||||
tx := transactionJSON{
|
||||
TxID: t.Hash(),
|
||||
Size: io.GetVarSize(t),
|
||||
Size: t.Size(),
|
||||
Version: t.Version,
|
||||
Nonce: t.Nonce,
|
||||
Sender: address.Uint160ToString(t.Sender()),
|
||||
|
@@ -329,6 +334,9 @@ func (t *Transaction) UnmarshalJSON(data []byte) error {
 	if t.Hash() != tx.TxID {
 		return errors.New("txid doesn't match transaction hash")
 	}
+	if t.Size() != tx.Size {
+		return errors.New("'size' doesn't match transaction size")
+	}

 	return t.isValid()
 }
@@ -73,6 +73,7 @@ func TestNew(t *testing.T) {
 	assert.Equal(t, script, tx.Script)
 	// Update hash fields to match tx2 that is gonna autoupdate them on decode.
 	_ = tx.Hash()
+	_ = tx.Size()
 	testserdes.EncodeDecodeBinary(t, tx, &Transaction{Network: netmode.UnitTestNet})
 }

@@ -811,6 +811,7 @@ func testRPCProtocol(t *testing.T, doRPCCall func(string, string, *testing.T) []
 	t.Run("getrawtransaction 2 arguments, verbose", func(t *testing.T) {
 		block, _ := chain.GetBlock(chain.GetHeaderHash(0))
 		TXHash := block.Transactions[0].Hash()
+		_ = block.Transactions[0].Size()
 		rpc := fmt.Sprintf(`{"jsonrpc": "2.0", "id": 1, "method": "getrawtransaction", "params": ["%s", 1]}"`, TXHash.StringLE())
 		body := doRPCCall(rpc, httpSrv.URL, t)
 		txOut := checkErrGetResult(t, body, false)
pkg/vm/vm.go — 35 changed lines
@@ -80,9 +80,6 @@ type VM struct {
 	SyscallHandler func(v *VM, id uint32) error

 	trigger trigger.Type
-
-	// Public keys cache.
-	keys map[string]*keys.PublicKey
 }

 // New returns a new VM object ready to load AVM bytecode scripts.
@@ -96,7 +93,6 @@ func NewWithTrigger(t trigger.Type) *VM {
 		state:   NoneState,
 		istack:  NewStack("invocation"),
 		refs:    newRefCounter(),
-		keys:    make(map[string]*keys.PublicKey),
 		trigger: t,

 		SyscallHandler: defaultSyscallHandler,
@@ -140,17 +136,6 @@ func (v *VM) Istack() *Stack {
 	return v.istack
 }

-// SetPublicKeys sets internal key cache to the specified value (note
-// that it doesn't copy them).
-func (v *VM) SetPublicKeys(keys map[string]*keys.PublicKey) {
-	v.keys = keys
-}
-
-// GetPublicKeys returns internal key cache (note that it doesn't copy it).
-func (v *VM) GetPublicKeys() map[string]*keys.PublicKey {
-	return v.keys
-}
-
 // LoadArgs loads in the arguments used in the Mian entry point.
 func (v *VM) LoadArgs(method []byte, args []stackitem.Item) {
 	if len(args) > 0 {
@@ -1586,8 +1571,8 @@ func CheckMultisigPar(v *VM, curve elliptic.Curve, h []byte, pkeys [][]byte, sig
 		go worker(tasks, results)
 	}

-	tasks <- task{pub: v.bytesToPublicKey(pkeys[k1], curve), signum: s1}
-	tasks <- task{pub: v.bytesToPublicKey(pkeys[k2], curve), signum: s2}
+	tasks <- task{pub: bytesToPublicKey(pkeys[k1], curve), signum: s1}
+	tasks <- task{pub: bytesToPublicKey(pkeys[k2], curve), signum: s2}

 	sigok := true
 	taskCount := 2
@@ -1631,7 +1616,7 @@ loop:
 			nextKey = k2
 		}
 		taskCount++
-		tasks <- task{pub: v.bytesToPublicKey(pkeys[nextKey], curve), signum: nextSig}
+		tasks <- task{pub: bytesToPublicKey(pkeys[nextKey], curve), signum: nextSig}
 	}

 	close(tasks)
@@ -1641,7 +1626,7 @@ loop:

 func checkMultisig1(v *VM, curve elliptic.Curve, h []byte, pkeys [][]byte, sig []byte) bool {
 	for i := range pkeys {
-		pkey := v.bytesToPublicKey(pkeys[i], curve)
+		pkey := bytesToPublicKey(pkeys[i], curve)
 		if pkey.Verify(sig, h) {
 			return true
 		}
@@ -1696,19 +1681,11 @@ func (v *VM) checkInvocationStackSize() {

 // bytesToPublicKey is a helper deserializing keys using cache and panicing on
 // error.
-func (v *VM) bytesToPublicKey(b []byte, curve elliptic.Curve) *keys.PublicKey {
-	var pkey *keys.PublicKey
-	s := string(b)
-	if v.keys[s] != nil {
-		pkey = v.keys[s]
-	} else {
-		var err error
-		pkey, err = keys.NewPublicKeyFromBytes(b, curve)
+func bytesToPublicKey(b []byte, curve elliptic.Curve) *keys.PublicKey {
+	pkey, err := keys.NewPublicKeyFromBytes(b, curve)
 	if err != nil {
 		panic(err.Error())
 	}
-		v.keys[s] = pkey
-	}
 	return pkey
 }

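Editor's note: with the VM's internal key map gone, bytesToPublicKey becomes a plain function that parses the key on every call and still panics on bad input (which is why the cache-oriented test is dropped below). A runnable sketch of that shape; parsing uses the standard library here rather than neo-go's keys package, so everything except the helper's name and panic-on-error behaviour is an assumption.

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
)

// bytesToPublicKey mirrors the new free helper in vm.go: no VM receiver and no
// key cache, just parse on every call and panic on error.
func bytesToPublicKey(b []byte, curve elliptic.Curve) *ecdsa.PublicKey {
	x, y := elliptic.UnmarshalCompressed(curve, b)
	if x == nil {
		panic("invalid public key bytes")
	}
	return &ecdsa.PublicKey{Curve: curve, X: x, Y: y}
}

func main() {
	priv, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	compressed := elliptic.MarshalCompressed(elliptic.P256(), priv.X, priv.Y)

	pub := bytesToPublicKey(compressed, elliptic.P256())
	fmt.Println("parsed X:", pub.X)

	compressed[0] = 0xff // corrupt the encoding, as the removed test did
	func() {
		defer func() { fmt.Println("recovered:", recover()) }()
		bytesToPublicKey(compressed, elliptic.P256())
	}()
}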
@@ -2,7 +2,6 @@ package vm

 import (
 	"bytes"
-	"crypto/elliptic"
 	"encoding/binary"
 	"encoding/hex"
 	"errors"
@@ -103,25 +102,6 @@ func TestAddGas(t *testing.T) {
 	require.False(t, v.AddGas(5))
 }

-func TestBytesToPublicKey(t *testing.T) {
-	v := newTestVM()
-	cache := v.GetPublicKeys()
-	assert.Equal(t, 0, len(cache))
-	keyHex := "03b209fd4f53a7170ea4444e0cb0a6bb6a53c2bd016926989cf85f9b0fba17a70c"
-	keyBytes, _ := hex.DecodeString(keyHex)
-	key := v.bytesToPublicKey(keyBytes, elliptic.P256())
-	assert.NotNil(t, key)
-	key2 := v.bytesToPublicKey(keyBytes, elliptic.P256())
-	assert.Equal(t, key, key2)
-
-	cache = v.GetPublicKeys()
-	assert.Equal(t, 1, len(cache))
-	assert.NotNil(t, cache[string(keyBytes)])
-
-	keyBytes[0] = 0xff
-	require.Panics(t, func() { v.bytesToPublicKey(keyBytes, elliptic.P256()) })
-}
-
 func TestPushBytes1to75(t *testing.T) {
 	buf := io.NewBufBinWriter()
 	for i := 1; i <= 75; i++ {