package core

import (
	"bytes"
	"errors"
	"fmt"
	"math"
	"math/big"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	"github.com/nspcc-dev/neo-go/pkg/config"
	"github.com/nspcc-dev/neo-go/pkg/core/block"
	"github.com/nspcc-dev/neo-go/pkg/core/blockchainer"
	"github.com/nspcc-dev/neo-go/pkg/core/blockchainer/services"
	"github.com/nspcc-dev/neo-go/pkg/core/dao"
	"github.com/nspcc-dev/neo-go/pkg/core/interop"
	"github.com/nspcc-dev/neo-go/pkg/core/interop/contract"
	"github.com/nspcc-dev/neo-go/pkg/core/mempool"
	"github.com/nspcc-dev/neo-go/pkg/core/native"
	"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
	"github.com/nspcc-dev/neo-go/pkg/core/state"
	"github.com/nspcc-dev/neo-go/pkg/core/stateroot"
	"github.com/nspcc-dev/neo-go/pkg/core/storage"
	"github.com/nspcc-dev/neo-go/pkg/core/transaction"
	"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"github.com/nspcc-dev/neo-go/pkg/encoding/bigint"
	"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
	"github.com/nspcc-dev/neo-go/pkg/io"
	"github.com/nspcc-dev/neo-go/pkg/smartcontract"
	"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
	"github.com/nspcc-dev/neo-go/pkg/smartcontract/manifest"
	"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
	"github.com/nspcc-dev/neo-go/pkg/util"
	"github.com/nspcc-dev/neo-go/pkg/vm"
	"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
	"go.uber.org/zap"
)
// Tuning parameters.
const (
	headerBatchCount = 2000
	version          = "0.1.1"

	defaultInitialGAS                      = 52000000_00000000
	defaultMemPoolSize                     = 50000
	defaultP2PNotaryRequestPayloadPoolSize = 1000
	defaultMaxBlockSize                    = 262144
	defaultMaxBlockSystemFee               = 900000000000
	defaultMaxTraceableBlocks              = 2102400 // 1 year of 15s blocks
	defaultMaxTransactionsPerBlock         = 512
	// HeaderVerificationGasLimit is the maximum amount of GAS for block header verification.
	HeaderVerificationGasLimit = 3_00000000 // 3 GAS
)
var (
	// ErrAlreadyExists is returned when trying to add an already existing
	// transaction into the pool (not specifying whether it exists in the
	// chain or mempool).
	ErrAlreadyExists = errors.New("already exists")
	// ErrOOM is returned when adding a transaction to the memory pool
	// that has reached its full capacity.
	ErrOOM = errors.New("no space left in the memory pool")
	// ErrPolicy is returned on an attempt to add a transaction that doesn't
	// comply with the node's configured policy into the mempool.
	ErrPolicy = errors.New("not allowed by policy")
	// ErrInvalidBlockIndex is returned when trying to add a block with an
	// index other than the expected height of the blockchain.
	ErrInvalidBlockIndex error = errors.New("invalid block index")
	// ErrHasConflicts is returned when trying to add a transaction which
	// conflicts with another transaction in the chain or pool according to
	// its Conflicts attribute.
	ErrHasConflicts = errors.New("has conflicts")
)

var (
	persistInterval = 1 * time.Second
)
// Blockchain represents the blockchain. It maintains internal state representing
// the state of the ledger that can be accessed in various ways and changed by
// adding new blocks or headers.
type Blockchain struct {
	config config.ProtocolConfiguration

	// The only way chain state changes is by adding blocks, so we can't
	// allow concurrent block additions. It differs from the next lock in
	// that it's only for the AddBlock method itself, the chain state is
	// protected by the lock below, but holding it during all of AddBlock
	// is too expensive (because the state only changes when persisting
	// change cache).
	addLock sync.Mutex
	// This lock ensures blockchain immutability for operations that need
	// that while performing their tasks. It's mostly used as a read lock
	// with the only writer being the block addition logic.
	lock sync.RWMutex

	// Data access object for CRUD operations around storage.
	dao *dao.Simple

	// Current index/height of the highest block.
	// Read access should always be called by BlockHeight().
	// Write access should only happen in storeBlock().
	blockHeight uint32

	// Current top Block wrapped in an atomic.Value for safe access.
	topBlock atomic.Value

	// Current persisted block count.
	persistedHeight uint32

	// Number of headers stored in the chain file.
	storedHeaderCount uint32

	// Header hashes list with associated lock.
	headerHashesLock sync.RWMutex
	headerHashes     []util.Uint256

	// Stop synchronization mechanisms.
	stopCh      chan struct{}
	runToExitCh chan struct{}

	memPool *mempool.Pool

	// postBlock is a set of callback methods which should be run under the
	// Blockchain lock after a new block is persisted.
	// Block's transactions are passed via mempool.
	postBlock []func(blockchainer.Blockchainer, *mempool.Pool, *block.Block)

	sbCommittee keys.PublicKeys

	log *zap.Logger

	lastBatch *storage.MemBatch

	contracts native.Contracts

	extensible atomic.Value

	// defaultBlockWitness stores transaction.Witness with m out of n multisig,
	// where n = ValidatorsCount.
	defaultBlockWitness atomic.Value

	stateRoot *stateroot.Module

	// Notification subsystem.
	events  chan bcEvent
	subCh   chan interface{}
	unsubCh chan interface{}
}

// bcEvent is an internal event generated by the Blockchain and then
// broadcast to other parties. It joins the new block and associated
// invocation logs; all the other events visible from outside can be produced
// from this combination.
type bcEvent struct {
	block          *block.Block
	appExecResults []*state.AppExecResult
}
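
// A bcEvent is produced once per stored block and fanned out by
// notificationDispatcher below: the first and the last execution results
// belong to the block itself (note the Container checks in the dispatcher),
// with per-transaction results in between, and the block is sent last.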
// NewBlockchain returns a new blockchain object that will use the
// given Store as its underlying storage. For it to work correctly you need
// to spawn a goroutine for its Run method after this initialization.
func NewBlockchain(s storage.Store, cfg config.ProtocolConfiguration, log *zap.Logger) (*Blockchain, error) {
	if log == nil {
		return nil, errors.New("empty logger")
	}

	if cfg.InitialGASSupply <= 0 {
		cfg.InitialGASSupply = fixedn.Fixed8(defaultInitialGAS)
		log.Info("initial gas supply is not set or wrong, setting default value", zap.String("InitialGASSupply", cfg.InitialGASSupply.String()))
	}
	if cfg.MemPoolSize <= 0 {
		cfg.MemPoolSize = defaultMemPoolSize
		log.Info("mempool size is not set or wrong, setting default value", zap.Int("MemPoolSize", cfg.MemPoolSize))
	}
	if cfg.P2PSigExtensions && cfg.P2PNotaryRequestPayloadPoolSize <= 0 {
		cfg.P2PNotaryRequestPayloadPoolSize = defaultP2PNotaryRequestPayloadPoolSize
		log.Info("P2PNotaryRequestPayloadPool size is not set or wrong, setting default value", zap.Int("P2PNotaryRequestPayloadPoolSize", cfg.P2PNotaryRequestPayloadPoolSize))
	}
	if cfg.MaxBlockSize == 0 {
		cfg.MaxBlockSize = defaultMaxBlockSize
		log.Info("MaxBlockSize is not set or wrong, setting default value", zap.Uint32("MaxBlockSize", cfg.MaxBlockSize))
	}
	if cfg.MaxBlockSystemFee <= 0 {
		cfg.MaxBlockSystemFee = defaultMaxBlockSystemFee
		log.Info("MaxBlockSystemFee is not set or wrong, setting default value", zap.Int64("MaxBlockSystemFee", cfg.MaxBlockSystemFee))
	}
	if cfg.MaxTraceableBlocks == 0 {
		cfg.MaxTraceableBlocks = defaultMaxTraceableBlocks
		log.Info("MaxTraceableBlocks is not set or wrong, using default value", zap.Uint32("MaxTraceableBlocks", cfg.MaxTraceableBlocks))
	}
	if cfg.MaxTransactionsPerBlock == 0 {
		cfg.MaxTransactionsPerBlock = defaultMaxTransactionsPerBlock
		log.Info("MaxTransactionsPerBlock is not set or wrong, using default value",
			zap.Uint16("MaxTransactionsPerBlock", cfg.MaxTransactionsPerBlock))
	}
	if cfg.MaxValidUntilBlockIncrement == 0 {
		const secondsPerDay = int(24 * time.Hour / time.Second)
		cfg.MaxValidUntilBlockIncrement = uint32(secondsPerDay / cfg.SecondsPerBlock)
		log.Info("MaxValidUntilBlockIncrement is not set or wrong, using default value",
			zap.Uint32("MaxValidUntilBlockIncrement", cfg.MaxValidUntilBlockIncrement))
	}
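	// With the default 15s SecondsPerBlock the fallback above works out to
	// 86400/15 = 5760 blocks, i.e. a transaction can stay valid for about
	// one day after being issued.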
	committee, err := committeeFromConfig(cfg)
	if err != nil {
		return nil, err
	}
	if len(cfg.NativeUpdateHistories) == 0 {
		cfg.NativeUpdateHistories = map[string][]uint32{}
		log.Info("NativeUpdateHistories are not set, using default values")
	}
	bc := &Blockchain{
		config:      cfg,
		dao:         dao.NewSimple(s, cfg.StateRootInHeader),
		stopCh:      make(chan struct{}),
		runToExitCh: make(chan struct{}),
		memPool:     mempool.New(cfg.MemPoolSize, 0, false),
		sbCommittee: committee,
		log:         log,
		events:      make(chan bcEvent),
		subCh:       make(chan interface{}),
		unsubCh:     make(chan interface{}),
		contracts:   *native.NewContracts(cfg),
	}

	bc.stateRoot = stateroot.NewModule(bc, bc.log, bc.dao.Store)
	bc.contracts.Designate.StateRootService = bc.stateRoot

	if err := bc.init(); err != nil {
		return nil, err
	}

	return bc, nil
}
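
// A minimal lifecycle sketch for the above (illustrative only; cfg is assumed
// to be a properly filled config.ProtocolConfiguration, an in-memory store is
// used for brevity):
//
//	bc, err := NewBlockchain(storage.NewMemoryStore(), cfg, zap.NewNop())
//	if err != nil {
//		return err
//	}
//	go bc.Run()      // required for correct operation
//	defer bc.Close() // stops the loop, persists and closes the DB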
// SetOracle sets the oracle module. It isn't protected by a mutex and
// must be called before `bc.Run()` to avoid data races.
func (bc *Blockchain) SetOracle(mod services.Oracle) {
	orc := bc.contracts.Oracle
	md, ok := orc.GetMethod(manifest.MethodVerify, -1)
	if !ok {
		panic(fmt.Errorf("%s method not found", manifest.MethodVerify))
	}
	mod.UpdateNativeContract(orc.NEF.Script, orc.GetOracleResponseScript(),
		orc.Hash, md.MD.Offset)
	orc.Module.Store(mod)
	bc.contracts.Designate.OracleService.Store(mod)
}

// SetNotary sets the notary module. It isn't protected by a mutex and
// must be called before `bc.Run()` to avoid data races.
func (bc *Blockchain) SetNotary(mod services.Notary) {
	bc.contracts.Designate.NotaryService.Store(mod)
}
func (bc *Blockchain) init() error {
	// If we could not find the version in the Store, we know that there is nothing stored.
	ver, err := bc.dao.GetVersion()
	if err != nil {
		bc.log.Info("no storage version found! creating genesis block")
		if err = bc.dao.PutVersion(version); err != nil {
			return err
		}
		genesisBlock, err := createGenesisBlock(bc.config)
		if err != nil {
			return err
		}
		bc.headerHashes = []util.Uint256{genesisBlock.Hash()}
		err = bc.dao.PutCurrentHeader(hashAndIndexToBytes(genesisBlock.Hash(), genesisBlock.Index))
		if err != nil {
			return err
		}
		if err := bc.stateRoot.Init(0, bc.config.KeepOnlyLatestState); err != nil {
			return fmt.Errorf("can't init MPT: %w", err)
		}
		return bc.storeBlock(genesisBlock, nil)
	}
	if ver != version {
		return fmt.Errorf("storage version mismatch between %s and %s", version, ver)
	}
	// At this point a version was found in the storage and it matched,
	// which implies the storage was created with this version and already
	// contains the genesis block as its first block.
	bc.log.Info("restoring blockchain", zap.String("version", version))

	bHeight, err := bc.dao.GetCurrentBlockHeight()
	if err != nil {
		return err
	}
	bc.blockHeight = bHeight
	bc.persistedHeight = bHeight
	if err = bc.stateRoot.Init(bHeight, bc.config.KeepOnlyLatestState); err != nil {
		return fmt.Errorf("can't init MPT at height %d: %w", bHeight, err)
	}

	bc.headerHashes, err = bc.dao.GetHeaderHashes()
	if err != nil {
		return err
	}

	bc.storedHeaderCount = uint32(len(bc.headerHashes))

	currHeaderHeight, currHeaderHash, err := bc.dao.GetCurrentHeaderHeight()
	if err != nil {
		return err
	}
	if bc.storedHeaderCount == 0 && currHeaderHeight == 0 {
		bc.headerHashes = append(bc.headerHashes, currHeaderHash)
	}

	// There is a high chance that the Node is stopped before the next
	// batch of 2000 headers was stored. Via the currentHeaders stored we can sync
	// that with stored blocks.
	if currHeaderHeight >= bc.storedHeaderCount {
		hash := currHeaderHash
		var targetHash util.Uint256
		if len(bc.headerHashes) > 0 {
			targetHash = bc.headerHashes[len(bc.headerHashes)-1]
		} else {
			genesisBlock, err := createGenesisBlock(bc.config)
			if err != nil {
				return err
			}
			targetHash = genesisBlock.Hash()
			bc.headerHashes = append(bc.headerHashes, targetHash)
		}
		headers := make([]*block.Header, 0)
		for hash != targetHash {
			header, err := bc.GetHeader(hash)
			if err != nil {
				return fmt.Errorf("could not get header %s: %w", hash, err)
			}
			headers = append(headers, header)
			hash = header.PrevHash
		}
		headerSliceReverse(headers)
		for _, h := range headers {
			bc.headerHashes = append(bc.headerHashes, h.Hash())
		}
	}

	err = bc.contracts.NEO.InitializeCache(bc, bc.dao)
	if err != nil {
		return fmt.Errorf("can't init cache for NEO native contract: %w", err)
	}
	err = bc.contracts.Management.InitializeCache(bc.dao)
	if err != nil {
		return fmt.Errorf("can't init cache for Management native contract: %w", err)
	}

	// Check autogenerated native contracts' manifests and NEFs against the stored ones.
	// Needs to be done after native Management cache initialisation to be able to get
	// contract state from DAO via high-level bc API.
	for _, c := range bc.contracts.Contracts {
		md := c.Metadata()
		history := md.UpdateHistory
		if len(history) == 0 || history[0] > bHeight {
			continue
		}
		storedCS := bc.GetContractState(md.Hash)
		if storedCS == nil {
			return fmt.Errorf("native contract %s is not stored", md.Name)
		}
		storedCSBytes, err := stackitem.SerializeConvertible(storedCS)
		if err != nil {
			return fmt.Errorf("failed to check native %s state against autogenerated one: %w", md.Name, err)
		}
		autogenCS := &state.Contract{
			ContractBase:  md.ContractBase,
			UpdateCounter: storedCS.UpdateCounter, // it can be restored only from the DB, so use the stored value.
		}
		autogenCSBytes, err := stackitem.SerializeConvertible(autogenCS)
		if err != nil {
			return fmt.Errorf("failed to check native %s state against autogenerated one: %w", md.Name, err)
		}
		if !bytes.Equal(storedCSBytes, autogenCSBytes) {
			return fmt.Errorf("native %s: version mismatch (stored contract state differs from autogenerated one), "+
				"try to resynchronize the node from the genesis", md.Name)
		}
	}

	return bc.updateExtensibleWhitelist(bHeight)
}
// Run runs the chain loop, it needs to be run as a goroutine and executing
// it is critical for correct Blockchain operation.
func (bc *Blockchain) Run() {
	persistTimer := time.NewTimer(persistInterval)
	defer func() {
		persistTimer.Stop()
		if err := bc.persist(); err != nil {
			bc.log.Warn("failed to persist", zap.Error(err))
		}
		if err := bc.dao.Store.Close(); err != nil {
			bc.log.Warn("failed to close db", zap.Error(err))
		}
		close(bc.runToExitCh)
	}()
	go bc.notificationDispatcher()
	for {
		select {
		case <-bc.stopCh:
			return
		case <-persistTimer.C:
			go func() {
				err := bc.persist()
				if err != nil {
					bc.log.Warn("failed to persist blockchain", zap.Error(err))
				}
				persistTimer.Reset(persistInterval)
			}()
		}
	}
}
// notificationDispatcher manages subscriptions to events and broadcasts new events.
func (bc *Blockchain) notificationDispatcher() {
	var (
		// These are just sets of subscribers, though modelled as maps
		// for ease of management (not a lot of subscriptions are really
		// expected, but maps are convenient for adding/deleting elements).
		blockFeed        = make(map[chan<- *block.Block]bool)
		txFeed           = make(map[chan<- *transaction.Transaction]bool)
		notificationFeed = make(map[chan<- *state.NotificationEvent]bool)
		executionFeed    = make(map[chan<- *state.AppExecResult]bool)
	)
	for {
		select {
		case <-bc.stopCh:
			return
		case sub := <-bc.subCh:
			switch ch := sub.(type) {
			case chan<- *block.Block:
				blockFeed[ch] = true
			case chan<- *transaction.Transaction:
				txFeed[ch] = true
			case chan<- *state.NotificationEvent:
				notificationFeed[ch] = true
			case chan<- *state.AppExecResult:
				executionFeed[ch] = true
			default:
				panic(fmt.Sprintf("bad subscription: %T", sub))
			}
		case unsub := <-bc.unsubCh:
			switch ch := unsub.(type) {
			case chan<- *block.Block:
				delete(blockFeed, ch)
			case chan<- *transaction.Transaction:
				delete(txFeed, ch)
			case chan<- *state.NotificationEvent:
				delete(notificationFeed, ch)
			case chan<- *state.AppExecResult:
				delete(executionFeed, ch)
			default:
				panic(fmt.Sprintf("bad unsubscription: %T", unsub))
			}
		case event := <-bc.events:
			// We don't want to waste time looping through transactions when there are no
			// subscribers.
			if len(txFeed) != 0 || len(notificationFeed) != 0 || len(executionFeed) != 0 {
				aer := event.appExecResults[0]
				if !aer.Container.Equals(event.block.Hash()) {
					panic("inconsistent application execution results")
				}
				for ch := range executionFeed {
					ch <- aer
				}
				for i := range aer.Events {
					for ch := range notificationFeed {
						ch <- &aer.Events[i]
					}
				}
				aerIdx := 1
				for _, tx := range event.block.Transactions {
					aer := event.appExecResults[aerIdx]
					if !aer.Container.Equals(tx.Hash()) {
						panic("inconsistent application execution results")
					}
					aerIdx++
					for ch := range executionFeed {
						ch <- aer
					}
					if aer.VMState == vm.HaltState {
						for i := range aer.Events {
							for ch := range notificationFeed {
								ch <- &aer.Events[i]
							}
						}
					}
					for ch := range txFeed {
						ch <- tx
					}
				}
				aer = event.appExecResults[aerIdx]
				if !aer.Container.Equals(event.block.Hash()) {
					panic("inconsistent application execution results")
				}
				for ch := range executionFeed {
					ch <- aer
				}
				for i := range aer.Events {
					for ch := range notificationFeed {
						ch <- &aer.Events[i]
					}
				}
			}
			for ch := range blockFeed {
				ch <- event.block
			}
		}
	}
}
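
// Subscription sketch (illustrative; callers normally use the exported
// subscription methods instead of touching these channels directly):
//
//	ch := make(chan *block.Block)
//	bc.subCh <- (chan<- *block.Block)(ch) // dispatcher adds ch to blockFeed
//	b := <-ch                             // receives every newly stored block
//	bc.unsubCh <- (chan<- *block.Block)(ch)
//
// The type switches above match exact dynamic types, which is why the
// send-only conversion matters: it routes the channel to the right feed.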
// Close stops Blockchain's internal loop, syncs changes to persistent storage
// and closes it. The Blockchain is no longer functional after the call to Close.
func (bc *Blockchain) Close() {
	// If there is a block addition in progress, wait for it to finish and
	// don't allow new ones.
	bc.addLock.Lock()
	close(bc.stopCh)
	<-bc.runToExitCh
	bc.addLock.Unlock()
}
// AddBlock accepts a successive block for the Blockchain, verifies it and
// stores it internally. Eventually it will be persisted to the backing storage.
func (bc *Blockchain) AddBlock(block *block.Block) error {
	bc.addLock.Lock()
	defer bc.addLock.Unlock()

	var mp *mempool.Pool
	expectedHeight := bc.BlockHeight() + 1
	if expectedHeight != block.Index {
		return fmt.Errorf("expected %d, got %d: %w", expectedHeight, block.Index, ErrInvalidBlockIndex)
	}
	if bc.config.StateRootInHeader != block.StateRootEnabled {
		return fmt.Errorf("%w: %v != %v",
			ErrHdrStateRootSetting, bc.config.StateRootInHeader, block.StateRootEnabled)
	}

	if block.Index == bc.HeaderHeight()+1 {
		err := bc.addHeaders(bc.config.VerifyBlocks, &block.Header)
		if err != nil {
			return err
		}
	}
	if bc.config.VerifyBlocks {
		merkle := block.ComputeMerkleRoot()
		if !block.MerkleRoot.Equals(merkle) {
			return errors.New("invalid block: MerkleRoot mismatch")
		}
		mp = mempool.New(len(block.Transactions), 0, false)
		for _, tx := range block.Transactions {
			var err error
			// Transactions are verified before adding them
			// into the pool, so there is no point in doing
			// it again even if we're verifying in-block transactions.
			if bc.memPool.ContainsKey(tx.Hash()) {
				err = mp.Add(tx, bc)
				if err == nil {
					continue
				}
			} else {
				err = bc.verifyAndPoolTx(tx, mp, bc)
			}
			if err != nil && bc.config.VerifyTransactions {
				return fmt.Errorf("transaction %s failed to verify: %w", tx.Hash().StringLE(), err)
			}
		}
	}
	return bc.storeBlock(block, mp)
}
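
// AddBlock pipeline sketch (derived from the code above):
//
//	AddBlock(b)
//	├── addHeaders(verify, &b.Header)  // if b's header is one past HeaderHeight
//	├── verifyAndPoolTx(tx, mp, bc)    // with VerifyBlocks, for txs not in the mempool
//	└── storeBlock(b, mp)              // persists b and updates the chain state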
// AddHeaders processes the given headers and adds them to the
// HeaderHashList. It expects headers to be sorted by index.
func (bc *Blockchain) AddHeaders(headers ...*block.Header) error {
	return bc.addHeaders(bc.config.VerifyBlocks, headers...)
}

// addHeaders is an internal implementation of AddHeaders (`verify` parameter
// tells it to verify or not verify given headers).
func (bc *Blockchain) addHeaders(verify bool, headers ...*block.Header) error {
	var (
		start = time.Now()
		batch = bc.dao.Store.Batch()
		err   error
	)

	if len(headers) > 0 {
		var i int
		curHeight := bc.HeaderHeight()
		for i = range headers {
			if headers[i].Index > curHeight {
				break
			}
		}
		headers = headers[i:]
	}

	if len(headers) == 0 {
		return nil
	} else if verify {
		// Verify that the chain of the headers is consistent.
		var lastHeader *block.Header
		if lastHeader, err = bc.GetHeader(headers[0].PrevHash); err != nil {
			return fmt.Errorf("previous header was not found: %w", err)
		}
		for _, h := range headers {
			if err = bc.verifyHeader(h, lastHeader); err != nil {
				return err
			}
			lastHeader = h
		}
	}

	buf := io.NewBufBinWriter()
	bc.headerHashesLock.Lock()
	defer bc.headerHashesLock.Unlock()
	oldlen := len(bc.headerHashes)
	var lastHeader *block.Header
	for _, h := range headers {
		if int(h.Index) != len(bc.headerHashes) {
			continue
		}
		bc.headerHashes = append(bc.headerHashes, h.Hash())
		h.EncodeBinary(buf.BinWriter)
		buf.BinWriter.WriteB(0)
		if buf.Err != nil {
			return buf.Err
		}

		key := storage.AppendPrefix(storage.DataBlock, h.Hash().BytesBE())
		batch.Put(key, buf.Bytes())
		buf.Reset()
		lastHeader = h
	}

	if oldlen != len(bc.headerHashes) {
		for int(lastHeader.Index)-headerBatchCount >= int(bc.storedHeaderCount) {
			buf.WriteArray(bc.headerHashes[bc.storedHeaderCount : bc.storedHeaderCount+headerBatchCount])
			if buf.Err != nil {
				return buf.Err
			}

			key := storage.AppendPrefixInt(storage.IXHeaderHashList, int(bc.storedHeaderCount))
			batch.Put(key, buf.Bytes())
			bc.storedHeaderCount += headerBatchCount
			buf.Reset() // Bytes() drains the writer, reset it for the next batch.
		}

		batch.Put(storage.SYSCurrentHeader.Bytes(), hashAndIndexToBytes(lastHeader.Hash(), lastHeader.Index))
		updateHeaderHeightMetric(len(bc.headerHashes) - 1)
		if err = bc.dao.Store.PutBatch(batch); err != nil {
			return err
		}
		bc.log.Debug("done processing headers",
			zap.Int("headerIndex", len(bc.headerHashes)-1),
			zap.Uint32("blockHeight", bc.BlockHeight()),
			zap.Duration("took", time.Since(start)))
	}
	return nil
}
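
// Resulting storage layout sketch for headers (derived from the keys used
// above; values are shown informally):
//
//	DataBlock || header hash            -> header bytes plus a trailing zero byte
//	IXHeaderHashList || 0, 2000, 4000 … -> batches of headerBatchCount hashes
//	SYSCurrentHeader                    -> hash || index of the latest header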
// GetStateModule returns the state root service instance.
func (bc *Blockchain) GetStateModule() blockchainer.StateRoot {
	return bc.stateRoot
}
// storeBlock performs chain update using the block given, it executes all
// transactions with all appropriate side-effects and updates Blockchain state.
// This is the only way to change Blockchain state.
func (bc *Blockchain) storeBlock(block *block.Block, txpool *mempool.Pool) error {
	// Block processing is spread across three goroutines: the main one
	// below executes transactions, a second one stores the block and its
	// transactions to the DB (that part is independent of execution), and
	// a third one consumes execution results from aerchan, saving AERs and
	// processing notifications. Keeping separate DAOs per goroutine and
	// persisting them into the lower one measured several percent faster
	// than sharing a single DAO.
	var (
		cache          = bc.dao.GetWrapped()
		appExecResults = make([]*state.AppExecResult, 0, 2+len(block.Transactions))
		aerchan        = make(chan *state.AppExecResult, len(block.Transactions)/8) // Tested 8 and 4 with no practical difference, but feel free to test more and tune.
		aerdone        = make(chan error)
		blockdone      = make(chan error)
	)
	go func() {
		var (
			kvcache  = cache.GetWrapped()
			writeBuf = io.NewBufBinWriter()
		)
		if err := kvcache.StoreAsBlock(block, writeBuf); err != nil {
			blockdone <- err
			return
		}
		writeBuf.Reset()
		if err := kvcache.StoreAsCurrentBlock(block, writeBuf); err != nil {
			blockdone <- err
			return
		}
		writeBuf.Reset()
		for _, tx := range block.Transactions {
			if err := kvcache.StoreAsTransaction(tx, block.Index, writeBuf); err != nil {
				blockdone <- err
				return
			}
			writeBuf.Reset()
			if bc.config.P2PSigExtensions {
				for _, attr := range tx.GetAttributes(transaction.ConflictsT) {
					hash := attr.Value.(*transaction.Conflicts).Hash
					dummyTx := transaction.NewTrimmedTX(hash)
					dummyTx.Version = transaction.DummyVersion
					if err := kvcache.StoreAsTransaction(dummyTx, block.Index, writeBuf); err != nil {
						blockdone <- fmt.Errorf("failed to store conflicting transaction %s for transaction %s: %w", hash.StringLE(), tx.Hash().StringLE(), err)
						return
					}
					writeBuf.Reset()
				}
			}
		}
		if bc.config.RemoveUntraceableBlocks {
			if block.Index > bc.config.MaxTraceableBlocks {
				index := block.Index - bc.config.MaxTraceableBlocks // is at least 1
				err := kvcache.DeleteBlock(bc.headerHashes[index], writeBuf)
				if err != nil {
					bc.log.Warn("error while removing old block",
						zap.Uint32("index", index),
						zap.Error(err))
				}
				writeBuf.Reset()
			}
		}
		_, err := kvcache.Persist()
		if err != nil {
			blockdone <- err
		}
		close(blockdone)
	}()
	go func() {
		var (
			kvcache     = dao.NewCached(cache)
			writeBuf    = io.NewBufBinWriter()
			err         error
			appendBlock bool
		)
		for aer := range aerchan {
			if aer.Container == block.Hash() && appendBlock {
				err = kvcache.AppendAppExecResult(aer, writeBuf)
			} else {
				err = kvcache.PutAppExecResult(aer, writeBuf)
				if aer.Container == block.Hash() {
					appendBlock = true
				}
			}
			if err != nil {
				err = fmt.Errorf("failed to store exec result: %w", err)
				break
			}
			if aer.Execution.VMState == vm.HaltState {
				for j := range aer.Execution.Events {
					bc.handleNotification(&aer.Execution.Events[j], kvcache, block, aer.Container)
				}
			}
			writeBuf.Reset()
		}
		if err != nil {
			aerdone <- err
			return
		}
		_, err = kvcache.Persist()
		if err != nil {
			aerdone <- err
		}
		close(aerdone)
	}()
	aer, err := bc.runPersist(bc.contracts.GetPersistScript(), block, cache, trigger.OnPersist)
	if err != nil {
		// Release goroutines, don't care about errors, we already have one.
		close(aerchan)
		<-blockdone
		<-aerdone
		return fmt.Errorf("onPersist failed: %w", err)
	}
	appExecResults = append(appExecResults, aer)
aerchan <- aer
2020-06-17 10:57:54 +00:00
2020-09-10 12:02:03 +00:00
for _, tx := range block.Transactions {
2020-06-05 13:07:04 +00:00
systemInterop := bc.newInteropContext(trigger.Application, cache, block, tx)
2020-07-28 13:38:00 +00:00
v := systemInterop.SpawnVM()
2020-12-29 10:45:49 +00:00
v.LoadScriptWithFlags(tx.Script, callflag.All)
2021-02-05 08:25:22 +00:00
v.SetPriceGetter(systemInterop.GetPrice)
2021-01-19 08:23:39 +00:00
v.LoadToken = contract.LoadToken(systemInterop)
2020-06-18 19:17:48 +00:00
v.GasLimit = tx.SystemFee
2020-01-20 12:31:12 +00:00
2020-06-05 13:07:04 +00:00
err := v.Run()
2020-10-05 14:04:17 +00:00
var faultException string
2020-06-05 13:07:04 +00:00
if !v.HasFailed() {
_, err := systemInterop.DAO.Persist()
if err != nil {
2020-12-29 15:25:21 +00:00
// Release goroutines, don't care about errors, we already have one.
close(aerchan)
<-blockdone
<-aerdone
2020-08-06 14:44:08 +00:00
return fmt.Errorf("failed to persist invocation results: %w", err)
2020-06-05 13:07:04 +00:00
}
} else {
bc.log.Warn("contract invocation failed",
zap.String("tx", tx.Hash().StringLE()),
zap.Uint32("block", block.Index),
zap.Error(err))
2020-10-05 14:04:17 +00:00
faultException = err.Error()
2020-06-05 13:07:04 +00:00
}
aer := &state.AppExecResult{
2020-11-11 15:43:28 +00:00
Container: tx.Hash(),
Execution: state.Execution{
Trigger: trigger.Application,
VMState: v.State(),
GasConsumed: v.GasConsumed(),
Stack: v.Estack().ToArray(),
Events: systemInterop.Notifications,
FaultException: faultException,
},
2020-06-05 13:07:04 +00:00
}
appExecResults = append(appExecResults, aer)
2020-12-29 15:25:21 +00:00
aerchan <- aer
2018-03-21 16:11:04 +00:00
}
2020-02-06 15:47:03 +00:00
2020-12-08 15:28:00 +00:00
aer, err = bc.runPersist(bc.contracts.GetPostPersistScript(), block, cache, trigger.PostPersist)
2020-09-23 08:48:31 +00:00
if err != nil {
2020-12-29 15:25:21 +00:00
// Release goroutines, don't care about errors, we already have one.
close(aerchan)
<-blockdone
<-aerdone
2020-09-23 08:48:31 +00:00
return fmt.Errorf("postPersist failed: %w", err)
}
appExecResults = append(appExecResults, aer)
2020-12-29 15:25:21 +00:00
aerchan <- aer
close(aerchan)
d := cache.(*dao.Simple)
2021-01-29 14:33:24 +00:00
b := d.GetMPTBatch()
2021-03-26 21:13:19 +00:00
mpt, sr, err := bc.stateRoot.AddMPTBatch(block.Index, b, d.Store)
if err != nil {
2020-12-29 15:25:21 +00:00
// Release goroutines, don't care about errors, we already have one.
<-blockdone
<-aerdone
2020-12-26 10:27:59 +00:00
// Here the MPT can be left in a half-applied state.
// However, if this error occurs, it's a bug somewhere in the code,
// because the changes applied are the ones from HALTed transactions.
2020-12-24 16:32:27 +00:00
return fmt.Errorf("error while trying to apply MPT changes: %w", err)
}
2021-06-29 15:28:44 +00:00
if bc.config.StateRootInHeader && bc.HeaderHeight() > sr.Index {
h, err := bc.GetHeader(bc.GetHeaderHash(int(sr.Index) + 1))
if err != nil {
2020-12-29 15:25:21 +00:00
err = fmt.Errorf("failed to get next header: %w", err)
} else if h.PrevStateRoot != sr.Root {
err = fmt.Errorf("local stateroot and next header's PrevStateRoot mismatch: %s vs %s", sr.Root.StringBE(), h.PrevStateRoot.StringBE())
2021-06-29 15:28:44 +00:00
}
2020-12-29 15:25:21 +00:00
if err != nil {
// Release goroutines, don't care about errors, we already have one.
<-blockdone
<-aerdone
return err
2021-06-29 15:28:44 +00:00
}
}
2020-12-24 16:32:27 +00:00
2020-05-12 14:45:17 +00:00
if bc.config.SaveStorageBatch {
2020-12-29 15:25:21 +00:00
bc.lastBatch = d.GetBatch()
2020-11-24 09:07:58 +00:00
}
2021-04-07 08:14:18 +00:00
// Every persist cycle we also compact our in-memory MPT. It's already
// flushed in AddMPTBatch, so collapsing it is safe.
persistedHeight := atomic.LoadUint32(&bc.persistedHeight)
if persistedHeight == block.Index-1 {
// A depth of 10 is roughly estimated to fit the remaining trie into 1M of memory.
mpt.Collapse(10)
}
2020-05-12 14:45:17 +00:00
2020-12-29 15:25:21 +00:00
// Wait for _both_ goroutines to finish.
blockerr := <-blockdone
aererr := <-aerdone
if blockerr != nil {
return blockerr
}
if aererr != nil {
return aererr
}
2020-05-12 14:45:17 +00:00
bc.lock.Lock()
2020-05-29 14:20:00 +00:00
_, err = cache.Persist()
2019-12-11 10:10:51 +00:00
if err != nil {
2020-05-12 14:45:17 +00:00
bc.lock.Unlock()
2018-03-21 16:11:04 +00:00
return err
}
2021-01-29 14:33:24 +00:00
2021-03-26 21:13:19 +00:00
mpt.Store = bc.dao.Store
bc.stateRoot.UpdateCurrentLocal(mpt, sr)
2019-12-23 16:18:12 +00:00
bc.topBlock.Store(block)
2019-09-26 15:14:00 +00:00
atomic.StoreUint32(&bc.blockHeight, block.Index)
2020-11-27 10:55:48 +00:00
bc.memPool.RemoveStale(func(tx *transaction.Transaction) bool { return bc.IsTxStillRelevant(tx, txpool, false) }, bc)
for _, f := range bc.postBlock {
f(bc, txpool, block)
}
2021-01-18 12:52:51 +00:00
if err := bc.updateExtensibleWhitelist(block.Index); err != nil {
bc.lock.Unlock()
return err
}
2020-05-12 14:45:17 +00:00
bc.lock.Unlock()
updateBlockHeightMetric(block.Index)
2020-05-12 14:20:41 +00:00
// Genesis block is stored when the Blockchain is not yet running, so there
// is no one to read this event. And it doesn't make much sense as an event
// anyway.
if block.Index != 0 {
bc.events <- bcEvent{block, appExecResults}
}
2018-03-09 15:55:25 +00:00
return nil
}
2021-01-18 12:52:51 +00:00
func (bc *Blockchain) updateExtensibleWhitelist(height uint32) error {
updateCommittee := native.ShouldUpdateCommittee(height, bc)
2021-03-23 10:37:30 +00:00
stateVals, sh, err := bc.contracts.Designate.GetDesignatedByRole(bc.dao, noderoles.StateValidator, height)
2021-01-18 12:52:51 +00:00
if err != nil {
return err
}
2021-05-04 14:26:38 +00:00
if bc.extensible.Load() != nil && !updateCommittee && sh != height {
2021-01-18 12:52:51 +00:00
return nil
}
newList := []util.Uint160{bc.contracts.NEO.GetCommitteeAddress()}
nextVals := bc.contracts.NEO.GetNextBlockValidatorsInternal()
script, err := smartcontract.CreateDefaultMultiSigRedeemScript(nextVals)
if err != nil {
return err
}
newList = append(newList, hash.Hash160(script))
bc.updateExtensibleList(&newList, bc.contracts.NEO.GetNextBlockValidatorsInternal())
if len(stateVals) > 0 {
2021-03-23 10:37:30 +00:00
h, err := bc.contracts.Designate.GetLastDesignatedHash(bc.dao, noderoles.StateValidator)
2021-01-18 12:52:51 +00:00
if err != nil {
return err
}
newList = append(newList, h)
bc.updateExtensibleList(&newList, stateVals)
}
sort.Slice(newList, func(i, j int) bool {
return newList[i].Less(newList[j])
})
bc.extensible.Store(newList)
return nil
}
func (bc *Blockchain) updateExtensibleList(s *[]util.Uint160, pubs keys.PublicKeys) {
for _, pub := range pubs {
*s = append(*s, pub.GetScriptHash())
}
}
// IsExtensibleAllowed determines whether the given script hash is allowed to send extensible payloads.
func (bc *Blockchain) IsExtensibleAllowed(u util.Uint160) bool {
us := bc.extensible.Load().([]util.Uint160)
n := sort.Search(len(us), func(i int) bool { return !us[i].Less(u) })
return n < len(us)
}
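A minimal usage sketch, assuming sender is the script hash taken from an
incoming extensible payload:
if !bc.IsExtensibleAllowed(sender) {
	return errors.New("extensible payload from a disallowed sender")
}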
2020-12-29 15:25:21 +00:00
func (bc *Blockchain) runPersist(script []byte, block *block.Block, cache dao.DAO, trig trigger.Type) (*state.AppExecResult, error) {
2020-10-29 16:14:49 +00:00
systemInterop := bc.newInteropContext(trig, cache, block, nil)
2020-09-23 08:48:31 +00:00
v := systemInterop.SpawnVM()
2021-03-16 19:48:32 +00:00
v.LoadScriptWithFlags(script, callflag.All)
2021-02-05 08:25:22 +00:00
v.SetPriceGetter(systemInterop.GetPrice)
2020-09-23 08:48:31 +00:00
if err := v.Run(); err != nil {
return nil, fmt.Errorf("VM has failed: %w", err)
} else if _, err := systemInterop.DAO.Persist(); err != nil {
return nil, fmt.Errorf("can't save changes: %w", err)
}
return &state.AppExecResult{
2020-11-11 15:43:28 +00:00
Container: block.Hash(), // application logs can be retrieved by block hash
Execution: state.Execution{
Trigger: trig,
VMState: v.State(),
GasConsumed: v.GasConsumed(),
Stack: v.Estack().ToArray(),
Events: systemInterop.Notifications,
},
2020-09-23 08:48:31 +00:00
}, nil
}
2020-06-18 13:34:56 +00:00
func (bc *Blockchain) handleNotification(note *state.NotificationEvent, d *dao.Cached, b *block.Block, h util.Uint256) {
2020-11-19 15:01:42 +00:00
if note.Name != "Transfer" {
2020-06-18 13:34:56 +00:00
return
}
2020-06-29 08:25:32 +00:00
arr, ok := note.Item.Value().([]stackitem.Item)
if !ok || len(arr) != 3 {
2020-06-18 13:34:56 +00:00
return
}
var from []byte
2020-06-29 08:25:32 +00:00
fromValue := arr[0].Value()
2020-06-18 13:34:56 +00:00
// we don't have `from` set when we are minting tokens
if fromValue != nil {
from, ok = fromValue.([]byte)
if !ok {
return
}
}
var to []byte
2020-06-29 08:25:32 +00:00
toValue := arr[1].Value()
2020-06-18 13:34:56 +00:00
// we don't have `to` set when we are burning tokens
if toValue != nil {
to, ok = toValue.([]byte)
if !ok {
return
}
}
2020-06-29 08:25:32 +00:00
amount, ok := arr[2].Value().(*big.Int)
2020-06-18 13:34:56 +00:00
if !ok {
2020-06-29 08:25:32 +00:00
bs, ok := arr[2].Value().([]byte)
2020-06-18 13:34:56 +00:00
if !ok {
return
}
2021-04-12 12:09:37 +00:00
if len(bs) > bigint.MaxBytesLen {
return // Not a proper number.
}
2020-06-18 13:34:56 +00:00
amount = bigint.FromBytes(bs)
}
2020-11-24 08:14:25 +00:00
bc.processNEP17Transfer(d, h, b, note.ScriptHash, from, to, amount)
2020-06-18 13:34:56 +00:00
}
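For illustration, the only event shape this parser accepts is a three-element
[from, to, amount] array; a hypothetical minting notification (made-up hash
and values, assuming the usual stackitem constructors) could look like this:
note := state.NotificationEvent{
	ScriptHash: tokenHash, // assumed token contract hash
	Name:       "Transfer",
	Item: stackitem.NewArray([]stackitem.Item{
		stackitem.Null{},                         // nil `from` means minting
		stackitem.NewByteArray(to.BytesBE()),     // receiver script hash bytes
		stackitem.NewBigInteger(big.NewInt(100)), // amount may also arrive as a byte slice
	}),
}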
2020-03-05 14:11:58 +00:00
func parseUint160(addr []byte) util.Uint160 {
2020-03-05 07:45:50 +00:00
if u, err := util.Uint160DecodeBytesBE(addr); err == nil {
2020-03-05 14:11:58 +00:00
return u
2020-03-05 07:45:50 +00:00
}
2020-03-05 14:11:58 +00:00
return util.Uint160{}
2020-03-05 07:45:50 +00:00
}
2020-11-24 08:14:25 +00:00
func (bc *Blockchain) processNEP17Transfer(cache *dao.Cached, h util.Uint256, b *block.Block, sc util.Uint160, from, to []byte, amount *big.Int) {
2020-03-05 07:45:50 +00:00
toAddr := parseUint160(to)
fromAddr := parseUint160(from)
2020-07-28 09:23:58 +00:00
var id int32
nativeContract := bc.contracts.ByHash(sc)
if nativeContract != nil {
2021-02-09 09:26:25 +00:00
id = nativeContract.Metadata().ID
2020-07-28 09:23:58 +00:00
} else {
2020-12-13 15:26:35 +00:00
assetContract, err := bc.contracts.Management.GetContract(cache, sc)
2020-09-10 11:43:24 +00:00
if err != nil {
2020-07-28 09:23:58 +00:00
return
}
id = assetContract.ID
}
2020-11-24 08:14:25 +00:00
transfer := &state.NEP17Transfer{
2020-07-28 16:05:16 +00:00
Asset: id,
2020-03-05 14:11:58 +00:00
From: fromAddr,
To: toAddr,
Block: b.Index,
Timestamp: b.Timestamp,
2020-06-18 13:34:56 +00:00
Tx: h,
2020-03-05 14:11:58 +00:00
}
if !fromAddr.Equals(util.Uint160{}) {
2021-07-25 10:48:50 +00:00
balances, err := cache.GetNEP17TransferInfo(fromAddr)
2020-03-05 07:45:50 +00:00
if err != nil {
return
}
2021-07-25 12:00:44 +00:00
balances.LastUpdated[id] = b.Index
2020-07-09 09:57:24 +00:00
transfer.Amount = *new(big.Int).Sub(&transfer.Amount, amount) // the sender's side records the negated amount
2021-02-26 11:08:48 +00:00
balances.NewBatch, err = cache.AppendNEP17Transfer(fromAddr,
balances.NextTransferBatch, balances.NewBatch, transfer)
2020-03-12 09:43:21 +00:00
if err != nil {
return
}
2021-02-26 11:08:48 +00:00
if balances.NewBatch {
2020-03-12 09:43:21 +00:00
balances.NextTransferBatch++
}
2021-07-25 10:48:50 +00:00
if err := cache.PutNEP17TransferInfo(fromAddr, balances); err != nil {
2020-03-05 14:11:58 +00:00
return
}
2020-03-05 07:45:50 +00:00
}
2020-03-05 14:11:58 +00:00
if !toAddr.Equals(util.Uint160{}) {
2021-07-25 10:48:50 +00:00
balances, err := cache.GetNEP17TransferInfo(toAddr)
2020-03-05 07:45:50 +00:00
if err != nil {
return
}
2021-07-25 12:00:44 +00:00
balances.LastUpdated[id] = b.Index
2020-03-05 14:11:58 +00:00
2020-07-09 09:57:24 +00:00
transfer.Amount = *amount
2021-02-26 11:08:48 +00:00
balances.NewBatch, err = cache.AppendNEP17Transfer(toAddr,
balances.NextTransferBatch, balances.NewBatch, transfer)
2020-03-12 09:43:21 +00:00
if err != nil {
return
}
2021-02-26 11:08:48 +00:00
if balances.NewBatch {
2020-03-12 09:43:21 +00:00
balances.NextTransferBatch++
}
2021-07-25 10:48:50 +00:00
if err := cache.PutNEP17TransferInfo(toAddr, balances); err != nil {
2020-03-05 14:11:58 +00:00
return
}
2020-03-05 07:45:50 +00:00
}
}
2020-11-24 08:14:25 +00:00
// ForEachNEP17Transfer executes f for each NEP-17 transfer in the log.
func (bc *Blockchain) ForEachNEP17Transfer(acc util.Uint160, f func(*state.NEP17Transfer) (bool, error)) error {
2021-07-25 10:48:50 +00:00
balances, err := bc.dao.GetNEP17TransferInfo(acc)
2020-03-05 12:16:03 +00:00
if err != nil {
return nil
}
2020-09-08 09:57:45 +00:00
for i := int(balances.NextTransferBatch); i >= 0; i-- {
2020-11-24 08:14:25 +00:00
lg, err := bc.dao.GetNEP17TransferLog(acc, uint32(i))
2020-03-12 09:43:21 +00:00
if err != nil {
return nil
}
2020-09-08 12:29:07 +00:00
cont, err := lg.ForEach(f)
2020-08-06 11:20:36 +00:00
if err != nil {
return err
}
2020-09-08 12:29:07 +00:00
if !cont {
break
}
2020-03-12 09:43:21 +00:00
}
2020-08-06 11:20:36 +00:00
return nil
2020-03-05 12:16:03 +00:00
}
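A hypothetical usage sketch: since f returns (continue, error), returning
false stops the walk early, with transfer batches visited from the newest
one down:
var recent []state.NEP17Transfer
err := bc.ForEachNEP17Transfer(acc, func(tr *state.NEP17Transfer) (bool, error) {
	recent = append(recent, *tr)
	return len(recent) < 10, nil // collect at most 10 transfers
})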
2021-07-25 12:00:44 +00:00
// GetNEP17Contracts returns the list of deployed NEP-17 contracts.
func (bc *Blockchain) GetNEP17Contracts() []util.Uint160 {
return bc.contracts.Management.GetNEP17Contracts()
}
// GetNEP17LastUpdated returns a set of contract ids with the corresponding last updated
// block indexes.
func (bc *Blockchain) GetNEP17LastUpdated(acc util.Uint160) (map[int32]uint32, error) {
info, err := bc.dao.GetNEP17TransferInfo(acc)
2020-03-11 15:22:46 +00:00
if err != nil {
2021-07-25 12:00:44 +00:00
return nil, err
2020-03-11 15:22:46 +00:00
}
2021-07-25 12:00:44 +00:00
return info.LastUpdated, nil
2020-03-11 15:22:46 +00:00
}
2020-05-18 08:20:41 +00:00
// GetUtilityTokenBalance returns utility token (GAS) balance for the acc.
2020-07-09 09:57:24 +00:00
func (bc *Blockchain) GetUtilityTokenBalance(acc util.Uint160) *big.Int {
2021-07-25 12:00:44 +00:00
bs := bc.contracts.GAS.BalanceOf(bc.dao, acc)
if bs == nil {
2020-07-09 09:57:24 +00:00
return big.NewInt(0)
2020-06-04 19:25:56 +00:00
}
2021-07-25 12:00:44 +00:00
return bs
2020-05-18 08:20:41 +00:00
}
2020-06-01 20:27:03 +00:00
// GetGoverningTokenBalance returns governing token (NEO) balance and the height
// of the last balance change for the account.
2020-07-09 09:57:24 +00:00
func (bc *Blockchain) GetGoverningTokenBalance(acc util.Uint160) (*big.Int, uint32) {
2021-07-25 12:00:44 +00:00
return bc.contracts.NEO.BalanceOf(bc.dao, acc)
2020-06-01 20:27:03 +00:00
}
2020-11-27 10:55:48 +00:00
// GetNotaryBalance returns Notary deposit amount for the specified account.
func (bc *Blockchain) GetNotaryBalance(acc util.Uint160) *big.Int {
return bc.contracts.Notary.BalanceOf(bc.dao, acc)
}
// GetNotaryContractScriptHash returns Notary native contract hash.
func (bc *Blockchain) GetNotaryContractScriptHash() util.Uint160 {
if bc.P2PSigExtensionsEnabled() {
return bc.contracts.Notary.Hash
}
return util.Uint160{}
}
// GetNotaryDepositExpiration returns Notary deposit expiration height for the specified account.
func (bc *Blockchain) GetNotaryDepositExpiration(acc util.Uint160) uint32 {
return bc.contracts.Notary.ExpirationOf(bc.dao, acc)
}
2020-02-06 15:47:03 +00:00
// LastBatch returns last persisted storage batch.
func (bc *Blockchain) LastBatch() *storage.MemBatch {
return bc.lastBatch
}
2020-04-07 09:41:12 +00:00
// persist flushes current in-memory Store contents to the persistent storage.
2019-10-21 07:04:58 +00:00
func (bc *Blockchain) persist() error {
2018-03-09 15:55:25 +00:00
var (
2018-03-14 09:36:59 +00:00
start = time.Now()
2019-10-17 09:27:15 +00:00
persisted int
2019-09-26 15:14:00 +00:00
err error
2018-03-09 15:55:25 +00:00
)
2019-12-12 18:17:13 +00:00
persisted, err = bc.dao.Persist()
2019-09-26 15:14:00 +00:00
if err != nil {
return err
2018-02-04 19:54:51 +00:00
}
2018-03-14 09:36:59 +00:00
if persisted > 0 {
2019-11-25 17:39:11 +00:00
bHeight, err := bc.dao.GetCurrentBlockHeight()
2019-11-06 13:10:37 +00:00
if err != nil {
return err
}
oldHeight := atomic.SwapUint32(&bc.persistedHeight, bHeight)
diff := bHeight - oldHeight
2019-11-25 17:39:11 +00:00
storedHeaderHeight, _, err := bc.dao.GetCurrentHeaderHeight()
2019-11-06 13:10:37 +00:00
if err != nil {
return err
}
2021-04-07 12:36:42 +00:00
bc.log.Info("persisted to disk",
zap.Uint32("blocks", diff),
zap.Int("keys", persisted),
2019-12-30 07:43:05 +00:00
zap.Uint32("headerHeight", storedHeaderHeight),
zap.Uint32("blockHeight", bHeight),
zap.Duration("took", time.Since(start)))
2019-10-29 17:51:17 +00:00
// update monitoring metrics.
updatePersistedHeightMetric(bHeight)
2018-03-14 09:36:59 +00:00
}
2019-09-26 15:14:00 +00:00
return nil
2018-03-14 09:36:59 +00:00
}
2020-11-13 13:54:38 +00:00
// GetTransaction returns a TX and its height by the given hash. The height is MaxUint32 if tx is in the mempool.
2018-03-21 16:11:04 +00:00
func (bc *Blockchain) GetTransaction(hash util.Uint256) (*transaction.Transaction, uint32, error) {
2020-06-05 16:01:10 +00:00
if tx, ok := bc.memPool.TryGetValue(hash); ok {
2020-11-13 13:54:38 +00:00
return tx, math.MaxUint32, nil // the height is not actually defined for a mempool transaction.
Implement rpc server method: sendrawtransaction (#174)
* Added new config attributes: 'SecondsPerBlock', 'LowPriorityThreshold'
* Added new files:
* Added new method: CompareTo
* Fixed empty Slice case
* Added new methods: LessThan, GreaterThan, Equal, CompareTo
* Added new method: InputIntersection
* Added MaxTransactionSize, GroupOutputByAssetID
* Added new method: ScriptHash
* Added new method: IsDoubleSpend
* Refactor blockchainer, Added Feer interface, Verify and GetMemPool method
* 1) Added MemPool
2) Added new methods to satisfy the blockchainer interface: IsLowPriority, Verify, GetMemPool
* Added new methods: RelayTxn, RelayDirectly
* Fixed tests
* Implemented RPC server method sendrawtransaction
* Refactor getrawtransaction, sendrawtransaction in separate methods
* Moved 'secondsPerBlock' to config file
* Implemented Kim suggestions:
1) Fixed data race issues
2) refactor Verify method
3) Get rid of unused InputIntersection method due to refactoring Verify method
4) Fixed bug in https://github.com/CityOfZion/neo-go/pull/174#discussion_r264108135
5) minor simplications of the code
* Fixed minor issues related to
1) space
2) getter methods do not need pointer on the receiver
3) error message
4) refactoring CompareTo method in uint256.go
* Fixed small issues
* Use sync.RWMutex instead of sync.Mutex
* Refined (R)Lock/(R)Unlock
* return error instead of bool in Verify methods
2019-03-20 12:30:05 +00:00
}
	return bc.dao.GetTransaction(hash)
}
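
// exampleTxLocation is a hypothetical usage sketch (assumed helper, not part
// of the original API surface): it shows how a caller can tell a mempooled
// transaction apart from a persisted one using the MaxUint32 height sentinel
// returned by GetTransaction.
func exampleTxLocation(bc *Blockchain, h util.Uint256) (string, error) {
	_, height, err := bc.GetTransaction(h)
	if err != nil {
		return "", err
	}
	if height == math.MaxUint32 {
		return "in mempool", nil
	}
	return fmt.Sprintf("in block %d", height), nil
}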

// GetAppExecResults returns application execution results with the specified
// trigger by the given tx hash or block hash.
func (bc *Blockchain) GetAppExecResults(hash util.Uint256, trig trigger.Type) ([]state.AppExecResult, error) {
	return bc.dao.GetAppExecResults(hash, trig)
}

// GetStorageItem returns an item from storage.
func (bc *Blockchain) GetStorageItem(id int32, key []byte) state.StorageItem {
	return bc.dao.GetStorageItem(id, key)
}

// GetStorageItems returns all storage items for a given contract id.
func (bc *Blockchain) GetStorageItems(id int32) (map[string]state.StorageItem, error) {
	return bc.dao.GetStorageItems(id)
}

// GetBlock returns a Block by the given hash.
func (bc *Blockchain) GetBlock(hash util.Uint256) (*block.Block, error) {
	topBlock := bc.topBlock.Load()
	if topBlock != nil {
		tb := topBlock.(*block.Block)
		if tb.Hash().Equals(hash) {
			return tb, nil
		}
	}

	block, err := bc.dao.GetBlock(hash)
	if err != nil {
		return nil, err
	}
	if !block.MerkleRoot.Equals(util.Uint256{}) && len(block.Transactions) == 0 {
		return nil, errors.New("only header is found")
	}
	for _, tx := range block.Transactions {
		stx, _, err := bc.dao.GetTransaction(tx.Hash())
		if err != nil {
			return nil, err
		}
		*tx = *stx
	}
	return block, nil
}

// GetHeader returns the block header identified by the given hash.
func (bc *Blockchain) GetHeader(hash util.Uint256) (*block.Header, error) {
	topBlock := bc.topBlock.Load()
	if topBlock != nil {
		tb := topBlock.(*block.Block)
		if tb.Hash().Equals(hash) {
			return &tb.Header, nil
		}
	}
	block, err := bc.dao.GetBlock(hash)
	if err != nil {
		return nil, err
	}
	return &block.Header, nil
}

// HasTransaction returns true if the blockchain contains the given
// transaction hash.
func (bc *Blockchain) HasTransaction(hash util.Uint256) bool {
	if bc.memPool.ContainsKey(hash) {
		return true
	}
	return bc.dao.HasTransaction(hash) == dao.ErrAlreadyExists
}

// HasBlock returns true if the blockchain contains the given
// block hash.
func (bc *Blockchain) HasBlock(hash util.Uint256) bool {
	if header, err := bc.GetHeader(hash); err == nil {
		return header.Index <= bc.BlockHeight()
	}
	return false
}

// CurrentBlockHash returns the highest processed block hash.
func (bc *Blockchain) CurrentBlockHash() util.Uint256 {
	topBlock := bc.topBlock.Load()
	if topBlock != nil {
		tb := topBlock.(*block.Block)
		return tb.Hash()
	}
	return bc.GetHeaderHash(int(bc.BlockHeight()))
}

// CurrentHeaderHash returns the hash of the latest known header.
func (bc *Blockchain) CurrentHeaderHash() util.Uint256 {
	bc.headerHashesLock.RLock()
	hash := bc.headerHashes[len(bc.headerHashes)-1]
	bc.headerHashesLock.RUnlock()
	return hash
}

// GetHeaderHash returns the hash of the header/block with the specified index.
// If the Blockchain doesn't have a hash for this height, a zero Uint256 value
// is returned.
func (bc *Blockchain) GetHeaderHash(i int) util.Uint256 {
	bc.headerHashesLock.RLock()
	defer bc.headerHashesLock.RUnlock()
	hashesLen := len(bc.headerHashes)
	if hashesLen <= i {
		return util.Uint256{}
	}
	return bc.headerHashes[i]
}
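
// exampleHeaderHashWalk is a hypothetical sketch (assumed helper, not part of
// the original API): it collects all known header hashes, relying on
// HeaderHeight for the upper bound and on GetHeaderHash returning a zero
// value past the known range.
func exampleHeaderHashWalk(bc *Blockchain) []util.Uint256 {
	res := make([]util.Uint256, 0, bc.HeaderHeight()+1)
	for i := 0; i <= int(bc.HeaderHeight()); i++ {
		res = append(res, bc.GetHeaderHash(i))
	}
	return res
}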

// BlockHeight returns the height/index of the highest block.
func (bc *Blockchain) BlockHeight() uint32 {
	return atomic.LoadUint32(&bc.blockHeight)
}

// HeaderHeight returns the index/height of the highest header.
func (bc *Blockchain) HeaderHeight() uint32 {
	bc.headerHashesLock.RLock()
	n := len(bc.headerHashes)
	bc.headerHashesLock.RUnlock()
	return uint32(n - 1)
}

// GetContractState returns contract by its script hash.
func (bc *Blockchain) GetContractState(hash util.Uint160) *state.Contract {
	contract, err := bc.contracts.Management.GetContract(bc.dao, hash)
	if contract == nil && err != storage.ErrKeyNotFound {
		bc.log.Warn("failed to get contract state", zap.Error(err))
	}
	return contract
}

// GetContractScriptHash returns contract script hash by its ID.
func (bc *Blockchain) GetContractScriptHash(id int32) (util.Uint160, error) {
	return bc.dao.GetContractScriptHash(id)
}

// GetNativeContractScriptHash returns native contract script hash by its name.
func (bc *Blockchain) GetNativeContractScriptHash(name string) (util.Uint160, error) {
	c := bc.contracts.ByName(name)
	if c != nil {
		return c.Metadata().Hash, nil
	}
	return util.Uint160{}, errors.New("unknown native contract")
}

// GetNatives returns list of native contracts.
func (bc *Blockchain) GetNatives() []state.NativeContract {
	res := make([]state.NativeContract, 0, len(bc.contracts.Contracts))
	for _, c := range bc.contracts.Contracts {
		res = append(res, c.Metadata().NativeContract)
	}
	return res
}

// GetConfig returns the config stored in the blockchain.
func (bc *Blockchain) GetConfig() config.ProtocolConfiguration {
	return bc.config
}

// SubscribeForBlocks adds given channel to new block event broadcasting, so when
// there is a new block added to the chain you'll receive it via this channel.
// Make sure it's read from regularly as not reading these events might affect
// other Blockchain functions.
func (bc *Blockchain) SubscribeForBlocks(ch chan<- *block.Block) {
	bc.subCh <- ch
}

// SubscribeForTransactions adds given channel to new transaction event
// broadcasting, so when there is a new transaction added to the chain (in a
// block) you'll receive it via this channel. Make sure it's read from regularly
// as not reading these events might affect other Blockchain functions.
func (bc *Blockchain) SubscribeForTransactions(ch chan<- *transaction.Transaction) {
	bc.subCh <- ch
}

// SubscribeForNotifications adds given channel to new notifications event
// broadcasting, so when an in-block transaction execution generates a
// notification you'll receive it via this channel. Only notifications from
// successful transactions are broadcasted; if you're interested in failed
// transactions use SubscribeForExecutions instead. Make sure this channel is
// read from regularly as not reading these events might affect other Blockchain
// functions.
func (bc *Blockchain) SubscribeForNotifications(ch chan<- *state.NotificationEvent) {
	bc.subCh <- ch
}

// SubscribeForExecutions adds given channel to new transaction execution event
// broadcasting, so when an in-block transaction execution happens you'll receive
// the result of it via this channel. Make sure it's read from regularly as not
// reading these events might affect other Blockchain functions.
func (bc *Blockchain) SubscribeForExecutions(ch chan<- *state.AppExecResult) {
	bc.subCh <- ch
}

// UnsubscribeFromBlocks unsubscribes given channel from new block notifications;
// you can close it afterwards. Passing a non-subscribed channel is a no-op.
func (bc *Blockchain) UnsubscribeFromBlocks(ch chan<- *block.Block) {
	bc.unsubCh <- ch
}

// UnsubscribeFromTransactions unsubscribes given channel from new transaction
// notifications; you can close it afterwards. Passing a non-subscribed channel
// is a no-op.
func (bc *Blockchain) UnsubscribeFromTransactions(ch chan<- *transaction.Transaction) {
	bc.unsubCh <- ch
}

// UnsubscribeFromNotifications unsubscribes given channel from new
// execution-generated notifications; you can close it afterwards. Passing a
// non-subscribed channel is a no-op.
func (bc *Blockchain) UnsubscribeFromNotifications(ch chan<- *state.NotificationEvent) {
	bc.unsubCh <- ch
}

// UnsubscribeFromExecutions unsubscribes given channel from new execution
// notifications; you can close it afterwards. Passing a non-subscribed channel
// is a no-op.
func (bc *Blockchain) UnsubscribeFromExecutions(ch chan<- *state.AppExecResult) {
	bc.unsubCh <- ch
}
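
// exampleBlockSubscriber is a hypothetical usage sketch (assumed helper, not
// part of the original API): it subscribes to new blocks and drains the
// channel in a loop, since subscription channels must be read from regularly.
func exampleBlockSubscriber(bc *Blockchain, stop <-chan struct{}) {
	ch := make(chan *block.Block, 16)
	bc.SubscribeForBlocks(ch)
	defer bc.UnsubscribeFromBlocks(ch)
	for {
		select {
		case b := <-ch:
			bc.log.Info("new block", zap.Uint32("index", b.Index))
		case <-stop:
			return
		}
	}
}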

// CalculateClaimable calculates the amount of GAS generated by owning specified
// amount of NEO between specified blocks.
func (bc *Blockchain) CalculateClaimable(acc util.Uint160, endHeight uint32) (*big.Int, error) {
	return bc.contracts.NEO.CalculateBonus(bc.dao, acc, endHeight)
}
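
// exampleClaimableGAS is a hypothetical sketch (assumed helper): it queries
// the GAS claimable by an account up to the current block height and renders
// the raw big.Int value as a string.
func exampleClaimableGAS(bc *Blockchain, acc util.Uint160) (string, error) {
	gas, err := bc.CalculateClaimable(acc, bc.BlockHeight())
	if err != nil {
		return "", err
	}
	return gas.String(), nil
}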

// FeePerByte returns transaction network fee per byte.
func (bc *Blockchain) FeePerByte() int64 {
	return bc.contracts.Policy.GetFeePerByteInternal(bc.dao)
}

// GetMemPool returns the memory pool of the blockchain.
func (bc *Blockchain) GetMemPool() *mempool.Pool {
	return bc.memPool
}

// ApplyPolicyToTxSet applies configured policies to given transaction set. It
// expects slice to be ordered by fee and returns a subslice of it.
func (bc *Blockchain) ApplyPolicyToTxSet(txes []*transaction.Transaction) []*transaction.Transaction {
	maxTx := bc.config.MaxTransactionsPerBlock
	if maxTx != 0 && len(txes) > int(maxTx) {
		txes = txes[:maxTx]
	}
	maxBlockSize := bc.config.MaxBlockSize
	maxBlockSysFee := bc.config.MaxBlockSystemFee
	defaultWitness := bc.defaultBlockWitness.Load()
	if defaultWitness == nil {
		m := smartcontract.GetDefaultHonestNodeCount(bc.config.ValidatorsCount)
		verification, _ := smartcontract.CreateDefaultMultiSigRedeemScript(bc.contracts.NEO.GetNextBlockValidatorsInternal())
		defaultWitness = transaction.Witness{
			InvocationScript:   make([]byte, 66*m),
			VerificationScript: verification,
		}
		bc.defaultBlockWitness.Store(defaultWitness)
	}
	var (
		b           = &block.Block{Header: block.Header{Script: defaultWitness.(transaction.Witness)}}
		blockSize   = uint32(b.GetExpectedBlockSizeWithoutTransactions(len(txes)))
		blockSysFee int64
	)
	for i, tx := range txes {
		blockSize += uint32(tx.Size())
		blockSysFee += tx.SystemFee
		if blockSize > maxBlockSize || blockSysFee > maxBlockSysFee {
			txes = txes[:i]
			break
		}
	}
	return txes
}
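
// exampleTrimTxSet is a hypothetical sketch (assumed helper): it takes
// verified, fee-ordered candidate transactions from the mempool and trims
// them to the configured per-block limits before block construction.
func exampleTrimTxSet(bc *Blockchain) []*transaction.Transaction {
	txes := bc.GetMemPool().GetVerifiedTransactions() // Sorted by fee.
	return bc.ApplyPolicyToTxSet(txes)
}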

// Various errors that could be returned upon header verification.
var (
	ErrHdrHashMismatch     = errors.New("previous header hash doesn't match")
	ErrHdrIndexMismatch    = errors.New("previous header index doesn't match")
	ErrHdrInvalidTimestamp = errors.New("block is not newer than the previous one")
	ErrHdrStateRootSetting = errors.New("state root setting mismatch")
	ErrHdrInvalidStateRoot = errors.New("state root for previous block is invalid")
)

func (bc *Blockchain) verifyHeader(currHeader, prevHeader *block.Header) error {
	if bc.config.StateRootInHeader {
		if bc.stateRoot.CurrentLocalHeight() == prevHeader.Index {
			if sr := bc.stateRoot.CurrentLocalStateRoot(); currHeader.PrevStateRoot != sr {
				return fmt.Errorf("%w: %s != %s",
					ErrHdrInvalidStateRoot, currHeader.PrevStateRoot.StringLE(), sr.StringLE())
			}
		}
	}
	if prevHeader.Hash() != currHeader.PrevHash {
		return ErrHdrHashMismatch
	}
	if prevHeader.Index+1 != currHeader.Index {
		return ErrHdrIndexMismatch
	}
	if prevHeader.Timestamp >= currHeader.Timestamp {
		return ErrHdrInvalidTimestamp
	}
	return bc.verifyHeaderWitnesses(currHeader, prevHeader)
}

// Various errors that could be returned upon verification.
var (
	ErrTxExpired         = errors.New("transaction has expired")
	ErrInsufficientFunds = errors.New("insufficient funds")
	ErrTxSmallNetworkFee = errors.New("too small network fee")
	ErrTxTooBig          = errors.New("too big transaction")
	ErrMemPoolConflict   = errors.New("invalid transaction due to conflicts with the memory pool")
	ErrInvalidScript     = errors.New("invalid script")
	ErrInvalidAttribute  = errors.New("invalid attribute")
)

// verifyAndPoolTx verifies whether a transaction is bona fide or not and tries
// to add it to the given mempool.
func (bc *Blockchain) verifyAndPoolTx(t *transaction.Transaction, pool *mempool.Pool, feer mempool.Feer, data ...interface{}) error {
	// This code can technically be moved out of here, because it doesn't
	// really require a chain lock.
	err := vm.IsScriptCorrect(t.Script, nil)
	if err != nil {
		return fmt.Errorf("%w: %v", ErrInvalidScript, err)
	}

	height := bc.BlockHeight()
	isPartialTx := data != nil
	if t.ValidUntilBlock <= height || !isPartialTx && t.ValidUntilBlock > height+bc.config.MaxValidUntilBlockIncrement {
		return fmt.Errorf("%w: ValidUntilBlock = %d, current height = %d", ErrTxExpired, t.ValidUntilBlock, height)
	}
	// Policying.
	if err := bc.contracts.Policy.CheckPolicy(bc.dao, t); err != nil {
		// Only one %w can be used.
		return fmt.Errorf("%w: %v", ErrPolicy, err)
	}
	size := t.Size()
	if size > transaction.MaxTransactionSize {
		return fmt.Errorf("%w: (%d > MaxTransactionSize %d)", ErrTxTooBig, size, transaction.MaxTransactionSize)
	}
	needNetworkFee := int64(size) * bc.FeePerByte()
	if bc.P2PSigExtensionsEnabled() {
		attrs := t.GetAttributes(transaction.NotaryAssistedT)
		if len(attrs) != 0 {
			na := attrs[0].Value.(*transaction.NotaryAssisted)
			needNetworkFee += (int64(na.NKeys) + 1) * transaction.NotaryServiceFeePerKey
		}
	}
	netFee := t.NetworkFee - needNetworkFee
	if netFee < 0 {
		return fmt.Errorf("%w: net fee is %v, need %v", ErrTxSmallNetworkFee, t.NetworkFee, needNetworkFee)
	}
	// Check that the current tx wasn't included in the Conflicts attributes of
	// some other transaction which is already in the chain.
	if err := bc.dao.HasTransaction(t.Hash()); err != nil {
		switch {
		case errors.Is(err, dao.ErrAlreadyExists):
			return fmt.Errorf("blockchain: %w", ErrAlreadyExists)
		case errors.Is(err, dao.ErrHasConflicts):
			return fmt.Errorf("blockchain: %w", ErrHasConflicts)
		default:
			return err
		}
	}
	err = bc.verifyTxWitnesses(t, nil, isPartialTx)
	if err != nil {
		return err
	}
	if err := bc.verifyTxAttributes(t, isPartialTx); err != nil {
		return err
	}
	err = pool.Add(t, feer, data...)
	if err != nil {
		switch {
		case errors.Is(err, mempool.ErrConflict):
			return ErrMemPoolConflict
		case errors.Is(err, mempool.ErrDup):
			return fmt.Errorf("mempool: %w", ErrAlreadyExists)
		case errors.Is(err, mempool.ErrInsufficientFunds):
			return ErrInsufficientFunds
		case errors.Is(err, mempool.ErrOOM):
			return ErrOOM
		case errors.Is(err, mempool.ErrConflictsAttribute):
			return fmt.Errorf("mempool: %w: %s", ErrHasConflicts, err)
		default:
			return err
		}
	}
	return nil
}

func (bc *Blockchain) verifyTxAttributes(tx *transaction.Transaction, isPartialTx bool) error {
	for i := range tx.Attributes {
		switch attrType := tx.Attributes[i].Type; attrType {
		case transaction.HighPriority:
			h := bc.contracts.NEO.GetCommitteeAddress()
			if !tx.HasSigner(h) {
				return fmt.Errorf("%w: high priority tx is not signed by committee", ErrInvalidAttribute)
			}
		case transaction.OracleResponseT:
			h, err := bc.contracts.Oracle.GetScriptHash(bc.dao)
			if err != nil || h.Equals(util.Uint160{}) {
				return fmt.Errorf("%w: %v", ErrInvalidAttribute, err)
			}
			hasOracle := false
			for i := range tx.Signers {
				if tx.Signers[i].Scopes != transaction.None {
					return fmt.Errorf("%w: oracle tx has invalid signer scope", ErrInvalidAttribute)
				}
				if tx.Signers[i].Account.Equals(h) {
					hasOracle = true
				}
			}
			if !hasOracle {
				return fmt.Errorf("%w: oracle tx is not signed by oracle nodes", ErrInvalidAttribute)
			}
			if !bytes.Equal(tx.Script, bc.contracts.Oracle.GetOracleResponseScript()) {
				return fmt.Errorf("%w: oracle tx has invalid script", ErrInvalidAttribute)
			}
			resp := tx.Attributes[i].Value.(*transaction.OracleResponse)
			req, err := bc.contracts.Oracle.GetRequestInternal(bc.dao, resp.ID)
			if err != nil {
				return fmt.Errorf("%w: oracle tx points to invalid request: %v", ErrInvalidAttribute, err)
			}
			if uint64(tx.NetworkFee+tx.SystemFee) < req.GasForResponse {
				return fmt.Errorf("%w: oracle tx has insufficient gas", ErrInvalidAttribute)
			}
		case transaction.NotValidBeforeT:
			if !bc.config.P2PSigExtensions {
				return fmt.Errorf("%w: NotValidBefore attribute was found, but P2PSigExtensions are disabled", ErrInvalidAttribute)
			}
			nvb := tx.Attributes[i].Value.(*transaction.NotValidBefore).Height
			if isPartialTx {
				maxNVBDelta := bc.contracts.Notary.GetMaxNotValidBeforeDelta(bc.dao)
				if bc.BlockHeight()+maxNVBDelta < nvb {
					return fmt.Errorf("%w: partially-filled transaction should become valid not less than %d blocks after current chain's height %d", ErrInvalidAttribute, maxNVBDelta, bc.BlockHeight())
				}
				if nvb+maxNVBDelta < tx.ValidUntilBlock {
					return fmt.Errorf("%w: partially-filled transaction should be valid during less than %d blocks", ErrInvalidAttribute, maxNVBDelta)
				}
			} else {
				if height := bc.BlockHeight(); height < nvb {
					return fmt.Errorf("%w: transaction is not yet valid: NotValidBefore = %d, current height = %d", ErrInvalidAttribute, nvb, height)
				}
			}
		case transaction.ConflictsT:
			if !bc.config.P2PSigExtensions {
				return fmt.Errorf("%w: Conflicts attribute was found, but P2PSigExtensions are disabled", ErrInvalidAttribute)
			}
			conflicts := tx.Attributes[i].Value.(*transaction.Conflicts)
			if err := bc.dao.HasTransaction(conflicts.Hash); errors.Is(err, dao.ErrAlreadyExists) {
				return fmt.Errorf("%w: conflicting transaction %s is already on chain", ErrInvalidAttribute, conflicts.Hash.StringLE())
			}
		case transaction.NotaryAssistedT:
			if !bc.config.P2PSigExtensions {
				return fmt.Errorf("%w: NotaryAssisted attribute was found, but P2PSigExtensions are disabled", ErrInvalidAttribute)
			}
			if !tx.HasSigner(bc.contracts.Notary.Hash) {
				return fmt.Errorf("%w: NotaryAssisted attribute was found, but transaction is not signed by the Notary native contract", ErrInvalidAttribute)
			}
		default:
			if !bc.config.ReservedAttributes && attrType >= transaction.ReservedLowerBound && attrType <= transaction.ReservedUpperBound {
				return fmt.Errorf("%w: attribute of reserved type was found, but ReservedAttributes are disabled", ErrInvalidAttribute)
			}
		}
	}
	return nil
}

// IsTxStillRelevant is a callback for mempool transaction filtering after the
// new block addition. It returns false for transactions added by the new block
// (passed via txpool) and does witness reverification for non-standard
// contracts. It operates under the assumption that full transaction verification
// was already done so we don't need to check basic things like size, input/output
// correctness, presence in blocks before the new one, etc.
func (bc *Blockchain) IsTxStillRelevant(t *transaction.Transaction, txpool *mempool.Pool, isPartialTx bool) bool {
	var recheckWitness bool
	var curheight = bc.BlockHeight()

	if t.ValidUntilBlock <= curheight {
		return false
	}
	if txpool == nil {
		if bc.dao.HasTransaction(t.Hash()) != nil {
			return false
		}
	} else if txpool.HasConflicts(t, bc) {
		return false
	}
	if err := bc.verifyTxAttributes(t, isPartialTx); err != nil {
		return false
	}
	for i := range t.Scripts {
		if !vm.IsStandardContract(t.Scripts[i].VerificationScript) {
			recheckWitness = true
			break
		}
	}
	if recheckWitness {
		return bc.verifyTxWitnesses(t, nil, isPartialTx) == nil
	}
	return true
}

// VerifyTx verifies whether transaction is bona fide or not relative to the
// current blockchain state. Note that this verification is completely isolated
// from the main node's mempool.
func (bc *Blockchain) VerifyTx(t *transaction.Transaction) error {
	var mp = mempool.New(1, 0, false)
	bc.lock.RLock()
	defer bc.lock.RUnlock()
	return bc.verifyAndPoolTx(t, mp, bc)
}

// PoolTx verifies and tries to add given transaction into the mempool. If not
// given, the default mempool is used. Passing multiple pools is not supported.
func (bc *Blockchain) PoolTx(t *transaction.Transaction, pools ...*mempool.Pool) error {
	var pool = bc.memPool
	bc.lock.RLock()
	defer bc.lock.RUnlock()
	// Programmer error.
	if len(pools) > 1 {
		panic("too many pools given")
	}
	if len(pools) == 1 {
		pool = pools[0]
	}
	return bc.verifyAndPoolTx(t, pool, bc)
}
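
// examplePoolTx is a hypothetical usage sketch (assumed helper): it relays a
// transaction into the default mempool and treats the "already known" case
// as success, while propagating real verification failures.
func examplePoolTx(bc *Blockchain, t *transaction.Transaction) error {
	err := bc.PoolTx(t)
	if errors.Is(err, ErrAlreadyExists) {
		return nil // The tx is already known, nothing to do.
	}
	return err
}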

// PoolTxWithData verifies and tries to add given transaction with additional data into the mempool.
func (bc *Blockchain) PoolTxWithData(t *transaction.Transaction, data interface{}, mp *mempool.Pool, feer mempool.Feer, verificationFunction func(bc blockchainer.Blockchainer, tx *transaction.Transaction, data interface{}) error) error {
	bc.lock.RLock()
	defer bc.lock.RUnlock()
	if verificationFunction != nil {
		err := verificationFunction(bc, t, data)
		if err != nil {
			return err
		}
	}
	return bc.verifyAndPoolTx(t, mp, feer, data)
}

// GetStandByValidators returns validators from the configuration.
func (bc *Blockchain) GetStandByValidators() keys.PublicKeys {
	return bc.sbCommittee[:bc.config.ValidatorsCount].Copy()
}

// GetStandByCommittee returns standby committee from the configuration.
func (bc *Blockchain) GetStandByCommittee() keys.PublicKeys {
	return bc.sbCommittee.Copy()
}

// GetCommittee returns the sorted list of public keys of nodes in committee.
func (bc *Blockchain) GetCommittee() (keys.PublicKeys, error) {
	pubs := bc.contracts.NEO.GetCommitteeMembers()
	sort.Sort(pubs)
	return pubs, nil
}

// GetValidators returns current validators.
func (bc *Blockchain) GetValidators() ([]*keys.PublicKey, error) {
	return bc.contracts.NEO.ComputeNextBlockValidators(bc, bc.dao)
}

// GetNextBlockValidators returns next block validators.
func (bc *Blockchain) GetNextBlockValidators() ([]*keys.PublicKey, error) {
	return bc.contracts.NEO.GetNextBlockValidatorsInternal(), nil
}

// GetEnrollments returns all registered validators.
func (bc *Blockchain) GetEnrollments() ([]state.Validator, error) {
	return bc.contracts.NEO.GetCandidates(bc.dao)
}

// GetTestVM returns a VM and a Store setup for a test run of some sort of code.
func (bc *Blockchain) GetTestVM(t trigger.Type, tx *transaction.Transaction, b *block.Block) *vm.VM {
	d := bc.dao.GetWrapped().(*dao.Simple)
	systemInterop := bc.newInteropContext(t, d, b, tx)
	vm := systemInterop.SpawnVM()
	vm.SetPriceGetter(systemInterop.GetPrice)
	vm.LoadToken = contract.LoadToken(systemInterop)
	return vm
}
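
// exampleTestInvoke is a hypothetical sketch (assumed helper): it runs a
// script in a test VM with the Application trigger and no transaction/block
// context, returning the resulting stack contents.
func exampleTestInvoke(bc *Blockchain, script []byte) ([]stackitem.Item, error) {
	v := bc.GetTestVM(trigger.Application, nil, nil)
	v.LoadScriptWithFlags(script, callflag.All)
	if err := v.Run(); err != nil {
		return nil, err
	}
	return v.Estack().ToArray(), nil
}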

// Various witness verification errors.
var (
	ErrWitnessHashMismatch         = errors.New("witness hash mismatch")
	ErrNativeContractWitness       = errors.New("native contract witness must have empty verification script")
	ErrVerificationFailed          = errors.New("signature check failed")
	ErrInvalidInvocation           = errors.New("invalid invocation script")
	ErrInvalidSignature            = fmt.Errorf("%w: invalid signature", ErrVerificationFailed)
	ErrInvalidVerification         = errors.New("invalid verification script")
	ErrUnknownVerificationContract = errors.New("unknown verification contract")
	ErrInvalidVerificationContract = errors.New("verification contract is missing `verify` method")
)

// InitVerificationVM initializes VM for witness check.
func (bc *Blockchain) InitVerificationVM(v *vm.VM, getContract func(util.Uint160) (*state.Contract, error), hash util.Uint160, witness *transaction.Witness) error {
	if len(witness.VerificationScript) != 0 {
		if witness.ScriptHash() != hash {
			return ErrWitnessHashMismatch
		}
		if bc.contracts.ByHash(hash) != nil {
			return ErrNativeContractWitness
		}
		err := vm.IsScriptCorrect(witness.VerificationScript, nil)
		if err != nil {
			return fmt.Errorf("%w: %v", ErrInvalidVerification, err)
		}
		v.LoadScriptWithFlags(witness.VerificationScript, callflag.ReadOnly)
	} else {
		cs, err := getContract(hash)
		if err != nil {
			return ErrUnknownVerificationContract
		}
		md := cs.Manifest.ABI.GetMethod(manifest.MethodVerify, -1)
		if md == nil || md.ReturnType != smartcontract.BoolType {
			return ErrInvalidVerificationContract
		}
		initMD := cs.Manifest.ABI.GetMethod(manifest.MethodInit, 0)
		v.LoadScriptWithHash(cs.NEF.Script, hash, callflag.ReadOnly)
		v.Context().NEF = &cs.NEF
		v.Jump(v.Context(), md.Offset)

		if initMD != nil {
			v.Call(v.Context(), initMD.Offset)
		}
	}
	if len(witness.InvocationScript) != 0 {
		err := vm.IsScriptCorrect(witness.InvocationScript, nil)
		if err != nil {
			return fmt.Errorf("%w: %v", ErrInvalidInvocation, err)
		}
		v.LoadScript(witness.InvocationScript)
	}
	return nil
}

// VerifyWitness checks that w is a correct witness for c signed by h.
func (bc *Blockchain) VerifyWitness(h util.Uint160, c hash.Hashable, w *transaction.Witness, gas int64) error {
	ic := bc.newInteropContext(trigger.Verification, bc.dao, nil, nil)
	ic.Container = c
	_, err := bc.verifyHashAgainstScript(h, w, ic, gas)
	return err
}
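
// exampleCheckHeaderWitness is a hypothetical sketch (assumed helper)
// mirroring what header verification does: check a header's witness against
// an expected signer under the fixed header verification GAS limit.
func exampleCheckHeaderWitness(bc *Blockchain, h *block.Header, signer util.Uint160) error {
	return bc.VerifyWitness(signer, h, &h.Script, HeaderVerificationGasLimit)
}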

// verifyHashAgainstScript verifies given hash against the given witness and returns the amount of GAS consumed.
func (bc *Blockchain) verifyHashAgainstScript(hash util.Uint160, witness *transaction.Witness, interopCtx *interop.Context, gas int64) (int64, error) {
	gasPolicy := bc.contracts.Policy.GetMaxVerificationGas(interopCtx.DAO)
	if gas > gasPolicy {
		gas = gasPolicy
	}

	vm := interopCtx.SpawnVM()
	vm.SetPriceGetter(interopCtx.GetPrice)
	vm.LoadToken = contract.LoadToken(interopCtx)
	vm.GasLimit = gas
	if err := bc.InitVerificationVM(vm, interopCtx.GetContract, hash, witness); err != nil {
		return 0, err
	}
	err := vm.Run()
	if vm.HasFailed() {
		return 0, fmt.Errorf("%w: vm execution has failed: %v", ErrVerificationFailed, err)
	}
	resEl := vm.Estack().Pop()
	if resEl != nil {
		res, err := resEl.Item().TryBool()
		if err != nil {
			return 0, fmt.Errorf("%w: invalid return value", ErrVerificationFailed)
		}
		if vm.Estack().Len() != 0 {
			return 0, fmt.Errorf("%w: expected exactly one returned value", ErrVerificationFailed)
		}
		if !res {
			return vm.GasConsumed(), ErrInvalidSignature
		}
	} else {
		return 0, fmt.Errorf("%w: no result returned from the script", ErrVerificationFailed)
	}
	return vm.GasConsumed(), nil
}

// verifyTxWitnesses verifies the scripts (witnesses) that come with a given
// transaction. It can reorder them by ScriptHash, because that's required to
// match a slice of script hashes from the Blockchain. Block parameter
// is used for easy interop access and can be omitted for transactions that are
// not yet added into any block.
// Golang implementation of VerifyWitnesses method in C# (https://github.com/neo-project/neo/blob/master/neo/SmartContract/Helper.cs#L87).
func (bc *Blockchain) verifyTxWitnesses(t *transaction.Transaction, block *block.Block, isPartialTx bool) error {
	interopCtx := bc.newInteropContext(trigger.Verification, bc.dao, block, t)
	gasLimit := t.NetworkFee - int64(t.Size())*bc.FeePerByte()
	if bc.P2PSigExtensionsEnabled() {
		attrs := t.GetAttributes(transaction.NotaryAssistedT)
		if len(attrs) != 0 {
			na := attrs[0].Value.(*transaction.NotaryAssisted)
			gasLimit -= (int64(na.NKeys) + 1) * transaction.NotaryServiceFeePerKey
		}
	}
	for i := range t.Signers {
		gasConsumed, err := bc.verifyHashAgainstScript(t.Signers[i].Account, &t.Scripts[i], interopCtx, gasLimit)
		if err != nil &&
			!(i == 0 && isPartialTx && errors.Is(err, ErrInvalidSignature)) { // It's OK for a partially-filled transaction with a dummy first witness.
			return fmt.Errorf("witness #%d: %w", i, err)
		}
		gasLimit -= gasConsumed
	}
	return nil
}

// verifyHeaderWitnesses is a block-specific implementation of VerifyWitnesses logic.
func (bc *Blockchain) verifyHeaderWitnesses(currHeader, prevHeader *block.Header) error {
	var hash util.Uint160
	if prevHeader == nil && currHeader.PrevHash.Equals(util.Uint256{}) {
		hash = currHeader.Script.ScriptHash()
	} else {
		hash = prevHeader.NextConsensus
	}
	return bc.VerifyWitness(hash, currHeader, &currHeader.Script, HeaderVerificationGasLimit)
}

// GoverningTokenHash returns the governing token (NEO) native contract hash.
func (bc *Blockchain) GoverningTokenHash() util.Uint160 {
	return bc.contracts.NEO.Hash
}

// UtilityTokenHash returns the utility token (GAS) native contract hash.
func (bc *Blockchain) UtilityTokenHash() util.Uint160 {
	return bc.contracts.GAS.Hash
}

// ManagementContractHash returns management contract's hash.
func (bc *Blockchain) ManagementContractHash() util.Uint160 {
	return bc.contracts.Management.Hash
}

func hashAndIndexToBytes(h util.Uint256, index uint32) []byte {
	buf := io.NewBufBinWriter()
	buf.WriteBytes(h.BytesLE())
	buf.WriteU32LE(index)
	return buf.Bytes()
}

func (bc *Blockchain) newInteropContext(trigger trigger.Type, d dao.DAO, block *block.Block, tx *transaction.Transaction) *interop.Context {
	ic := interop.NewContext(trigger, bc, d, bc.contracts.Management.GetContract, bc.contracts.Contracts, block, tx, bc.log)
	ic.Functions = systemInterops
	switch {
	case tx != nil:
		ic.Container = tx
	case block != nil:
		ic.Container = block
	}
	ic.InitNonceData()
	return ic
}

// P2PSigExtensionsEnabled reports whether P2P signature extensions are enabled.
func (bc *Blockchain) P2PSigExtensionsEnabled() bool {
	return bc.config.P2PSigExtensions
}

// RegisterPostBlock appends provided function to the list of functions which should be run after new block
// is stored.
func (bc *Blockchain) RegisterPostBlock(f func(blockchainer.Blockchainer, *mempool.Pool, *block.Block)) {
	bc.postBlock = append(bc.postBlock, f)
}

// -- start Policer.

// GetPolicer provides access to policy values via Policer interface.
func (bc *Blockchain) GetPolicer() blockchainer.Policer {
	return bc
}

// GetBaseExecFee returns execution price for `NOP`.
func (bc *Blockchain) GetBaseExecFee() int64 {
	return bc.contracts.Policy.GetExecFeeFactorInternal(bc.dao)
}

// GetMaxVerificationGAS returns maximum verification GAS Policy limit.
func (bc *Blockchain) GetMaxVerificationGAS() int64 {
	return bc.contracts.Policy.GetMaxVerificationGas(bc.dao)
}

// GetStoragePrice returns current storage price.
func (bc *Blockchain) GetStoragePrice() int64 {
	if bc.BlockHeight() == 0 {
		return native.DefaultStoragePrice
	}
	return bc.contracts.Policy.GetStoragePriceInternal(bc.dao)
}

// -- end Policer.