neo-go/pkg/core/blockchain.go

package core
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"math"
"math/big"
"sort"
"sync"
"sync/atomic"
"time"
json "github.com/nspcc-dev/go-ordered-json"
"github.com/nspcc-dev/neo-go/pkg/config"
"github.com/nspcc-dev/neo-go/pkg/config/limits"
"github.com/nspcc-dev/neo-go/pkg/core/block"
"github.com/nspcc-dev/neo-go/pkg/core/dao"
"github.com/nspcc-dev/neo-go/pkg/core/interop"
"github.com/nspcc-dev/neo-go/pkg/core/interop/contract"
"github.com/nspcc-dev/neo-go/pkg/core/mempool"
"github.com/nspcc-dev/neo-go/pkg/core/mpt"
"github.com/nspcc-dev/neo-go/pkg/core/native"
"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/core/stateroot"
"github.com/nspcc-dev/neo-go/pkg/core/statesync"
"github.com/nspcc-dev/neo-go/pkg/core/storage"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/smartcontract"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/manifest"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
"go.uber.org/zap"
)
// Tuning parameters.
const (
version = "0.2.12"
// DefaultInitialGAS is the default amount of GAS emitted to the standby validators
// multisignature account during native GAS contract initialization.
DefaultInitialGAS = 52000000_00000000
defaultGCPeriod = 10000
defaultMemPoolSize = 50000
defaultP2PNotaryRequestPayloadPoolSize = 1000
defaultMaxBlockSize = 262144
defaultMaxBlockSystemFee = 900000000000
defaultMaxTraceableBlocks = 2102400 // 1 year of 15s blocks (365*24*3600/15)
defaultMaxTransactionsPerBlock = 512
defaultTimePerBlock = 15 * time.Second
// HeaderVerificationGasLimit is the maximum amount of GAS for block header verification.
HeaderVerificationGasLimit = 3_00000000 // 3 GAS
defaultStateSyncInterval = 40000
)
// stateChangeStage denotes the stage of state modification process.
type stateChangeStage byte
// A set of stages used to split state jump / state reset into atomic operations.
const (
// none means that no state jump or state reset process was initiated yet.
none stateChangeStage = 1 << iota
// stateJumpStarted means that state jump was just initiated, but outdated storage items
// were not yet removed.
stateJumpStarted
// newStorageItemsAdded means that contract storage items are up-to-date with the current
// state.
newStorageItemsAdded
// staleBlocksRemoved means that state corresponding to the stale blocks (genesis
// block in case of state jump) was removed from the storage.
staleBlocksRemoved
// headersReset denotes that stale SYS-prefixed and IX-prefixed information was
// removed from the storage (applicable to state reset only).
headersReset
// transfersReset denotes that NEP transfers were successfully updated (applicable
// to state reset only).
transfersReset
// stateResetBit represents a bit identifier for state reset process. If this bit is not set, then
// it's an unfinished state jump.
stateResetBit byte = 1 << 7
)
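// The stage byte stored under storage.SYSStateChangeStage packs two pieces of
// information: the high bit (stateResetBit) distinguishes state reset from
// state jump, while the low bits carry the stateChangeStage value. A minimal
// illustrative sketch of that encoding (these helpers are hypothetical and not
// part of the package API; init() below performs the same bit operations
// inline):
func encodeStageByte(reset bool, stage stateChangeStage) byte {
	b := byte(stage)
	if reset {
		b |= stateResetBit
	}
	return b
}

func decodeStageByte(b byte) (reset bool, stage stateChangeStage) {
	return b&stateResetBit != 0, stateChangeStage(b &^ stateResetBit)
}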
var (
// ErrAlreadyExists is returned when trying to add some transaction
// that already exists on chain.
ErrAlreadyExists = errors.New("already exists in blockchain")
// ErrAlreadyInPool is returned when trying to add some already existing
// transaction into the mempool.
ErrAlreadyInPool = errors.New("already exists in mempool")
// ErrOOM is returned when trying to add a transaction to the memory pool
// that has reached its full capacity.
ErrOOM = errors.New("no space left in the memory pool")
// ErrPolicy is returned on attempt to add transaction that doesn't
// comply with node's configured policy into the mempool.
ErrPolicy = errors.New("not allowed by policy")
// ErrInvalidBlockIndex is returned when trying to add block with index
// other than expected height of the blockchain.
ErrInvalidBlockIndex = errors.New("invalid block index")
// ErrHasConflicts is returned when trying to add some transaction which
// conflicts with other transaction in the chain or pool according to
// Conflicts attribute.
ErrHasConflicts = errors.New("has conflicts")
)
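// Callers are expected to match these sentinel errors with errors.Is. A
// hypothetical caller-side sketch (AddBlock is the real method, the handling
// shown is illustrative):
//
//	err := bc.AddBlock(b)
//	if errors.Is(err, ErrAlreadyExists) {
//		// The block is already on chain, nothing to do.
//	}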
var (
// persistInterval is the period between subsequent attempts to persist
// accumulated DAO changes to the underlying store.
persistInterval = 1 * time.Second
)
// Blockchain represents the blockchain. It maintains internal state representing
// the state of the ledger that can be accessed in various ways and changed by
// adding new blocks or headers.
type Blockchain struct {
HeaderHashes
config config.Blockchain
// The only way chain state changes is by adding blocks, so we can't
// allow concurrent block additions. It differs from the next lock in
// that it's only for the AddBlock method itself: the chain state is
// protected by the lock below, but holding that one during all of
// AddBlock is too expensive (the state only changes when persisting
// the change cache).
addLock sync.Mutex
// This lock ensures blockchain immutability for operations that need
// that while performing their tasks. It's mostly used as a read lock
// with the only writer being the block addition logic.
lock sync.RWMutex
// Data access object for CRUD operations around storage. It's write-cached.
dao *dao.Simple
// persistent is the same DB as dao, but we never write to it, so all reads
// are directly from underlying persistent store.
persistent *dao.Simple
// Underlying persistent store.
store storage.Store
// Current index/height of the highest block.
// Read access should always go through BlockHeight().
// Write access should only happen in storeBlock().
blockHeight uint32
// Current top Block wrapped in an atomic.Value for safe access.
topBlock atomic.Value
// Current persisted block count.
persistedHeight uint32
// Stop synchronization mechanisms.
stopCh chan struct{}
runToExitCh chan struct{}
// isRunning denotes whether blockchain routines are currently running.
isRunning atomic.Value
memPool *mempool.Pool
// postBlock is a set of callback methods which should be run under the Blockchain lock after a new block is persisted.
// Block's transactions are passed via mempool.
postBlock []func(func(*transaction.Transaction, *mempool.Pool, bool) bool, *mempool.Pool, *block.Block)
log *zap.Logger
lastBatch *storage.MemBatch
contracts native.Contracts
extensible atomic.Value
// knownValidatorsCount is the latest known validators count used
// for defaultBlockWitness.
knownValidatorsCount atomic.Value
// defaultBlockWitness stores transaction.Witness with m out of n multisig,
// where n = knownValidatorsCount.
defaultBlockWitness atomic.Value
stateRoot *stateroot.Module
// Notification subsystem.
events chan bcEvent
subCh chan any
unsubCh chan any
}
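// The two-lock discipline described in the field comments above boils down to
// the following shape (an illustrative sketch, not the actual AddBlock body):
//
//	bc.addLock.Lock()   // serialize block additions, may be held for long
//	defer bc.addLock.Unlock()
//	// ... expensive verification; readers still run under lock.RLock() ...
//	bc.lock.Lock()      // short critical section: switch the chain state
//	// ... persist the change cache, bump blockHeight ...
//	bc.lock.Unlock()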
// StateRoot represents local state root module.
type StateRoot interface {
CurrentLocalHeight() uint32
CurrentLocalStateRoot() util.Uint256
CurrentValidatedHeight() uint32
FindStates(root util.Uint256, prefix, start []byte, maxNum int) ([]storage.KeyValue, error)
SeekStates(root util.Uint256, prefix []byte, f func(k, v []byte) bool)
GetState(root util.Uint256, key []byte) ([]byte, error)
GetStateProof(root util.Uint256, key []byte) ([][]byte, error)
GetStateRoot(height uint32) (*state.MPTRoot, error)
GetLatestStateHeight(root util.Uint256) (uint32, error)
}
// bcEvent is an internal event generated by the Blockchain and then
// broadcast to other parties. It joins the new block and associated
// invocation logs; all the other events visible from the outside can be
// produced from this combination.
type bcEvent struct {
block *block.Block
appExecResults []*state.AppExecResult
}
// transferData is used for transfer caching during storeBlock.
type transferData struct {
Info state.TokenTransferInfo
Log11 state.TokenTransferLog // NEP-11 transfer log
Log17 state.TokenTransferLog // NEP-17 transfer log
}
// NewBlockchain returns a new blockchain object that will use the
// given Store as its underlying storage. For it to work correctly you need
// to spawn a goroutine for its Run method after this initialization.
func NewBlockchain(s storage.Store, cfg config.Blockchain, log *zap.Logger) (*Blockchain, error) {
if log == nil {
return nil, errors.New("empty logger")
}
// Protocol configuration fixups/checks.
if cfg.InitialGASSupply <= 0 {
cfg.InitialGASSupply = fixedn.Fixed8(DefaultInitialGAS)
log.Info("initial gas supply is not set or wrong, setting default value", zap.Stringer("InitialGASSupply", cfg.InitialGASSupply))
}
if cfg.MemPoolSize <= 0 {
cfg.MemPoolSize = defaultMemPoolSize
log.Info("mempool size is not set or wrong, setting default value", zap.Int("MemPoolSize", cfg.MemPoolSize))
}
if cfg.P2PSigExtensions && cfg.P2PNotaryRequestPayloadPoolSize <= 0 {
cfg.P2PNotaryRequestPayloadPoolSize = defaultP2PNotaryRequestPayloadPoolSize
log.Info("P2PNotaryRequestPayloadPool size is not set or wrong, setting default value", zap.Int("P2PNotaryRequestPayloadPoolSize", cfg.P2PNotaryRequestPayloadPoolSize))
}
if cfg.MaxBlockSize == 0 {
cfg.MaxBlockSize = defaultMaxBlockSize
log.Info("MaxBlockSize is not set or wrong, setting default value", zap.Uint32("MaxBlockSize", cfg.MaxBlockSize))
}
if cfg.MaxBlockSystemFee <= 0 {
cfg.MaxBlockSystemFee = defaultMaxBlockSystemFee
log.Info("MaxBlockSystemFee is not set or wrong, setting default value", zap.Int64("MaxBlockSystemFee", cfg.MaxBlockSystemFee))
}
if cfg.MaxTraceableBlocks == 0 {
cfg.MaxTraceableBlocks = defaultMaxTraceableBlocks
log.Info("MaxTraceableBlocks is not set or wrong, using default value", zap.Uint32("MaxTraceableBlocks", cfg.MaxTraceableBlocks))
}
if cfg.MaxTransactionsPerBlock == 0 {
cfg.MaxTransactionsPerBlock = defaultMaxTransactionsPerBlock
log.Info("MaxTransactionsPerBlock is not set or wrong, using default value",
zap.Uint16("MaxTransactionsPerBlock", cfg.MaxTransactionsPerBlock))
}
if cfg.TimePerBlock <= 0 {
cfg.TimePerBlock = defaultTimePerBlock
log.Info("TimePerBlock is not set or wrong, using default value",
zap.Duration("TimePerBlock", cfg.TimePerBlock))
}
if cfg.MaxValidUntilBlockIncrement == 0 {
const timePerDay = 24 * time.Hour
cfg.MaxValidUntilBlockIncrement = uint32(timePerDay / cfg.TimePerBlock)
log.Info("MaxValidUntilBlockIncrement is not set or wrong, using default value",
zap.Uint32("MaxValidUntilBlockIncrement", cfg.MaxValidUntilBlockIncrement))
}
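// With the default 15s TimePerBlock the computation above yields
// 24h/15s = 5760 blocks, i.e. transactions stay valid for at most
// a day by default.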
if cfg.P2PStateExchangeExtensions {
if !cfg.StateRootInHeader {
return nil, errors.New("P2PStatesExchangeExtensions are enabled, but StateRootInHeader is off")
}
if cfg.KeepOnlyLatestState && !cfg.RemoveUntraceableBlocks {
return nil, errors.New("P2PStateExchangeExtensions can be enabled either on MPT-complete node (KeepOnlyLatestState=false) or on light GC-enabled node (RemoveUntraceableBlocks=true)")
}
if cfg.StateSyncInterval <= 0 {
cfg.StateSyncInterval = defaultStateSyncInterval
log.Info("StateSyncInterval is not set or wrong, using default value",
zap.Int("StateSyncInterval", cfg.StateSyncInterval))
}
}
if cfg.Hardforks == nil {
cfg.Hardforks = map[string]uint32{}
for _, hf := range config.Hardforks {
cfg.Hardforks[hf.String()] = 0
}
log.Info("Hardforks are not set, using default value")
} else if len(cfg.Hardforks) != 0 {
// Explicitly set the height of all old omitted hardforks to 0 for proper
// IsHardforkEnabled behaviour.
for _, hf := range config.Hardforks {
if _, ok := cfg.Hardforks[hf.String()]; !ok {
cfg.Hardforks[hf.String()] = 0
continue
}
break
}
}
// Local config consistency checks.
if cfg.Ledger.RemoveUntraceableBlocks && cfg.Ledger.GarbageCollectionPeriod == 0 {
cfg.Ledger.GarbageCollectionPeriod = defaultGCPeriod
log.Info("GarbageCollectionPeriod is not set or wrong, using default value", zap.Uint32("GarbageCollectionPeriod", cfg.Ledger.GarbageCollectionPeriod))
}
bc := &Blockchain{
config: cfg,
dao: dao.NewSimple(s, cfg.StateRootInHeader),
persistent: dao.NewSimple(s, cfg.StateRootInHeader),
store: s,
stopCh: make(chan struct{}),
runToExitCh: make(chan struct{}),
memPool: mempool.New(cfg.MemPoolSize, 0, false, updateMempoolMetrics),
log: log,
events: make(chan bcEvent),
subCh: make(chan any),
unsubCh: make(chan any),
contracts: *native.NewContracts(cfg.ProtocolConfiguration),
}
bc.stateRoot = stateroot.NewModule(cfg, bc.VerifyWitness, bc.log, bc.dao.Store)
bc.contracts.Designate.StateRootService = bc.stateRoot
if err := bc.init(); err != nil {
return nil, err
}
bc.isRunning.Store(false)
return bc, nil
}
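// A hypothetical startup sketch following the contract described above
// (storage, config and logger construction elided):
//
//	bc, err := NewBlockchain(store, cfg, logger)
//	if err != nil {
//		return err
//	}
//	go bc.Run()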
// GetDesignatedByRole returns a set of designated public keys for the given role
// relevant for the next block.
func (bc *Blockchain) GetDesignatedByRole(r noderoles.Role) (keys.PublicKeys, uint32, error) {
// Retrieve designated nodes starting from the next block, because the current
// block is already stored, thus dependent services can't use the PostPersist
// callback to fetch relevant information at their start.
res, h, err := bc.contracts.Designate.GetDesignatedByRole(bc.dao, r, bc.BlockHeight()+1)
return res, h, err
}
// getCurrentHF returns the latest currently enabled hardfork. If no hardforks
// are enabled, the default config.Hardfork(0) value is returned.
func (bc *Blockchain) getCurrentHF() config.Hardfork {
var (
height = bc.BlockHeight()
current config.Hardfork
)
// Rely on the fact that hardforks list is continuous.
for _, hf := range config.Hardforks {
enableHeight, ok := bc.config.Hardforks[hf.String()]
if !ok || height < enableHeight {
break
}
current = hf
}
return current
}
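// For example, with Hardforks = {Aspidochelone: 0, Basilisk: 100} (real
// hardfork names, made-up heights) and the chain at height 50, getCurrentHF
// returns Aspidochelone; from height 100 onwards it returns Basilisk.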
// SetOracle sets oracle module. It can safely be called on the running blockchain.
// To unregister Oracle service use SetOracle(nil).
func (bc *Blockchain) SetOracle(mod native.OracleService) {
orc := bc.contracts.Oracle
currentHF := bc.getCurrentHF()
if mod != nil {
orcMd := orc.HFSpecificContractMD(&currentHF)
md, ok := orcMd.GetMethod(manifest.MethodVerify, -1)
if !ok {
panic(fmt.Errorf("%s method not found", manifest.MethodVerify))
}
mod.UpdateNativeContract(orcMd.NEF.Script, orc.GetOracleResponseScript(),
orc.Hash, md.MD.Offset)
keys, _, err := bc.GetDesignatedByRole(noderoles.Oracle)
if err != nil {
bc.log.Error("failed to get oracle key list")
return
}
mod.UpdateOracleNodes(keys)
reqs, err := bc.contracts.Oracle.GetRequests(bc.dao)
if err != nil {
bc.log.Error("failed to get current oracle request list")
return
}
mod.AddRequests(reqs)
}
orc.Module.Store(&mod)
bc.contracts.Designate.OracleService.Store(&mod)
}
// SetNotary sets notary module. It may safely be called on the running blockchain.
// To unregister Notary service use SetNotary(nil).
func (bc *Blockchain) SetNotary(mod native.NotaryService) {
if mod != nil {
keys, _, err := bc.GetDesignatedByRole(noderoles.P2PNotary)
if err != nil {
bc.log.Error("failed to get notary key list")
return
}
mod.UpdateNotaryNodes(keys)
}
bc.contracts.Designate.NotaryService.Store(&mod)
}
func (bc *Blockchain) init() error {
// If we could not find the version in the Store, we know that there is nothing stored.
ver, err := bc.dao.GetVersion()
if err != nil {
bc.log.Info("no storage version found! creating genesis block")
ver = dao.Version{
StoragePrefix: storage.STStorage,
StateRootInHeader: bc.config.StateRootInHeader,
P2PSigExtensions: bc.config.P2PSigExtensions,
P2PStateExchangeExtensions: bc.config.P2PStateExchangeExtensions,
KeepOnlyLatestState: bc.config.Ledger.KeepOnlyLatestState,
Magic: uint32(bc.config.Magic),
Value: version,
}
bc.dao.PutVersion(ver)
bc.dao.Version = ver
bc.persistent.Version = ver
genesisBlock, err := CreateGenesisBlock(bc.config.ProtocolConfiguration)
if err != nil {
return err
}
bc.HeaderHashes.initGenesis(bc.dao, genesisBlock.Hash())
if err := bc.stateRoot.Init(0); err != nil {
return fmt.Errorf("can't init MPT: %w", err)
}
return bc.storeBlock(genesisBlock, nil)
}
if ver.Value != version {
return fmt.Errorf("storage version mismatch (expected=%s, actual=%s)", version, ver.Value)
}
if ver.StateRootInHeader != bc.config.StateRootInHeader {
return fmt.Errorf("StateRootInHeader setting mismatch (config=%t, db=%t)",
bc.config.StateRootInHeader, ver.StateRootInHeader)
}
if ver.P2PSigExtensions != bc.config.P2PSigExtensions {
return fmt.Errorf("P2PSigExtensions setting mismatch (old=%t, new=%t)",
ver.P2PSigExtensions, bc.config.P2PSigExtensions)
}
if ver.P2PStateExchangeExtensions != bc.config.P2PStateExchangeExtensions {
return fmt.Errorf("P2PStateExchangeExtensions setting mismatch (old=%t, new=%t)",
ver.P2PStateExchangeExtensions, bc.config.P2PStateExchangeExtensions)
}
if ver.KeepOnlyLatestState != bc.config.Ledger.KeepOnlyLatestState {
return fmt.Errorf("KeepOnlyLatestState setting mismatch (old=%v, new=%v)",
ver.KeepOnlyLatestState, bc.config.Ledger.KeepOnlyLatestState)
}
if ver.Magic != uint32(bc.config.Magic) {
return fmt.Errorf("protocol configuration Magic mismatch (old=%v, new=%v)",
ver.Magic, bc.config.Magic)
}
bc.dao.Version = ver
bc.persistent.Version = ver
// At this point the stored version is present and compatible with the current
// configuration, so restore the chain state from the existing storage.
bc.log.Info("restoring blockchain", zap.String("version", version))
err = bc.HeaderHashes.init(bc.dao)
if err != nil {
return err
}
// Check whether the state change stage is stored and continue the interrupted
// state jump / state reset if so.
stateChStage, err := bc.dao.Store.Get([]byte{byte(storage.SYSStateChangeStage)})
if err == nil {
if len(stateChStage) != 1 {
return fmt.Errorf("invalid state jump stage format")
}
// State jump / state reset wasn't finished yet, thus continue it.
stateSyncPoint, err := bc.dao.GetStateSyncPoint()
if err != nil {
return fmt.Errorf("failed to get state sync point from the storage")
}
if (stateChStage[0] & stateResetBit) != 0 {
return bc.resetStateInternal(stateSyncPoint, stateChangeStage(stateChStage[0]&(^stateResetBit)))
}
if !(bc.config.P2PStateExchangeExtensions && bc.config.Ledger.RemoveUntraceableBlocks) {
return errors.New("state jump was not completed, but P2PStateExchangeExtensions are disabled or archival node capability is on. " +
"To start an archival node drop the database manually and restart the node")
}
return bc.jumpToStateInternal(stateSyncPoint, stateChangeStage(stateChStage[0]))
}
bHeight, err := bc.dao.GetCurrentBlockHeight()
if err != nil {
return fmt.Errorf("failed to retrieve current block height: %w", err)
}
bc.blockHeight = bHeight
bc.persistedHeight = bHeight
bc.log.Debug("initializing caches", zap.Uint32("blockHeight", bHeight))
if err = bc.stateRoot.Init(bHeight); err != nil {
return fmt.Errorf("can't init MPT at height %d: %w", bHeight, err)
}
err = bc.initializeNativeCache(bc.blockHeight, bc.dao)
if err != nil {
return fmt.Errorf("can't init natives cache: %w", err)
}
// Check autogenerated native contracts' manifests and NEFs against the stored ones.
// This needs to be done after native Management cache initialization to be
// able to get contract state from the DAO via the high-level bc API.
var current = bc.getCurrentHF()
for _, c := range bc.contracts.Contracts {
md := c.Metadata()
storedCS := bc.GetContractState(md.Hash)
// Check that contract was deployed.
if !bc.isHardforkEnabled(c.ActiveIn(), bHeight) {
if storedCS != nil {
return fmt.Errorf("native contract %s is already stored, but marked as inactive for height %d in config", md.Name, bHeight)
}
continue
}
if storedCS == nil {
return fmt.Errorf("native contract %s is not stored, but should be active at height %d according to config", md.Name, bHeight)
}
storedCSBytes, err := stackitem.SerializeConvertible(storedCS)
if err != nil {
return fmt.Errorf("failed to check native %s state against autogenerated one: %w", md.Name, err)
}
hfMD := md.HFSpecificContractMD(&current)
autogenCS := &state.Contract{
ContractBase: hfMD.ContractBase,
UpdateCounter: storedCS.UpdateCounter, // it can be restored only from the DB, so use the stored value.
}
autogenCSBytes, err := stackitem.SerializeConvertible(autogenCS)
if err != nil {
return fmt.Errorf("failed to check native %s state against autogenerated one: %w", md.Name, err)
}
if !bytes.Equal(storedCSBytes, autogenCSBytes) {
storedJ, _ := json.Marshal(storedCS)
autogenJ, _ := json.Marshal(autogenCS)
return fmt.Errorf("native %s: version mismatch for the latest hardfork %s (stored contract state differs from autogenerated one), "+
"try to resynchronize the node from the genesis: %s vs %s", md.Name, current, string(storedJ), string(autogenJ))
}
}
updateBlockHeightMetric(bHeight)
updatePersistedHeightMetric(bHeight)
updateHeaderHeightMetric(bc.HeaderHeight())
return bc.updateExtensibleWhitelist(bHeight)
}
// jumpToState is an atomic operation that changes Blockchain state to the one
// specified by the state sync point p. All the data needed for the jump must be
// collected by the state sync module.
func (bc *Blockchain) jumpToState(p uint32) error {
bc.addLock.Lock()
bc.lock.Lock()
defer bc.lock.Unlock()
defer bc.addLock.Unlock()
return bc.jumpToStateInternal(p, none)
}
// jumpToStateInternal is an internal representation of jumpToState callback that
// changes Blockchain state to the one specified by state sync point p and state
// jump stage. All the data needed for the jump must be in the DB, otherwise an
// error is returned. It is not protected by mutex.
func (bc *Blockchain) jumpToStateInternal(p uint32, stage stateChangeStage) error {
if p >= bc.HeaderHeight() {
return fmt.Errorf("invalid state sync point %d: headerHeignt is %d", p, bc.HeaderHeight())
}
bc.log.Info("jumping to state sync point", zap.Uint32("state sync point", p))
jumpStageKey := []byte{byte(storage.SYSStateChangeStage)}
switch stage {
case none:
bc.dao.Store.Put(jumpStageKey, []byte{byte(stateJumpStarted)})
fallthrough
case stateJumpStarted:
newPrefix := statesync.TemporaryPrefix(bc.dao.Version.StoragePrefix)
v, err := bc.dao.GetVersion()
if err != nil {
return fmt.Errorf("failed to get dao.Version: %w", err)
}
v.StoragePrefix = newPrefix
bc.dao.PutVersion(v)
bc.persistent.Version = v
bc.dao.Store.Put(jumpStageKey, []byte{byte(newStorageItemsAdded)})
fallthrough
case newStorageItemsAdded:
cache := bc.dao.GetPrivate()
prefix := statesync.TemporaryPrefix(bc.dao.Version.StoragePrefix)
bc.dao.Store.Seek(storage.SeekRange{Prefix: []byte{byte(prefix)}}, func(k, _ []byte) bool {
// #1468, but no need to copy here because it's done by the Store.
cache.Store.Delete(k)
return true
})
// After the current state is updated, we need to remove outdated state-related
// data if there is any. The only outdated data we might have is genesis-related
// data, so check whether the genesis block is no longer traceable from p.
if p > bc.config.MaxTraceableBlocks {
err := cache.DeleteBlock(bc.GetHeaderHash(0))
if err != nil {
return fmt.Errorf("failed to remove outdated state data for the genesis block: %w", err)
}
prefixes := []byte{byte(storage.STNEP11Transfers), byte(storage.STNEP17Transfers), byte(storage.STTokenTransferInfo)}
for i := range prefixes {
cache.Store.Seek(storage.SeekRange{Prefix: prefixes[i : i+1]}, func(k, v []byte) bool {
cache.Store.Delete(k)
return true
})
}
}
// Update SYS-prefixed info.
block, err := bc.dao.GetBlock(bc.GetHeaderHash(p))
if err != nil {
return fmt.Errorf("failed to get current block: %w", err)
}
cache.StoreAsCurrentBlock(block)
cache.Store.Put(jumpStageKey, []byte{byte(staleBlocksRemoved)})
_, err = cache.Persist()
if err != nil {
return fmt.Errorf("failed to persist old items removal: %w", err)
}
case staleBlocksRemoved:
// There's nothing to do after that, so just continue with the common
// operations and remove the state jump stage at the end.
default:
return fmt.Errorf("unknown state jump stage: %d", stage)
}
block, err := bc.dao.GetBlock(bc.GetHeaderHash(p + 1))
if err != nil {
return fmt.Errorf("failed to get block to init MPT: %w", err)
}
bc.stateRoot.JumpToState(&state.MPTRoot{
Index: p,
Root: block.PrevStateRoot,
})
bc.dao.Store.Delete(jumpStageKey)
err = bc.resetRAMState(p, false)
if err != nil {
return fmt.Errorf("failed to update in-memory blockchain data: %w", err)
}
return nil
}
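// A note on the switch/fallthrough layout above: every stage persists the
// next stage's marker under storage.SYSStateChangeStage before moving on, so
// if the node dies mid-jump, init() re-reads the marker on startup and
// re-enters this function at exactly the interrupted stage; fallthrough then
// replays the remaining stages in order.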
// resetRAMState resets in-memory cached info.
func (bc *Blockchain) resetRAMState(height uint32, resetHeaders bool) error {
if resetHeaders {
err := bc.HeaderHashes.init(bc.dao)
if err != nil {
return err
}
}
block, err := bc.dao.GetBlock(bc.GetHeaderHash(height))
if err != nil {
return fmt.Errorf("failed to get current block: %w", err)
}
bc.topBlock.Store(block)
atomic.StoreUint32(&bc.blockHeight, height)
atomic.StoreUint32(&bc.persistedHeight, height)
err = bc.initializeNativeCache(block.Index, bc.dao)
if err != nil {
return fmt.Errorf("failed to initialize natives cache: %w", err)
}
if err := bc.updateExtensibleWhitelist(height); err != nil {
return fmt.Errorf("failed to update extensible whitelist: %w", err)
}
updateBlockHeightMetric(height)
updatePersistedHeightMetric(height)
updateHeaderHeightMetric(bc.HeaderHeight())
return nil
}
// Reset resets chain state to the specified height if possible. This method
// performs direct DB changes and can be called on non-running Blockchain only.
func (bc *Blockchain) Reset(height uint32) error {
if bc.isRunning.Load().(bool) {
return errors.New("can't reset state of the running blockchain")
}
bc.dao.PutStateSyncPoint(height)
return bc.resetStateInternal(height, none)
}
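// A hypothetical offline usage sketch (this is essentially what the
// `neo-go db reset --height N` CLI flow does; error handling elided):
//
//	bc, err := NewBlockchain(store, cfg, logger)
//	// Don't call bc.Run(): Reset requires a non-running chain.
//	err = bc.Reset(600000)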
func (bc *Blockchain) resetStateInternal(height uint32, stage stateChangeStage) error {
// Cache isn't yet initialized, so retrieve block height right from DAO.
currHeight, err := bc.dao.GetCurrentBlockHeight()
if err != nil {
return fmt.Errorf("failed to retrieve current block height: %w", err)
}
// Headers are already initialized by this moment, thus we may use the chain's API.
hHeight := bc.HeaderHeight()
// State reset may already be started by this moment, so perform these checks only if it wasn't.
if stage == none {
if height > currHeight {
return fmt.Errorf("current block height is %d, can't reset state to height %d", currHeight, height)
}
if height == currHeight && hHeight == currHeight {
bc.log.Info("chain is at the proper state", zap.Uint32("height", height))
return nil
}
if bc.config.Ledger.KeepOnlyLatestState {
return fmt.Errorf("KeepOnlyLatestState is enabled, state for height %d is outdated and removed from the storage", height)
}
if bc.config.Ledger.RemoveUntraceableBlocks && currHeight >= bc.config.MaxTraceableBlocks {
return fmt.Errorf("RemoveUntraceableBlocks is enabled, a necessary batch of traceable blocks has already been removed")
}
}
// Retrieve necessary state before the DB modification.
b, err := bc.GetBlock(bc.GetHeaderHash(height))
if err != nil {
return fmt.Errorf("failed to retrieve block %d: %w", height, err)
}
sr, err := bc.stateRoot.GetStateRoot(height)
if err != nil {
return fmt.Errorf("failed to retrieve stateroot for height %d: %w", height, err)
}
v := bc.dao.Version
// dao is a MemCachedStore over the DB; we use dao directly to persist cached
// changes right to the underlying DB.
cache := bc.dao
// upperCache is a private MemCachedStore over cache. During each of the state
// sync stages we put the data inside upperCache; at the end of each stage we
// persist changes from upperCache to cache. Changes from cache are persisted
// directly to the underlying persistent storage (BoltDB, LevelDB, etc.).
// upperCache/cache segregation is needed to keep the DB state clean and to
// persist data from different stages separately.
upperCache := cache.GetPrivate()
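// Schematically, the write path during reset is (illustrative):
//
//	upperCache (per-stage) --Persist--> cache (bc.dao) --Persist--> disk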
bc.log.Info("initializing state reset", zap.Uint32("target height", height))
start := time.Now()
p := start
// Start the batch persisting routine; it will be used to persist batches of
// blocks/txs/AERs/storage items.
type postPersist func(persistedKeys int, err error) error
var (
persistCh = make(chan postPersist)
persistToExitCh = make(chan struct{})
)
go func() {
for {
f, ok := <-persistCh
if !ok {
break
}
persistErr := f(cache.Persist())
if persistErr != nil {
bc.log.Fatal("persist failed", zap.Error(persistErr))
panic(persistErr)
}
}
close(persistToExitCh)
}()
defer func() {
close(persistCh)
<-persistToExitCh
bc.log.Info("reset finished successfully", zap.Duration("took", time.Since(start)))
}()
resetStageKey := []byte{byte(storage.SYSStateChangeStage)}
switch stage {
case none:
2023-04-11 11:18:48 +00:00
upperCache.Store.Put(resetStageKey, []byte{stateResetBit | byte(stateJumpStarted)})
// Technically, there's no difference between Persist() and PersistSync() for the private
// MemCached storage, but we'd better use the sync version in case of some further code changes.
_, uerr := upperCache.PersistSync()
if uerr != nil {
panic(uerr)
}
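// Take a fresh private layer on top of the cache, so that changes made by the
// next reset stage don't get mixed into the batch that is being persisted.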
upperCache = cache.GetPrivate()
persistCh <- func(persistedKeys int, err error) error {
if err != nil {
return fmt.Errorf("failed to persist state reset start marker to the DB: %w", err)
}
return nil
}
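// The start marker is queued for persisting, proceed directly to the first
// actual reset stage.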
fallthrough
case stateJumpStarted:
bc.log.Debug("trying to reset blocks, transactions and AERs")
// Remove blocks/transactions/aers from currHeight down to height (not including height itself).
// Keep headers for now, they'll be removed later. It's hard to handle the whole set of changes in
// one stage, so persist periodically.
const persistBatchSize = 100 * headerBatchCount // count blocks only, should be enough to avoid OOM killer even for large blocks
var (
pBlocksStart = p
blocksCnt, batchCnt int
keysCnt = new(int)
)
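// Blocks are deleted into upperCache; once persistBatchSize blocks are
// collected, the batch is flushed to the lower cache and a completion
// callback is queued on persistCh, while collection continues with a
// fresh private layer.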
for i := height + 1; i <= currHeight; i++ {
err := upperCache.DeleteBlock(bc.GetHeaderHash(i))
if err != nil {
return fmt.Errorf("error while removing block %d: %w", i, err)
}
blocksCnt++
if blocksCnt == persistBatchSize {
blocksCnt = 0
batchCnt++
bc.log.Info("intermediate batch of removed blocks, transactions and AERs is collected",
zap.Int("batch", batchCnt),
zap.Duration("took", time.Since(p)))
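// Capture the batch number and start time by value, so that the persist
// callback below reports this batch even after batchCnt advances.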
persistStart := time.Now()
persistBatch := batchCnt
_, uerr := upperCache.PersistSync()
if uerr != nil {
panic(uerr)
}
upperCache = cache.GetPrivate()
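// Queue a completion callback for this batch; it receives the number of
// persisted keys and any persist error.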
persistCh <- func(persistedKeys int, err error) error {
if err != nil {
return fmt.Errorf("failed to persist intermediate batch of removed blocks, transactions and AERs: %w", err)
}
*keysCnt += persistedKeys
bc.log.Debug("intermediate batch of removed blocks, transactions and AERs is persisted",
zap.Int("batch", persistBatch),
zap.Duration("took", time.Since(persistStart)),
zap.Int("keys", persistedKeys))
return nil
}
p = time.Now()
}
}
2023-04-11 11:18:48 +00:00
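// Record the completed stage in the reset point marker so that an
// interrupted reset can be resumed from this stage after a restart
// (see the matching case in the switch below).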
upperCache.Store.Put(resetStageKey, []byte{stateResetBit | byte(staleBlocksRemoved)})
batchCnt++
bc.log.Info("last batch of removed blocks, transactions and AERs is collected",
zap.Int("batch", batchCnt),
zap.Duration("took", time.Since(p)))
bc.log.Info("blocks, transactions ans AERs are reset", zap.Duration("took", time.Since(pBlocksStart)))
persistStart := time.Now()
persistBatch := batchCnt
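// Synchronously flush the stage's changes from upperCache down to cache:
// reset stages must be atomic with respect to DB persisting, otherwise
// writes from the next stage could leak into the previous stage's
// in-flight persist. A fresh upperCache is taken for the next stage.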
_, uerr := upperCache.PersistSync()
if uerr != nil {
panic(uerr)
}
upperCache = cache.GetPrivate()
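// Hand result reporting off to the background persist loop; the callback
// receives the number of keys flushed to disk (or an error) once the
// lower cache has been persisted.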
persistCh <- func(persistedKeys int, err error) error {
if err != nil {
return fmt.Errorf("failed to persist last batch of removed blocks, transactions ans AERs: %w", err)
}
*keysCnt += persistedKeys
bc.log.Debug("last batch of removed blocks, transactions and AERs is persisted",
zap.Int("batch", persistBatch),
zap.Duration("took", time.Since(persistStart)),
zap.Int("keys", persistedKeys))
return nil
}
p = time.Now()
fallthrough
case staleBlocksRemoved:
// Completely remove contract IDs to update them later.
bc.log.Debug("trying to reset contract storage items")
pStorageStart := p
p = time.Now()
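// Read the MPT in the same mode it was written: trie nodes carry GC
// flags when untraceable blocks are removed, so the reader mode must
// match.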
var mode = mpt.ModeAll
if bc.config.Ledger.RemoveUntraceableBlocks {
mode |= mpt.ModeGCFlag
}
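// Traverse contract storage as of the target state root via a
// trie-backed store view.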
trieStore := mpt.NewTrieStore(sr.Root, mode, upperCache.Store)
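// Items collected at the target height are re-written under a temporary
// prefix (the same one statesync uses); stale items left under the old
// prefix are dropped at a later stage.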
oldStoragePrefix := v.StoragePrefix
newStoragePrefix := statesync.TemporaryPrefix(oldStoragePrefix)
const persistBatchSize = 200000
var cnt, storageItmsCnt, batchCnt int
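// Walk all storage items reachable from the target root, flushing an
// intermediate batch every persistBatchSize keys to keep memory usage
// bounded.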
trieStore.Seek(storage.SeekRange{Prefix: []byte{byte(oldStoragePrefix)}}, func(k, v []byte) bool {
if cnt >= persistBatchSize {
cnt = 0
batchCnt++
bc.log.Info("intermediate batch of contract storage items and IDs is collected",
zap.Int("batch", batchCnt),
zap.Duration("took", time.Since(p)))
persistStart := time.Now()
persistBatch := batchCnt
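// Same stage-atomic flush as above: sync the batch down to cache and
// keep collecting into a fresh upperCache while the background loop
// persists to disk.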
_, uerr := upperCache.PersistSync()
if uerr != nil {
panic(uerr)
}
upperCache = cache.GetPrivate()
persistCh <- func(persistedKeys int, err error) error {
if err != nil {
return fmt.Errorf("failed to persist intermediate batch of contract storage items: %w", err)
}
bc.log.Debug("intermediate batch of contract storage items is persisted",
zap.Int("batch", persistBatch),
zap.Duration("took", time.Since(persistStart)),
zap.Int("keys", persistedKeys))
return nil
}
p = time.Now()
}
// May safely omit KV copying: only the key's prefix byte is rewritten in place before storing.
k[0] = byte(newStoragePrefix)
core: avoid squashing of data from different state reset stages `cache` persisting operation is concurrent with the storage modifications made by further state reset stages. We can't allow situation when data from the next stage are leaking into the previous stage. State reset stages must be atomic in turms of DB persisting, thus, use another `upperCache` MemCached store to keep them apart. Here are the results of mainnet's BoltDB reset from 1.1M to 6K: This patch: ``` anna@kiwi:~/Documents/GitProjects/nspcc-dev/neo-go$ ./bin/neo-go db reset -m --debug --height 600000 2023-04-11T16:15:25.783+0300 INFO MaxBlockSize is not set or wrong, setting default value {"MaxBlockSize": 262144} 2023-04-11T16:15:25.783+0300 INFO MaxBlockSystemFee is not set or wrong, setting default value {"MaxBlockSystemFee": 900000000000} 2023-04-11T16:15:25.783+0300 INFO MaxTransactionsPerBlock is not set or wrong, using default value {"MaxTransactionsPerBlock": 512} 2023-04-11T16:15:25.783+0300 INFO MaxValidUntilBlockIncrement is not set or wrong, using default value {"MaxValidUntilBlockIncrement": 5760} 2023-04-11T16:15:25.787+0300 INFO restoring blockchain {"version": "0.2.8"} 2023-04-11T16:15:25.906+0300 INFO initializing state reset {"target height": 600000} 2023-04-11T16:15:25.906+0300 DEBUG trying to reset blocks, transactions and AERs 2023-04-11T16:15:57.031+0300 INFO intermediate batch of removed blocks, transactions and AERs is collected {"batch": 1, "took": "31.125057214s"} 2023-04-11T16:16:12.644+0300 DEBUG intermediate batch of removed blocks, transactions and AERs is persisted {"batch": 1, "took": "15.613156971s", "keys": 321895} 2023-04-11T16:16:13.895+0300 INFO intermediate batch of removed blocks, transactions and AERs is collected {"batch": 2, "took": "16.663444208s"} 2023-04-11T16:16:19.784+0300 INFO last batch of removed blocks, transactions and AERs is collected {"batch": 3, "took": "5.760308543s"} 2023-04-11T16:16:19.784+0300 INFO blocks, transactions ans AERs are reset {"took": "53.878632911s"} 2023-04-11T16:16:22.870+0300 DEBUG intermediate batch of removed blocks, transactions and AERs is persisted {"batch": 2, "took": "8.974838893s", "keys": 334823} 2023-04-11T16:16:22.870+0300 DEBUG trying to reset contract storage items 2023-04-11T16:16:27.272+0300 DEBUG last batch of removed blocks, transactions and AERs is persisted {"batch": 3, "took": "7.487357441s", "keys": 208913} 2023-04-11T16:17:23.678+0300 INFO intermediate batch of contract storage items and IDs is collected {"batch": 1, "took": "1m0.80711106s"} 2023-04-11T16:18:00.769+0300 INFO intermediate batch of contract storage items and IDs is collected {"batch": 2, "took": "36.967660061s"} 2023-04-11T16:18:20.478+0300 DEBUG intermediate batch of contract storage items is persisted {"batch": 1, "took": "56.796257788s", "keys": 200000} 2023-04-11T16:18:54.115+0300 INFO intermediate batch of contract storage items and IDs is collected {"batch": 3, "took": "33.637412437s"} 2023-04-11T16:19:18.844+0300 DEBUG intermediate batch of contract storage items is persisted {"batch": 2, "took": "1m18.0737668s", "keys": 200000} 2023-04-11T16:19:27.650+0300 INFO last batch of contract storage items is collected {"batch": 4, "took": "8.806264019s"} 2023-04-11T16:19:27.650+0300 INFO contract storage items are reset {"took": "3m4.780232077s", "keys": 656944} 2023-04-11T16:20:15.660+0300 DEBUG intermediate batch of contract storage items is persisted {"batch": 3, "took": "1m21.544386403s", "keys": 200000} 2023-04-11T16:20:15.660+0300 DEBUG trying to reset 
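			// Copy the storage item into the upper MemCached layer; a separate
			// upper layer keeps this stage's changes apart from the lower cache,
			// which may still be persisting the previous stage concurrently.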
			upperCache.Store.Put(k, v)
			cnt++
			storageItmsCnt++

			return true
		})
		trieStore.Close()
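		// Record stage completion in the same upper layer, so that the stage
		// marker is persisted atomically together with the stage data.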
		upperCache.Store.Put(resetStageKey, []byte{stateResetBit | byte(newStorageItemsAdded)})
		batchCnt++
		persistBatch := batchCnt
		bc.log.Info("last batch of contract storage items is collected", zap.Int("batch", batchCnt), zap.Duration("took", time.Since(p)))
		bc.log.Info("contract storage items are reset", zap.Duration("took", time.Since(pStorageStart)),
			zap.Int("keys", storageItmsCnt))

		lastStart := time.Now()
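		// Synchronously flush this stage into the lower cache and start a fresh
		// private layer for the next stage; the callback sent to persistCh below
		// only reports the outcome of the background DB persist.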
		_, uerr := upperCache.PersistSync()
		if uerr != nil {
			panic(uerr)
		}
		upperCache = cache.GetPrivate()
		persistCh <- func(persistedKeys int, err error) error {
			if err != nil {
				return fmt.Errorf("failed to persist contract storage items and IDs changes to the DB: %w", err)
			}
			bc.log.Debug("last batch of contract storage items and IDs is persisted", zap.Int("batch", persistBatch), zap.Duration("took", time.Since(lastStart)), zap.Int("keys", persistedKeys))
			return nil
		}
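		// No need to wait for the background persist to finish: the next stage
		// accumulates its changes in its own upperCache layer.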
		p = time.Now()

		fallthrough
	case newStorageItemsAdded:
		// Reset SYS-prefixed and IX-prefixed information.
		bc.log.Debug("trying to reset headers information")
		for i := height + 1; i <= hHeight; i++ {
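			// Purge every cached header above the target height.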
			upperCache.PurgeHeader(bc.GetHeaderHash(i))
		}
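		// Drop stored header hashes above the target height and make b (the
		// block at the reset height) the current block and current header.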
		upperCache.DeleteHeaderHashes(height+1, headerBatchCount)
		upperCache.StoreAsCurrentBlock(b)
		upperCache.PutCurrentHeader(b.Hash(), height)
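		// The fresh storage items were re-inserted under the temporary prefix,
		// so switching the version's StoragePrefix makes them the live set.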
		v.StoragePrefix = statesync.TemporaryPrefix(v.StoragePrefix)
2023-04-11 11:18:48 +00:00
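// Record the new DB version in the staging (upperCache) layer before manually
// updating the in-memory copies below.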
upperCache.PutVersion(v)
// It's important to manually change the cache's Version at this stage, so that native cache
// can be properly initialized (with the correct contract storage data prefix) at the final
// stage of the state reset. At the same time, DB's SYSVersion-prefixed data will be persisted
// from upperCache to cache in a standard way (several lines below).
cache.Version = v
bc.persistent.Version = v
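// Mark the headers reset stage as completed in the staging layer so that an
// interrupted reset can be resumed from the next stage.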
upperCache.Store.Put(resetStageKey, []byte{stateResetBit | byte(headersReset)})
bc.log.Info("headers information is reset", zap.Duration("took", time.Since(p)))
persistStart := time.Now()
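// Flush the staged headers changes into the lower cache synchronously, start a
// fresh staging layer for the next stage and queue the asynchronous cache-to-DB
// persist. The persist routine consuming persistCh roughly does (a sketch, the
// actual loop lives earlier in this function):
//
//	for f := range persistCh {
//		keys, err := cache.PersistSync()
//		_ = f(keys, err)
//	}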
_, uerr := upperCache.PersistSync()
if uerr != nil {
panic(uerr)
}
upperCache = cache.GetPrivate()
persistCh <- func(persistedKeys int, err error) error {
if err != nil {
return fmt.Errorf("failed to persist headers changes to the DB: %w", err)
}
bc.log.Debug("headers information is persisted", zap.Duration("took", time.Since(persistStart)), zap.Int("keys", persistedKeys))
return nil
}
p = time.Now()
fallthrough
case headersReset:
// Reset MPT.
bc.log.Debug("trying to reset state root information and NEP transfers")
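// Roll the MPT-backed state root data back to the target height, staging the
// resulting changes in upperCache.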
err = bc.stateRoot.ResetState(height, upperCache.Store)
if err != nil {
return fmt.Errorf("failed to rollback MPT state: %w", err)
}
// Reset transfers.
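// Strip NEP transfer log and transfer info entries recorded above the target
// height, staging the changes in upperCache.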
err = bc.resetTransfers(upperCache, height)
if err != nil {
return fmt.Errorf("failed to strip transfer log / transfer info: %w", err)
}
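// Each reset stage is kept atomic with respect to disk persisting: a stage
// accumulates its changes in upperCache (a private MemCachedStore layered on
// top of cache), flushes them into cache synchronously once the stage is
// complete and only then queues the background persist via persistCh, so
// data of different stages can't be squashed into a single on-disk batch.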
upperCache.Store.Put(resetStageKey, []byte{stateResetBit | byte(transfersReset)})
bc.log.Info("state root information and NEP transfers are reset", zap.Duration("took", time.Since(p)))
persistStart := time.Now()
_, uerr := upperCache.PersistSync()
if uerr != nil {
panic(uerr)
}
upperCache = cache.GetPrivate()
persistCh <- func(persistedKeys int, err error) error {
if err != nil {
return fmt.Errorf("failed to persist contract storage items changes to the DB: %w", err)
}
bc.log.Debug("state root information and NEP transfers are persisted", zap.Duration("took", time.Since(persistStart)), zap.Int("keys", persistedKeys))
return nil
}
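// A minimal sketch of the persistCh consumer assumed here (the actual
// persisting goroutine is set up earlier in this function and may differ in
// detail): each queued callback is invoked with the result of persisting the
// shared cache to disk, roughly:
//
//	for f := range persistCh {
//		persistedKeys, err := cache.Persist() // potentially long
//		if err := f(persistedKeys, err); err != nil {
//			// Surface the stage error and stop persisting.
//			break
//		}
//	}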
p = time.Now()
fallthrough
case transfersReset:
// There's nothing to do after that, so just continue with the common
// operations and remove the state reset stage marker in the end.
default:
return fmt.Errorf("unknown state reset stage: %d", stage)
}
// Direct (cache-less) DB operation: remove stale storage items.
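// storage.SeekGC visits every key under the given prefix and removes those
// for which the handler returns false; always returning false here drops all
// items stored under the temporary storage prefix.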
bc.log.Debug("trying to remove stale storage items")
keys := 0
err = bc.store.SeekGC(storage.SeekRange{
Prefix: []byte{byte(statesync.TemporaryPrefix(v.StoragePrefix))},
}, func(_, _ []byte) bool {
keys++
return false
})
if err != nil {
return fmt.Errorf("faield to remove stale storage items from DB: %w", err)
}
bc.log.Info("stale storage items are reset", zap.Duration("took", time.Since(p)), zap.Int("keys", keys))
p = time.Now()
bc.log.Debug("trying to remove state reset point")
upperCache.Store.Delete(resetStageKey)
// Unlike the state jump, state sync point must be removed as we have complete state for this height.
upperCache.Store.Delete([]byte{byte(storage.SYSStateSyncPoint)})
bc.log.Info("state reset point is removed", zap.Duration("took", time.Since(p)))
persistStart := time.Now()
_, uerr := upperCache.PersistSync()
if uerr != nil {
panic(uerr)
}
persistCh <- func(persistedKeys int, err error) error {
if err != nil {
return fmt.Errorf("failed to persist state reset stage to DAO: %w", err)
}
bc.log.Info("state reset point information is persisted", zap.Duration("took", time.Since(persistStart)), zap.Int("keys", persistedKeys))
return nil
}
p = time.Now()
err = bc.resetRAMState(height, true)
if err != nil {
return fmt.Errorf("failed to update in-memory blockchain data: %w", err)
}
return nil
}
func (bc *Blockchain) initializeNativeCache(blockHeight uint32, d *dao.Simple) error {
for _, c := range bc.contracts.Contracts {
// Check that contract was deployed.
if !bc.isHardforkEnabled(c.ActiveIn(), blockHeight) {
continue
}
err := c.InitializeCache(blockHeight, d)
if err != nil {
return fmt.Errorf("failed to initialize cache for %s: %w", c.Metadata().Name, err)
}
}
return nil
}
// isHardforkEnabled returns true if the specified hardfork is enabled at the
// given height. nil hardfork is treated as always enabled.
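// For example, with Hardforks configured as {"Aspidochelone": 100} it returns
// false below height 100 and true from height 100 on, while a hardfork name
// missing from the configuration is considered disabled.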
func (bc *Blockchain) isHardforkEnabled(hf *config.Hardfork, blockHeight uint32) bool {
hfs := bc.config.Hardforks
if hf != nil {
start, ok := hfs[hf.String()]
if !ok || start > blockHeight {
return false
}
}
return true
}
// Run runs the chain loop; it needs to be run as a goroutine, and executing
// it is critical for correct Blockchain operation.
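// A minimal lifecycle sketch (illustrative, assuming a Blockchain created via
// NewBlockchain):
//
//	go bc.Run()
//	// ... add blocks via bc.AddBlock ...
//	bc.Close()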
func (bc *Blockchain) Run() {
bc.isRunning.Store(true)
persistTimer := time.NewTimer(persistInterval)
defer func() {
persistTimer.Stop()
if _, err := bc.persist(true); err != nil {
bc.log.Warn("failed to persist", zap.Error(err))
}
if err := bc.dao.Store.Close(); err != nil {
bc.log.Warn("failed to close db", zap.Error(err))
}
bc.isRunning.Store(false)
close(bc.runToExitCh)
}()
go bc.notificationDispatcher()
var nextSync bool
for {
select {
case <-bc.stopCh:
return
case <-persistTimer.C:
var oldPersisted uint32
var gcDur time.Duration
if bc.config.Ledger.RemoveUntraceableBlocks {
oldPersisted = atomic.LoadUint32(&bc.persistedHeight)
}
dur, err := bc.persist(nextSync)
if err != nil {
bc.log.Warn("failed to persist blockchain", zap.Error(err))
}
if bc.config.Ledger.RemoveUntraceableBlocks {
gcDur = bc.tryRunGC(oldPersisted)
}
nextSync = dur > persistInterval*2
interval := persistInterval - dur - gcDur
interval = max(interval, time.Microsecond) // Reset doesn't work with zero or negative value.
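// E.g. a 300ms persist leaves roughly persistInterval-300ms on the timer;
// nextSync (set when a persist took more than twice the interval) is passed
// to the next bc.persist call, the same flag the deferred shutdown persist
// above sets to true.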
persistTimer.Reset(interval)
}
}
}
func (bc *Blockchain) tryRunGC(oldHeight uint32) time.Duration {
var dur time.Duration
newHeight := atomic.LoadUint32(&bc.persistedHeight)
var tgtBlock = int64(newHeight)
tgtBlock -= int64(bc.config.MaxTraceableBlocks)
if bc.config.P2PStateExchangeExtensions {
syncP := newHeight / uint32(bc.config.StateSyncInterval)
syncP--
syncP *= uint32(bc.config.StateSyncInterval)
tgtBlock = min(tgtBlock, int64(syncP))
}
// Always round to the GCP.
tgtBlock /= int64(bc.config.Ledger.GarbageCollectionPeriod)
tgtBlock *= int64(bc.config.Ledger.GarbageCollectionPeriod)
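// E.g. with GarbageCollectionPeriod=10000 a tgtBlock of 123456 becomes 120000
// here (integer division drops the remainder).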
// Count periods.
oldHeight /= bc.config.Ledger.GarbageCollectionPeriod
newHeight /= bc.config.Ledger.GarbageCollectionPeriod
if tgtBlock > int64(bc.config.Ledger.GarbageCollectionPeriod) && newHeight != oldHeight {
tgtBlock /= int64(bc.config.Ledger.GarbageCollectionPeriod)
tgtBlock *= int64(bc.config.Ledger.GarbageCollectionPeriod)
dur = bc.stateRoot.GC(uint32(tgtBlock), bc.store)
dur += bc.removeOldTransfers(uint32(tgtBlock))
}
return dur
}
// resetTransfers is a helper function that strips the top newest NEP17 and NEP11 transfer logs
// down to the given height (not including the height itself) and updates corresponding token
// transfer info.
func (bc *Blockchain) resetTransfers(cache *dao.Simple, height uint32) error {
// Completely remove transfer info, since updating it takes too much effort. We'll
// gather new transfer info on-the-fly later.
cache.Store.Seek(storage.SeekRange{
Prefix: []byte{byte(storage.STTokenTransferInfo)},
}, func(k, v []byte) bool {
cache.Store.Delete(k)
return true
})
// Look inside each transfer batch and iterate over the batch transfers, picking those that
// are not newer than the given height. Also, for each suitable transfer update the transfer
// info, flushing the changes once the account's transfers are completely processed.
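// Each batch value is one batch-size byte followed by the serialized
// transfers, so the decoding loop below reads v[0] as the count and then
// decodes the rest of v sequentially.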
prefixes := []byte{byte(storage.STNEP11Transfers), byte(storage.STNEP17Transfers)}
for i := range prefixes {
var (
acc util.Uint160
trInfo *state.TokenTransferInfo
removeFollowing bool
seekErr error
)
cache.Store.Seek(storage.SeekRange{
Prefix: prefixes[i : i+1],
Backwards: false, // From oldest to newest batch.
}, func(k, v []byte) bool {
var batchAcc util.Uint160
copy(batchAcc[:], k[1:])
if batchAcc != acc { // Some new account we're iterating over.
if trInfo != nil {
seekErr = cache.PutTokenTransferInfo(acc, trInfo)
if seekErr != nil {
return false
}
}
acc = batchAcc
trInfo = nil
removeFollowing = false
} else if removeFollowing {
cache.Store.Delete(bytes.Clone(k))
return seekErr == nil
}
r := io.NewBinReaderFromBuf(v[1:])
l := len(v)
bytesRead := 1 // 1 is for batch size byte which is read by default.
var (
oldBatchSize = v[0]
newBatchSize byte
)
for i := byte(0); i < v[0]; i++ { // From oldest to newest transfer of the batch.
var t *state.NEP17Transfer
if k[0] == byte(storage.STNEP11Transfers) {
tr := new(state.NEP11Transfer)
tr.DecodeBinary(r)
t = &tr.NEP17Transfer
} else {
t = new(state.NEP17Transfer)
t.DecodeBinary(r)
}
if r.Err != nil {
seekErr = fmt.Errorf("failed to decode subsequent transfer: %w", r.Err)
break
}
if t.Block > height {
break
}
bytesRead = l - r.Len() // Including batch size byte.
newBatchSize++
if trInfo == nil {
var err error
trInfo, err = cache.GetTokenTransferInfo(batchAcc)
if err != nil {
seekErr = fmt.Errorf("failed to retrieve token transfer info for %s: %w", batchAcc.StringLE(), r.Err)
return false
}
}
appendTokenTransferInfo(trInfo, t.Asset, t.Block, t.Timestamp, k[0] == byte(storage.STNEP11Transfers), newBatchSize >= state.TokenTransferBatchSize)
}
if newBatchSize == oldBatchSize {
// The batch is already in storage and doesn't need to be changed.
return seekErr == nil
}
if newBatchSize > 0 {
v[0] = newBatchSize
cache.Store.Put(k, v[:bytesRead])
} else {
cache.Store.Delete(k)
removeFollowing = true
}
return seekErr == nil
})
if seekErr != nil {
return seekErr
}
if trInfo != nil {
// Flush the last batch of transfer info changes.
err := cache.PutTokenTransferInfo(acc, trInfo)
if err != nil {
return err
}
}
}
return nil
}
// appendTokenTransferInfo is a helper for resetTransfers that updates token transfer info
// with respect to the given transfer that was added to the subsequent transfer batch.
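// When the given transfer completes a batch (lastTransferInBatch is set), the
// corresponding next-batch counter is advanced and the batch's newest
// timestamp is recorded, so the next gathered transfer opens a fresh batch.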
func appendTokenTransferInfo(transferData *state.TokenTransferInfo,
token int32, bIndex uint32, bTimestamp uint64, isNEP11 bool, lastTransferInBatch bool) {
var (
newBatch *bool
nextBatch *uint32
currTimestamp *uint64
)
if !isNEP11 {
newBatch = &transferData.NewNEP17Batch
nextBatch = &transferData.NextNEP17Batch
currTimestamp = &transferData.NextNEP17NewestTimestamp
} else {
newBatch = &transferData.NewNEP11Batch
nextBatch = &transferData.NextNEP11Batch
currTimestamp = &transferData.NextNEP11NewestTimestamp
}
transferData.LastUpdated[token] = bIndex
*newBatch = lastTransferInBatch
if *newBatch {
*nextBatch++
*currTimestamp = bTimestamp
}
}
func (bc *Blockchain) removeOldTransfers(index uint32) time.Duration {
bc.log.Info("starting transfer data garbage collection", zap.Uint32("index", index))
start := time.Now()
h, err := bc.GetHeader(bc.GetHeaderHash(index))
if err != nil {
dur := time.Since(start)
bc.log.Error("failed to find block header for transfer GC", zap.Duration("time", dur), zap.Error(err))
return dur
}
var removed, kept int64
var ts = h.Timestamp
prefixes := []byte{byte(storage.STNEP11Transfers), byte(storage.STNEP17Transfers)}
for i := range prefixes {
var acc util.Uint160
var canDrop bool
err = bc.store.SeekGC(storage.SeekRange{
Prefix: prefixes[i : i+1],
Backwards: true, // From new to old.
}, func(k, v []byte) bool {
// We don't look inside the batches, since that requires too much effort;
// instead we drop batches that are confirmed to contain outdated entries.
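// Since iteration goes from new to old, the first batch at or below the
// cut-off timestamp is kept as a boundary and only strictly older batches
// of the same account are dropped.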
var batchAcc util.Uint160
var batchTs = binary.BigEndian.Uint64(k[1+util.Uint160Size:])
copy(batchAcc[:], k[1:])
if batchAcc != acc { // Some new account we're iterating over.
acc = batchAcc
} else if canDrop { // We've seen this account and all entries in this batch are guaranteed to be outdated.
removed++
return false
}
// We don't know what's inside, so keep the current
// batch anyway, but allow dropping older ones.
canDrop = batchTs <= ts
kept++
return true
})
if err != nil {
break
}
}
dur := time.Since(start)
if err != nil {
bc.log.Error("failed to flush transfer data GC changeset", zap.Duration("time", dur), zap.Error(err))
} else {
bc.log.Info("finished transfer data garbage collection",
zap.Int64("removed", removed),
zap.Int64("kept", kept),
zap.Duration("time", dur))
}
return dur
}
// notificationDispatcher manages subscription to events and broadcasts new events.
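// A subscription sketch (illustrative, package-internal): a typed channel sent
// to subCh starts receiving the corresponding events until it's sent to
// unsubCh, e.g.
//
//	ch := make(chan *block.Block)
//	bc.subCh <- ch  // Start receiving blocks.
//	b := <-ch       // The next accepted block.
//	bc.unsubCh <- ch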
func (bc *Blockchain) notificationDispatcher() {
var (
// These are just sets of subscribers, though modelled as maps
// for ease of management (not a lot of subscriptions are really
// expected, but maps are convenient for adding/deleting elements).
blockFeed = make(map[chan *block.Block]bool)
headerFeed = make(map[chan *block.Header]bool)
txFeed = make(map[chan *transaction.Transaction]bool)
notificationFeed = make(map[chan *state.ContainedNotificationEvent]bool)
executionFeed = make(map[chan *state.AppExecResult]bool)
)
for {
select {
case <-bc.stopCh:
return
case sub := <-bc.subCh:
switch ch := sub.(type) {
case chan *block.Header:
headerFeed[ch] = true
case chan *block.Block:
blockFeed[ch] = true
case chan *transaction.Transaction:
txFeed[ch] = true
case chan *state.ContainedNotificationEvent:
notificationFeed[ch] = true
case chan *state.AppExecResult:
executionFeed[ch] = true
default:
panic(fmt.Sprintf("bad subscription: %T", sub))
}
case unsub := <-bc.unsubCh:
switch ch := unsub.(type) {
case chan *block.Header:
delete(headerFeed, ch)
case chan *block.Block:
delete(blockFeed, ch)
case chan *transaction.Transaction:
delete(txFeed, ch)
case chan *state.ContainedNotificationEvent:
delete(notificationFeed, ch)
case chan *state.AppExecResult:
delete(executionFeed, ch)
default:
panic(fmt.Sprintf("bad unsubscription: %T", unsub))
}
case event := <-bc.events:
// We don't want to waste time looping through transactions when there are no
// subscribers.
if len(txFeed) != 0 || len(notificationFeed) != 0 || len(executionFeed) != 0 {
aer := event.appExecResults[0]
if !aer.Container.Equals(event.block.Hash()) {
panic("inconsistent application execution results")
}
for ch := range executionFeed {
ch <- aer
}
for i := range aer.Events {
for ch := range notificationFeed {
ch <- &state.ContainedNotificationEvent{
Container: aer.Container,
NotificationEvent: aer.Events[i],
}
}
}
aerIdx := 1
for _, tx := range event.block.Transactions {
aer := event.appExecResults[aerIdx]
if !aer.Container.Equals(tx.Hash()) {
panic("inconsistent application execution results")
}
aerIdx++
for ch := range executionFeed {
ch <- aer
}
if aer.VMState == vmstate.Halt {
for i := range aer.Events {
for ch := range notificationFeed {
ch <- &state.ContainedNotificationEvent{
Container: aer.Container,
NotificationEvent: aer.Events[i],
}
}
}
}
for ch := range txFeed {
ch <- tx
}
}
aer = event.appExecResults[aerIdx]
if !aer.Container.Equals(event.block.Hash()) {
panic("inconsistent application execution results")
}
for ch := range executionFeed {
ch <- aer
}
for i := range aer.Events {
for ch := range notificationFeed {
ch <- &state.ContainedNotificationEvent{
Container: aer.Container,
NotificationEvent: aer.Events[i],
}
}
}
}
for ch := range headerFeed {
ch <- &event.block.Header
}
for ch := range blockFeed {
ch <- event.block
}
}
}
}
// Close stops Blockchain's internal loop, syncs changes to persistent storage
// and closes it. The Blockchain is no longer functional after the call to Close.
func (bc *Blockchain) Close() {
// If there is a block addition in progress, wait for it to finish and
// don't allow new ones.
bc.addLock.Lock()
close(bc.stopCh)
<-bc.runToExitCh
bc.addLock.Unlock()
_ = bc.log.Sync()
}
// AddBlock accepts a successive block for the Blockchain, verifies it and
// stores it internally. Eventually it will be persisted to the backing storage.
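// Only the next block in sequence is accepted: any other index fails with an
// error wrapping ErrInvalidBlockIndex, so a lagging caller has to fetch and
// add the intermediate blocks first.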
func (bc *Blockchain) AddBlock(block *block.Block) error {
bc.addLock.Lock()
defer bc.addLock.Unlock()
var mp *mempool.Pool
expectedHeight := bc.BlockHeight() + 1
if expectedHeight != block.Index {
return fmt.Errorf("expected %d, got %d: %w", expectedHeight, block.Index, ErrInvalidBlockIndex)
}
if bc.config.StateRootInHeader != block.StateRootEnabled {
return fmt.Errorf("%w: %v != %v",
ErrHdrStateRootSetting, bc.config.StateRootInHeader, block.StateRootEnabled)
}
if block.Index == bc.HeaderHeight()+1 {
err := bc.addHeaders(!bc.config.SkipBlockVerification, &block.Header)
if err != nil {
return err
}
}
if !bc.config.SkipBlockVerification {
merkle := block.ComputeMerkleRoot()
if !block.MerkleRoot.Equals(merkle) {
return errors.New("invalid block: MerkleRoot mismatch")
}
mp = mempool.New(len(block.Transactions), 0, false, nil)
for _, tx := range block.Transactions {
var err error
// Transactions are verified before adding them
// into the pool, so there is no point in doing
// it again even if we're verifying in-block transactions.
if bc.memPool.ContainsKey(tx.Hash()) {
err = mp.Add(tx, bc)
if err == nil {
continue
}
} else {
err = bc.verifyAndPoolTx(tx, mp, bc)
}
if err != nil {
if bc.config.VerifyTransactions {
return fmt.Errorf("transaction %s failed to verify: %w", tx.Hash().StringLE(), err)
}
bc.log.Warn(fmt.Sprintf("transaction %s failed to verify: %s", tx.Hash().StringLE(), err))
}
}
}
return bc.storeBlock(block, mp)
}
// AddHeaders processes the given headers and adds them to the
// HeaderHashList. It expects headers to be sorted by index.
func (bc *Blockchain) AddHeaders(headers ...*block.Header) error {
return bc.addHeaders(!bc.config.SkipBlockVerification, headers...)
}
// addHeaders is an internal implementation of AddHeaders (the `verify`
// parameter tells it whether to verify the given headers).
func (bc *Blockchain) addHeaders(verify bool, headers ...*block.Header) error {
var (
start = time.Now()
err error
)
if len(headers) > 0 {
var i int
curHeight := bc.HeaderHeight()
for i = range headers {
if headers[i].Index > curHeight {
break
}
}
headers = headers[i:]
}
if len(headers) == 0 {
return nil
} else if verify {
// Verify that the chain of the headers is consistent.
var lastHeader *block.Header
if lastHeader, err = bc.GetHeader(headers[0].PrevHash); err != nil {
return fmt.Errorf("previous header was not found: %w", err)
}
for _, h := range headers {
if err = bc.verifyHeader(h, lastHeader); err != nil {
return err
}
lastHeader = h
}
}
res := bc.HeaderHashes.addHeaders(headers...)
if res == nil {
bc.log.Debug("done processing headers",
zap.Uint32("headerIndex", bc.HeaderHeight()),
zap.Uint32("blockHeight", bc.BlockHeight()),
zap.Duration("took", time.Since(start)))
}
return res
}
// GetStateRoot returns state root for the given height.
func (bc *Blockchain) GetStateRoot(height uint32) (*state.MPTRoot, error) {
return bc.stateRoot.GetStateRoot(height)
}
// GetStateModule returns state root service instance.
func (bc *Blockchain) GetStateModule() StateRoot {
return bc.stateRoot
}
// GetStateSyncModule returns new state sync service instance.
func (bc *Blockchain) GetStateSyncModule() *statesync.Module {
return statesync.NewModule(bc, bc.stateRoot, bc.log, bc.dao, bc.jumpToState)
}
// storeBlock performs a chain update using the given block: it executes all
// transactions with all appropriate side-effects and updates the Blockchain
// state. This is the only way to change the Blockchain state.
func (bc *Blockchain) storeBlock(block *block.Block, txpool *mempool.Pool) error {
var (
cache = bc.dao.GetPrivate()
aerCache = bc.dao.GetPrivate()
appExecResults = make([]*state.AppExecResult, 0, 2+len(block.Transactions))
aerchan = make(chan *state.AppExecResult, len(block.Transactions)/8) // Tested 8 and 4 with no practical difference, but feel free to test more and tune.
aerdone = make(chan error)
)
go func() {
var (
kvcache = aerCache
err error
txCnt int
baer1, baer2 *state.AppExecResult
transCache = make(map[util.Uint160]transferData)
)
kvcache.StoreAsCurrentBlock(block)
if bc.config.Ledger.RemoveUntraceableBlocks {
var start, stop uint32
if bc.config.P2PStateExchangeExtensions {
// remove batch of old blocks starting from P2-MaxTraceableBlocks-StateSyncInterval up to P2-MaxTraceableBlocks
if block.Index >= 2*uint32(bc.config.StateSyncInterval) &&
block.Index >= uint32(bc.config.StateSyncInterval)+bc.config.MaxTraceableBlocks && // check this in case if MaxTraceableBlocks>StateSyncInterval
int(block.Index)%bc.config.StateSyncInterval == 0 {
stop = block.Index - uint32(bc.config.StateSyncInterval) - bc.config.MaxTraceableBlocks
start = stop - min(stop, uint32(bc.config.StateSyncInterval))
}
} else if block.Index > bc.config.MaxTraceableBlocks {
start = block.Index - bc.config.MaxTraceableBlocks // is at least 1
stop = start + 1
}
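// E.g. without P2PStateExchangeExtensions and MaxTraceableBlocks=2102400,
// accepting block 2102500 gives start=100 and stop=101, i.e. exactly one
// stale block is removed per accepted block.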
for index := start; index < stop; index++ {
err := kvcache.DeleteBlock(bc.GetHeaderHash(index))
if err != nil {
bc.log.Warn("error while removing old block",
zap.Uint32("index", index),
zap.Error(err))
}
}
}
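// AERs arrive on aerchan in execution order: a block-level result first
// (baer1), then one result per transaction, then a second block-level result
// (baer2), which is exactly what the loop below relies on.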
for aer := range aerchan {
if aer.Container == block.Hash() {
if baer1 == nil {
baer1 = aer
} else {
baer2 = aer
}
} else {
err = kvcache.StoreAsTransaction(block.Transactions[txCnt], block.Index, aer)
txCnt++
}
if err != nil {
err = fmt.Errorf("failed to store exec result: %w", err)
break
}
if aer.Execution.VMState == vmstate.Halt {
for j := range aer.Execution.Events {
bc.handleNotification(&aer.Execution.Events[j], kvcache, transCache, block, aer.Container)
}
}
}
if err != nil {
aerdone <- err
return
}
if err := kvcache.StoreAsBlock(block, baer1, baer2); err != nil {
aerdone <- err
return
}
for acc, trData := range transCache {
err = kvcache.PutTokenTransferInfo(acc, &trData.Info)
if err != nil {
aerdone <- err
return
}
if !trData.Info.NewNEP11Batch {
kvcache.PutTokenTransferLog(acc, trData.Info.NextNEP11NewestTimestamp, trData.Info.NextNEP11Batch, true, &trData.Log11)
}
if !trData.Info.NewNEP17Batch {
kvcache.PutTokenTransferLog(acc, trData.Info.NextNEP17NewestTimestamp, trData.Info.NextNEP17Batch, false, &trData.Log17)
}
}
close(aerdone)
}()
_ = cache.GetItemCtx() // Prime serialization context cache (it'll be reused by upper layer DAOs).
aer, v, err := bc.runPersist(bc.contracts.GetPersistScript(), block, cache, trigger.OnPersist, nil)
if err != nil {
// Release goroutines, don't care about errors, we already have one.
close(aerchan)
<-aerdone
return fmt.Errorf("onPersist failed: %w", err)
}
appExecResults = append(appExecResults, aer)
aerchan <- aer
for _, tx := range block.Transactions {
systemInterop := bc.newInteropContext(trigger.Application, cache, block, tx)
systemInterop.ReuseVM(v)
v.LoadScriptWithFlags(tx.Script, callflag.All)
v.GasLimit = tx.SystemFee
err := systemInterop.Exec()
var faultException string
if !v.HasFailed() {
_, err := systemInterop.DAO.Persist()
if err != nil {
// Release goroutines, don't care about errors, we already have one.
close(aerchan)
<-aerdone
return fmt.Errorf("failed to persist invocation results: %w", err)
}
} else {
bc.log.Warn("contract invocation failed",
zap.String("tx", tx.Hash().StringLE()),
zap.Uint32("block", block.Index),
zap.Error(err))
faultException = err.Error()
}
aer := &state.AppExecResult{
Container: tx.Hash(),
Execution: state.Execution{
Trigger: trigger.Application,
VMState: v.State(),
GasConsumed: v.GasConsumed(),
Stack: v.Estack().ToArray(),
Events: systemInterop.Notifications,
FaultException: faultException,
},
}
appExecResults = append(appExecResults, aer)
aerchan <- aer
}
aer, _, err = bc.runPersist(bc.contracts.GetPostPersistScript(), block, cache, trigger.PostPersist, v)
if err != nil {
// Release goroutines, don't care about errors, we already have one.
close(aerchan)
<-aerdone
return fmt.Errorf("postPersist failed: %w", err)
}
appExecResults = append(appExecResults, aer)
aerchan <- aer
close(aerchan)
b := mpt.MapToMPTBatch(cache.Store.GetStorageChanges())
mpt, sr, err := bc.stateRoot.AddMPTBatch(block.Index, b, cache.Store)
if err != nil {
// Release goroutines, don't care about errors, we already have one.
<-aerdone
		// Here the MPT can be left in a half-applied state. However, if this
		// error occurs, it's a bug somewhere in the code, because the
		// changes applied are the ones from HALTed transactions.
return fmt.Errorf("error while trying to apply MPT changes: %w", err)
}
if bc.config.StateRootInHeader && bc.HeaderHeight() > sr.Index {
h, err := bc.GetHeader(bc.GetHeaderHash(sr.Index + 1))
if err != nil {
err = fmt.Errorf("failed to get next header: %w", err)
} else if h.PrevStateRoot != sr.Root {
err = fmt.Errorf("local stateroot and next header's PrevStateRoot mismatch: %s vs %s", sr.Root.StringBE(), h.PrevStateRoot.StringBE())
}
if err != nil {
// Release goroutines, don't care about errors, we already have one.
<-aerdone
return err
}
}
if bc.config.Ledger.SaveStorageBatch {
bc.lastBatch = cache.GetBatch()
}
// Every persist cycle we also compact our in-memory MPT. It's flushed
// already in AddMPTBatch, so collapsing it is safe.
persistedHeight := atomic.LoadUint32(&bc.persistedHeight)
if persistedHeight == block.Index-1 {
		// A depth of 10 is roughly estimated to fit the remaining trie into 1M of memory.
mpt.Collapse(10)
}
aererr := <-aerdone
if aererr != nil {
return aererr
}
bc.lock.Lock()
_, err = aerCache.Persist()
if err != nil {
bc.lock.Unlock()
return err
}
_, err = cache.Persist()
if err != nil {
bc.lock.Unlock()
return err
}
mpt.Store = bc.dao.Store
bc.stateRoot.UpdateCurrentLocal(mpt, sr)
bc.topBlock.Store(block)
atomic.StoreUint32(&bc.blockHeight, block.Index)
bc.memPool.RemoveStale(func(tx *transaction.Transaction) bool { return bc.IsTxStillRelevant(tx, txpool, false) }, bc)
for _, f := range bc.postBlock {
f(bc.IsTxStillRelevant, txpool, block)
}
if err := bc.updateExtensibleWhitelist(block.Index); err != nil {
bc.lock.Unlock()
return err
}
bc.lock.Unlock()
updateBlockHeightMetric(block.Index)
	// The genesis block is stored when the Blockchain is not yet running,
	// so there is no one to read this event. And it doesn't make much
	// sense as an event anyway.
if block.Index != 0 {
bc.events <- bcEvent{block, appExecResults}
}
return nil
}
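
// A minimal, self-contained sketch (illustrative only, not part of the
// package API) of the aerchan/aerdone handshake storeBlock uses above: the
// main routine streams results into a buffered channel, a single consumer
// goroutine processes them, and closing the channel followed by a read from
// the done channel is the final synchronization point. On an error the
// consumer keeps draining the channel, so the producer can never block.
func exampleAERPipeline(process func(int) error) error {
	aerchan := make(chan int, 8) // Buffered so the producer rarely stalls.
	aerdone := make(chan error)
	go func() {
		var err error
		for item := range aerchan {
			if err == nil {
				err = process(item)
			}
		}
		aerdone <- err // Reported once aerchan is closed and drained.
	}()
	for i := 0; i < 10; i++ {
		aerchan <- i
	}
	close(aerchan)   // No more results.
	return <-aerdone // Mirrors `aererr := <-aerdone` above.
}
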
func (bc *Blockchain) updateExtensibleWhitelist(height uint32) error {
updateCommittee := bc.config.ShouldUpdateCommitteeAt(height)
stateVals, sh, err := bc.contracts.Designate.GetDesignatedByRole(bc.dao, noderoles.StateValidator, height)
if err != nil {
return err
}
if bc.extensible.Load() != nil && !updateCommittee && sh != height {
return nil
}
newList := []util.Uint160{bc.contracts.NEO.GetCommitteeAddress(bc.dao)}
nextVals := bc.contracts.NEO.GetNextBlockValidatorsInternal(bc.dao)
script, err := smartcontract.CreateDefaultMultiSigRedeemScript(nextVals)
if err != nil {
return err
}
newList = append(newList, hash.Hash160(script))
bc.updateExtensibleList(&newList, nextVals)
if len(stateVals) > 0 {
h, err := bc.contracts.Designate.GetLastDesignatedHash(bc.dao, noderoles.StateValidator)
if err != nil {
return err
}
newList = append(newList, h)
bc.updateExtensibleList(&newList, stateVals)
}
sort.Slice(newList, func(i, j int) bool {
return newList[i].Less(newList[j])
})
bc.extensible.Store(newList)
return nil
}
func (bc *Blockchain) updateExtensibleList(s *[]util.Uint160, pubs keys.PublicKeys) {
for _, pub := range pubs {
*s = append(*s, pub.GetScriptHash())
}
}
// IsExtensibleAllowed determines if script hash is allowed to send extensible payloads.
func (bc *Blockchain) IsExtensibleAllowed(u util.Uint160) bool {
us := bc.extensible.Load().([]util.Uint160)
n := sort.Search(len(us), func(i int) bool { return !us[i].Less(u) })
	return n < len(us) && us[n].Equals(u) // Allowed only on an exact match.
}
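
// IsExtensibleAllowed uses the standard sort.Search membership idiom: on a
// sorted slice it returns the first index whose element is not less than the
// needle, so the needle is present only when that index is in range and the
// element there compares equal. A minimal sketch of the same idiom with
// plain integers (illustrative only):
func sortedContains(xs []int, x int) bool {
	n := sort.Search(len(xs), func(i int) bool { return xs[i] >= x })
	return n < len(xs) && xs[n] == x
}
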
func (bc *Blockchain) runPersist(script []byte, block *block.Block, cache *dao.Simple, trig trigger.Type, v *vm.VM) (*state.AppExecResult, *vm.VM, error) {
systemInterop := bc.newInteropContext(trig, cache, block, nil)
if v == nil {
v = systemInterop.SpawnVM()
} else {
systemInterop.ReuseVM(v)
}
v.LoadScriptWithFlags(script, callflag.All)
if err := systemInterop.Exec(); err != nil {
return nil, v, fmt.Errorf("VM has failed: %w", err)
} else if _, err := systemInterop.DAO.Persist(); err != nil {
return nil, v, fmt.Errorf("can't save changes: %w", err)
}
return &state.AppExecResult{
Container: block.Hash(), // application logs can be retrieved by block hash
Execution: state.Execution{
Trigger: trig,
VMState: v.State(),
GasConsumed: v.GasConsumed(),
Stack: v.Estack().ToArray(),
Events: systemInterop.Notifications,
},
}, v, nil
}
func (bc *Blockchain) handleNotification(note *state.NotificationEvent, d *dao.Simple,
transCache map[util.Uint160]transferData, b *block.Block, h util.Uint256) {
if note.Name != "Transfer" {
return
}
arr, ok := note.Item.Value().([]stackitem.Item)
if !ok || !(len(arr) == 3 || len(arr) == 4) {
return
}
from, err := parseUint160(arr[0])
if err != nil {
return
}
to, err := parseUint160(arr[1])
if err != nil {
return
}
amount, err := arr[2].TryInteger()
if err != nil {
return
}
var id []byte
if len(arr) == 4 {
id, err = arr[3].TryBytes()
if err != nil || len(id) > limits.MaxStorageKeyLen {
return
}
}
bc.processTokenTransfer(d, transCache, h, b, note.ScriptHash, from, to, amount, id)
}
func parseUint160(itm stackitem.Item) (util.Uint160, error) {
_, ok := itm.(stackitem.Null) // Minting or burning.
if ok {
return util.Uint160{}, nil
}
bytes, err := itm.TryBytes()
if err != nil {
return util.Uint160{}, err
}
return util.Uint160DecodeBytesBE(bytes)
}
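
// A short, illustrative sketch (not part of the original file) of the
// notification shape handleNotification expects: a "Transfer" event carries
// [from, to, amount] with an optional fourth tokenID element for NEP-11,
// and Null in the from/to position denotes minting/burning, which
// parseUint160 maps to the zero Uint160.
func exampleParseTransfer() {
	note := stackitem.NewArray([]stackitem.Item{
		stackitem.Null{}, // from: Null means this transfer is a mint.
		stackitem.NewByteArray(make([]byte, util.Uint160Size)), // to.
		stackitem.NewBigInteger(big.NewInt(42)),                // amount.
	})
	arr, _ := note.Value().([]stackitem.Item)
	from, _ := parseUint160(arr[0]) // Zero Uint160 for Null.
	to, _ := parseUint160(arr[1])
	amount, _ := arr[2].TryInteger()
	_, _, _ = from, to, amount
}
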
func (bc *Blockchain) processTokenTransfer(cache *dao.Simple, transCache map[util.Uint160]transferData,
h util.Uint256, b *block.Block, sc util.Uint160, from util.Uint160, to util.Uint160,
amount *big.Int, tokenID []byte) {
var id int32
nativeContract := bc.contracts.ByHash(sc)
if nativeContract != nil {
id = nativeContract.Metadata().ID
} else {
assetContract, err := native.GetContract(cache, sc)
if err != nil {
return
}
id = assetContract.ID
}
var transfer io.Serializable
var nep17xfer *state.NEP17Transfer
var isNEP11 = (tokenID != nil)
if !isNEP11 {
nep17xfer = &state.NEP17Transfer{
Asset: id,
Amount: amount,
Block: b.Index,
Counterparty: to,
Timestamp: b.Timestamp,
Tx: h,
}
transfer = nep17xfer
} else {
nep11xfer := &state.NEP11Transfer{
NEP17Transfer: state.NEP17Transfer{
Asset: id,
Amount: amount,
Block: b.Index,
Counterparty: to,
Timestamp: b.Timestamp,
Tx: h,
},
ID: tokenID,
}
transfer = nep11xfer
nep17xfer = &nep11xfer.NEP17Transfer
}
if !from.Equals(util.Uint160{}) {
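		// The sender's record is stored with a negated amount; negate in
		// place for the write and flip the sign back for the receiver's
		// record below.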
_ = nep17xfer.Amount.Neg(nep17xfer.Amount)
err := appendTokenTransfer(cache, transCache, from, transfer, id, b.Index, b.Timestamp, isNEP11)
_ = nep17xfer.Amount.Neg(nep17xfer.Amount)
if err != nil {
return
}
}
if !to.Equals(util.Uint160{}) {
nep17xfer.Counterparty = from
_ = appendTokenTransfer(cache, transCache, to, transfer, id, b.Index, b.Timestamp, isNEP11) // Nothing useful we can do.
}
}
func appendTokenTransfer(cache *dao.Simple, transCache map[util.Uint160]transferData, addr util.Uint160, transfer io.Serializable,
token int32, bIndex uint32, bTimestamp uint64, isNEP11 bool) error {
transferData, ok := transCache[addr]
if !ok {
balances, err := cache.GetTokenTransferInfo(addr)
if err != nil {
return err
}
if !balances.NewNEP11Batch {
trLog, err := cache.GetTokenTransferLog(addr, balances.NextNEP11NewestTimestamp, balances.NextNEP11Batch, true)
if err != nil {
return err
}
transferData.Log11 = *trLog
}
if !balances.NewNEP17Batch {
trLog, err := cache.GetTokenTransferLog(addr, balances.NextNEP17NewestTimestamp, balances.NextNEP17Batch, false)
if err != nil {
return err
}
transferData.Log17 = *trLog
}
transferData.Info = *balances
}
var (
log *state.TokenTransferLog
nextBatch uint32
currTimestamp uint64
)
if !isNEP11 {
log = &transferData.Log17
nextBatch = transferData.Info.NextNEP17Batch
currTimestamp = transferData.Info.NextNEP17NewestTimestamp
} else {
log = &transferData.Log11
nextBatch = transferData.Info.NextNEP11Batch
currTimestamp = transferData.Info.NextNEP11NewestTimestamp
}
err := log.Append(transfer)
if err != nil {
return err
}
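// Once the in-memory log accumulates TokenTransferBatchSize transfers, it's
// flushed to storage and reset, and appendTokenTransferInfo below advances the
// batch counters for the next one.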
newBatch := log.Size() >= state.TokenTransferBatchSize
if newBatch {
cache.PutTokenTransferLog(addr, currTimestamp, nextBatch, isNEP11, log)
// Put makes a copy of it anyway.
log.Reset()
}
appendTokenTransferInfo(&transferData.Info, token, bIndex, bTimestamp, isNEP11, newBatch)
transCache[addr] = transferData
return nil
}
// ForEachNEP17Transfer executes f for each NEP-17 transfer in the log, starting
// from the transfer with the newest timestamp and moving towards the oldest one.
// Iteration continues until f returns false. The last non-nil error is returned.
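//
// A minimal usage sketch (acc and newestTS are caller-supplied, illustrative
// values; only the callback signature below is part of this API):
//
//	_ = bc.ForEachNEP17Transfer(acc, newestTS, func(tr *state.NEP17Transfer) (bool, error) {
//		// Inspect tr here; return false to stop the iteration early.
//		return true, nil
//	})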
func (bc *Blockchain) ForEachNEP17Transfer(acc util.Uint160, newestTimestamp uint64, f func(*state.NEP17Transfer) (bool, error)) error {
return bc.dao.SeekNEP17TransferLog(acc, newestTimestamp, f)
}
// ForEachNEP11Transfer executes f for each NEP-11 transfer in the log, starting
// from the transfer with the newest timestamp and moving towards the oldest one.
// Iteration continues until f returns false. The last non-nil error is returned.
func (bc *Blockchain) ForEachNEP11Transfer(acc util.Uint160, newestTimestamp uint64, f func(*state.NEP11Transfer) (bool, error)) error {
return bc.dao.SeekNEP11TransferLog(acc, newestTimestamp, f)
}
// GetNEP17Contracts returns the list of deployed NEP-17 contracts.
func (bc *Blockchain) GetNEP17Contracts() []util.Uint160 {
return bc.contracts.Management.GetNEP17Contracts(bc.dao)
}
// GetNEP11Contracts returns the list of deployed NEP-11 contracts.
func (bc *Blockchain) GetNEP11Contracts() []util.Uint160 {
return bc.contracts.Management.GetNEP11Contracts(bc.dao)
}
// GetTokenLastUpdated returns a set of contract IDs with the corresponding
// last-updated block indexes. For an empty account, the latest stored state
// synchronisation point is returned under the math.MinInt32 key.
func (bc *Blockchain) GetTokenLastUpdated(acc util.Uint160) (map[int32]uint32, error) {
info, err := bc.dao.GetTokenTransferInfo(acc)
if err != nil {
return nil, err
}
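// With P2P state exchange extensions and untraceable block removal enabled,
// the transfer info can lack the NEO entry even though the account holds NEO
// (the balance may come from synchronised state rather than from processed
// transfers), so fall back to the native NEO contract for its last-updated height.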
if bc.config.P2PStateExchangeExtensions && bc.config.Ledger.RemoveUntraceableBlocks {
if _, ok := info.LastUpdated[bc.contracts.NEO.ID]; !ok {
nBalance, lub := bc.contracts.NEO.BalanceOf(bc.dao, acc)
if nBalance.Sign() != 0 {
info.LastUpdated[bc.contracts.NEO.ID] = lub
}
}
}
stateSyncPoint, err := bc.dao.GetStateSyncPoint()
if err == nil {
info.LastUpdated[math.MinInt32] = stateSyncPoint
}
return info.LastUpdated, nil
}
// GetUtilityTokenBalance returns the utility token (GAS) balance for the given account.
func (bc *Blockchain) GetUtilityTokenBalance(acc util.Uint160) *big.Int {
bs := bc.contracts.GAS.BalanceOf(bc.dao, acc)
if bs == nil {
return big.NewInt(0)
}
return bs
}
// GetGoverningTokenBalance returns governing token (NEO) balance and the height
// of the last balance change for the account.
func (bc *Blockchain) GetGoverningTokenBalance(acc util.Uint160) (*big.Int, uint32) {
return bc.contracts.NEO.BalanceOf(bc.dao, acc)
}
// GetNotaryBalance returns Notary deposit amount for the specified account.
func (bc *Blockchain) GetNotaryBalance(acc util.Uint160) *big.Int {
return bc.contracts.Notary.BalanceOf(bc.dao, acc)
}
// GetNotaryServiceFeePerKey returns the NotaryAssisted transaction attribute
// fee per key, i.e. the reward paid to designated notary nodes per notary
// request key.
func (bc *Blockchain) GetNotaryServiceFeePerKey() int64 {
return bc.contracts.Policy.GetAttributeFeeInternal(bc.dao, transaction.NotaryAssistedT)
}
// GetNotaryContractScriptHash returns Notary native contract hash.
func (bc *Blockchain) GetNotaryContractScriptHash() util.Uint160 {
if bc.P2PSigExtensionsEnabled() {
return bc.contracts.Notary.Hash
}
return util.Uint160{}
}
// GetNotaryDepositExpiration returns Notary deposit expiration height for the specified account.
func (bc *Blockchain) GetNotaryDepositExpiration(acc util.Uint160) uint32 {
return bc.contracts.Notary.ExpirationOf(bc.dao, acc)
}
// LastBatch returns the last persisted storage batch.
func (bc *Blockchain) LastBatch() *storage.MemBatch {
return bc.lastBatch
}
// persist flushes current in-memory Store contents to the persistent storage.
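// Depending on isSync it uses either dao.PersistSync (blocking flush) or the
// regular dao.Persist; see their documentation for the exact locking semantics.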
func (bc *Blockchain) persist(isSync bool) (time.Duration, error) {
var (
start = time.Now()
duration time.Duration
persisted int
err error
)
if isSync {
persisted, err = bc.dao.PersistSync()
} else {
persisted, err = bc.dao.Persist()
}
if err != nil {
return 0, err
}
if persisted > 0 {
bHeight, err := bc.persistent.GetCurrentBlockHeight()
if err != nil {
return 0, err
}
oldHeight := atomic.SwapUint32(&bc.persistedHeight, bHeight)
diff := bHeight - oldHeight
storedHeaderHeight, _, err := bc.persistent.GetCurrentHeaderHeight()
if err != nil {
return 0, err
}
duration = time.Since(start)
bc.log.Info("persisted to disk",
zap.Uint32("blocks", diff),
zap.Int("keys", persisted),
zap.Uint32("headerHeight", storedHeaderHeight),
zap.Uint32("blockHeight", bHeight),
zap.Duration("took", duration))
// update monitoring metrics.
updatePersistedHeightMetric(bHeight)
}
return duration, nil
}
// GetTransaction returns a transaction and its height by the given hash. The height is math.MaxUint32 if the transaction is in the mempool.
func (bc *Blockchain) GetTransaction(hash util.Uint256) (*transaction.Transaction, uint32, error) {
if tx, ok := bc.memPool.TryGetValue(hash); ok {
return tx, math.MaxUint32, nil // the height is not defined for mempool transactions.
}
return bc.dao.GetTransaction(hash)
}
// GetAppExecResults returns application execution results with the specified trigger by the given
// tx hash or block hash.
func (bc *Blockchain) GetAppExecResults(hash util.Uint256, trig trigger.Type) ([]state.AppExecResult, error) {
return bc.dao.GetAppExecResults(hash, trig)
}
// GetStorageItem returns an item from storage.
func (bc *Blockchain) GetStorageItem(id int32, key []byte) state.StorageItem {
return bc.dao.GetStorageItem(id, key)
}
// SeekStorage performs a seek operation over the contract's storage. The prefix is trimmed from the resulting pairs' keys.
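//
// A minimal usage sketch (the contract ID and prefix are illustrative):
//
//	bc.SeekStorage(contractID, []byte{0x01}, func(k, v []byte) bool {
//		// k arrives with the 0x01 prefix already trimmed.
//		return true // continue iteration
//	})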
func (bc *Blockchain) SeekStorage(id int32, prefix []byte, cont func(k, v []byte) bool) {
bc.dao.Seek(id, storage.SeekRange{Prefix: prefix}, cont)
}
// GetBlock returns a Block by the given hash.
func (bc *Blockchain) GetBlock(hash util.Uint256) (*block.Block, error) {
topBlock := bc.topBlock.Load()
if topBlock != nil {
tb := topBlock.(*block.Block)
if tb.Hash().Equals(hash) {
return tb, nil
}
}
block, err := bc.dao.GetBlock(hash)
if err != nil {
return nil, err
}
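// A non-zero MerkleRoot with no transactions means only the header was stored
// for this hash, so there is no full block to return.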
if !block.MerkleRoot.Equals(util.Uint256{}) && len(block.Transactions) == 0 {
return nil, errors.New("only header is found")
}
for _, tx := range block.Transactions {
stx, _, err := bc.dao.GetTransaction(tx.Hash())
if err != nil {
return nil, err
}
*tx = *stx
}
return block, nil
}
// GetHeader returns the block header identified by the given hash.
func (bc *Blockchain) GetHeader(hash util.Uint256) (*block.Header, error) {
topBlock := bc.topBlock.Load()
if topBlock != nil {
tb := topBlock.(*block.Block)
if tb.Hash().Equals(hash) {
return &tb.Header, nil
}
}
block, err := bc.dao.GetBlock(hash)
if err != nil {
return nil, err
}
return &block.Header, nil
}
// HasBlock returns true if the blockchain contains the given
// block hash.
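// Headers that are known but not yet accompanied by a persisted block (that is,
// with an index above the current block height) don't count.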
func (bc *Blockchain) HasBlock(hash util.Uint256) bool {
if bc.HeaderHashes.haveRecentHash(hash, bc.BlockHeight()) {
return true
}
if header, err := bc.GetHeader(hash); err == nil {
return header.Index <= bc.BlockHeight()
}
return false
}
// CurrentBlockHash returns the highest processed block hash.
func (bc *Blockchain) CurrentBlockHash() util.Uint256 {
topBlock := bc.topBlock.Load()
if topBlock != nil {
tb := topBlock.(*block.Block)
return tb.Hash()
}
return bc.GetHeaderHash(bc.BlockHeight())
}
// BlockHeight returns the height/index of the highest block.
func (bc *Blockchain) BlockHeight() uint32 {
return atomic.LoadUint32(&bc.blockHeight)
}
// GetContractState returns contract by its script hash.
func (bc *Blockchain) GetContractState(hash util.Uint160) *state.Contract {
contract, err := native.GetContract(bc.dao, hash)
if contract == nil && !errors.Is(err, storage.ErrKeyNotFound) {
bc.log.Warn("failed to get contract state", zap.Error(err))
}
return contract
}
// GetContractScriptHash returns contract script hash by its ID.
func (bc *Blockchain) GetContractScriptHash(id int32) (util.Uint160, error) {
return native.GetContractScriptHash(bc.dao, id)
}
// GetNativeContractScriptHash returns native contract script hash by its name.
func (bc *Blockchain) GetNativeContractScriptHash(name string) (util.Uint160, error) {
c := bc.contracts.ByName(name)
if c != nil {
return c.Metadata().Hash, nil
}
return util.Uint160{}, errors.New("Unknown native contract")
}
// GetNatives returns list of native contracts.
func (bc *Blockchain) GetNatives() []state.Contract {
res := make([]state.Contract, 0, len(bc.contracts.Contracts))
current := bc.getCurrentHF()
for _, c := range bc.contracts.Contracts {
activeIn := c.ActiveIn()
if !(activeIn == nil || activeIn.Cmp(current) <= 0) {
continue
}
st := bc.GetContractState(c.Metadata().Hash)
if st != nil { // Getting nil state for a native contract should never happen, but better safe than sorry.
res = append(res, *st)
}
}
return res
}
// GetConfig returns the config stored in the blockchain.
func (bc *Blockchain) GetConfig() config.Blockchain {
return bc.config
}
// SubscribeForBlocks adds given channel to new block event broadcasting, so when
// there is a new block added to the chain you'll receive it via this channel.
// Make sure it's read from regularly as not reading these events might affect
// other Blockchain functions. Make sure you're not changing the received blocks,
// as it may affect the functionality of Blockchain and other subscribers.
func (bc *Blockchain) SubscribeForBlocks(ch chan *block.Block) {
bc.subCh <- ch
}
// SubscribeForHeadersOfAddedBlocks adds given channel to new header event broadcasting, so
// when there is a new block added to the chain you'll receive its header via this
// channel. Make sure it's read from regularly as not reading these events might
// affect other Blockchain functions. Make sure you're not changing the received
// headers, as it may affect the functionality of Blockchain and other
// subscribers.
func (bc *Blockchain) SubscribeForHeadersOfAddedBlocks(ch chan *block.Header) {
bc.subCh <- ch
}
// SubscribeForTransactions adds given channel to new transaction event
// broadcasting, so when there is a new transaction added to the chain (in a
// block) you'll receive it via this channel. Make sure it's read from regularly
// as not reading these events might affect other Blockchain functions. Make sure
// you're not changing the received transactions, as it may affect the
// functionality of Blockchain and other subscribers.
func (bc *Blockchain) SubscribeForTransactions(ch chan *transaction.Transaction) {
bc.subCh <- ch
}
// SubscribeForNotifications adds given channel to new notifications event
// broadcasting, so when an in-block transaction execution generates a
// notification you'll receive it via this channel. Only notifications from
// successful transactions are broadcasted, if you're interested in failed
// transactions use SubscribeForExecutions instead. Make sure this channel is
// read from regularly as not reading these events might affect other Blockchain
// functions. Make sure you're not changing the received notification events, as
// it may affect the functionality of Blockchain and other subscribers.
func (bc *Blockchain) SubscribeForNotifications(ch chan *state.ContainedNotificationEvent) {
bc.subCh <- ch
}
// SubscribeForExecutions adds given channel to new transaction execution event
// broadcasting, so when an in-block transaction execution happens you'll receive
// the result of it via this channel. Make sure it's read from regularly as not
// reading these events might affect other Blockchain functions. Make sure you're
// not changing the received execution results, as it may affect the
// functionality of Blockchain and other subscribers.
func (bc *Blockchain) SubscribeForExecutions(ch chan *state.AppExecResult) {
bc.subCh <- ch
}
// UnsubscribeFromBlocks unsubscribes given channel from new block notifications,
// you can close it afterwards. Passing non-subscribed channel is a no-op, but
// the method can read from this channel (discarding any read data).
func (bc *Blockchain) UnsubscribeFromBlocks(ch chan *block.Block) {
unsubloop:
for {
select {
case <-ch:
case bc.unsubCh <- ch:
break unsubloop
}
}
}
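// Illustrative usage sketch (not part of the upstream file): a subscriber
// reads from a buffered channel in its own goroutine and closes the channel
// only after unsubscribing, since Blockchain may keep sending events until
// the unsubscription completes; log is assumed to be the standard logger.
//
//	ch := make(chan *block.Block, 16) // Buffered, to avoid stalling Blockchain.
//	bc.SubscribeForBlocks(ch)
//	go func() {
//		for b := range ch {
//			log.Println("new block", b.Index)
//		}
//	}()
//	// ... on shutdown:
//	bc.UnsubscribeFromBlocks(ch) // May itself discard pending events.
//	close(ch)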
// UnsubscribeFromHeadersOfAddedBlocks unsubscribes given channel from new
// block's header notifications, you can close it afterwards. Passing
// non-subscribed channel is a no-op, but the method can read from this
// channel (discarding any read data).
func (bc *Blockchain) UnsubscribeFromHeadersOfAddedBlocks(ch chan *block.Header) {
unsubloop:
for {
select {
case <-ch:
case bc.unsubCh <- ch:
break unsubloop
}
}
}
// UnsubscribeFromTransactions unsubscribes given channel from new transaction
// notifications, you can close it afterwards. Passing non-subscribed channel is
// a no-op, but the method can read from this channel (discarding any read data).
func (bc *Blockchain) UnsubscribeFromTransactions(ch chan *transaction.Transaction) {
unsubloop:
for {
select {
case <-ch:
case bc.unsubCh <- ch:
break unsubloop
}
}
}
// UnsubscribeFromNotifications unsubscribes given channel from new
// execution-generated notifications, you can close it afterwards. Passing
// non-subscribed channel is a no-op, but the method can read from this channel
// (discarding any read data).
func (bc *Blockchain) UnsubscribeFromNotifications(ch chan *state.ContainedNotificationEvent) {
unsubloop:
for {
select {
case <-ch:
case bc.unsubCh <- ch:
break unsubloop
}
}
}
// UnsubscribeFromExecutions unsubscribes given channel from new execution
// notifications, you can close it afterwards. Passing non-subscribed channel is
// a no-op, but the method can read from this channel (discarding any read data).
func (bc *Blockchain) UnsubscribeFromExecutions(ch chan *state.AppExecResult) {
unsubloop:
for {
select {
case <-ch:
case bc.unsubCh <- ch:
break unsubloop
}
}
}
// CalculateClaimable calculates the amount of GAS generated by owning the
// specified amount of NEO between the specified blocks.
func (bc *Blockchain) CalculateClaimable(acc util.Uint160, endHeight uint32) (*big.Int, error) {
nextBlock, err := bc.getFakeNextBlock(bc.BlockHeight() + 1)
if err != nil {
return nil, err
}
ic := bc.newInteropContext(trigger.Application, bc.dao, nextBlock, nil)
return bc.contracts.NEO.CalculateBonus(ic, acc, endHeight)
}
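// Illustrative sketch (not part of the upstream file), assuming acc holds the
// util.Uint160 script hash of some account; the returned value is expressed
// in the smallest GAS units:
//
//	bonus, err := bc.CalculateClaimable(acc, bc.BlockHeight())
//	if err != nil {
//		return err
//	}
//	fmt.Println("claimable GAS:", bonus)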
// FeePerByte returns transaction network fee per byte.
func (bc *Blockchain) FeePerByte() int64 {
return bc.contracts.Policy.GetFeePerByteInternal(bc.dao)
}
// GetMemPool returns the memory pool of the blockchain.
func (bc *Blockchain) GetMemPool() *mempool.Pool {
return bc.memPool
}
// ApplyPolicyToTxSet applies configured policies to the given transaction set. It
// expects the slice to be ordered by fee and returns a subslice of it.
func (bc *Blockchain) ApplyPolicyToTxSet(txes []*transaction.Transaction) []*transaction.Transaction {
maxTx := bc.config.MaxTransactionsPerBlock
if maxTx != 0 && len(txes) > int(maxTx) {
txes = txes[:maxTx]
}
maxBlockSize := bc.config.MaxBlockSize
maxBlockSysFee := bc.config.MaxBlockSystemFee
oldVC := bc.knownValidatorsCount.Load()
defaultWitness := bc.defaultBlockWitness.Load()
curVC := bc.config.GetNumOfCNs(bc.BlockHeight() + 1)
if oldVC == nil || oldVC != curVC {
m := smartcontract.GetDefaultHonestNodeCount(curVC)
verification, _ := smartcontract.CreateDefaultMultiSigRedeemScript(bc.contracts.NEO.GetNextBlockValidatorsInternal(bc.dao))
defaultWitness = transaction.Witness{
InvocationScript: make([]byte, 66*m),
VerificationScript: verification,
}
bc.knownValidatorsCount.Store(curVC)
bc.defaultBlockWitness.Store(defaultWitness)
}
var (
b = &block.Block{Header: block.Header{Script: defaultWitness.(transaction.Witness)}}
blockSize = uint32(b.GetExpectedBlockSizeWithoutTransactions(len(txes)))
blockSysFee int64
)
for i, tx := range txes {
blockSize += uint32(tx.Size())
blockSysFee += tx.SystemFee
if blockSize > maxBlockSize || blockSysFee > maxBlockSysFee {
txes = txes[:i]
break
}
}
return txes
}
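// Worked example (illustrative): with MaxTransactionsPerBlock = 2 and three
// fee-ordered transactions, the slice is first cut to txes[:2]; then, if the
// running blockSize or cumulative blockSysFee exceeds its limit at index i,
// the slice is cut again to txes[:i], so the returned subslice always fits
// within the block limits:
//
//	subset := bc.ApplyPolicyToTxSet(txes) // len(subset) <= len(txes).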
// Various errors that could be returned upon header verification.
var (
ErrHdrHashMismatch = errors.New("previous header hash doesn't match")
ErrHdrIndexMismatch = errors.New("previous header index doesn't match")
ErrHdrInvalidTimestamp = errors.New("block is not newer than the previous one")
ErrHdrStateRootSetting = errors.New("state root setting mismatch")
ErrHdrInvalidStateRoot = errors.New("state root for previous block is invalid")
)
func (bc *Blockchain) verifyHeader(currHeader, prevHeader *block.Header) error {
if bc.config.StateRootInHeader {
if bc.stateRoot.CurrentLocalHeight() == prevHeader.Index {
if sr := bc.stateRoot.CurrentLocalStateRoot(); currHeader.PrevStateRoot != sr {
return fmt.Errorf("%w: %s != %s",
ErrHdrInvalidStateRoot, currHeader.PrevStateRoot.StringLE(), sr.StringLE())
}
}
}
if prevHeader.Hash() != currHeader.PrevHash {
return ErrHdrHashMismatch
}
if prevHeader.Index+1 != currHeader.Index {
return ErrHdrIndexMismatch
}
if prevHeader.Timestamp >= currHeader.Timestamp {
return ErrHdrInvalidTimestamp
}
return bc.verifyHeaderWitnesses(currHeader, prevHeader)
}
// Various errors that could be returned upon verification.
var (
ErrTxExpired = errors.New("transaction has expired")
ErrInsufficientFunds = errors.New("insufficient funds")
ErrTxSmallNetworkFee = errors.New("too small network fee")
ErrTxTooBig = errors.New("too big transaction")
ErrMemPoolConflict = errors.New("invalid transaction due to conflicts with the memory pool")
ErrInvalidScript = errors.New("invalid script")
ErrInvalidAttribute = errors.New("invalid attribute")
)
// verifyAndPoolTx verifies whether a transaction is bona fide and tries to add
// it to the given mempool.
func (bc *Blockchain) verifyAndPoolTx(t *transaction.Transaction, pool *mempool.Pool, feer mempool.Feer, data ...any) error {
// This code can technically be moved out of here, because it doesn't
// really require a chain lock.
err := vm.IsScriptCorrect(t.Script, nil)
if err != nil {
return fmt.Errorf("%w: %w", ErrInvalidScript, err)
}
height := bc.BlockHeight()
isPartialTx := data != nil
if t.ValidUntilBlock <= height || !isPartialTx && t.ValidUntilBlock > height+bc.config.MaxValidUntilBlockIncrement {
return fmt.Errorf("%w: ValidUntilBlock = %d, current height = %d", ErrTxExpired, t.ValidUntilBlock, height)
}
// Policy checks.
if err := bc.contracts.Policy.CheckPolicy(bc.dao, t); err != nil {
return fmt.Errorf("%w: %w", ErrPolicy, err)
}
if t.SystemFee > bc.config.MaxBlockSystemFee {
return fmt.Errorf("%w: too big system fee (%d > MaxBlockSystemFee %d)", ErrPolicy, t.SystemFee, bc.config.MaxBlockSystemFee)
}
size := t.Size()
if size > transaction.MaxTransactionSize {
return fmt.Errorf("%w: (%d > MaxTransactionSize %d)", ErrTxTooBig, size, transaction.MaxTransactionSize)
}
needNetworkFee := int64(size)*bc.FeePerByte() + bc.CalculateAttributesFee(t)
netFee := t.NetworkFee - needNetworkFee
if netFee < 0 {
return fmt.Errorf("%w: net fee is %v, need %v", ErrTxSmallNetworkFee, t.NetworkFee, needNetworkFee)
}
// Check that the current tx wasn't included in the Conflicts attributes of some
// other transaction that is already on the chain.
if err := bc.dao.HasTransaction(t.Hash(), t.Signers, height, bc.config.MaxTraceableBlocks); err != nil {
switch {
case errors.Is(err, dao.ErrAlreadyExists):
return ErrAlreadyExists
case errors.Is(err, dao.ErrHasConflicts):
return fmt.Errorf("blockchain: %w", ErrHasConflicts)
default:
return err
}
}
err = bc.verifyTxWitnesses(t, nil, isPartialTx, netFee)
if err != nil {
return err
}
if err := bc.verifyTxAttributes(bc.dao, t, isPartialTx); err != nil {
return err
}
err = pool.Add(t, feer, data...)
if err != nil {
switch {
case errors.Is(err, mempool.ErrConflict):
return ErrMemPoolConflict
case errors.Is(err, mempool.ErrDup):
return ErrAlreadyInPool
case errors.Is(err, mempool.ErrInsufficientFunds):
return ErrInsufficientFunds
case errors.Is(err, mempool.ErrOOM):
return ErrOOM
case errors.Is(err, mempool.ErrConflictsAttribute):
return fmt.Errorf("mempool: %w: %w", ErrHasConflicts, err)
default:
return err
}
}
return nil
}
// CalculateAttributesFee returns network fee for all transaction attributes that should be
// paid according to native Policy.
func (bc *Blockchain) CalculateAttributesFee(tx *transaction.Transaction) int64 {
var feeSum int64
for _, attr := range tx.Attributes {
base := bc.contracts.Policy.GetAttributeFeeInternal(bc.dao, attr.Type)
switch attr.Type {
case transaction.ConflictsT:
feeSum += base * int64(len(tx.Signers))
case transaction.NotaryAssistedT:
if bc.P2PSigExtensionsEnabled() {
na := attr.Value.(*transaction.NotaryAssisted)
feeSum += base * (int64(na.NKeys) + 1)
}
default:
feeSum += base
}
}
return feeSum
}
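// Worked example (illustrative): for a transaction with two signers, one
// Conflicts attribute (base fee bC) and one HighPriority attribute (base fee
// bH), the loop above yields bC*2 + bH: Conflicts is charged once per signer,
// while HighPriority is charged its plain base fee.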
func (bc *Blockchain) verifyTxAttributes(d *dao.Simple, tx *transaction.Transaction, isPartialTx bool) error {
for i := range tx.Attributes {
switch attrType := tx.Attributes[i].Type; attrType {
case transaction.HighPriority:
h := bc.contracts.NEO.GetCommitteeAddress(d)
if !tx.HasSigner(h) {
return fmt.Errorf("%w: high priority tx is not signed by committee", ErrInvalidAttribute)
}
case transaction.OracleResponseT:
h, err := bc.contracts.Oracle.GetScriptHash(bc.dao)
if err != nil || h.Equals(util.Uint160{}) {
return fmt.Errorf("%w: %w", ErrInvalidAttribute, err)
}
hasOracle := false
for i := range tx.Signers {
if tx.Signers[i].Scopes != transaction.None {
return fmt.Errorf("%w: oracle tx has invalid signer scope", ErrInvalidAttribute)
}
if tx.Signers[i].Account.Equals(h) {
hasOracle = true
}
}
if !hasOracle {
return fmt.Errorf("%w: oracle tx is not signed by oracle nodes", ErrInvalidAttribute)
}
if !bytes.Equal(tx.Script, bc.contracts.Oracle.GetOracleResponseScript()) {
return fmt.Errorf("%w: oracle tx has invalid script", ErrInvalidAttribute)
}
resp := tx.Attributes[i].Value.(*transaction.OracleResponse)
req, err := bc.contracts.Oracle.GetRequestInternal(bc.dao, resp.ID)
if err != nil {
return fmt.Errorf("%w: oracle tx points to invalid request: %w", ErrInvalidAttribute, err)
}
if uint64(tx.NetworkFee+tx.SystemFee) < req.GasForResponse {
return fmt.Errorf("%w: oracle tx has insufficient gas", ErrInvalidAttribute)
}
case transaction.NotValidBeforeT:
nvb := tx.Attributes[i].Value.(*transaction.NotValidBefore).Height
curHeight := bc.BlockHeight()
if isPartialTx {
maxNVBDelta, err := bc.GetMaxNotValidBeforeDelta()
if err != nil {
return fmt.Errorf("%w: failed to retrieve MaxNotValidBeforeDelta value from native Notary contract: %w", ErrInvalidAttribute, err)
}
if curHeight+maxNVBDelta < nvb {
return fmt.Errorf("%w: NotValidBefore (%d) bigger than MaxNVBDelta (%d) allows at height %d", ErrInvalidAttribute, nvb, maxNVBDelta, curHeight)
}
if nvb+maxNVBDelta < tx.ValidUntilBlock {
return fmt.Errorf("%w: NotValidBefore (%d) set more than MaxNVBDelta (%d) away from VUB (%d)", ErrInvalidAttribute, nvb, maxNVBDelta, tx.ValidUntilBlock)
}
} else {
if curHeight < nvb {
return fmt.Errorf("%w: transaction is not yet valid: NotValidBefore = %d, current height = %d", ErrInvalidAttribute, nvb, curHeight)
}
}
case transaction.ConflictsT:
conflicts := tx.Attributes[i].Value.(*transaction.Conflicts)
// Only a fully-qualified dao.ErrAlreadyExists error matters here, thus we can
// safely omit the signers, current index and MTB arguments to the HasTransaction
// call to improve performance a bit.
if err := bc.dao.HasTransaction(conflicts.Hash, nil, 0, 0); errors.Is(err, dao.ErrAlreadyExists) {
return fmt.Errorf("%w: conflicting transaction %s is already on chain", ErrInvalidAttribute, conflicts.Hash.StringLE())
}
case transaction.NotaryAssistedT:
if !bc.config.P2PSigExtensions {
return fmt.Errorf("%w: NotaryAssisted attribute was found, but P2PSigExtensions are disabled", ErrInvalidAttribute)
}
if !tx.HasSigner(bc.contracts.Notary.Hash) {
return fmt.Errorf("%w: NotaryAssisted attribute was found, but transaction is not signed by the Notary native contract", ErrInvalidAttribute)
}
default:
if !bc.config.ReservedAttributes && attrType >= transaction.ReservedLowerBound && attrType <= transaction.ReservedUpperBound {
return fmt.Errorf("%w: attribute of reserved type was found, but ReservedAttributes are disabled", ErrInvalidAttribute)
}
}
}
return nil
}
// IsTxStillRelevant is a callback for mempool transaction filtering after the
// new block addition. It returns false for transactions added by the new block
// (passed via txpool) and does witness reverification for non-standard
// contracts. It operates under the assumption that full transaction verification
// was already done so we don't need to check basic things like size, input/output
// correctness, presence in blocks before the new one, etc.
func (bc *Blockchain) IsTxStillRelevant(t *transaction.Transaction, txpool *mempool.Pool, isPartialTx bool) bool {
var (
recheckWitness bool
curheight = bc.BlockHeight()
)
if t.ValidUntilBlock <= curheight {
return false
}
if txpool == nil {
if bc.dao.HasTransaction(t.Hash(), t.Signers, curheight, bc.config.MaxTraceableBlocks) != nil {
return false
}
} else if txpool.HasConflicts(t, bc) {
return false
}
if err := bc.verifyTxAttributes(bc.dao, t, isPartialTx); err != nil {
return false
}
for i := range t.Scripts {
if !vm.IsStandardContract(t.Scripts[i].VerificationScript) {
recheckWitness = true
break
}
}
if recheckWitness {
return bc.verifyTxWitnesses(t, nil, isPartialTx) == nil
}
return true
}
// VerifyTx verifies whether the transaction is bona fide relative to the
// current blockchain state. Note that this verification is completely isolated
// from the main node's mempool.
func (bc *Blockchain) VerifyTx(t *transaction.Transaction) error {
var mp = mempool.New(1, 0, false, nil)
bc.lock.RLock()
defer bc.lock.RUnlock()
return bc.verifyAndPoolTx(t, mp, bc)
}
// PoolTx verifies and tries to add the given transaction into the mempool. If no
// pool is given, the default mempool is used. Passing multiple pools is not supported.
func (bc *Blockchain) PoolTx(t *transaction.Transaction, pools ...*mempool.Pool) error {
var pool = bc.memPool
bc.lock.RLock()
defer bc.lock.RUnlock()
// Programmer error.
if len(pools) > 1 {
panic("too many pools given")
}
if len(pools) == 1 {
pool = pools[0]
}
return bc.verifyAndPoolTx(t, pool, bc)
}
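// Illustrative usage sketch (not part of the upstream file):
//
//	err := bc.PoolTx(tx)
//	switch {
//	case err == nil:
//		// Accepted into the default mempool.
//	case errors.Is(err, ErrAlreadyExists), errors.Is(err, ErrAlreadyInPool):
//		// Duplicate, nothing to do.
//	default:
//		log.Println("transaction rejected:", err)
//	}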
// PoolTxWithData verifies and tries to add given transaction with additional data into the mempool.
func (bc *Blockchain) PoolTxWithData(t *transaction.Transaction, data any, mp *mempool.Pool, feer mempool.Feer, verificationFunction func(tx *transaction.Transaction, data any) error) error {
bc.lock.RLock()
defer bc.lock.RUnlock()
if verificationFunction != nil {
err := verificationFunction(t, data)
if err != nil {
return err
}
}
return bc.verifyAndPoolTx(t, mp, feer, data)
}
// GetCommittee returns the sorted list of public keys of nodes in committee.
func (bc *Blockchain) GetCommittee() (keys.PublicKeys, error) {
pubs := bc.contracts.NEO.GetCommitteeMembers(bc.dao)
sort.Sort(pubs)
return pubs, nil
}
// ComputeNextBlockValidators returns the current validators. The validators list
// returned from this method is updated once per CommitteeSize number of blocks.
// For the last block in the dBFT epoch this method returns the list of validators
// recalculated from the latest relevant information about NEO votes; in this case
// the list of validators may differ from the one returned by GetNextBlockValidators.
// For a block that is not the last one in the dBFT epoch, this method returns the
// same list as GetNextBlockValidators.
func (bc *Blockchain) ComputeNextBlockValidators() []*keys.PublicKey {
return bc.contracts.NEO.ComputeNextBlockValidators(bc.dao)
}
// GetNextBlockValidators returns next block validators. Validators list returned
// from this method is the sorted top NumOfCNs number of public keys from the
// committee of the current dBFT round (that was calculated once for the
// CommitteeSize number of blocks), thus, validators list returned from this
// method is being updated once per (committee size) number of blocks, but not
// every block.
func (bc *Blockchain) GetNextBlockValidators() ([]*keys.PublicKey, error) {
return bc.contracts.NEO.GetNextBlockValidatorsInternal(bc.dao), nil
}
// GetEnrollments returns all registered validators.
func (bc *Blockchain) GetEnrollments() ([]state.Validator, error) {
return bc.contracts.NEO.GetCandidates(bc.dao)
}
// GetTestVM returns an interop context with VM set up for a test run.
func (bc *Blockchain) GetTestVM(t trigger.Type, tx *transaction.Transaction, b *block.Block) (*interop.Context, error) {
if b == nil {
var err error
h := bc.BlockHeight() + 1
b, err = bc.getFakeNextBlock(h)
if err != nil {
return nil, fmt.Errorf("failed to create fake block for height %d: %w", h, err)
}
}
systemInterop := bc.newInteropContext(t, bc.dao, b, tx)
_ = systemInterop.SpawnVM() // All the other code assumes that the VM is ready.
return systemInterop, nil
}
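// Illustrative usage sketch (not part of the upstream file), assuming script
// holds a valid NeoVM script to be test-executed against the current state:
//
//	ic, err := bc.GetTestVM(trigger.Application, nil, nil)
//	if err != nil {
//		return err
//	}
//	ic.VM.LoadScriptWithFlags(script, callflag.All)
//	if err := ic.VM.Run(); err != nil {
//		return err
//	}
//	res := ic.VM.Estack().Pop().Item() // Resulting stack item.
//	_ = res                            // GAS spent is available via ic.VM.GasConsumed().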
// GetTestHistoricVM returns an interop context with VM set up for a test run.
func (bc *Blockchain) GetTestHistoricVM(t trigger.Type, tx *transaction.Transaction, nextBlockHeight uint32) (*interop.Context, error) {
if bc.config.Ledger.KeepOnlyLatestState {
return nil, errors.New("only latest state is supported")
}
b, err := bc.getFakeNextBlock(nextBlockHeight)
if err != nil {
return nil, fmt.Errorf("failed to create fake block for height %d: %w", nextBlockHeight, err)
}
var mode = mpt.ModeAll
if bc.config.Ledger.RemoveUntraceableBlocks {
if b.Index < bc.BlockHeight()-bc.config.MaxTraceableBlocks {
return nil, fmt.Errorf("state for height %d is outdated and removed from the storage", b.Index)
}
mode |= mpt.ModeGCFlag
}
if b.Index < 1 || b.Index > bc.BlockHeight()+1 {
return nil, fmt.Errorf("unsupported historic chain's height: requested state for %d, chain height %d", b.Index, bc.blockHeight)
}
// Assuming that the N-th block is being processed during the historic call, the historic invocation should be based on the storage state of height N-1.
sr, err := bc.stateRoot.GetStateRoot(b.Index - 1)
if err != nil {
return nil, fmt.Errorf("failed to retrieve stateroot for height %d: %w", b.Index, err)
}
s := mpt.NewTrieStore(sr.Root, mode, storage.NewPrivateMemCachedStore(bc.dao.Store))
dTrie := dao.NewSimple(s, bc.config.StateRootInHeader)
dTrie.Version = bc.dao.Version
// Initialize native cache before passing DAO to interop context constructor, because
// the constructor will call BaseExecFee/StoragePrice policy methods on the passed DAO.
err = bc.initializeNativeCache(b.Index, dTrie)
if err != nil {
return nil, fmt.Errorf("failed to initialize native cache backed by historic DAO: %w", err)
}
systemInterop := bc.newInteropContext(t, dTrie, b, tx)
_ = systemInterop.SpawnVM() // All the other code assumes that the VM is ready.
return systemInterop, nil
}
// getFakeNextBlock returns a fake block with the specified index and a pre-filled Timestamp field.
func (bc *Blockchain) getFakeNextBlock(nextBlockHeight uint32) (*block.Block, error) {
b := block.New(bc.config.StateRootInHeader)
b.Index = nextBlockHeight
hdr, err := bc.GetHeader(bc.GetHeaderHash(nextBlockHeight - 1))
if err != nil {
return nil, err
}
b.Timestamp = hdr.Timestamp + uint64(bc.config.TimePerBlock/time.Millisecond)
return b, nil
}
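// Worked example (illustrative): with TimePerBlock = 15s and the previous
// header's Timestamp equal to T (header timestamps are in milliseconds), the
// fake block gets Timestamp = T + 15000.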
// Various witness verification errors.
var (
ErrWitnessHashMismatch = errors.New("witness hash mismatch")
ErrNativeContractWitness = errors.New("native contract witness must have empty verification script")
ErrVerificationFailed = errors.New("signature check failed")
ErrInvalidInvocationScript = errors.New("invalid invocation script")
ErrInvalidSignature = fmt.Errorf("%w: invalid signature", ErrVerificationFailed)
ErrInvalidVerificationScript = errors.New("invalid verification script")
ErrUnknownVerificationContract = errors.New("unknown verification contract")
ErrInvalidVerificationContract = errors.New("verification contract is missing `verify` method or `verify` method has unexpected return value")
)
// InitVerificationContext initializes context for witness check.
func (bc *Blockchain) InitVerificationContext(ic *interop.Context, hash util.Uint160, witness *transaction.Witness) error {
if len(witness.VerificationScript) != 0 {
if witness.ScriptHash() != hash {
return fmt.Errorf("%w: expected %s, got %s", ErrWitnessHashMismatch, hash.StringLE(), witness.ScriptHash().StringLE())
}
if bc.contracts.ByHash(hash) != nil {
return ErrNativeContractWitness
}
err := vm.IsScriptCorrect(witness.VerificationScript, nil)
if err != nil {
return fmt.Errorf("%w: %w", ErrInvalidVerificationScript, err)
}
ic.VM.LoadScriptWithHash(witness.VerificationScript, hash, callflag.ReadOnly)
} else {
cs, err := ic.GetContract(hash)
if err != nil {
return ErrUnknownVerificationContract
}
md := cs.Manifest.ABI.GetMethod(manifest.MethodVerify, -1)
if md == nil || md.ReturnType != smartcontract.BoolType {
return ErrInvalidVerificationContract
}
verifyOffset := md.Offset
initOffset := -1
md = cs.Manifest.ABI.GetMethod(manifest.MethodInit, 0)
if md != nil {
initOffset = md.Offset
}
ic.Invocations[cs.Hash]++
ic.VM.LoadNEFMethod(&cs.NEF, &cs.Manifest, util.Uint160{}, hash, callflag.ReadOnly,
true, verifyOffset, initOffset, nil)
}
if len(witness.InvocationScript) != 0 {
err := vm.IsScriptCorrect(witness.InvocationScript, nil)
if err != nil {
return fmt.Errorf("%w: %w", ErrInvalidInvocationScript, err)
}
ic.VM.LoadScript(witness.InvocationScript)
}
return nil
}
// VerifyWitness checks that w is a correct witness for c signed by h. It returns
// the amount of GAS consumed during verification and an error.
func (bc *Blockchain) VerifyWitness(h util.Uint160, c hash.Hashable, w *transaction.Witness, gas int64) (int64, error) {
ic := bc.newInteropContext(trigger.Verification, bc.dao, nil, nil)
ic.Container = c
if tx, ok := c.(*transaction.Transaction); ok {
ic.Tx = tx
}
return bc.verifyHashAgainstScript(h, w, ic, gas)
}
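// Illustrative usage sketch (not part of the upstream file): checking a
// standalone witness w of a transaction tx against the signer's script hash
// h with a 1 GAS verification budget:
//
//	gasUsed, err := bc.VerifyWitness(h, tx, &w, 100000000)
//	if errors.Is(err, ErrInvalidSignature) {
//		// The witness script executed successfully but returned false.
//	}
//	_ = gasUsed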
// verifyHashAgainstScript verifies given hash against the given witness and returns the amount of GAS consumed.
func (bc *Blockchain) verifyHashAgainstScript(hash util.Uint160, witness *transaction.Witness, interopCtx *interop.Context, gas int64) (int64, error) {
gas = min(gas, bc.contracts.Policy.GetMaxVerificationGas(interopCtx.DAO))
vm := interopCtx.SpawnVM()
vm.GasLimit = gas
if err := bc.InitVerificationContext(interopCtx, hash, witness); err != nil {
return 0, err
}
err := interopCtx.Exec()
if vm.HasFailed() {
return 0, fmt.Errorf("%w: vm execution has failed: %w", ErrVerificationFailed, err)
}
estack := vm.Estack()
if estack.Len() > 0 {
resEl := estack.Pop()
res, err := resEl.Item().TryBool()
if err != nil {
return 0, fmt.Errorf("%w: invalid return value", ErrVerificationFailed)
}
if vm.Estack().Len() != 0 {
return 0, fmt.Errorf("%w: expected exactly one returned value", ErrVerificationFailed)
}
if !res {
return vm.GasConsumed(), ErrInvalidSignature
}
} else {
return 0, fmt.Errorf("%w: no result returned from the script", ErrVerificationFailed)
}
return vm.GasConsumed(), nil
}
// verifyTxWitnesses verifies the scripts (witnesses) that come with a given
// transaction. It can reorder them by ScriptHash, because that's required to
// match a slice of script hashes from the Blockchain. Block parameter
// is used for easy interop access and can be omitted for transactions that are
// not yet added into any block. verificationFee argument can be provided to
// restrict the maximum amount of GAS allowed to be spent on transaction
// verification.
// Golang implementation of VerifyWitnesses method in C# (https://github.com/neo-project/neo/blob/master/neo/SmartContract/Helper.cs#L87).
func (bc *Blockchain) verifyTxWitnesses(t *transaction.Transaction, block *block.Block, isPartialTx bool, verificationFee ...int64) error {
interopCtx := bc.newInteropContext(trigger.Verification, bc.dao, block, t)
var gasLimit int64
if len(verificationFee) == 0 {
gasLimit = t.NetworkFee - int64(t.Size())*bc.FeePerByte() - bc.CalculateAttributesFee(t)
} else {
gasLimit = verificationFee[0]
}
for i := range t.Signers {
gasConsumed, err := bc.verifyHashAgainstScript(t.Signers[i].Account, &t.Scripts[i], interopCtx, gasLimit)
if err != nil &&
!(i == 0 && isPartialTx && errors.Is(err, ErrInvalidSignature)) { // it's OK for partially-filled transaction with dummy first witness.
return fmt.Errorf("witness #%d: %w", i, err)
}
gasLimit -= gasConsumed
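// Note: the limit is shared across witnesses, so GAS consumed by
// witness #i shrinks the budget available to witness #i+1.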
}
return nil
}
// verifyHeaderWitnesses is a block-specific implementation of VerifyWitnesses logic.
func (bc *Blockchain) verifyHeaderWitnesses(currHeader, prevHeader *block.Header) error {
hash := prevHeader.NextConsensus
_, err := bc.VerifyWitness(hash, currHeader, &currHeader.Script, HeaderVerificationGasLimit)
return err
}
// GoverningTokenHash returns the governing token (NEO) native contract hash.
func (bc *Blockchain) GoverningTokenHash() util.Uint160 {
return bc.contracts.NEO.Hash
}
// UtilityTokenHash returns the utility token (GAS) native contract hash.
func (bc *Blockchain) UtilityTokenHash() util.Uint160 {
return bc.contracts.GAS.Hash
}
// ManagementContractHash returns management contract's hash.
func (bc *Blockchain) ManagementContractHash() util.Uint160 {
return bc.contracts.Management.Hash
}
func (bc *Blockchain) newInteropContext(trigger trigger.Type, d *dao.Simple, block *block.Block, tx *transaction.Transaction) *interop.Context {
baseExecFee := int64(interop.DefaultBaseExecFee)
if block == nil || block.Index != 0 {
// Use provided dao instead of Blockchain's one to fetch possible ExecFeeFactor
// changes that were not yet persisted to Blockchain's dao.
baseExecFee = bc.contracts.Policy.GetExecFeeFactorInternal(d)
}
baseStorageFee := int64(native.DefaultStoragePrice)
if block == nil || block.Index != 0 {
// Use provided dao instead of Blockchain's one to fetch possible StoragePrice
// changes that were not yet persisted to Blockchain's dao.
baseStorageFee = bc.contracts.Policy.GetStoragePriceInternal(d)
}
ic := interop.NewContext(trigger, bc, d, baseExecFee, baseStorageFee, native.GetContract, bc.contracts.Contracts, contract.LoadToken, block, tx, bc.log)
ic.Functions = systemInterops
switch {
case tx != nil:
ic.Container = tx
case block != nil:
ic.Container = block
}
ic.InitNonceData()
return ic
}
// P2PSigExtensionsEnabled defines whether P2P signature extensions are enabled.
func (bc *Blockchain) P2PSigExtensionsEnabled() bool {
return bc.config.P2PSigExtensions
}
// RegisterPostBlock appends provided function to the list of functions which should be run after new block
// is stored.
func (bc *Blockchain) RegisterPostBlock(f func(func(*transaction.Transaction, *mempool.Pool, bool) bool, *mempool.Pool, *block.Block)) {
bc.postBlock = append(bc.postBlock, f)
}
// GetBaseExecFee returns the execution price for `NOP`.
func (bc *Blockchain) GetBaseExecFee() int64 {
if bc.BlockHeight() == 0 {
return interop.DefaultBaseExecFee
}
return bc.contracts.Policy.GetExecFeeFactorInternal(bc.dao)
}
// GetMaxVerificationGAS returns maximum verification GAS Policy limit.
func (bc *Blockchain) GetMaxVerificationGAS() int64 {
return bc.contracts.Policy.GetMaxVerificationGas(bc.dao)
}
// GetMaxNotValidBeforeDelta returns maximum NotValidBeforeDelta Notary limit.
func (bc *Blockchain) GetMaxNotValidBeforeDelta() (uint32, error) {
if !bc.config.P2PSigExtensions {
panic("disallowed call to Notary") // critical error, thus panic.
}
if !bc.isHardforkEnabled(bc.contracts.Notary.ActiveIn(), bc.BlockHeight()) {
return 0, fmt.Errorf("native Notary is active starting from %s", bc.contracts.Notary.ActiveIn().String())
}
return bc.contracts.Notary.GetMaxNotValidBeforeDelta(bc.dao), nil
}
// GetStoragePrice returns current storage price.
func (bc *Blockchain) GetStoragePrice() int64 {
if bc.BlockHeight() == 0 {
return native.DefaultStoragePrice
}
return bc.contracts.Policy.GetStoragePriceInternal(bc.dao)
}