/*
Package statesync implements the module for the P2P state synchronisation process.
The module manages state synchronisation for non-archival nodes which are joining
the network and don't have the ability to resync from the genesis block.

Given the currently available state synchronisation point P, the state sync process
includes the following stages:

1. Fetching headers starting from height 0 up to P+1.
2. Fetching MPT nodes for height P starting from the corresponding state root.
3. Fetching blocks starting from height P-MaxTraceableBlocks (or 0) up to P.

Steps 2 and 3 are performed in parallel. Once all the data are collected and stored
in the DB, an atomic state jump to the state sync point P occurs. Further node
operation is performed using the standard sync mechanism until the node reaches
the synchronised state.
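
The sketch below outlines how the node's networking layer is expected to drive the
module; the surrounding variables (bc, stateMod, d, log, jumpCallback) and the way
data is requested from peers are illustrative placeholders, not part of this package:

	m := statesync.NewModule(bc, stateMod, log, d, jumpCallback)
	if err := m.Init(bc.BlockHeight()); err != nil {
		// Handle the error.
	}
	for m.IsActive() {
		if m.NeedHeaders() {
			// Request headers from peers and feed them via m.AddHeaders(...).
		}
		if m.NeedMPTNodes() {
			// Request nodes listed by m.GetUnknownMPTNodesBatch(n) and feed
			// the serialised results via m.AddMPTNodes(...).
		}
		// Blocks from P-MaxTraceableBlocks up to P are fed via m.AddBlock(...).
	}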
*/
package statesync

import (
	"encoding/hex"
	"errors"
	"fmt"
	"sync"

	"github.com/nspcc-dev/neo-go/pkg/config"
	"github.com/nspcc-dev/neo-go/pkg/core/block"
	"github.com/nspcc-dev/neo-go/pkg/core/dao"
	"github.com/nspcc-dev/neo-go/pkg/core/mpt"
	"github.com/nspcc-dev/neo-go/pkg/core/stateroot"
	"github.com/nspcc-dev/neo-go/pkg/core/storage"
	"github.com/nspcc-dev/neo-go/pkg/io"
	"github.com/nspcc-dev/neo-go/pkg/util"
	"go.uber.org/zap"
)

// stateSyncStage is a type of the state synchronisation stage.
type stateSyncStage uint8

const (
	// inactive means that state exchange is disabled by the protocol configuration.
	// Can't be combined with other states.
	inactive stateSyncStage = 1 << iota
	// none means that state exchange is enabled in the configuration, but
	// initialisation of the state sync module wasn't yet performed, i.e.
	// (*Module).Init wasn't called. Can't be combined with other states.
	none
	// initialized means that (*Module).Init was called, but other sync stages
	// are not yet reached (i.e. headers are requested, but not yet fetched).
	// Can't be combined with other states.
	initialized
	// headersSynced means that headers for the current state sync point are fetched.
	// May be combined with mptSynced and/or blocksSynced.
	headersSynced
	// mptSynced means that MPT nodes for the current state sync point are fetched.
	// Always combined with headersSynced; may be combined with blocksSynced.
	mptSynced
	// blocksSynced means that blocks up to the current state sync point are stored.
	// Always combined with headersSynced; may be combined with mptSynced.
	blocksSynced
)
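
// Once headers are synchronised, the stages above act as plain bit flags, so the
// checks used throughout this file follow the pattern sketched below (the sketch
// is illustrative and not part of the module's API):
//
//	stage := headersSynced | blocksSynced
//	if stage&headersSynced != 0 && stage&mptSynced == 0 {
//		// Headers are ready, MPT nodes are still being fetched.
//	}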

// Ledger is the interface required from Blockchain for Module to operate.
type Ledger interface {
	AddHeaders(...*block.Header) error
	BlockHeight() uint32
	GetConfig() config.Blockchain
	GetHeader(hash util.Uint256) (*block.Header, error)
	GetHeaderHash(uint32) util.Uint256
	HeaderHeight() uint32
}

// Module represents the state sync module and is aimed at gathering state-related
// data to perform an atomic state jump.
type Module struct {
	lock sync.RWMutex
	log  *zap.Logger

	// syncPoint is the state synchronisation point P we're currently working against.
	syncPoint uint32
	// syncStage is the stage of the sync process.
	syncStage stateSyncStage
	// syncInterval is the delta between two adjacent state sync points.
	syncInterval uint32
	// blockHeight is the index of the latest stored block.
	blockHeight uint32

	dao      *dao.Simple
	bc       Ledger
	stateMod *stateroot.Module
	mptpool  *Pool

	billet *mpt.Billet

	jumpCallback func(p uint32) error
}

// NewModule returns a new instance of the statesync module.
func NewModule(bc Ledger, stateMod *stateroot.Module, log *zap.Logger, s *dao.Simple, jumpCallback func(p uint32) error) *Module {
	if !(bc.GetConfig().P2PStateExchangeExtensions && bc.GetConfig().Ledger.RemoveUntraceableBlocks) {
		return &Module{
			dao:       s,
			bc:        bc,
			stateMod:  stateMod,
			syncStage: inactive,
		}
	}
	return &Module{
		dao:          s,
		bc:           bc,
		stateMod:     stateMod,
		log:          log,
		syncInterval: uint32(bc.GetConfig().StateSyncInterval),
		mptpool:      NewPool(),
		syncStage:    none,
		jumpCallback: jumpCallback,
	}
}

// Init initializes the state sync module for the current chain's height with the
// given callback for MPT node requests.
func (s *Module) Init(currChainHeight uint32) error {
	s.lock.Lock()
	defer s.lock.Unlock()

	if s.syncStage != none {
		return errors.New("already initialized or inactive")
	}

	p := (currChainHeight / s.syncInterval) * s.syncInterval
	if p < 2*s.syncInterval {
		// The chain is too low to start the state exchange process, use the standard sync mechanism.
		s.syncStage = inactive
		return nil
	}
	pOld, err := s.dao.GetStateSyncPoint()
	if err == nil && pOld >= p-s.syncInterval {
		// The old point is still valid, so try to resync states for this point.
		p = pOld
	} else {
		if s.bc.BlockHeight() > p-2*s.syncInterval {
			// The chain has already been synchronised up to the old state sync point and regular
			// blocks processing was started. The current block height is enough to start regular
			// blocks processing.
			s.syncStage = inactive
			return nil
		}
		if err == nil {
			// pOld was found, it is outdated, and the chain wasn't completely synchronised for pOld. Need to drop the DB.
			return fmt.Errorf("state sync point %d is found in the storage, "+
				"but sync process wasn't completed and point is outdated. Please, drop the database manually and restart the node to run state sync process", pOld)
		}
		if s.bc.BlockHeight() != 0 {
			// pOld wasn't found, but blocks processing was started in a regular manner and the latest
			// stored block is too outdated to start regular blocks processing again. Need to drop the DB.
			return fmt.Errorf("current chain's height is too low to start regular blocks processing from the oldest sync point %d. "+
				"Please, drop the database manually and restart the node to run state sync process", p-s.syncInterval)
		}

		// We've reached this point, so the chain has the genesis block only. Since we can't ruin
		// the current chain's state until the new state is completely fetched, outdated state-related
		// data will be removed from storage during (*Blockchain).jumpToState(...) execution.
		// All we need to do right now is to remove genesis-related MPT nodes.
		err = s.stateMod.CleanStorage()
		if err != nil {
			return fmt.Errorf("failed to remove outdated MPT data from storage: %w", err)
		}
	}

	s.syncPoint = p
	s.dao.PutStateSyncPoint(p)
	s.syncStage = initialized
	s.log.Info("try to sync state for the latest state synchronisation point",
		zap.Uint32("point", p),
		zap.Uint32("evaluated chain's blockHeight", currChainHeight))

	return s.defineSyncStage()
}

// TemporaryPrefix accepts the current storage prefix and returns the prefix
// to use for storing intermediate items during synchronization.
func TemporaryPrefix(currPrefix storage.KeyPrefix) storage.KeyPrefix {
	switch currPrefix {
	case storage.STStorage:
		return storage.STTempStorage
	case storage.STTempStorage:
		return storage.STStorage
	default:
		panic(fmt.Sprintf("invalid storage prefix: %x", currPrefix))
	}
}

// defineSyncStage sequentially checks and sets the sync process stage after Module
// initialization. It also performs initialization of the MPT billet if necessary.
func (s *Module) defineSyncStage() error {
	// Check the headers sync stage first.
	ltstHeaderHeight := s.bc.HeaderHeight()
	if ltstHeaderHeight > s.syncPoint {
		s.syncStage = headersSynced
		s.log.Info("headers are in sync",
			zap.Uint32("headerHeight", s.bc.HeaderHeight()))
	}

	// Check the blocks sync stage.
	s.blockHeight = s.getLatestSavedBlock(s.syncPoint)
	if s.blockHeight >= s.syncPoint {
		s.syncStage |= blocksSynced
		s.log.Info("blocks are in sync",
			zap.Uint32("blockHeight", s.blockHeight))
	}

	// Check the MPT sync stage.
	if s.blockHeight > s.syncPoint {
		s.syncStage |= mptSynced
		s.log.Info("MPT is in sync",
			zap.Uint32("stateroot height", s.stateMod.CurrentLocalHeight()))
	} else if s.syncStage&headersSynced != 0 {
		header, err := s.bc.GetHeader(s.bc.GetHeaderHash(s.syncPoint + 1))
		if err != nil {
			return fmt.Errorf("failed to get header to initialize MPT billet: %w", err)
		}
		var mode mpt.TrieMode
		// No need to enable GC here, it only has the latest things.
		if s.bc.GetConfig().Ledger.KeepOnlyLatestState || s.bc.GetConfig().Ledger.RemoveUntraceableBlocks {
			mode |= mpt.ModeLatest
		}
		s.billet = mpt.NewBillet(header.PrevStateRoot, mode,
			TemporaryPrefix(s.dao.Version.StoragePrefix), s.dao.Store)
		s.log.Info("MPT billet initialized",
			zap.Uint32("height", s.syncPoint),
			zap.String("state root", header.PrevStateRoot.StringBE()))
		pool := NewPool()
		pool.Add(header.PrevStateRoot, []byte{})
		err = s.billet.Traverse(func(_ []byte, n mpt.Node, _ []byte) bool {
			nPaths, ok := pool.TryGet(n.Hash())
			if !ok {
				// If this situation occurs, then it's a bug in the MPT pool or Traverse.
				panic("failed to get MPT node from the pool")
			}
			pool.Remove(n.Hash())
			childrenPaths := make(map[util.Uint256][][]byte)
			for _, path := range nPaths {
				nChildrenPaths := mpt.GetChildrenPaths(path, n)
				for hash, paths := range nChildrenPaths {
					childrenPaths[hash] = append(childrenPaths[hash], paths...) // It's OK to have duplicates, they'll be handled by the pool.
				}
			}
			pool.Update(nil, childrenPaths)
			return false
		}, true)
		if err != nil {
			return fmt.Errorf("failed to traverse MPT during initialization: %w", err)
		}
		s.mptpool.Update(nil, pool.GetAll())
		if s.mptpool.Count() == 0 {
			s.syncStage |= mptSynced
			s.log.Info("MPT is in sync",
				zap.Uint32("stateroot height", s.syncPoint))
		}
	}

	if s.syncStage == headersSynced|blocksSynced|mptSynced {
		s.log.Info("state is in sync, starting regular blocks processing")
		s.syncStage = inactive
	}
	return nil
}

// getLatestSavedBlock returns either the current block index (if it's still relevant
// for continuing the state sync process) or H-1 where H is the index of the earliest
// block that should be saved next.
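// For example (numbers are purely illustrative): with the sync point P = 300000,
// MaxTraceableBlocks = 100000 and nothing stored yet, it returns 200000, so the
// first block expected by AddBlock is 200001.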
func (s *Module) getLatestSavedBlock(p uint32) uint32 {
	var result uint32
	mtb := s.bc.GetConfig().MaxTraceableBlocks
	if p > mtb {
		result = p - mtb
	}
	storedH, err := s.dao.GetStateSyncCurrentBlockHeight()
	if err == nil && storedH > result {
		result = storedH
	}
	actualH := s.bc.BlockHeight()
	if actualH > result {
		result = actualH
	}
	return result
}

// AddHeaders validates and adds specified headers to the chain.
func (s *Module) AddHeaders(hdrs ...*block.Header) error {
	s.lock.Lock()
	defer s.lock.Unlock()

	if s.syncStage != initialized {
		return errors.New("headers were not requested")
	}

	hdrsErr := s.bc.AddHeaders(hdrs...)
	if s.bc.HeaderHeight() > s.syncPoint {
		err := s.defineSyncStage()
		if err != nil {
			return fmt.Errorf("failed to define current sync stage: %w", err)
		}
	}
	return hdrsErr
}

// AddBlock verifies and saves the given block skipping its executable scripts.
func (s *Module) AddBlock(block *block.Block) error {
	s.lock.Lock()
	defer s.lock.Unlock()

	if s.syncStage&headersSynced == 0 || s.syncStage&blocksSynced != 0 {
		return nil
	}

	if s.blockHeight == s.syncPoint {
		return nil
	}
	expectedHeight := s.blockHeight + 1
	if expectedHeight != block.Index {
		return fmt.Errorf("expected %d, got %d: invalid block index", expectedHeight, block.Index)
	}
	if s.bc.GetConfig().StateRootInHeader != block.StateRootEnabled {
		return fmt.Errorf("stateroot setting mismatch: %v != %v", s.bc.GetConfig().StateRootInHeader, block.StateRootEnabled)
	}
	if !s.bc.GetConfig().SkipBlockVerification {
		merkle := block.ComputeMerkleRoot()
		if !block.MerkleRoot.Equals(merkle) {
			return errors.New("invalid block: MerkleRoot mismatch")
		}
	}
	cache := s.dao.GetPrivate()
	if err := cache.StoreAsBlock(block, nil, nil); err != nil {
		return err
	}

	cache.PutStateSyncCurrentBlockHeight(block.Index)

	for _, tx := range block.Transactions {
		if err := cache.StoreAsTransaction(tx, block.Index, nil); err != nil {
			return err
		}
	}

	_, err := cache.Persist()
	if err != nil {
		return fmt.Errorf("failed to persist results: %w", err)
	}
	s.blockHeight = block.Index
	if s.blockHeight == s.syncPoint {
		s.syncStage |= blocksSynced
		s.log.Info("blocks are in sync",
			zap.Uint32("blockHeight", s.blockHeight))
		s.checkSyncIsCompleted()
	}
	return nil
}

// AddMPTNodes tries to add the provided set of MPT nodes to the MPT billet if they are
// not yet collected.
func (s *Module) AddMPTNodes(nodes [][]byte) error {
	s.lock.Lock()
	defer s.lock.Unlock()

	if s.syncStage&headersSynced == 0 || s.syncStage&mptSynced != 0 {
		return errors.New("MPT nodes were not requested")
	}

	for _, nBytes := range nodes {
		var n mpt.NodeObject
		r := io.NewBinReaderFromBuf(nBytes)
		n.DecodeBinary(r)
		if r.Err != nil {
			return fmt.Errorf("failed to decode MPT node: %w", r.Err)
		}
		err := s.restoreNode(n.Node)
		if err != nil {
			return err
		}
	}
	if s.mptpool.Count() == 0 {
		s.syncStage |= mptSynced
		s.log.Info("MPT is in sync",
			zap.Uint32("height", s.syncPoint))
		s.checkSyncIsCompleted()
	}
	return nil
}

// restoreNode restores the given MPT node into the billet for every path registered
// in the MPT pool, schedules its unknown children for fetching and recursively
// restores those children that are already present in the storage.
func (s *Module) restoreNode(n mpt.Node) error {
	nPaths, ok := s.mptpool.TryGet(n.Hash())
	if !ok {
		// This can easily happen after receiving the same data from different peers.
		return nil
	}
	var childrenPaths = make(map[util.Uint256][][]byte)
	for _, path := range nPaths {
		// Must clone here in order to avoid future collapse collisions. If the node's refcount > 1, then the MPT pool
		// will manage all paths for this node and call RestoreHashNode separately for each of the paths.
		err := s.billet.RestoreHashNode(path, n.Clone())
		if err != nil {
			return fmt.Errorf("failed to restore MPT node with hash %s and path %s: %w", n.Hash().StringBE(), hex.EncodeToString(path), err)
		}
		for h, paths := range mpt.GetChildrenPaths(path, n) {
			childrenPaths[h] = append(childrenPaths[h], paths...) // It's OK to have duplicates, they'll be handled by the pool.
		}
	}

	s.mptpool.Update(map[util.Uint256][][]byte{n.Hash(): nPaths}, childrenPaths)

	for h := range childrenPaths {
		if child, err := s.billet.GetFromStore(h); err == nil {
			// The child is already in the storage, so we don't need to request it one more time.
			err = s.restoreNode(child)
			if err != nil {
				return fmt.Errorf("unable to restore saved children: %w", err)
			}
		}
	}
	return nil
}

// checkSyncIsCompleted checks whether the state sync process is completed, i.e. headers
// up to P+1 height are fetched, blocks up to P height are stored and MPT nodes for P
// height are stored. If so, a jump to the P state sync point occurs. It is not protected
// by lock, thus the caller should take care of it.
func (s *Module) checkSyncIsCompleted() {
	if s.syncStage != headersSynced|mptSynced|blocksSynced {
		return
	}
	s.log.Info("state is in sync",
		zap.Uint32("state sync point", s.syncPoint))
	err := s.jumpCallback(s.syncPoint)
	if err != nil {
		s.log.Fatal("failed to jump to the latest state sync point", zap.Error(err))
	}
	s.syncStage = inactive
	s.dispose()
}

func (s *Module) dispose() {
	s.billet = nil
}

// BlockHeight returns index of the last stored block.
func (s *Module) BlockHeight() uint32 {
	s.lock.RLock()
	defer s.lock.RUnlock()

	return s.blockHeight
}

// IsActive tells whether state sync module is on and still gathering state
// synchronisation data (headers, blocks or MPT nodes).
func (s *Module) IsActive() bool {
	s.lock.RLock()
	defer s.lock.RUnlock()

	return !(s.syncStage == inactive || (s.syncStage == headersSynced|mptSynced|blocksSynced))
}

// IsInitialized tells whether state sync module does not require initialization.
// If `false` is returned then Init can be safely called.
func (s *Module) IsInitialized() bool {
	s.lock.RLock()
	defer s.lock.RUnlock()

	return s.syncStage != none
}

// NeedHeaders tells whether the module hasn't completed headers synchronisation.
func (s *Module) NeedHeaders() bool {
	s.lock.RLock()
	defer s.lock.RUnlock()

	return s.syncStage == initialized
}

// NeedMPTNodes returns whether the module hasn't completed MPT synchronisation.
func (s *Module) NeedMPTNodes() bool {
	s.lock.RLock()
	defer s.lock.RUnlock()

	return s.syncStage&headersSynced != 0 && s.syncStage&mptSynced == 0
}

// Traverse traverses local MPT nodes starting from the specified root down to its
// children calling `process` for each serialised node until stop condition is satisfied.
func (s *Module) Traverse(root util.Uint256, process func(node mpt.Node, nodeBytes []byte) bool) error {
	s.lock.RLock()
	defer s.lock.RUnlock()

	var mode mpt.TrieMode
	// GC must be turned off here to allow access to the archived nodes.
	if s.bc.GetConfig().Ledger.KeepOnlyLatestState || s.bc.GetConfig().Ledger.RemoveUntraceableBlocks {
		mode |= mpt.ModeLatest
	}
	b := mpt.NewBillet(root, mode, 0, storage.NewMemCachedStore(s.dao.Store))
	return b.Traverse(func(pathToNode []byte, node mpt.Node, nodeBytes []byte) bool {
		return process(node, nodeBytes)
	}, false)
}

// GetUnknownMPTNodesBatch returns set of currently unknown MPT nodes (`limit` at max).
func (s *Module) GetUnknownMPTNodesBatch(limit int) []util.Uint256 {
	s.lock.RLock()
	defer s.lock.RUnlock()

	return s.mptpool.GetBatch(limit)
}
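
// GetUnknownMPTNodesBatch is meant to be driven together with AddMPTNodes by the
// networking layer. A minimal sketch of such a fetch loop, assuming m is a *Module
// and requestFromPeers is a hypothetical helper returning serialised nodes ([][]byte):
//
//	for m.NeedMPTNodes() {
//		hashes := m.GetUnknownMPTNodesBatch(32) // the batch size is illustrative
//		raw := requestFromPeers(hashes)
//		if err := m.AddMPTNodes(raw); err != nil {
//			// Handle the error.
//		}
//	}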