package core

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"sync/atomic"
	"time"

	"github.com/CityOfZion/neo-go/config"
	"github.com/CityOfZion/neo-go/pkg/core/storage"
	"github.com/CityOfZion/neo-go/pkg/core/transaction"
	"github.com/CityOfZion/neo-go/pkg/util"
	log "github.com/sirupsen/logrus"
)

// tuning parameters
const (
	secondsPerBlock  = 15
	headerBatchCount = 2000
	version          = "0.0.1"
)

var (
	genAmount         = []int{8, 7, 6, 5, 4, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
	decrementInterval = 2000000
	persistInterval   = 1 * time.Second
)

// Blockchain represents the blockchain.
type Blockchain struct {
	config config.ProtocolConfiguration

	// Any object that satisfies the storage.Store interface.
	storage.Store

	// Current index/height of the highest block.
	// Read access should always go through BlockHeight().
	// Write access should only happen in persist().
	blockHeight uint32

	// Number of headers stored in the chain file.
	storedHeaderCount uint32

	blockCache *Cache

	// All operations on headerList must go through headersOp
	// to be goroutine safe.
	headerList *HeaderHashList

	// Only for operating on the headerList.
	headersOp     chan headersOpFunc
	headersOpDone chan struct{}

	// Whether we will verify received blocks.
	verifyBlocks bool
}

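// headerList is owned by the run() goroutine: callers send a headersOpFunc
// over bc.headersOp and wait on bc.headersOpDone before touching the result.
// A minimal sketch of the pattern (headerListLen below is the canonical
// instance in this file):
//
//	var n int
//	bc.headersOp <- func(headerList *HeaderHashList) {
//		n = headerList.Len()
//	}
//	<-bc.headersOpDone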
type headersOpFunc func(headerList *HeaderHashList)

// NewBlockchain returns a new blockchain object that will use the
// given Store as its underlying storage.
func NewBlockchain(s storage.Store, cfg config.ProtocolConfiguration) (*Blockchain, error) {
	bc := &Blockchain{
		config:        cfg,
		Store:         s,
		headersOp:     make(chan headersOpFunc),
		headersOpDone: make(chan struct{}),
		blockCache:    NewCache(),
		verifyBlocks:  false,
	}
	go bc.run()

	if err := bc.init(); err != nil {
		return nil, err
	}

	return bc, nil
}

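// A minimal usage sketch (NewBlockchainLevelDB below does the real wiring from
// the node configuration; the in-memory store and cfg value named here are
// assumptions for illustration, not something this file defines):
//
//	store := storage.NewMemoryStore()
//	bc, err := NewBlockchain(store, cfg.ProtocolConfiguration)
//	if err != nil {
//		// handle the error
//	}
//	log.Infof("chain starts at height %d", bc.BlockHeight())
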
// NewBlockchainLevelDB returns a Blockchain backed by a LevelDB store created
// from the given configuration.
func NewBlockchainLevelDB(cfg config.Config) (*Blockchain, error) {
	store, err := storage.NewLevelDBStore(
		cfg.ApplicationConfiguration.DataDirectoryPath,
		nil,
	)
	if err != nil {
		return nil, err
	}

	return NewBlockchain(store, cfg.ProtocolConfiguration)
}

func (bc *Blockchain) init() error {
	genesisBlock, err := createGenesisBlock(bc.config)
	if err != nil {
		return err
	}
	bc.headerList = NewHeaderHashList(genesisBlock.Hash())

	// If we could not find the version in the Store, we know that there is nothing stored.
	ver, err := storage.Version(bc.Store)
	if err != nil {
		log.Infof("no storage version found! creating genesis block")
		if err = storage.PutVersion(bc.Store, version); err != nil {
			return err
		}
		return bc.persistBlock(genesisBlock)
	}
	if ver != version {
		return fmt.Errorf("storage version mismatch between %s and %s", version, ver)
	}

	// At this point the stored version matches ours, which means the storage
	// already holds a chain; restore the in-memory state from it.
	log.Infof("restoring blockchain with version: %s", version)

	bHeight, err := storage.CurrentBlockHeight(bc.Store)
	if err != nil {
		return err
	}
	bc.blockHeight = bHeight

	hashes, err := storage.HeaderHashes(bc.Store)
	if err != nil {
		return err
	}

	for _, hash := range hashes {
		if !genesisBlock.Hash().Equals(hash) {
			bc.headerList.Add(hash)
			bc.storedHeaderCount++
		}
	}

	currHeaderHeight, currHeaderHash, err := storage.CurrentHeaderHeight(bc.Store)
	if err != nil {
		return err
	}

	// The node may well have been stopped before the next batch of 2000
	// headers was persisted. Use the stored current header to sync the
	// header list with the headers already on disk.
	if currHeaderHeight > bc.storedHeaderCount {
		hash := currHeaderHash
		targetHash := bc.headerList.Get(bc.headerList.Len() - 1)
		headers := make([]*Header, 0)

		for hash != targetHash {
			header, err := bc.getHeader(hash)
			if err != nil {
				return fmt.Errorf("could not get header %s: %s", hash, err)
			}
			headers = append(headers, header)
			hash = header.PrevHash
		}

		headerSliceReverse(headers)
		if err := bc.AddHeaders(headers...); err != nil {
			return err
		}
	}

	return nil
}

func (bc *Blockchain) run() {
	persistTimer := time.NewTimer(persistInterval)
	for {
		select {
		case op := <-bc.headersOp:
			op(bc.headerList)
			bc.headersOpDone <- struct{}{}
		case <-persistTimer.C:
			go bc.persist()
			persistTimer.Reset(persistInterval)
		}
	}
}

// AddBlock processes the given block and will add it to the cache so it
// can be persisted.
func (bc *Blockchain) AddBlock(block *Block) error {
	if !bc.blockCache.Has(block.Hash()) {
		bc.blockCache.Add(block.Hash(), block)
	}

	headerLen := bc.headerListLen()
	if int(block.Index-1) >= headerLen {
		return nil
	}
	if int(block.Index) == headerLen {
		if bc.verifyBlocks && !block.Verify(false) {
			return fmt.Errorf("block %s is invalid", block.Hash())
		}
		return bc.AddHeaders(block.Header())
	}
	return nil
}

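// Block flow in this file, as a sketch (no new API, just the call order):
//
//	bc.AddBlock(block)      // cache the block; append its header if it is the next one
//	                        // ...persist() fires every persistInterval...
//	bc.persistBlock(block)  // write the block and its state changes in one batch
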
// AddHeaders will process the given headers and add them to the
// HeaderHashList.
func (bc *Blockchain) AddHeaders(headers ...*Header) (err error) {
	var (
		start = time.Now()
		batch = bc.Batch()
	)

	bc.headersOp <- func(headerList *HeaderHashList) {
		for _, h := range headers {
			if int(h.Index-1) >= headerList.Len() {
				err = fmt.Errorf(
					"height of received header %d is higher than the current header %d",
					h.Index, headerList.Len(),
				)
				return
			}
			if int(h.Index) < headerList.Len() {
				continue
			}
			if !h.Verify() {
				err = fmt.Errorf("header %v is invalid", h)
				return
			}
			if err = bc.processHeader(h, batch, headerList); err != nil {
				return
			}
		}

		if batch.Len() > 0 {
			if err = bc.PutBatch(batch); err != nil {
				return
			}
			log.WithFields(log.Fields{
				"headerIndex": headerList.Len() - 1,
				"blockHeight": bc.BlockHeight(),
				"took":        time.Since(start),
			}).Debug("done processing headers")
		}
	}
	<-bc.headersOpDone
	return err
}

// processHeader processes the given header. Note that this is only thread safe
// if executed within a headers operation.
func (bc *Blockchain) processHeader(h *Header, batch storage.Batch, headerList *HeaderHashList) error {
	headerList.Add(h.Hash())

	buf := new(bytes.Buffer)
	for int(h.Index)-headerBatchCount >= int(bc.storedHeaderCount) {
		if err := headerList.Write(buf, int(bc.storedHeaderCount), headerBatchCount); err != nil {
			return err
		}
		key := storage.AppendPrefixInt(storage.IXHeaderHashList, int(bc.storedHeaderCount))
		batch.Put(key, buf.Bytes())
		bc.storedHeaderCount += headerBatchCount
		buf.Reset()
	}

	buf.Reset()
	if err := h.EncodeBinary(buf); err != nil {
		return err
	}

	key := storage.AppendPrefix(storage.DataBlock, h.Hash().BytesReverse())
	batch.Put(key, buf.Bytes())
	batch.Put(storage.SYSCurrentHeader.Bytes(), hashAndIndexToBytes(h.Hash(), h.Index))

	return nil
}

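// Storage layout written by processHeader, as a sketch derived from the code
// above (not a new API): header hashes are flushed in batches of
// headerBatchCount (2000) under keys built from the batch's starting index,
//
//	storage.AppendPrefixInt(storage.IXHeaderHashList, 0)    // hashes 0..1999
//	storage.AppendPrefixInt(storage.IXHeaderHashList, 2000) // hashes 2000..3999
//
// while each header body goes under DataBlock plus the reversed header hash,
// and SYSCurrentHeader always points at the latest header hash and index.
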
// TODO: persistBlock needs some more love; it's implemented as in the original
// project for the sake of development speed and of understanding what is
// happening here (which is quite a lot, as you can see :)). Once things are
// wired together and all tests are in place, we can make a more optimized and
// cleaner implementation.
func (bc *Blockchain) persistBlock(block *Block) error {
	var (
		batch        = bc.Batch()
		unspentCoins = make(UnspentCoins)
		spentCoins   = make(SpentCoins)
		accounts     = make(Accounts)
		assets       = make(Assets)
	)

	if err := storeAsBlock(batch, block, 0); err != nil {
		return err
	}

	storeAsCurrentBlock(batch, block)

	for _, tx := range block.Transactions {
		if err := storeAsTransaction(batch, tx, block.Index); err != nil {
			return err
		}

		unspentCoins[tx.Hash()] = NewUnspentCoinState(len(tx.Outputs))

		// Process TX outputs.
		for _, output := range tx.Outputs {
			account, err := accounts.getAndUpdate(bc.Store, output.ScriptHash)
			if err != nil {
				return err
			}
			if _, ok := account.Balances[output.AssetID]; ok {
				account.Balances[output.AssetID] += output.Amount
			} else {
				account.Balances[output.AssetID] = output.Amount
			}
		}

		// Process TX inputs that are grouped by previous hash.
		for prevHash, inputs := range tx.GroupInputsByPrevHash() {
			prevTX, prevTXHeight, err := bc.GetTransaction(prevHash)
			if err != nil {
				return fmt.Errorf("could not find previous TX: %s", prevHash)
			}
			for _, input := range inputs {
				unspent, err := unspentCoins.getAndUpdate(bc.Store, input.PrevHash)
				if err != nil {
					return err
				}
				unspent.states[input.PrevIndex] = CoinStateSpent

				prevTXOutput := prevTX.Outputs[input.PrevIndex]
				account, err := accounts.getAndUpdate(bc.Store, prevTXOutput.ScriptHash)
				if err != nil {
					return err
				}

				if prevTXOutput.AssetID.Equals(governingTokenTX().Hash()) {
					spentCoin := NewSpentCoinState(input.PrevHash, prevTXHeight)
					spentCoin.items[input.PrevIndex] = block.Index
					spentCoins[input.PrevHash] = spentCoin
				}

				account.Balances[prevTXOutput.AssetID] -= prevTXOutput.Amount
			}
		}

		// Process the underlying type of the TX.
		switch t := tx.Data.(type) {
		case *transaction.RegisterTX:
			assets[tx.Hash()] = &AssetState{
				ID:        tx.Hash(),
				AssetType: t.AssetType,
				Name:      t.Name,
				Amount:    t.Amount,
				Precision: t.Precision,
				Owner:     t.Owner,
				Admin:     t.Admin,
			}
		case *transaction.IssueTX:
		case *transaction.ClaimTX:
		case *transaction.EnrollmentTX:
		case *transaction.StateTX:
		case *transaction.PublishTX:
			contract := &ContractState{
				Script:      t.Script,
				ParamList:   t.ParamList,
				ReturnType:  t.ReturnType,
				HasStorage:  t.NeedStorage,
				Name:        t.Name,
				CodeVersion: t.CodeVersion,
				Author:      t.Author,
				Email:       t.Email,
				Description: t.Description,
			}

			fmt.Printf("%+v", contract)

		case *transaction.InvocationTX:
		}
	}

	// Persist all to storage.
	if err := accounts.commit(batch); err != nil {
		return err
	}
	if err := unspentCoins.commit(batch); err != nil {
		return err
	}
	if err := spentCoins.commit(batch); err != nil {
		return err
	}
	if err := assets.commit(batch); err != nil {
		return err
	}
	if err := bc.PutBatch(batch); err != nil {
		return err
	}

	atomic.StoreUint32(&bc.blockHeight, block.Index)
	return nil
}

func (bc *Blockchain) persist() (err error) {
	var (
		start     = time.Now()
		persisted = 0
		lenCache  = bc.blockCache.Len()
	)

	if lenCache == 0 {
		return nil
	}

	bc.headersOp <- func(headerList *HeaderHashList) {
		for i := 0; i < lenCache; i++ {
			if uint32(headerList.Len()) <= bc.BlockHeight() {
				return
			}
			hash := headerList.Get(int(bc.BlockHeight() + 1))
			if block, ok := bc.blockCache.GetBlock(hash); ok {
				if err = bc.persistBlock(block); err != nil {
					log.Warnf("failed to persist blocks: %s", err)
					return
				}
				bc.blockCache.Delete(hash)
				persisted++
			}
		}
	}
	<-bc.headersOpDone

	if persisted > 0 {
		log.WithFields(log.Fields{
			"persisted":    persisted,
			"headerHeight": bc.HeaderHeight(),
			"blockHeight":  bc.BlockHeight(),
			"took":         time.Since(start),
		}).Info("blockchain persist completed")
	}

	return
}

func (bc *Blockchain) headerListLen() (n int) {
	bc.headersOp <- func(headerList *HeaderHashList) {
		n = headerList.Len()
	}
	<-bc.headersOpDone
	return
}

// GetTransaction returns a TX and its height by the given hash.
func (bc *Blockchain) GetTransaction(hash util.Uint256) (*transaction.Transaction, uint32, error) {
	key := storage.AppendPrefix(storage.DataTransaction, hash.BytesReverse())
	b, err := bc.Get(key)
	if err != nil {
		return nil, 0, err
	}
	r := bytes.NewReader(b)

	var height uint32
	if err := binary.Read(r, binary.LittleEndian, &height); err != nil {
		return nil, 0, err
	}

	tx := &transaction.Transaction{}
	if err := tx.DecodeBinary(r); err != nil {
		return nil, 0, err
	}
	return tx, height, nil
}

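// The transaction record read above is a 4 byte little-endian block height
// followed by the serialized transaction. A write-side sketch under that
// assumption (storeAsTransaction in this package presumably does the
// equivalent; tx.EncodeBinary is assumed to mirror DecodeBinary):
//
//	buf := new(bytes.Buffer)
//	binary.Write(buf, binary.LittleEndian, blockIndex)
//	tx.EncodeBinary(buf)
//	batch.Put(storage.AppendPrefix(storage.DataTransaction, tx.Hash().BytesReverse()), buf.Bytes())
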
// GetBlock returns a Block by the given hash.
func (bc *Blockchain) GetBlock(hash util.Uint256) (*Block, error) {
	key := storage.AppendPrefix(storage.DataBlock, hash.BytesReverse())
	b, err := bc.Get(key)
	if err != nil {
		return nil, err
	}
	block, err := NewBlockFromTrimmedBytes(b)
	if err != nil {
		return nil, err
	}
	// TODO: persist TX first before we can handle this logic.
	// if len(block.Transactions) == 0 {
	// 	return nil, fmt.Errorf("block has no TX")
	// }
	return block, nil
}

// getHeader returns the block header for the given hash.
func (bc *Blockchain) getHeader(hash util.Uint256) (*Header, error) {
	b, err := bc.Get(storage.AppendPrefix(storage.DataBlock, hash.BytesReverse()))
	if err != nil {
		return nil, err
	}
	block, err := NewBlockFromTrimmedBytes(b)
	if err != nil {
		return nil, err
	}
	return block.Header(), nil
}

// HasTransaction returns true if the blockchain contains the given
// transaction hash.
func (bc *Blockchain) HasTransaction(hash util.Uint256) bool {
	return false
}

// HasBlock returns true if the blockchain contains the given
// block hash.
func (bc *Blockchain) HasBlock(hash util.Uint256) bool {
	if header, err := bc.getHeader(hash); err == nil {
		return header.Index <= bc.BlockHeight()
	}
	return false
}

// CurrentBlockHash returns the highest processed block hash.
func (bc *Blockchain) CurrentBlockHash() (hash util.Uint256) {
	bc.headersOp <- func(headerList *HeaderHashList) {
		hash = headerList.Get(int(bc.BlockHeight()))
	}
	<-bc.headersOpDone
	return
}

// CurrentHeaderHash returns the hash of the latest known header.
func (bc *Blockchain) CurrentHeaderHash() (hash util.Uint256) {
	bc.headersOp <- func(headerList *HeaderHashList) {
		hash = headerList.Last()
	}
	<-bc.headersOpDone
	return
}

// GetHeaderHash returns the hash from the headerList by its
// height/index.
func (bc *Blockchain) GetHeaderHash(i int) (hash util.Uint256) {
	bc.headersOp <- func(headerList *HeaderHashList) {
		hash = headerList.Get(i)
	}
	<-bc.headersOpDone
	return
}

// BlockHeight returns the height/index of the highest block.
func (bc *Blockchain) BlockHeight() uint32 {
	return atomic.LoadUint32(&bc.blockHeight)
}

// HeaderHeight returns the index/height of the highest header.
func (bc *Blockchain) HeaderHeight() uint32 {
	return uint32(bc.headerListLen() - 1)
}

// GetAssetState returns the asset state stored under the given asset ID,
// or nil if it cannot be found.
func (bc *Blockchain) GetAssetState(assetID util.Uint256) *AssetState {
	var as *AssetState
	bc.Store.Seek(storage.STAsset.Bytes(), func(k, v []byte) {
		var a AssetState
		a.DecodeBinary(bytes.NewReader(v))
		if a.ID == assetID {
			as = &a
		}
	})

	return as
}

func hashAndIndexToBytes(h util.Uint256, index uint32) []byte {
	buf := make([]byte, 4)
	binary.LittleEndian.PutUint32(buf, index)
	return append(h.BytesReverse(), buf...)
}
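
// Decoding sketch for the value built above (illustration only, not a helper
// the rest of the code uses): the trailing 4 bytes are the little-endian
// index and the leading 32 bytes are the hash in reversed byte order.
//
//	index := binary.LittleEndian.Uint32(b[len(b)-4:])
//	reversedHash := b[:len(b)-4] // compare against Hash().BytesReverse()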