package core

import (
	"bytes"
	"encoding/binary"
	"log"
	"sync"
	"time"

	"github.com/CityOfZion/neo-go/pkg/util"
)

// tuning parameters
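// secondsPerBlock is the target time between blocks; writeHdrBatchCnt is the
// number of header hashes collected before they are flushed to storage in one
// batch (see processHeader below).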
const (
	secondsPerBlock  = 15
	writeHdrBatchCnt = 2000
)

var (
	genAmount = []int{8, 7, 6, 5, 4, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
)

// Blockchain holds the chain.
type Blockchain struct {
	logger *log.Logger

	// Any object that satisfies the BlockchainStorer interface.
	Store

	// Current index of the highest block.
	currentBlockHeight uint32

	// Number of headers stored.
	storedHeaderCount uint32

	mtx sync.RWMutex

	// Index of header hashes.
	headerIndex []util.Uint256
}

// NewBlockchain returns a pointer to a Blockchain.
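//
// A minimal usage sketch, assuming a Store implementation and a known start
// hash are already available (illustrative only, not part of the original
// code):
//
//	l := log.New(os.Stdout, "", log.LstdFlags)
//	chain := NewBlockchain(store, l, startHash)
//	if err := chain.AddHeaders(headers...); err != nil {
//		l.Fatal(err)
//	}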
func NewBlockchain(s Store, l *log.Logger, startHash util.Uint256) *Blockchain {
	bc := &Blockchain{
		logger: l,
		Store:  s,
	}

	// A zero start hash means the genesis block needs to be created,
	// which is not implemented yet.
	if startHash.Equals(util.Uint256{}) {
		bc.logger.Fatal("genesis block not yet implemented")
	}

	bc.headerIndex = []util.Uint256{startHash}

	return bc
}

// genesisBlock creates the genesis block for the chain.
// Hash of the genesis block:
// d42561e3d30e15be6400b6df2f328e02d2bf6354c41dce433bc57687c82144bf
func (bc *Blockchain) genesisBlock() *Block {
	timestamp := uint32(time.Date(2016, 7, 15, 15, 8, 21, 0, time.UTC).Unix())

	// TODO: for testing I will hardcode the merkle root.
	// This lets me focus on bringing all the puzzle pieces
	// together much faster.
	// For more information about the genesis block:
	// https://neotracker.io/block/height/0
	mr, _ := util.Uint256DecodeFromString("803ff4abe3ea6533bcc0be574efa02f83ae8fdc651c879056b0d9be336c01bf4")

	return &Block{
		BlockBase: BlockBase{
			Version:       0,
			PrevHash:      util.Uint256{},
			MerkleRoot:    mr,
			Timestamp:     timestamp,
			Index:         0,
			ConsensusData: 2083236893,     // nioctib ^^
			NextConsensus: util.Uint160{}, // todo
		},
	}
}

// AddBlock adds the given block to the chain (to be completed once header
// processing is finished).
func (bc *Blockchain) AddBlock(block *Block) error {
	// TODO: caching
	headerLen := len(bc.headerIndex)

	if int(block.Index-1) >= headerLen {
		return nil
	}

	if int(block.Index) == headerLen {
		// todo: if (VerifyBlocks && !block.Verify()) return false;
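		//
		// A hedged Go sketch of that C# check; the verifyBlocks flag and the
		// block.Verify method don't exist in this codebase yet and are
		// assumptions:
		//
		//	if bc.verifyBlocks && !block.Verify() {
		//		return fmt.Errorf("block %d failed verification", block.Index)
		//	}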
	}

	if int(block.Index) < headerLen {
		return nil
	}

	return nil
}

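// addHeader processes a single header; it is a convenience wrapper around
// AddHeaders.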
func (bc *Blockchain) addHeader(header *Header) error {
	return bc.AddHeaders(header)
}

// AddHeaders processes the given headers.
func (bc *Blockchain) AddHeaders(headers ...*Header) error {
	start := time.Now()

	bc.mtx.Lock()
	defer bc.mtx.Unlock()

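	// Collect all storage writes for this call in a single batch so they can
	// be flushed together once every header has been processed.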
	batch := Batch{}
	for _, h := range headers {
		if int(h.Index-1) >= len(bc.headerIndex) {
			bc.logger.Printf("height of block higher than header index %d %d\n",
				h.Index, len(bc.headerIndex))
			break
		}
		if int(h.Index) < len(bc.headerIndex) {
			continue
		}
		if !h.Verify() {
			bc.logger.Printf("header %v is invalid", h)
			break
		}
		if err := bc.processHeader(h, batch); err != nil {
			return err
		}
	}

	// TODO: Implement caching strategy.
	if len(batch) > 0 {
		// Write all batches.
		if err := bc.writeBatch(batch); err != nil {
			return err
		}

		bc.logger.Printf("done processing headers up to index %d took %f seconds",
			bc.HeaderHeight(), time.Since(start).Seconds())
	}

	return nil
}

// processHeader processes a single header.
func (bc *Blockchain) processHeader(h *Header, batch Batch) error {
	hash, err := h.Hash()
	if err != nil {
		return err
	}
	bc.headerIndex = append(bc.headerIndex, hash)

	for int(h.Index)-writeHdrBatchCnt >= int(bc.storedHeaderCount) {
		// hdrsToWrite = bc.headerIndex[bc.storedHeaderCount : bc.storedHeaderCount+writeHdrBatchCnt]

		// NOTE: from the original C# implementation, still to be ported:
		//
		// w.Write(header_index.Skip((int)stored_header_count).Take(2000).ToArray());
		// w.Flush();
		// batch.Put(SliceBuilder.Begin(DataEntryPrefix.IX_HeaderHashList).Add(stored_header_count), ms.ToArray());
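		//
		// A rough Go sketch of that snippet, assuming a hypothetical
		// preIXHeaderHashList key prefix with the same add helper used below;
		// left as a comment until that prefix actually exists:
		//
		//	hashes := bc.headerIndex[bc.storedHeaderCount : bc.storedHeaderCount+writeHdrBatchCnt]
		//	hashBuf := new(bytes.Buffer)
		//	for _, hh := range hashes {
		//		hashBuf.Write(hh.ToSliceReverse())
		//	}
		//	idx := make([]byte, 4)
		//	binary.LittleEndian.PutUint32(idx, bc.storedHeaderCount)
		//	key := preIXHeaderHashList.add(idx)
		//	batch[&key] = hashBuf.Bytes()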
		bc.storedHeaderCount += writeHdrBatchCnt
	}

	buf := new(bytes.Buffer)
	if err := h.EncodeBinary(buf); err != nil {
		return err
	}

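	// Queue the serialized header under its block-data key (prefix plus the
	// hash in reverse byte order) and update the current-header entry with
	// this hash and index.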
	preBlock := preDataBlock.add(hash.ToSliceReverse())
	batch[&preBlock] = buf.Bytes()
	preHeader := preSYSCurrentHeader.toSlice()
	batch[&preHeader] = hashAndIndexToBytes(hash, h.Index)

	return nil
}

// CurrentBlockHash returns the latest hash in the header index.
func (bc *Blockchain) CurrentBlockHash() (hash util.Uint256) {
	if len(bc.headerIndex) == 0 {
		return
	}
	if len(bc.headerIndex) <= int(bc.currentBlockHeight) {
		return
	}

	return bc.headerIndex[bc.currentBlockHeight]
}

// CurrentHeaderHash returns the hash of the latest known header.
func (bc *Blockchain) CurrentHeaderHash() (hash util.Uint256) {
	return bc.headerIndex[len(bc.headerIndex)-1]
}

// BlockHeight returns the height/index of the latest block this node has.
func (bc *Blockchain) BlockHeight() uint32 {
	return bc.currentBlockHeight
}

// HeaderHeight returns the current index of the headers.
func (bc *Blockchain) HeaderHeight() uint32 {
	return uint32(len(bc.headerIndex)) - 1
}

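// hashAndIndexToBytes serializes the hash in reverse byte order followed by
// the little-endian encoded index; this is the value layout used for the
// current-header entry.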
func hashAndIndexToBytes(h util.Uint256, index uint32) []byte {
	buf := make([]byte, 4)
	binary.LittleEndian.PutUint32(buf, index)
	return append(h.ToSliceReverse(), buf...)
}