package block

import (
	"encoding/json"
	"errors"
	"math"

	"github.com/Workiva/go-datastructures/queue"
	"github.com/nspcc-dev/neo-go/pkg/config/netmode"
	"github.com/nspcc-dev/neo-go/pkg/core/transaction"
	"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
	"github.com/nspcc-dev/neo-go/pkg/io"
	"github.com/nspcc-dev/neo-go/pkg/util"
)

const (
	// MaxContentsPerBlock is the maximum number of contents (transactions + consensus data) per block.
	MaxContentsPerBlock = math.MaxUint16
	// MaxTransactionsPerBlock is the maximum number of transactions per block.
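	// One content slot is always taken by the block's ConsensusData, hence the -1.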
	MaxTransactionsPerBlock = MaxContentsPerBlock - 1
)

// ErrMaxContentsPerBlock is returned when the maximum number of contents per block is reached.
var ErrMaxContentsPerBlock = errors.New("the number of contents exceeds the maximum number of contents per block")

// Block represents one block in the chain.
type Block struct {
	// The base of the block.
	Base

	// Primary index and nonce
	ConsensusData ConsensusData `json:"consensusdata"`

	// Transaction list.
	Transactions []*transaction.Transaction

	// True if this block is created from trimmed data.
	Trimmed bool
}

// auxBlockOut is used for JSON i/o.
type auxBlockOut struct {
	ConsensusData ConsensusData              `json:"consensusdata"`
	Transactions  []*transaction.Transaction `json:"tx"`
}

// auxBlockIn is used for JSON i/o.
type auxBlockIn struct {
	ConsensusData ConsensusData     `json:"consensusdata"`
	Transactions  []json.RawMessage `json:"tx"`
}

// Header returns the Header of the Block.
func (b *Block) Header() *Header {
	return &Header{
		Base: b.Base,
	}
}

// ComputeMerkleRoot computes Merkle tree root hash based on actual block's data.
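// The tree leaves are the ConsensusData hash followed by the hashes of all
// transactions, in block order.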
func (b *Block) ComputeMerkleRoot() util.Uint256 {
	hashes := make([]util.Uint256, len(b.Transactions)+1)
	hashes[0] = b.ConsensusData.Hash()
	for i, tx := range b.Transactions {
		hashes[i+1] = tx.Hash()
	}

	return hash.CalcMerkleRoot(hashes)
}

// RebuildMerkleRoot rebuilds the MerkleRoot of the block.
func (b *Block) RebuildMerkleRoot() {
	b.MerkleRoot = b.ComputeMerkleRoot()
}

// NewBlockFromTrimmedBytes returns a new block from trimmed data.
// This is commonly used to create a block from stored data.
// Blocks created from trimmed data will have their Trimmed field
// set to true.
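// The expected layout mirrors what Trim produces: the hashable header fields,
// one extra byte (ignored here), the Script, a varuint content count, the
// ConsensusData hash and one hash per transaction, and finally the full
// ConsensusData.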
func NewBlockFromTrimmedBytes(network netmode.Magic, stateRootEnabled bool, b []byte) (*Block, error) {
	block := &Block{
		Base: Base{
			Network:          network,
			StateRootEnabled: stateRootEnabled,
		},
		Trimmed: true,
	}

	br := io.NewBinReaderFromBuf(b)
	block.decodeHashableFields(br)

	_ = br.ReadB() // the extra byte written by Trim, not needed here

	block.Script.DecodeBinary(br)

	lenHashes := br.ReadVarUint()
	if lenHashes > MaxContentsPerBlock {
		return nil, ErrMaxContentsPerBlock
	}
	if lenHashes > 0 {
		var consensusDataHash util.Uint256
		consensusDataHash.DecodeBinary(br)
		lenTX := lenHashes - 1
		block.Transactions = make([]*transaction.Transaction, lenTX)
		for i := 0; i < int(lenTX); i++ {
			var hash util.Uint256
			hash.DecodeBinary(br)
			block.Transactions[i] = transaction.NewTrimmedTX(hash)
		}
		block.ConsensusData.DecodeBinary(br)
	}

	return block, br.Err
}

// New creates a new blank block tied to the specific network.
func New(network netmode.Magic, stateRootEnabled bool) *Block {
	return &Block{
		Base: Base{
			Network:          network,
			StateRootEnabled: stateRootEnabled,
		},
	}
}

// Trim returns a subset of the block data to save space in storage.
// Notice that only the hashes of the transactions are stored.
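//
// A trimmed block can later be restored (hashes only) via
// NewBlockFromTrimmedBytes, roughly like this:
//
//	data, err := b.Trim()
//	if err != nil {
//		return err
//	}
//	restored, err := NewBlockFromTrimmedBytes(b.Network, b.StateRootEnabled, data)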
func (b *Block) Trim() ([]byte, error) {
	buf := io.NewBufBinWriter()
	b.encodeHashableFields(buf.BinWriter)
	buf.WriteB(1) // the extra byte skipped by NewBlockFromTrimmedBytes
	b.Script.EncodeBinary(buf.BinWriter)

	buf.WriteVarUint(uint64(len(b.Transactions)) + 1)
	hash := b.ConsensusData.Hash()
	hash.EncodeBinary(buf.BinWriter)

	for _, tx := range b.Transactions {
		h := tx.Hash()
		h.EncodeBinary(buf.BinWriter)
	}

	b.ConsensusData.EncodeBinary(buf.BinWriter)
	if buf.Err != nil {
		return nil, buf.Err
	}

	return buf.Bytes(), nil
}

// DecodeBinary decodes the block from the given BinReader, implementing
// Serializable interface.
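//
// The wire format is Base followed by a varuint content count (ConsensusData
// plus transactions), the ConsensusData itself and then the transactions.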
func (b *Block) DecodeBinary(br *io.BinReader) {
	b.Base.DecodeBinary(br)
	contentsCount := br.ReadVarUint()
	if contentsCount == 0 {
		br.Err = errors.New("invalid block format")
		return
	}
	if contentsCount > MaxContentsPerBlock {
		br.Err = ErrMaxContentsPerBlock
		return
	}
	b.ConsensusData.DecodeBinary(br)
	txes := make([]*transaction.Transaction, contentsCount-1)
	for i := 0; i < int(contentsCount)-1; i++ {
		tx := &transaction.Transaction{Network: b.Network}
		tx.DecodeBinary(br)
		txes[i] = tx
	}
	b.Transactions = txes
	if br.Err != nil {
		return
	}
}

// EncodeBinary encodes the block to the given BinWriter, implementing
// Serializable interface.
func (b *Block) EncodeBinary(bw *io.BinWriter) {
	b.Base.EncodeBinary(bw)
	bw.WriteVarUint(uint64(len(b.Transactions) + 1))
	b.ConsensusData.EncodeBinary(bw)
	for i := 0; i < len(b.Transactions); i++ {
		b.Transactions[i].EncodeBinary(bw)
	}
}

// Compare implements the queue Item interface.
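// Blocks are ordered by their Index.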
func (b *Block) Compare(item queue.Item) int {
	other := item.(*Block)
	switch {
	case b.Index > other.Index:
		return 1
	case b.Index == other.Index:
		return 0
	default:
		return -1
	}
}

// MarshalJSON implements json.Marshaler interface.
func (b Block) MarshalJSON() ([]byte, error) {
	auxb, err := json.Marshal(auxBlockOut{
		ConsensusData: b.ConsensusData,
		Transactions:  b.Transactions,
	})
	if err != nil {
		return nil, err
	}
	baseBytes, err := json.Marshal(b.Base)
	if err != nil {
		return nil, err
	}

	// Stitch them together.
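	// Base marshals to a flat JSON object, so replace its closing brace with
	// a comma and splice in the auxiliary fields (consensusdata, tx).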
	if baseBytes[len(baseBytes)-1] != '}' || auxb[0] != '{' {
		return nil, errors.New("can't merge internal jsons")
	}
	baseBytes[len(baseBytes)-1] = ','
	baseBytes = append(baseBytes, auxb[1:]...)
	return baseBytes, nil
}

// UnmarshalJSON implements json.Unmarshaler interface.
func (b *Block) UnmarshalJSON(data []byte) error {
	// As Base and auxb fields are at the same level in JSON,
	// unmarshal both structs separately.
	auxb := new(auxBlockIn)
	err := json.Unmarshal(data, auxb)
	if err != nil {
		return err
	}
	err = json.Unmarshal(data, &b.Base)
	if err != nil {
		return err
	}
	if len(auxb.Transactions) != 0 {
		b.Transactions = make([]*transaction.Transaction, 0, len(auxb.Transactions))
		for _, txBytes := range auxb.Transactions {
			tx := &transaction.Transaction{Network: b.Network}
			err = tx.UnmarshalJSON(txBytes)
			if err != nil {
				return err
			}
			b.Transactions = append(b.Transactions, tx)
		}
	}
	b.ConsensusData = auxb.ConsensusData
	// Some tests rely on hash presence and we're usually precomputing
	// other hashes upon deserialization.
	_ = b.ConsensusData.Hash()
	return nil
}