Merge pull request #1291 from nspcc-dev/stateroot-relaying-during-commit-2.x
Stateroot relaying during commit 2.x
Commit d8aac7c675

17 changed files with 197 additions and 144 deletions
```diff
@@ -2,6 +2,8 @@ ProtocolConfiguration:
   Magic: 7630401
   AddressVersion: 23
   SecondsPerBlock: 15
+  EnableStateRoot: true
+  StateRootEnableIndex: 6016000
   LowPriorityThreshold: 0.001
   MemPoolSize: 50000
   StandbyValidators:
@@ -28,7 +30,7 @@ ProtocolConfiguration:
   IssueTransaction: 500
   PublishTransaction: 500
   RegisterTransaction: 10000
-  VerifyBlocks: true
+  VerifyBlocks: false
   VerifyTransactions: false
   FreeGasLimit: 10.0
   MaxTransactionsPerBlock: 500
@@ -40,7 +42,7 @@ ApplicationConfiguration:
   # LogPath could be set up in case you need stdout logs to some proper file.
   # LogPath: "./log/neogo.log"
   DBConfiguration:
-    Type: "leveldb" #other options: 'inmemory','redis','boltdb', 'badgerdb'.
+    Type: "boltdb" #other options: 'inmemory','redis','boltdb', 'badgerdb'.
     # DB type options. Uncomment those you need in case you want to switch DB type.
     LevelDBOptions:
      DataDirectoryPath: "./chains/mainnet"
@@ -48,10 +50,10 @@ ApplicationConfiguration:
     #  Addr: "localhost:6379"
     #  Password: ""
     #  DB: 0
-    # BoltDBOptions:
-    #  FilePath: "./chains/mainnet.bolt"
-    # BadgerDBOptions:
-    #  BadgerDir: "./chains/mainnet.badger"
+    BoltDBOptions:
+      FilePath: "./chains/mainnet.bolt"
+    BadgerDBOptions:
+      BadgerDir: "./chains/mainnet.badger"
   # Uncomment in order to set up custom address for node.
   # Address: 127.0.0.1
   NodePort: 10333
```
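The two new protocol options gate the MPT state root machinery: nothing changes for blocks below `StateRootEnableIndex`. As a minimal sketch of how such options map onto a Go struct (assuming `gopkg.in/yaml.v2` for parsing and a hypothetical, trimmed-down `ProtocolConfiguration` type; the real neo-go config has many more fields):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2" // assumption: yaml.v2 is what is used for config parsing
)

// ProtocolConfiguration is a hypothetical, trimmed-down view of the config
// file above; only the fields relevant to state roots are kept.
type ProtocolConfiguration struct {
	Magic                uint32 `yaml:"Magic"`
	EnableStateRoot      bool   `yaml:"EnableStateRoot"`
	StateRootEnableIndex uint32 `yaml:"StateRootEnableIndex"`
}

func main() {
	raw := []byte(`
Magic: 7630401
EnableStateRoot: true
StateRootEnableIndex: 6016000
`)
	var cfg ProtocolConfiguration
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	// State root exchange only kicks in once the chain reaches the
	// configured index, so history below it is unaffected.
	fmt.Println(cfg.EnableStateRoot, cfg.StateRootEnableIndex)
}
```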
docs/neox.md (29 changed lines)
```diff
@@ -6,8 +6,9 @@ with two configuration options.

 ## What is neox

-Neox is an extension of original Neo 2 node implemented in neox-2.x branch of
-C# implementation. It includes the following main changes:
+Neox is an extension of original Neo 2 node originally implemented in neox-2.x
+branch of C# implementation (and then presented in the 2.11.0 official
+release). It includes the following main changes:
 * local state root generation for contract storages based on MPT
 * consensus updates for state root exchange between CNs and generation of
   verified (signed by CNs) state root
@@ -43,22 +44,16 @@ entities which are consensus nodes.

 ### How and why consensus process was changed in neox

-Consensus nodes now exchange state root information with PrepareRequest
-messages, so the Primary node tells everyone its current state root hash
-(along with the block index that state root corresponds to) and the hash of
-the previous state root message. This data might also be versioned in case of
-future updates, so there is a special field reserved for that too, but at the
-moment it's always 0. Backups either confirm this data (if it matches their
-local state) by proceeding with PrepareResponse or request a ChangeView if
-there is some mismatch detected.
+Consensus nodes now exchange state root signatures for height N-1 during
+consensus process for block N with PrepareRequest and PrepareResponse
+messages.

-If all goes well CNs generate a signature for this state root data and
-exchange it with their Commit messages (along with new block
-signatures). Effectively this creates another signed chain on the network that
-is always one block behind from the main chain because the process of block `N`
-creation confirms the state resulting from processing of block `N - 1`. A
-separate `stateroot` message is generated and sent along with the new block
-broadcast.
+If all goes well CNs collect enough signatures for this state root data and
+generate (and broadcast) a `stateroot` message along with regular Commit
+consensus messages. Effectively this creates another signed chain on the
+network that is always one block behind from the main chain because the
+process of block `N` creation confirms the state resulting from processing of
+block `N - 1`.

 ### How P2P protocol was changed

```
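To make the description above concrete, here is a small self-contained sketch of the idea (stdlib ECDSA over P-256; not neo-go's actual types or API): a CN signs its local state root for height N-1, ships the signature inside PrepareRequest/PrepareResponse, and a peer only accepts the proposal if the signature verifies against the sender's validator key and its own local state root.

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// stateRoot is a simplified stand-in for the MPT state root of block N-1:
// the height it belongs to plus the root hash itself.
type stateRoot struct {
	Index uint32
	Root  [32]byte
}

// signedPart mimics the role of GetSignedPart(): the bytes every CN signs.
func (s stateRoot) signedPart() []byte {
	buf := make([]byte, 4+32)
	binary.LittleEndian.PutUint32(buf, s.Index)
	copy(buf[4:], s.Root[:])
	return buf
}

func main() {
	// Validator key; real CNs use secp256r1 keys managed by the wallet.
	priv, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)

	sr := stateRoot{Index: 6016000, Root: sha256.Sum256([]byte("contract storage state"))}

	// CN side: sign the local state root for height N-1 and ship the
	// signature inside PrepareRequest/PrepareResponse.
	digest := sha256.Sum256(sr.signedPart())
	sig, _ := ecdsa.SignASN1(rand.Reader, priv, digest[:])

	// Peer side: recompute the digest from the *local* state root and check
	// the signature against the sender's validator public key. A mismatch
	// means the two nodes disagree on state and the proposal is rejected.
	ok := ecdsa.VerifyASN1(&priv.PublicKey, digest[:], sig)
	fmt.Println("state root signature valid:", ok)
}
```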
go.mod (2 changed lines)
```diff
@@ -9,7 +9,7 @@ require (
 	github.com/go-yaml/yaml v2.1.0+incompatible
 	github.com/gorilla/websocket v1.4.2
 	github.com/mr-tron/base58 v1.1.2
-	github.com/nspcc-dev/dbft v0.0.0-20200623100921-5a182c20965e
+	github.com/nspcc-dev/dbft v0.0.0-20200810081309-f40804dbf8a0
 	github.com/nspcc-dev/rfc6979 v0.2.0
 	github.com/pkg/errors v0.8.1
 	github.com/prometheus/client_golang v1.2.1
```
go.sum (4 changed lines)
```diff
@@ -156,8 +156,8 @@ github.com/nspcc-dev/dbft v0.0.0-20200117124306-478e5cfbf03a h1:ajvxgEe9qY4vvoSm
 github.com/nspcc-dev/dbft v0.0.0-20200117124306-478e5cfbf03a/go.mod h1:/YFK+XOxxg0Bfm6P92lY5eDSLYfp06XOdL8KAVgXjVk=
 github.com/nspcc-dev/dbft v0.0.0-20200219114139-199d286ed6c1 h1:yEx9WznS+rjE0jl0dLujCxuZSIb+UTjF+005TJu/nNI=
 github.com/nspcc-dev/dbft v0.0.0-20200219114139-199d286ed6c1/go.mod h1:O0qtn62prQSqizzoagHmuuKoz8QMkU3SzBoKdEvm3aQ=
-github.com/nspcc-dev/dbft v0.0.0-20200623100921-5a182c20965e h1:QOT9slflIkEKb5wY0ZUC0dCmCgoqGlhOAh9+xWMIxfg=
-github.com/nspcc-dev/dbft v0.0.0-20200623100921-5a182c20965e/go.mod h1:1FYQXSbb6/9HQIkoF8XO7W/S8N7AZRkBsgwbcXRvk0E=
+github.com/nspcc-dev/dbft v0.0.0-20200810081309-f40804dbf8a0 h1:4XrUJSvClcBQVZJQqI9EHW/kAIWcrycgTa5J0lBO3R8=
+github.com/nspcc-dev/dbft v0.0.0-20200810081309-f40804dbf8a0/go.mod h1:1FYQXSbb6/9HQIkoF8XO7W/S8N7AZRkBsgwbcXRvk0E=
 github.com/nspcc-dev/neo-go v0.73.1-pre.0.20200303142215-f5a1b928ce09/go.mod h1:pPYwPZ2ks+uMnlRLUyXOpLieaDQSEaf4NM3zHVbRjmg=
 github.com/nspcc-dev/neofs-crypto v0.2.0 h1:ftN+59WqxSWz/RCgXYOfhmltOOqU+udsNQSvN6wkFck=
 github.com/nspcc-dev/neofs-crypto v0.2.0/go.mod h1:F/96fUzPM3wR+UGsPi3faVNmFlA9KAEAUQR7dMxZmNA=
```
```diff
@@ -30,7 +30,13 @@ func (c changeView) NewViewNumber() byte { return c.newViewNumber }
 func (c *changeView) SetNewViewNumber(view byte) { c.newViewNumber = view }

 // Timestamp implements payload.ChangeView interface.
-func (c changeView) Timestamp() uint32 { return c.timestamp }
+func (c changeView) Timestamp() uint64 { return uint64(c.timestamp) * nanoInSec }

 // SetTimestamp implements payload.ChangeView interface.
-func (c *changeView) SetTimestamp(ts uint32) { c.timestamp = ts }
+func (c *changeView) SetTimestamp(ts uint64) { c.timestamp = uint32(ts / nanoInSec) }
+
+// Reason implements payload.ChangeView interface.
+func (c changeView) Reason() payload.ChangeViewReason { return payload.CVUnknown }
+
+// SetReason implements payload.ChangeView interface.
+func (c *changeView) SetReason(_ payload.ChangeViewReason) {}
```
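The dbft update that this PR pulls in switches payload timestamps from second-precision uint32 to nanosecond uint64, while the Neo 2 wire format keeps seconds; hence the nanoInSec conversions above and the `123 * nanoInSec` values in the tests below. A tiny sketch of the boundary conversion (nanoInSec as declared in message.go):

```go
package main

import "fmt"

// nanoInSec matches the constant added to message.go in this PR.
const nanoInSec = 1000_000_000

// The Neo 2 wire format keeps second-precision uint32 timestamps, while the
// updated dbft interfaces pass nanosecond uint64 values, so the payloads
// convert at the boundary in both directions.
func toWire(ns uint64) uint32    { return uint32(ns / nanoInSec) }
func fromWire(sec uint32) uint64 { return uint64(sec) * nanoInSec }

func main() {
	fmt.Println(fromWire(123))           // 123000000000
	fmt.Println(toWire(123 * nanoInSec)) // 123
}
```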
```diff
@@ -9,8 +9,8 @@ import (
 func TestChangeView_Setters(t *testing.T) {
 	var c changeView

-	c.SetTimestamp(123)
-	require.EqualValues(t, 123, c.Timestamp())
+	c.SetTimestamp(123 * nanoInSec)
+	require.EqualValues(t, 123*nanoInSec, c.Timestamp())

 	c.SetNewViewNumber(2)
 	require.EqualValues(t, 2, c.NewViewNumber())
```
```diff
@@ -8,9 +8,6 @@ import (
 // commit represents dBFT Commit message.
 type commit struct {
 	signature [signatureSize]byte
-	stateSig  [signatureSize]byte
-
-	stateRootEnabled bool
 }

 // signatureSize is an rfc6989 signature size in bytes
@@ -22,17 +19,11 @@ var _ payload.Commit = (*commit)(nil)
 // EncodeBinary implements io.Serializable interface.
 func (c *commit) EncodeBinary(w *io.BinWriter) {
 	w.WriteBytes(c.signature[:])
-	if c.stateRootEnabled {
-		w.WriteBytes(c.stateSig[:])
-	}
 }

 // DecodeBinary implements io.Serializable interface.
 func (c *commit) DecodeBinary(r *io.BinReader) {
 	r.ReadBytes(c.signature[:])
-	if c.stateRootEnabled {
-		r.ReadBytes(c.stateSig[:])
-	}
 }

 // Signature implements payload.Commit interface.
```
```diff
@@ -16,7 +16,6 @@ import (
 	coreb "github.com/nspcc-dev/neo-go/pkg/core/block"
 	"github.com/nspcc-dev/neo-go/pkg/core/cache"
 	"github.com/nspcc-dev/neo-go/pkg/core/mempool"
-	"github.com/nspcc-dev/neo-go/pkg/core/state"
 	"github.com/nspcc-dev/neo-go/pkg/core/transaction"
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
 	"github.com/nspcc-dev/neo-go/pkg/smartcontract"
```
```diff
@@ -136,7 +135,7 @@ func NewService(cfg Config) (Service, error) {
 		dbft.WithVerifyBlock(srv.verifyBlock),
 		dbft.WithGetBlock(srv.getBlock),
 		dbft.WithWatchOnly(func() bool { return false }),
-		dbft.WithNewBlockFromContext(newBlockFromContext),
+		dbft.WithNewBlockFromContext(srv.newBlockFromContext),
 		dbft.WithCurrentHeight(cfg.Chain.BlockHeight),
 		dbft.WithCurrentBlockHash(cfg.Chain.CurrentBlockHash),
 		dbft.WithGetValidators(srv.getValidators),
```
```diff
@@ -144,12 +143,13 @@ func NewService(cfg Config) (Service, error) {

 		dbft.WithNewConsensusPayload(srv.newPayload),
 		dbft.WithNewPrepareRequest(srv.newPrepareRequest),
-		dbft.WithNewPrepareResponse(func() payload.PrepareResponse { return new(prepareResponse) }),
+		dbft.WithNewPrepareResponse(srv.newPrepareResponse),
 		dbft.WithNewChangeView(func() payload.ChangeView { return new(changeView) }),
-		dbft.WithNewCommit(srv.newCommit),
+		dbft.WithNewCommit(func() payload.Commit { return new(commit) }),
 		dbft.WithNewRecoveryRequest(func() payload.RecoveryRequest { return new(recoveryRequest) }),
 		dbft.WithNewRecoveryMessage(srv.newRecoveryMessage),
 		dbft.WithVerifyPrepareRequest(srv.verifyRequest),
+		dbft.WithVerifyPrepareResponse(srv.verifyResponse),
 	)

 	if srv.dbft == nil {
```
```diff
@@ -235,36 +235,42 @@ func (s *service) stateRootEnabled() bool {
 }

-func (s *service) newPrepareRequest() payload.PrepareRequest {
-	if !s.stateRootEnabled() {
-		return new(prepareRequest)
-	}
-	sr, err := s.Chain.GetStateRoot(s.Chain.BlockHeight())
-	if err == nil {
-		return &prepareRequest{
-			stateRootEnabled:  true,
-			proposalStateRoot: sr.MPTRootBase,
-		}
-	}
-	return &prepareRequest{stateRootEnabled: true}
-}
+func (s *service) newPrepareRequest() payload.PrepareRequest {
+	res := &prepareRequest{
+		stateRootEnabled: s.stateRootEnabled(),
+	}
+	if !s.stateRootEnabled() {
+		return res
+	}
+	sig := s.getStateRootSig()
+	if sig != nil {
+		copy(res.stateRootSig[:], sig)
+	}
+	return res
+}

-func (s *service) newCommit() payload.Commit {
-	if !s.stateRootEnabled() {
-		return new(commit)
-	}
-	c := &commit{stateRootEnabled: true}
-	for _, p := range s.dbft.Context.PreparationPayloads {
-		if p != nil && p.ViewNumber() == s.dbft.ViewNumber && p.Type() == payload.PrepareRequestType {
-			pr := p.GetPrepareRequest().(*prepareRequest)
-			data := pr.proposalStateRoot.GetSignedPart()
-			sign, err := s.dbft.Priv.Sign(data)
-			if err == nil {
-				copy(c.stateSig[:], sign)
-			}
-			break
-		}
-	}
-	return c
-}
+func (s *service) getStateRootSig() []byte {
+	var sig []byte
+
+	sr, err := s.Chain.GetStateRoot(s.dbft.BlockIndex - 1)
+	if err == nil {
+		data := sr.GetSignedPart()
+		sig, _ = s.dbft.Priv.Sign(data)
+	}
+	return sig
+}
+
+func (s *service) newPrepareResponse() payload.PrepareResponse {
+	res := &prepareResponse{
+		stateRootEnabled: s.stateRootEnabled(),
+	}
+	if !s.stateRootEnabled() {
+		return res
+	}
+	sig := s.getStateRootSig()
+	if sig != nil {
+		copy(res.stateRootSig[:], sig)
+	}
+	return res
+}

 func (s *service) newRecoveryMessage() payload.RecoveryMessage {
```
```diff
@@ -393,15 +399,29 @@ func (s *service) verifyBlock(b block.Block) bool {
 	return true
 }

-func (s *service) verifyRequest(p payload.ConsensusPayload) error {
-	req := p.GetPrepareRequest().(*prepareRequest)
-	if s.stateRootEnabled() {
-		r, err := s.Chain.GetStateRoot(s.dbft.BlockIndex - 1)
-		if err != nil {
-			return fmt.Errorf("can't get local state root: %v", err)
-		}
-		if !r.Equals(&req.proposalStateRoot) {
-			return errors.New("state root mismatch")
-		}
-	}
+func (s *service) verifyStateRootSig(index int, sig []byte) error {
+	r, err := s.Chain.GetStateRoot(s.dbft.BlockIndex - 1)
+	if err != nil {
+		return fmt.Errorf("can't get local state root: %v", err)
+	}
+	validators := s.getValidators()
+	if index >= len(validators) {
+		return errors.New("bad validator index")
+	}
+
+	pub := validators[index]
+	if pub.Verify(r.GetSignedPart(), sig) != nil {
+		return errors.New("bad state root signature")
+	}
+	return nil
+}
+
+func (s *service) verifyRequest(p payload.ConsensusPayload) error {
+	req := p.GetPrepareRequest().(*prepareRequest)
+	if s.stateRootEnabled() {
+		err := s.verifyStateRootSig(int(p.ValidatorIndex()), req.stateRootSig[:])
+		if err != nil {
+			return err
+		}
+	}
 	// Save lastProposal for getVerified().
```
```diff
@@ -411,6 +431,14 @@ func (s *service) verifyRequest(p payload.ConsensusPayload) error {
 	return nil
 }

+func (s *service) verifyResponse(p payload.ConsensusPayload) error {
+	if !s.stateRootEnabled() {
+		return nil
+	}
+	resp := p.GetPrepareResponse().(*prepareResponse)
+	return s.verifyStateRootSig(int(p.ValidatorIndex()), resp.stateRootSig[:])
+}
+
 func (s *service) processBlock(b block.Block) {
 	bb := &b.(*neoBlock).Block
 	bb.Script = *(s.getBlockWitness(bb))
```
```diff
@@ -422,36 +450,26 @@ func (s *service) processBlock(b block.Block) {
 		s.log.Warn("error on add block", zap.Error(err))
 	}
-
-	var rb *state.MPTRootBase
-	for _, p := range s.dbft.PreparationPayloads {
-		if p != nil && p.Type() == payload.PrepareRequestType {
-			rb = &p.GetPrepareRequest().(*prepareRequest).proposalStateRoot
-		}
-	}
-	w := s.getWitness(func(p payload.Commit) []byte { return p.(*commit).stateSig[:] })
-	r := &state.MPTRoot{
-		MPTRootBase: *rb,
-		Witness:     w,
-	}
-	if err := s.Chain.AddStateRoot(r); err != nil {
-		s.log.Warn("errors while adding state root", zap.Error(err))
-	}
-	s.Broadcast(r)
 }

 func (s *service) getBlockWitness(_ *coreb.Block) *transaction.Witness {
-	return s.getWitness(func(p payload.Commit) []byte { return p.Signature() })
+	return s.getWitness(func(ctx dbft.Context, i int) []byte {
+		if p := ctx.CommitPayloads[i]; p != nil && p.ViewNumber() == ctx.ViewNumber {
+			return p.GetCommit().Signature()
+		}
+		return nil
+	})
 }

-func (s *service) getWitness(f func(p payload.Commit) []byte) *transaction.Witness {
+func (s *service) getWitness(f func(dbft.Context, int) []byte) *transaction.Witness {
 	dctx := s.dbft.Context
 	pubs := convertKeys(dctx.Validators)
 	sigs := make(map[*keys.PublicKey][]byte)

 	for i := range pubs {
-		if p := dctx.CommitPayloads[i]; p != nil && p.ViewNumber() == dctx.ViewNumber {
-			sigs[pubs[i]] = f(p.GetCommit())
+		sig := f(dctx, i)
+		if sig != nil {
+			sigs[pubs[i]] = sig
 		}
 	}
```
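The getWitness change above swaps a commit-specific accessor for a `func(dbft.Context, int) []byte` selector, so the same signature-collection loop can assemble both the block witness (from Commit signatures) and the state root witness (from PrepareRequest/PrepareResponse signatures). A rough self-contained sketch of that pattern (names are illustrative, not the real neo-go API):

```go
package main

import "fmt"

// payloadSet is a stand-in for the per-validator consensus payloads kept in
// the dbft context.
type payloadSet struct {
	commitSigs  [][]byte // signatures over the block
	prepareSigs [][]byte // signatures over the previous state root
}

// selector picks a signature for validator i, or nil if it has none.
type selector func(ps payloadSet, i int) []byte

// collect gathers every non-nil signature the selector provides; the real
// code then turns such a map into an M-of-N multisignature witness.
func collect(ps payloadSet, n int, sel selector) map[int][]byte {
	sigs := make(map[int][]byte)
	for i := 0; i < n; i++ {
		if sig := sel(ps, i); sig != nil {
			sigs[i] = sig
		}
	}
	return sigs
}

func main() {
	ps := payloadSet{
		commitSigs:  [][]byte{[]byte("c0"), nil, []byte("c2"), []byte("c3")},
		prepareSigs: [][]byte{[]byte("p0"), []byte("p1"), nil, []byte("p3")},
	}
	// The same collection loop builds two different witnesses.
	blockSigs := collect(ps, 4, func(ps payloadSet, i int) []byte { return ps.commitSigs[i] })
	rootSigs := collect(ps, 4, func(ps payloadSet, i int) []byte { return ps.prepareSigs[i] })
	fmt.Println(len(blockSigs), len(rootSigs)) // 3 3
}
```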
```diff
@@ -603,7 +621,29 @@ func convertKeys(validators []crypto.PublicKey) (pubs []*keys.PublicKey) {
 	return
 }

-func newBlockFromContext(ctx *dbft.Context) block.Block {
+func (s *service) newBlockFromContext(ctx *dbft.Context) block.Block {
+	if s.stateRootEnabled() {
+		// This is being called when we're ready to commit, so we can safely
+		// relay stateroot here.
+		stateRoot, err := s.Chain.GetStateRoot(s.dbft.Context.BlockIndex - 1)
+		if err != nil {
+			s.log.Warn("can't get stateroot", zap.Uint32("block", s.dbft.Context.BlockIndex-1))
+		}
+		r := stateRoot.MPTRoot
+		r.Witness = s.getWitness(func(ctx dbft.Context, i int) []byte {
+			if p := ctx.PreparationPayloads[i]; p != nil && p.ViewNumber() == ctx.ViewNumber {
+				if int(ctx.PrimaryIndex) == i {
+					return p.GetPrepareRequest().(*prepareRequest).stateRootSig[:]
+				}
+				return p.GetPrepareResponse().(*prepareResponse).stateRootSig[:]
+			}
+			return nil
+		})
+		if err := s.Chain.AddStateRoot(&r); err != nil {
+			s.log.Warn("errors while adding state root", zap.Error(err))
+		}
+		s.Broadcast(&r)
+	}
 	block := new(neoBlock)
 	if len(ctx.TransactionHashes) == 0 {
 		return nil
```
```diff
@@ -49,20 +49,24 @@ func TestService_GetVerified(t *testing.T) {

 	// Everyone sends a message.
 	for i := 0; i < 4; i++ {
-		p := new(Payload)
-		p.message = &message{}
+		p := srv.newPayload().(*Payload)
+		p.SetHeight(1)
+		p.SetValidatorIndex(uint16(i))
+		priv, _ := getTestValidator(i)
+		// To properly sign stateroot in prepare request.
+		srv.dbft.Priv = priv
 		// One PrepareRequest and three ChangeViews.
 		if i == 1 {
 			p.SetType(payload.PrepareRequestType)
-			p.SetPayload(&prepareRequest{transactionHashes: hashes, minerTx: *newMinerTx(999)})
+			preq := srv.newPrepareRequest().(*prepareRequest)
+			preq.transactionHashes = hashes
+			preq.minerTx = *newMinerTx(999)
+			p.SetPayload(preq)
 		} else {
 			p.SetType(payload.ChangeViewType)
 			p.SetPayload(&changeView{newViewNumber: 1, timestamp: uint32(time.Now().Unix())})
 		}
-		p.SetHeight(1)
-		p.SetValidatorIndex(uint16(i))

-		priv, _ := getTestValidator(i)
 		require.NoError(t, p.Sign(priv))

 		// Skip srv.OnPayload, because the service is not really started.
```
```diff
@@ -49,6 +49,8 @@ const (
 	commitType          messageType = 0x30
 	recoveryRequestType messageType = 0x40
 	recoveryMessageType messageType = 0x41
+
+	nanoInSec = 1000_000_000
 )

 // ViewNumber implements payload.ConsensusPayload interface.
```
```diff
@@ -289,11 +291,11 @@ func (m *message) DecodeBinary(r *io.BinReader) {
 			stateRootEnabled: m.stateRootEnabled,
 		}
 	case prepareResponseType:
-		m.payload = new(prepareResponse)
-	case commitType:
-		m.payload = &commit{
+		m.payload = &prepareResponse{
 			stateRootEnabled: m.stateRootEnabled,
 		}
+	case commitType:
+		m.payload = new(commit)
 	case recoveryRequestType:
 		m.payload = new(recoveryRequest)
 	case recoveryMessageType:
```
```diff
@@ -173,13 +173,13 @@ func testEncodeDecode(srEnabled bool, mt messageType, actual io.Serializable) fu
 }

 func TestCommit_Serializable(t *testing.T) {
-	t.Run("WithStateRoot", testEncodeDecode(true, commitType, &commit{stateRootEnabled: true}))
-	t.Run("NoStateRoot", testEncodeDecode(false, commitType, &commit{stateRootEnabled: false}))
+	testEncodeDecode(false, commitType, &commit{})
 }

 func TestPrepareResponse_Serializable(t *testing.T) {
-	resp := randomMessage(t, prepareResponseType)
-	testserdes.EncodeDecodeBinary(t, resp, new(prepareResponse))
+	t.Run("WithStateRoot", testEncodeDecode(true, prepareResponseType, &prepareResponse{stateRootEnabled: true}))
+	t.Run("NoStateRoot", testEncodeDecode(false, prepareResponseType, &prepareResponse{stateRootEnabled: false}))
+
 }

 func TestPrepareRequest_Serializable(t *testing.T) {
```
```diff
@@ -231,14 +231,15 @@ func randomMessage(t *testing.T, mt messageType, srEnabled ...bool) io.Serializa
 	case prepareRequestType:
 		return randomPrepareRequest(t, srEnabled...)
 	case prepareResponseType:
-		return &prepareResponse{preparationHash: random.Uint256()}
+		var p = prepareResponse{preparationHash: random.Uint256()}
+		if len(srEnabled) > 0 && srEnabled[0] {
+			p.stateRootEnabled = true
+			random.Fill(p.stateRootSig[:])
+		}
+		return &p
 	case commitType:
 		var c commit
 		random.Fill(c.signature[:])
-		if len(srEnabled) > 0 && srEnabled[0] {
-			c.stateRootEnabled = true
-			random.Fill(c.stateSig[:])
-		}
 		return &c
 	case recoveryRequestType:
 		return &recoveryRequest{timestamp: rand.Uint32()}
```
```diff
@@ -268,9 +269,7 @@ func randomPrepareRequest(t *testing.T, srEnabled ...bool) *prepareRequest {

 	if len(srEnabled) > 0 && srEnabled[0] {
 		req.stateRootEnabled = true
-		req.proposalStateRoot.Index = rand.Uint32()
-		req.proposalStateRoot.PrevHash = random.Uint256()
-		req.proposalStateRoot.Root = random.Uint256()
+		random.Fill(req.stateRootSig[:])
 	}

 	return req
```
```diff
@@ -318,9 +317,10 @@ func randomRecoveryMessage(t *testing.T, srEnabled ...bool) *recoveryMessage {
 	if len(srEnabled) > 0 && srEnabled[0] {
 		rec.stateRootEnabled = true
 		rec.prepareRequest.stateRootEnabled = true
-		for _, c := range rec.commitPayloads {
-			c.stateRootEnabled = true
-			random.Fill(c.StateSignature[:])
+		random.Fill(prepReq.stateRootSig[:])
+		for _, p := range rec.preparationPayloads {
+			p.stateRootEnabled = true
+			random.Fill(p.StateRootSig[:])
 		}
 	}
 	return rec
```
```diff
@@ -2,7 +2,6 @@ package consensus

 import (
 	"github.com/nspcc-dev/dbft/payload"
-	"github.com/nspcc-dev/neo-go/pkg/core/state"
 	"github.com/nspcc-dev/neo-go/pkg/core/transaction"
 	"github.com/nspcc-dev/neo-go/pkg/io"
 	"github.com/nspcc-dev/neo-go/pkg/util"
```
```diff
@@ -15,7 +14,7 @@ type prepareRequest struct {
 	transactionHashes []util.Uint256
 	minerTx           transaction.Transaction
 	nextConsensus     util.Uint160
-	proposalStateRoot state.MPTRootBase
+	stateRootSig      [signatureSize]byte

 	stateRootEnabled bool
 }
```
```diff
@@ -30,7 +29,7 @@ func (p *prepareRequest) EncodeBinary(w *io.BinWriter) {
 	w.WriteArray(p.transactionHashes)
 	p.minerTx.EncodeBinary(w)
 	if p.stateRootEnabled {
-		p.proposalStateRoot.EncodeBinary(w)
+		w.WriteBytes(p.stateRootSig[:])
 	}
 }
```
|
|||
r.ReadArray(&p.transactionHashes)
|
||||
p.minerTx.DecodeBinary(r)
|
||||
if p.stateRootEnabled {
|
||||
p.proposalStateRoot.DecodeBinary(r)
|
||||
r.ReadBytes(p.stateRootSig[:])
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
```diff
@@ -9,6 +9,9 @@ import (
 // prepareResponse represents dBFT PrepareResponse message.
 type prepareResponse struct {
 	preparationHash util.Uint256
+	stateRootSig    [signatureSize]byte
+
+	stateRootEnabled bool
 }

 var _ payload.PrepareResponse = (*prepareResponse)(nil)
```
```diff
@@ -16,11 +19,17 @@ var _ payload.PrepareResponse = (*prepareResponse)(nil)
 // EncodeBinary implements io.Serializable interface.
 func (p *prepareResponse) EncodeBinary(w *io.BinWriter) {
 	w.WriteBytes(p.preparationHash[:])
+	if p.stateRootEnabled {
+		w.WriteBytes(p.stateRootSig[:])
+	}
 }

 // DecodeBinary implements io.Serializable interface.
 func (p *prepareResponse) DecodeBinary(r *io.BinReader) {
 	r.ReadBytes(p.preparationHash[:])
+	if p.stateRootEnabled {
+		r.ReadBytes(p.stateRootSig[:])
+	}
 }

 // PreparationHash implements payload.PrepareResponse interface.
```
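Both prepareRequest and prepareResponse now carry a fixed-size state root signature that is written and read only when stateRootEnabled is set, so nodes running with `EnableStateRoot: false` keep the old wire format. A self-contained sketch of that conditional-trailer pattern using stdlib I/O instead of neo-go's io.BinWriter/BinReader (types and sizes here are illustrative):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

const sigSize = 64 // fixed signature size, mirroring signatureSize in the consensus package

// optionalTrailer mimics prepareResponse: a mandatory hash plus a signature
// that only hits the wire when the state root extension is enabled.
type optionalTrailer struct {
	preparationHash [32]byte
	stateRootSig    [sigSize]byte

	stateRootEnabled bool
}

func (p *optionalTrailer) encode(w *bytes.Buffer) {
	w.Write(p.preparationHash[:])
	if p.stateRootEnabled {
		w.Write(p.stateRootSig[:])
	}
}

// decode has to be told (via the flag) whether to expect the trailer, just as
// the real payloads get stateRootEnabled copied in from the outer message.
func (p *optionalTrailer) decode(r io.Reader) error {
	if _, err := io.ReadFull(r, p.preparationHash[:]); err != nil {
		return err
	}
	if !p.stateRootEnabled {
		return nil
	}
	_, err := io.ReadFull(r, p.stateRootSig[:])
	return err
}

func main() {
	src := optionalTrailer{stateRootEnabled: true}
	copy(src.stateRootSig[:], bytes.Repeat([]byte{0xAA}, sigSize))

	var buf bytes.Buffer
	src.encode(&buf)

	dst := optionalTrailer{stateRootEnabled: true}
	if err := dst.decode(&buf); err != nil {
		panic(err)
	}
	fmt.Println(dst.stateRootSig == src.stateRootSig) // true
}
```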
```diff
@@ -32,15 +32,15 @@ type (
 		ViewNumber       byte
 		ValidatorIndex   uint16
 		Signature        [signatureSize]byte
-		StateSignature   [signatureSize]byte
 		InvocationScript []byte
-
-		stateRootEnabled bool
 	}

 	preparationCompact struct {
 		ValidatorIndex   uint16
 		InvocationScript []byte
+		StateRootSig     [signatureSize]byte
+
+		stateRootEnabled bool
 	}
 )
```
```diff
@@ -72,17 +72,17 @@ func (m *recoveryMessage) DecodeBinary(r *io.BinReader) {
 		}
 	}

-	r.ReadArray(&m.preparationPayloads)
 	lu := r.ReadVarUint()
 	if lu > state.MaxValidatorsVoted {
 		r.Err = errors.New("too many preparation payloads")
 		return
 	}
-	m.commitPayloads = make([]*commitCompact, lu)
+	m.preparationPayloads = make([]*preparationCompact, lu)
 	for i := uint64(0); i < lu; i++ {
-		m.commitPayloads[i] = &commitCompact{stateRootEnabled: m.stateRootEnabled}
-		m.commitPayloads[i].DecodeBinary(r)
+		m.preparationPayloads[i] = &preparationCompact{stateRootEnabled: m.stateRootEnabled}
+		m.preparationPayloads[i].DecodeBinary(r)
 	}
+	r.ReadArray(&m.commitPayloads)
 }

 // EncodeBinary implements io.Serializable interface.
```
```diff
@@ -127,9 +127,6 @@ func (p *commitCompact) DecodeBinary(r *io.BinReader) {
 	p.ViewNumber = r.ReadB()
 	p.ValidatorIndex = r.ReadU16LE()
 	r.ReadBytes(p.Signature[:])
-	if p.stateRootEnabled {
-		r.ReadBytes(p.StateSignature[:])
-	}
 	p.InvocationScript = r.ReadVarBytes(1024)
 }
```
```diff
@@ -138,9 +135,6 @@ func (p *commitCompact) EncodeBinary(w *io.BinWriter) {
 	w.WriteB(p.ViewNumber)
 	w.WriteU16LE(p.ValidatorIndex)
 	w.WriteBytes(p.Signature[:])
-	if p.stateRootEnabled {
-		w.WriteBytes(p.StateSignature[:])
-	}
 	w.WriteVarBytes(p.InvocationScript)
 }
```
```diff
@@ -148,12 +142,18 @@ func (p *commitCompact) EncodeBinary(w *io.BinWriter) {
 func (p *preparationCompact) DecodeBinary(r *io.BinReader) {
 	p.ValidatorIndex = r.ReadU16LE()
 	p.InvocationScript = r.ReadVarBytes(1024)
+	if p.stateRootEnabled {
+		r.ReadBytes(p.StateRootSig[:])
+	}
 }

 // EncodeBinary implements io.Serializable interface.
 func (p *preparationCompact) EncodeBinary(w *io.BinWriter) {
 	w.WriteU16LE(p.ValidatorIndex)
 	w.WriteVarBytes(p.InvocationScript)
+	if p.stateRootEnabled {
+		w.WriteBytes(p.StateRootSig[:])
+	}
 }

 // AddPayload implements payload.RecoveryMessage interface.
```
```diff
@@ -170,13 +170,17 @@ func (m *recoveryMessage) AddPayload(p payload.ConsensusPayload) {
 		h := p.Hash()
 		m.preparationHash = &h
 		m.preparationPayloads = append(m.preparationPayloads, &preparationCompact{
+			stateRootEnabled: m.stateRootEnabled,
 			ValidatorIndex:   p.ValidatorIndex(),
 			InvocationScript: p.(*Payload).Witness.InvocationScript,
+			StateRootSig:     p.GetPrepareRequest().(*prepareRequest).stateRootSig,
 		})
 	case payload.PrepareResponseType:
 		m.preparationPayloads = append(m.preparationPayloads, &preparationCompact{
+			stateRootEnabled: m.stateRootEnabled,
 			ValidatorIndex:   p.ValidatorIndex(),
 			InvocationScript: p.(*Payload).Witness.InvocationScript,
+			StateRootSig:     p.GetPrepareResponse().(*prepareResponse).stateRootSig,
 		})

 		if m.preparationHash == nil {
```
```diff
@@ -187,16 +191,14 @@ func (m *recoveryMessage) AddPayload(p payload.ConsensusPayload) {
 		m.changeViewPayloads = append(m.changeViewPayloads, &changeViewCompact{
 			ValidatorIndex:     p.ValidatorIndex(),
 			OriginalViewNumber: p.ViewNumber(),
-			Timestamp:          p.GetChangeView().Timestamp(),
+			Timestamp:          p.GetChangeView().(*changeView).timestamp,
 			InvocationScript:   p.(*Payload).Witness.InvocationScript,
 		})
 	case payload.CommitType:
 		m.commitPayloads = append(m.commitPayloads, &commitCompact{
-			stateRootEnabled: m.stateRootEnabled,
 			ValidatorIndex:   p.ValidatorIndex(),
 			ViewNumber:       p.ViewNumber(),
 			Signature:        p.GetCommit().(*commit).signature,
-			StateSignature:   p.GetCommit().(*commit).stateSig,
 			InvocationScript: p.(*Payload).Witness.InvocationScript,
 		})
 	}
```
```diff
@@ -239,6 +241,9 @@ func (m *recoveryMessage) GetPrepareResponses(p payload.ConsensusPayload, valida
 	for i, resp := range m.preparationPayloads {
 		r := fromPayload(prepareResponseType, p.(*Payload), &prepareResponse{
 			preparationHash: *m.preparationHash,
+			stateRootSig:    resp.StateRootSig,
+
+			stateRootEnabled: m.stateRootEnabled,
 		})
 		r.SetValidatorIndex(resp.ValidatorIndex)
 		r.Witness.InvocationScript = resp.InvocationScript
```
```diff
@@ -277,9 +282,6 @@ func (m *recoveryMessage) GetCommits(p payload.ConsensusPayload, validators []cr
 	for i, c := range m.commitPayloads {
 		cc := fromPayload(commitType, p.(*Payload), &commit{
 			signature: c.Signature,
-			stateSig:  c.StateSignature,
-
-			stateRootEnabled: m.stateRootEnabled,
 		})
 		cc.SetValidatorIndex(c.ValidatorIndex)
 		cc.Witness.InvocationScript = c.InvocationScript
```
```diff
@@ -23,7 +23,7 @@ func (m *recoveryRequest) EncodeBinary(w *io.BinWriter) {
 }

 // Timestamp implements payload.RecoveryRequest interface.
-func (m *recoveryRequest) Timestamp() uint32 { return m.timestamp }
+func (m *recoveryRequest) Timestamp() uint64 { return uint64(m.timestamp) * nanoInSec }

 // SetTimestamp implements payload.RecoveryRequest interface.
-func (m *recoveryRequest) SetTimestamp(ts uint32) { m.timestamp = ts }
+func (m *recoveryRequest) SetTimestamp(ts uint64) { m.timestamp = uint32(ts / nanoInSec) }
```
```diff
@@ -9,6 +9,6 @@ import (
 func TestRecoveryRequest_Setters(t *testing.T) {
 	var r recoveryRequest

-	r.SetTimestamp(123)
-	require.EqualValues(t, 123, r.Timestamp())
+	r.SetTimestamp(123 * nanoInSec)
+	require.EqualValues(t, 123*nanoInSec, r.Timestamp())
 }
```
```diff
@@ -657,7 +657,7 @@ func (s *Server) requestStateRoot(p Peer) error {
 	if diff := hdrHeight - stateHeight; diff < count {
 		count = diff
 	}
-	if count == 0 {
+	if count <= 1 {
 		return nil
 	}
 	gr := &payload.GetStateRoots{
```
```diff
@@ -852,7 +852,10 @@ func (s *Server) handleNewPayload(item cache.Hashable) {
 	case *state.MPTRoot:
 		s.stateCache.Add(p)
 		msg := s.MkMsg(CMDStateRoot, p)
-		s.broadcastMessage(msg)
+		// Stalling on broadcast here would mean delaying commit which
+		// is not good for consensus. MPTRoot is being generated once
+		// per block, so it shouldn't be a problem.
+		s.broadcastHPMessage(msg)
 	default:
 		s.log.Warn("unknown item type", zap.String("type", fmt.Sprintf("%T", p)))
 	}
```
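The switch from broadcastMessage to broadcastHPMessage matches the comment above: the stateroot broadcast sits on the commit path, so it should not queue behind bulk block/transaction relay. A rough illustration of that kind of prioritized send queue (not neo-go's actual peer code):

```go
package main

import "fmt"

func main() {
	hp := make(chan string, 1)     // high-priority queue (e.g. stateroot, consensus)
	normal := make(chan string, 2) // regular relay queue (blocks, transactions)

	normal <- "block"
	normal <- "tx inv"
	hp <- "stateroot"

	send := func(m string) { fmt.Println("sent:", m) }

	// Deliver the three queued messages; "stateroot" goes out first even
	// though it was queued last, because the high-priority queue is always
	// drained before regular traffic is considered.
	for i := 0; i < 3; i++ {
		select {
		case m := <-hp:
			send(m)
			continue
		default:
		}
		select {
		case m := <-hp:
			send(m)
		case m := <-normal:
			send(m)
		}
	}
}
```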