package consensus

import (
	"testing"
	"time"

	"github.com/nspcc-dev/dbft"
	"github.com/nspcc-dev/neo-go/internal/random"
	"github.com/nspcc-dev/neo-go/internal/testchain"
	"github.com/nspcc-dev/neo-go/pkg/config"
	"github.com/nspcc-dev/neo-go/pkg/config/netmode"
	"github.com/nspcc-dev/neo-go/pkg/core"
	coreb "github.com/nspcc-dev/neo-go/pkg/core/block"
	"github.com/nspcc-dev/neo-go/pkg/core/fee"
	"github.com/nspcc-dev/neo-go/pkg/core/native"
	"github.com/nspcc-dev/neo-go/pkg/core/storage"
	"github.com/nspcc-dev/neo-go/pkg/core/transaction"
	"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"github.com/nspcc-dev/neo-go/pkg/io"
	npayload "github.com/nspcc-dev/neo-go/pkg/network/payload"
	"github.com/nspcc-dev/neo-go/pkg/smartcontract"
	"github.com/nspcc-dev/neo-go/pkg/util"
	"github.com/nspcc-dev/neo-go/pkg/vm/emit"
	"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
	"github.com/nspcc-dev/neo-go/pkg/wallet"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap/zaptest"
)

func TestNewService(t *testing.T) {
	srv := newTestService(t)
	tx := transaction.New([]byte{byte(opcode.PUSH1)}, 100000)
	tx.ValidUntilBlock = 1
	addSender(t, tx)
	signTx(t, srv.Chain, tx)
	require.NoError(t, srv.Chain.PoolTx(tx))

	var txx []dbft.Transaction[util.Uint256]
	require.NotPanics(t, func() { txx = srv.getVerifiedTx() })
	require.Len(t, txx, 1)
	require.Equal(t, tx, txx[0])
}

func TestNewWatchingService(t *testing.T) {
	bc := newTestChain(t, false)
	srv, err := NewService(Config{
		Logger:                zaptest.NewLogger(t),
		Broadcast:             func(*npayload.Extensible) {},
		Chain:                 bc,
		BlockQueue:            testBlockQueuer{bc: bc},
		ProtocolConfiguration: bc.GetConfig().ProtocolConfiguration,
		RequestTx:             func(...util.Uint256) {},
		StopTxFlow:            func() {},
		TimePerBlock:          bc.GetConfig().TimePerBlock,
		// No wallet provided.
	})
	require.NoError(t, err)

	require.NotPanics(t, srv.Start)
	require.NotPanics(t, srv.Shutdown)
}

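// collectBlock triggers a timeout in srv's dbft instance so that a block is
// collected from the pool and added to the chain, and then re-initializes dbft
// at the next height (the consensus service itself is not running in tests).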
func collectBlock(t *testing.T, bc *core.Blockchain, srv *service) {
	h := bc.BlockHeight()
	srv.dbft.OnTimeout(srv.dbft.Context.BlockIndex, 0) // Collect and add block to the chain.
	header, err := bc.GetHeader(bc.GetHeaderHash(h + 1))
	require.NoError(t, err)
	srv.dbft.Reset(header.Timestamp * nsInMs) // Init consensus manually at the next height, as we don't run the consensus service.
}

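// initServiceNextConsensus starts a single-validator chain, transfers NEO and
// GAS from the validator to newAcc, registers newAcc as a candidate and votes
// for it offset blocks before the next committee update. It returns the
// consensus service (with dbft already initialized) and the original validator
// account.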
func initServiceNextConsensus(t *testing.T, newAcc *wallet.Account, offset uint32) (*service, *wallet.Account) {
	acc, err := wallet.NewAccountFromWIF(testchain.WIF(testchain.IDToOrder(0)))
	require.NoError(t, err)
	priv := acc.PrivateKey()
	require.NoError(t, acc.ConvertMultisig(1, keys.PublicKeys{priv.PublicKey()}))

	bc := newSingleTestChain(t)
	newPriv := newAcc.PrivateKey()

	// Transfer funds to new validator.
	b := smartcontract.NewBuilder()
	b.InvokeWithAssert(bc.GoverningTokenHash(), "transfer",
		acc.Contract.ScriptHash().BytesBE(), newPriv.GetScriptHash().BytesBE(), int64(native.NEOTotalSupply), nil)

	b.InvokeWithAssert(bc.UtilityTokenHash(), "transfer",
		acc.Contract.ScriptHash().BytesBE(), newPriv.GetScriptHash().BytesBE(), int64(10000_000_000_000), nil)
	script, err := b.Script()
	require.NoError(t, err)

	tx := transaction.New(script, 21_000_000)
	tx.ValidUntilBlock = bc.BlockHeight() + 1
	tx.NetworkFee = 10_000_000
	tx.Signers = []transaction.Signer{{Scopes: transaction.Global, Account: acc.Contract.ScriptHash()}}
	require.NoError(t, acc.SignTx(netmode.UnitTestNet, tx))
	require.NoError(t, bc.PoolTx(tx))

	srv := newTestServiceWithChain(t, bc)
	h := bc.BlockHeight()
	srv.dbft.Start(0)
	header, err := bc.GetHeader(bc.GetHeaderHash(h + 1))
	require.NoError(t, err)
	srv.dbft.Reset(header.Timestamp * nsInMs) // Init consensus manually at the next height, as we don't run the consensus service.

	// Register new candidate.
	b.Reset()
	b.InvokeWithAssert(bc.GoverningTokenHash(), "registerCandidate", newPriv.PublicKey().Bytes())
	script, err = b.Script()
	require.NoError(t, err)

	tx = transaction.New(script, 1001_00000000)
	tx.ValidUntilBlock = bc.BlockHeight() + 1
	tx.NetworkFee = 20_000_000
	tx.Signers = []transaction.Signer{{Scopes: transaction.Global, Account: newPriv.GetScriptHash()}}
	require.NoError(t, newAcc.SignTx(netmode.UnitTestNet, tx))

	require.NoError(t, bc.PoolTx(tx))
	collectBlock(t, bc, srv)

	cfg := bc.GetConfig()
	for i := srv.dbft.BlockIndex; !cfg.ShouldUpdateCommitteeAt(i + offset); i++ {
		collectBlock(t, bc, srv)
	}

	// Vote for new candidate.
	b.Reset()
	b.InvokeWithAssert(bc.GoverningTokenHash(), "vote",
		newPriv.GetScriptHash(), newPriv.PublicKey().Bytes())
	script, err = b.Script()
	require.NoError(t, err)

	tx = transaction.New(script, 20_000_000)
	tx.ValidUntilBlock = bc.BlockHeight() + 1
	tx.NetworkFee = 20_000_000
	tx.Signers = []transaction.Signer{{Scopes: transaction.Global, Account: newPriv.GetScriptHash()}}
	require.NoError(t, newAcc.SignTx(netmode.UnitTestNet, tx))

	require.NoError(t, bc.PoolTx(tx))
	collectBlock(t, bc, srv)

	return srv, acc
}

func TestService_NextConsensus(t *testing.T) {
	newAcc, err := wallet.NewAccount()
	require.NoError(t, err)
	script, err := smartcontract.CreateMajorityMultiSigRedeemScript(keys.PublicKeys{newAcc.PublicKey()})
	require.NoError(t, err)

	checkNextConsensus := func(t *testing.T, bc *core.Blockchain, height uint32, h util.Uint160) {
		hdrHash := bc.GetHeaderHash(height)
		hdr, err := bc.GetHeader(hdrHash)
		require.NoError(t, err)
		require.Equal(t, h, hdr.NextConsensus)
	}

	t.Run("vote 1 block before update", func(t *testing.T) { // voting occurs every block in SingleTestChain
		srv, acc := initServiceNextConsensus(t, newAcc, 1)
		bc := srv.Chain.(*core.Blockchain)

		height := bc.BlockHeight()
		checkNextConsensus(t, bc, height, acc.Contract.ScriptHash())
		// Reset <- we are here, update NextConsensus
		// OnPersist <- update committee
		// Block <-

		collectBlock(t, bc, srv)
		checkNextConsensus(t, bc, height+1, hash.Hash160(script))
	})
	/*
		t.Run("vote 2 blocks before update", func(t *testing.T) {
			srv, acc := initServiceNextConsensus(t, newAcc, 2)
			bc := srv.Chain.(*core.Blockchain)
			defer bc.Close()

			height := bc.BlockHeight()
			checkNextConsensus(t, bc, height, acc.Contract.ScriptHash())
			// Reset <- we are here
			// OnPersist <- nothing to do
			// Block <-
			//
			// Reset <- update next consensus
			// OnPersist <- update committee
			// Block <-
			srv.dbft.OnTimeout(timer.HV{Height: srv.dbft.BlockIndex})
			checkNextConsensus(t, bc, height+1, acc.Contract.ScriptHash())

			srv.dbft.OnTimeout(timer.HV{Height: srv.dbft.BlockIndex})
			checkNextConsensus(t, bc, height+2, hash.Hash160(script))
		})
	*/
}

func TestService_GetVerified(t *testing.T) {
	srv := newTestService(t)
	srv.dbft.Start(0)
	var txs []*transaction.Transaction
	for i := 0; i < 4; i++ {
		tx := transaction.New([]byte{byte(opcode.PUSH1)}, 100000)
		tx.Nonce = 123 + uint32(i)
		tx.ValidUntilBlock = 1
		txs = append(txs, tx)
	}
	addSender(t, txs...)
	signTx(t, srv.Chain, txs...)
	require.NoError(t, srv.Chain.PoolTx(txs[3]))

	hashes := []util.Uint256{txs[0].Hash(), txs[1].Hash(), txs[2].Hash()}

	// Everyone sends a message.
	for i := 0; i < 4; i++ {
		p := new(Payload)
		// One PrepareRequest and three ChangeViews.
		if i == 1 {
			p.SetType(dbft.PrepareRequestType)
			p.SetPayload(&prepareRequest{prevHash: srv.Chain.CurrentBlockHash(), transactionHashes: hashes})
		} else {
			p.SetType(dbft.ChangeViewType)
			p.SetPayload(&changeView{newViewNumber: 1, timestamp: uint64(time.Now().UnixNano() / nsInMs)})
		}
		p.SetHeight(1)
		p.SetValidatorIndex(uint16(i))
		priv, _ := getTestValidator(i)
		require.NoError(t, p.Sign(priv))
		// Skip srv.OnPayload, because the service is not really started.
		srv.dbft.OnReceive(p)
	}
	require.Equal(t, uint8(1), srv.dbft.ViewNumber)
	require.Equal(t, hashes, srv.lastProposal)

	t.Run("new transactions will be proposed in case of failure", func(t *testing.T) {
		txx := srv.getVerifiedTx()
		require.Equal(t, 1, len(txx), "there is only 1 tx in mempool")
		require.Equal(t, txs[3], txx[0])
	})

	t.Run("more than half of the last proposal will be reused", func(t *testing.T) {
		for _, tx := range txs[:2] {
			require.NoError(t, srv.Chain.PoolTx(tx))
		}

		txx := srv.getVerifiedTx()
		require.Contains(t, txx, txs[0])
		require.Contains(t, txx, txs[1])
		require.NotContains(t, txx, txs[2])
	})
}

func TestService_ValidatePayload(t *testing.T) {
	srv := newTestService(t)
	priv, _ := getTestValidator(1)
	p := new(Payload)
	p.Sender = priv.GetScriptHash()
	p.SetPayload(&prepareRequest{})

	t.Run("invalid validator index", func(t *testing.T) {
		p.SetValidatorIndex(11)
		require.NoError(t, p.Sign(priv))

		var ok bool
		require.NotPanics(t, func() { ok = srv.validatePayload(p) })
		require.False(t, ok)
	})

	t.Run("wrong validator index", func(t *testing.T) {
		p.SetValidatorIndex(2)
		require.NoError(t, p.Sign(priv))
		require.False(t, srv.validatePayload(p))
	})

	t.Run("invalid sender", func(t *testing.T) {
		p.SetValidatorIndex(1)
		p.Sender = util.Uint160{}
		require.NoError(t, p.Sign(priv))
		require.False(t, srv.validatePayload(p))
	})

	t.Run("normal case", func(t *testing.T) {
		p.SetValidatorIndex(1)
		p.Sender = priv.GetScriptHash()
		require.NoError(t, p.Sign(priv))
		require.True(t, srv.validatePayload(p))
	})
}

func TestService_getTx(t *testing.T) {
	srv := newTestService(t)

	t.Run("transaction in mempool", func(t *testing.T) {
		tx := transaction.New([]byte{byte(opcode.PUSH1)}, 0)
		tx.Nonce = 1234
		tx.ValidUntilBlock = 1
		addSender(t, tx)
		signTx(t, srv.Chain, tx)
		h := tx.Hash()

		require.Equal(t, nil, srv.getTx(h))

		require.NoError(t, srv.Chain.PoolTx(tx))

		got := srv.getTx(h)
		require.NotNil(t, got)
		require.Equal(t, h, got.Hash())
	})

	t.Run("transaction in local cache", func(t *testing.T) {
		tx := transaction.New([]byte{byte(opcode.PUSH1)}, 0)
		tx.Nonce = 4321
		tx.ValidUntilBlock = 1
		h := tx.Hash()

		require.Equal(t, nil, srv.getTx(h))

		srv.txx.Add(tx)

		got := srv.getTx(h)
		require.NotNil(t, got)
		require.Equal(t, h, got.Hash())
	})
}

func TestService_PrepareRequest(t *testing.T) {
	srv := newTestServiceWithState(t, true)
	srv.dbft.Start(0)
	t.Cleanup(srv.dbft.Timer.Stop)

	priv, _ := getTestValidator(1)
	p := new(Payload)
	p.SetValidatorIndex(1)

	prevHash := srv.Chain.CurrentBlockHash()

	checkRequest := func(t *testing.T, expectedErr error, req *prepareRequest) {
		p.SetPayload(req)
		require.NoError(t, p.Sign(priv))
		err := srv.verifyRequest(p)
		if expectedErr == nil {
			require.NoError(t, err)
			return
		}
		require.ErrorIs(t, err, expectedErr)
	}

	checkRequest(t, errInvalidVersion, &prepareRequest{version: 0xFF, prevHash: prevHash})
	checkRequest(t, errInvalidPrevHash, &prepareRequest{prevHash: random.Uint256()})
	checkRequest(t, errInvalidStateRoot, &prepareRequest{
		stateRootEnabled: true,
		prevHash:         prevHash,
	})

	sr, err := srv.Chain.GetStateRoot(srv.dbft.BlockIndex - 1)
	require.NoError(t, err)

	checkRequest(t, errInvalidTransactionsCount, &prepareRequest{stateRootEnabled: true,
		prevHash:          prevHash,
		stateRoot:         sr.Root,
		transactionHashes: make([]util.Uint256, srv.ProtocolConfiguration.MaxTransactionsPerBlock+1),
	})

	checkRequest(t, nil, &prepareRequest{
		stateRootEnabled: true,
		prevHash:         prevHash,
		stateRoot:        sr.Root,
	})
}

func TestService_OnPayload(t *testing.T) {
	srv := newTestService(t)
	// This test directly reads things from srv.messages that are normally
	// read by the internal goroutine started with Start(). So let's
	// pretend we really did start already.
	srv.started.Store(true)

	priv, _ := getTestValidator(1)
	p := new(Payload)
	p.SetValidatorIndex(1)
	p.SetPayload(&prepareRequest{})
	p.encodeData()

	// The sender is invalid, so this payload must not reach srv.messages.
	require.NoError(t, srv.OnPayload(&p.Extensible))
	shouldNotReceive(t, srv.messages)

	p = new(Payload)
	p.SetValidatorIndex(1)
	p.Sender = priv.GetScriptHash()
	p.SetPayload(&prepareRequest{})
	require.NoError(t, p.Sign(priv))
	require.NoError(t, srv.OnPayload(&p.Extensible))
	shouldReceive(t, srv.messages)
}

func TestVerifyBlock(t *testing.T) {
	srv := newTestService(t)

	bc := srv.Chain.(*core.Blockchain)
	srv.lastTimestamp = 1
	t.Run("good empty", func(t *testing.T) {
		b := testchain.NewBlock(t, bc, 1, 0)
		require.True(t, srv.verifyBlock(&neoBlock{Block: *b}))
	})
	t.Run("good pooled tx", func(t *testing.T) {
		tx := transaction.New([]byte{byte(opcode.RET)}, 100000)
		tx.ValidUntilBlock = 1
		addSender(t, tx)
		signTx(t, srv.Chain, tx)
		require.NoError(t, srv.Chain.PoolTx(tx))
		b := testchain.NewBlock(t, bc, 1, 0, tx)
		require.True(t, srv.verifyBlock(&neoBlock{Block: *b}))
	})
	t.Run("good non-pooled tx", func(t *testing.T) {
		tx := transaction.New([]byte{byte(opcode.RET)}, 100000)
		tx.ValidUntilBlock = 1
		addSender(t, tx)
		signTx(t, srv.Chain, tx)
		b := testchain.NewBlock(t, bc, 1, 0, tx)
		require.True(t, srv.verifyBlock(&neoBlock{Block: *b}))
	})
	t.Run("good conflicting tx", func(t *testing.T) {
		initGAS := srv.Chain.GetConfig().InitialGASSupply
		tx1 := transaction.New([]byte{byte(opcode.RET)}, 100000)
		tx1.NetworkFee = int64(initGAS)/2 + 1
		tx1.ValidUntilBlock = 1
		addSender(t, tx1)
		signTx(t, srv.Chain, tx1)
		tx2 := transaction.New([]byte{byte(opcode.RET)}, 100000)
		tx2.NetworkFee = int64(initGAS)/2 + 1
		tx2.ValidUntilBlock = 1
		addSender(t, tx2)
		signTx(t, srv.Chain, tx2)
		require.NoError(t, srv.Chain.PoolTx(tx1))
		require.Error(t, srv.Chain.PoolTx(tx2))
		b := testchain.NewBlock(t, bc, 1, 0, tx2)
		require.True(t, srv.verifyBlock(&neoBlock{Block: *b}))
	})
	t.Run("bad old", func(t *testing.T) {
		b := testchain.NewBlock(t, bc, 1, 0)
		b.Index = srv.Chain.BlockHeight()
		require.False(t, srv.verifyBlock(&neoBlock{Block: *b}))
	})
	t.Run("bad big size", func(t *testing.T) {
		script := make([]byte, int(srv.ProtocolConfiguration.MaxBlockSize))
		script[0] = byte(opcode.RET)
		tx := transaction.New(script, 100000)
		tx.ValidUntilBlock = 1
		addSender(t, tx)
		signTx(t, srv.Chain, tx)
		b := testchain.NewBlock(t, bc, 1, 0, tx)
		require.False(t, srv.verifyBlock(&neoBlock{Block: *b}))
	})
	t.Run("bad timestamp", func(t *testing.T) {
		b := testchain.NewBlock(t, bc, 1, 0)
		b.Timestamp = srv.lastTimestamp - 1
		require.False(t, srv.verifyBlock(&neoBlock{Block: *b}))
	})
	t.Run("bad tx", func(t *testing.T) {
		tx := transaction.New([]byte{byte(opcode.RET)}, 100000)
		tx.ValidUntilBlock = 1
		addSender(t, tx)
		signTx(t, srv.Chain, tx)
		tx.Scripts[0].InvocationScript[16] = ^tx.Scripts[0].InvocationScript[16]
		b := testchain.NewBlock(t, bc, 1, 0, tx)
		require.False(t, srv.verifyBlock(&neoBlock{Block: *b}))
	})
	t.Run("bad big sys fee", func(t *testing.T) {
		txes := make([]*transaction.Transaction, 2)
		for i := range txes {
			txes[i] = transaction.New([]byte{byte(opcode.RET)}, srv.ProtocolConfiguration.MaxBlockSystemFee/2+1)
			txes[i].ValidUntilBlock = 1
			addSender(t, txes[i])
			signTx(t, srv.Chain, txes[i])
		}
		b := testchain.NewBlock(t, bc, 1, 0, txes...)
		require.False(t, srv.verifyBlock(&neoBlock{Block: *b}))
	})
}

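// shouldReceive fails the test if ch has no pending message.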
func shouldReceive(t *testing.T, ch chan Payload) {
	select {
	case <-ch:
	default:
		require.Fail(t, "missing expected message")
	}
}

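// shouldNotReceive fails the test if ch has a pending message.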
func shouldNotReceive(t *testing.T, ch chan Payload) {
	select {
	case <-ch:
		require.Fail(t, "unexpected message receive")
	default:
	}
}

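// newTestServiceWithState creates a consensus service over a new unit test
// chain with the given StateRootInHeader setting.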
func newTestServiceWithState(t *testing.T, stateRootInHeader bool) *service {
	return newTestServiceWithChain(t, newTestChain(t, stateRootInHeader))
}

func newTestService(t *testing.T) *service {
	return newTestServiceWithState(t, false)
}

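// newTestServiceWithChain creates a consensus service for the given chain
// using the wallet from ./testdata/wallet1.json.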
func newTestServiceWithChain(t *testing.T, bc *core.Blockchain) *service {
	srv, err := NewService(Config{
		Logger:                zaptest.NewLogger(t),
		Broadcast:             func(*npayload.Extensible) {},
		Chain:                 bc,
		BlockQueue:            testBlockQueuer{bc: bc},
		ProtocolConfiguration: bc.GetConfig().ProtocolConfiguration,
		RequestTx:             func(...util.Uint256) {},
		StopTxFlow:            func() {},
		TimePerBlock:          bc.GetConfig().TimePerBlock,
		Wallet: config.Wallet{
			Path:     "./testdata/wallet1.json",
			Password: "one",
		},
	})
	require.NoError(t, err)

	return srv.(*service)
}

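// testBlockQueuer implements BlockQueuer by adding blocks directly to the
// underlying chain instead of queueing them.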
type testBlockQueuer struct {
	bc *core.Blockchain
}

var _ = BlockQueuer(testBlockQueuer{})

// PutBlock implements BlockQueuer interface.
func (bq testBlockQueuer) PutBlock(b *coreb.Block) error {
	return bq.bc.AddBlock(b)
}

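// getTestValidator returns the i-th unit test chain validator key pair wrapped
// into dbft-compatible privateKey/publicKey types.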
func getTestValidator(i int) (*privateKey, *publicKey) {
	key := testchain.PrivateKey(i)
	return &privateKey{PrivateKey: key}, &publicKey{PublicKey: key.PublicKey()}
}

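// newSingleTestChain creates and runs a chain based on the single-node unit
// test configuration, closing it on test cleanup.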
func newSingleTestChain(t *testing.T) *core.Blockchain {
	configPath := "../../config/protocol.unit_testnet.single.yml"
	cfg, err := config.LoadFile(configPath)
	require.NoError(t, err, "could not load config")

	chain, err := core.NewBlockchain(storage.NewMemoryStore(), cfg.Blockchain(), zaptest.NewLogger(t))
	require.NoError(t, err, "could not create chain")

	go chain.Run()
	t.Cleanup(chain.Close)
	return chain
}

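// newTestChain creates and runs a unit test chain with the given
// StateRootInHeader setting, closing it on test cleanup.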
func newTestChain(t *testing.T, stateRootInHeader bool) *core.Blockchain {
	unitTestNetCfg, err := config.Load("../../config", netmode.UnitTestNet)
	require.NoError(t, err)
	unitTestNetCfg.ProtocolConfiguration.StateRootInHeader = stateRootInHeader

	chain, err := core.NewBlockchain(storage.NewMemoryStore(), unitTestNetCfg.Blockchain(), zaptest.NewLogger(t))
	require.NoError(t, err)

	go chain.Run()
	t.Cleanup(chain.Close)
	return chain
}

var neoOwner = testchain.MultisigScriptHash()

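// addSender makes the neoOwner multisig account the only signer of the given
// transactions.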
func addSender(t *testing.T, txs ...*transaction.Transaction) {
	for _, tx := range txs {
		tx.Signers = []transaction.Signer{
			{
				Account: neoOwner,
			},
		}
	}
}

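// signTx calculates the network fee for each given transaction and attaches a
// 3-out-of-4 multisig witness of the unit test chain validators to it.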
func signTx(t *testing.T, bc Ledger, txs ...*transaction.Transaction) {
	validators := make([]*keys.PublicKey, 4)
	privNetKeys := make([]*keys.PrivateKey, 4)
	for i := 0; i < 4; i++ {
		privNetKeys[i] = testchain.PrivateKey(i)
		validators[i] = privNetKeys[i].PublicKey()
	}
	privNetKeys = privNetKeys[:3]
	rawScript, err := smartcontract.CreateMultiSigRedeemScript(3, validators)
	require.NoError(t, err)
	for _, tx := range txs {
		size := io.GetVarSize(tx)
		netFee, sizeDelta := fee.Calculate(bc.GetBaseExecFee(), rawScript)
		tx.NetworkFee += netFee
		size += sizeDelta
		tx.NetworkFee += int64(size)*bc.FeePerByte() + bc.CalculateAttributesFee(tx)

		buf := io.NewBufBinWriter()
		for _, key := range privNetKeys {
			signature := key.SignHashable(uint32(testchain.Network()), tx)
			emit.Bytes(buf.BinWriter, signature)
		}

		tx.Scripts = []transaction.Witness{{
			InvocationScript:   buf.Bytes(),
			VerificationScript: rawScript,
		}}
	}
}