forked from TrueCloudLab/neoneo-go
network: keep a copy of the config inside of Server
Avoid copying the configuration again and again; this makes things a bit more efficient.
parent 6e9d725a29
commit 60d6fa1125
3 changed files with 27 additions and 32 deletions
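The change follows one simple pattern: call chain.GetConfig() once in the constructor, keep the returned config.ProtocolConfiguration value in a Server field, and have every later reader use that cached copy instead of calling GetConfig() again. Below is a minimal, self-contained sketch of that pattern; the types and names (ProtocolConfiguration, Ledger, Server, newServer, resendThreshold, fakeChain) are simplified stand-ins for illustration, not the real neo-go API.

// Minimal sketch of the "cache the config" pattern; simplified stand-in
// types, not the real neo-go network.Server or config.ProtocolConfiguration.
package main

import "fmt"

// ProtocolConfiguration stands in for config.ProtocolConfiguration.
type ProtocolConfiguration struct {
	StateRootInHeader bool
	ValidatorsCount   int
}

// Ledger stands in for the blockchain interface the server talks to.
type Ledger interface {
	GetConfig() ProtocolConfiguration
}

// Server keeps a copy of the Ledger's config instead of re-reading it.
type Server struct {
	chain  Ledger
	config ProtocolConfiguration // filled once in newServer
}

func newServer(chain Ledger) *Server {
	return &Server{
		chain:  chain,
		config: chain.GetConfig(), // copy the configuration exactly once
	}
}

// resendThreshold mirrors initStaleMemPools: it reads the cached copy
// rather than calling chain.GetConfig() on every use.
func (s *Server) resendThreshold() int {
	threshold := 5
	if s.config.ValidatorsCount*2 > threshold {
		threshold = s.config.ValidatorsCount * 2
	}
	return threshold
}

type fakeChain struct{ cfg ProtocolConfiguration }

func (c fakeChain) GetConfig() ProtocolConfiguration { return c.cfg }

func main() {
	s := newServer(fakeChain{cfg: ProtocolConfiguration{ValidatorsCount: 7}})
	fmt.Println(s.resendThreshold()) // prints 14
}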
@@ -14,7 +14,6 @@ import (
 	"time"
 
 	"github.com/nspcc-dev/neo-go/pkg/config"
-	"github.com/nspcc-dev/neo-go/pkg/config/netmode"
 	"github.com/nspcc-dev/neo-go/pkg/core/block"
 	"github.com/nspcc-dev/neo-go/pkg/core/mempool"
 	"github.com/nspcc-dev/neo-go/pkg/core/mempoolevent"
@@ -89,10 +88,8 @@ type (
 		// id also known as the nonce of the server.
 		id uint32
 
-		// Network's magic number for correct message decoding.
-		network netmode.Magic
-		// stateRootInHeader specifies if block header contain state root.
-		stateRootInHeader bool
+		// A copy of the Ledger's config.
+		config config.ProtocolConfiguration
 
 		transport Transporter
 		discovery Discoverer
@@ -166,27 +163,26 @@ func newServerFromConstructors(config ServerConfig, chain Ledger, stSync StateSy
 	}
 
 	s := &Server{
 		ServerConfig: config,
 		chain: chain,
 		id: randomID(),
-		network: chain.GetConfig().Magic,
-		stateRootInHeader: chain.GetConfig().StateRootInHeader,
+		config: chain.GetConfig(),
 		quit: make(chan struct{}),
 		register: make(chan Peer),
 		unregister: make(chan peerDrop),
 		txInMap: make(map[util.Uint256]struct{}),
 		peers: make(map[Peer]bool),
 		syncReached: atomic.NewBool(false),
 		mempool: chain.GetMemPool(),
 		extensiblePool: extpool.New(chain, config.ExtensiblePoolSize),
 		log: log,
 		transactions: make(chan *transaction.Transaction, 64),
 		extensHandlers: make(map[string]func(*payload.Extensible) error),
 		stateSync: stSync,
 	}
 	if chain.P2PSigExtensionsEnabled() {
 		s.notaryFeer = NewNotaryFeer(chain)
-		s.notaryRequestPool = mempool.New(chain.GetConfig().P2PNotaryRequestPayloadPoolSize, 1, true)
+		s.notaryRequestPool = mempool.New(s.config.P2PNotaryRequestPayloadPoolSize, 1, true)
 		chain.RegisterPostBlock(func(isRelevant func(*transaction.Transaction, *mempool.Pool, bool) bool, txpool *mempool.Pool, _ *block.Block) {
 			s.notaryRequestPool.RemoveStale(func(t *transaction.Transaction) bool {
 				return isRelevant(t, txpool, true)
@@ -773,10 +769,10 @@ func (s *Server) handleGetDataCmd(p Peer, inv *payload.Inventory) error {
 
 // handleGetMPTDataCmd processes the received MPT inventory.
 func (s *Server) handleGetMPTDataCmd(p Peer, inv *payload.MPTInventory) error {
-	if !s.chain.GetConfig().P2PStateExchangeExtensions {
+	if !s.config.P2PStateExchangeExtensions {
 		return errors.New("GetMPTDataCMD was received, but P2PStateExchangeExtensions are disabled")
 	}
-	if s.chain.GetConfig().KeepOnlyLatestState {
+	if s.config.KeepOnlyLatestState {
 		// TODO: implement keeping MPT states for P1 and P2 height (#2095, #2152 related)
 		return errors.New("GetMPTDataCMD was received, but only latest MPT state is supported")
 	}
@@ -814,7 +810,7 @@ func (s *Server) handleGetMPTDataCmd(p Peer, inv *payload.MPTInventory) error {
 }
 
 func (s *Server) handleMPTDataCmd(p Peer, data *payload.MPTData) error {
-	if !s.chain.GetConfig().P2PStateExchangeExtensions {
+	if !s.config.P2PStateExchangeExtensions {
 		return errors.New("MPTDataCMD was received, but P2PStateExchangeExtensions are disabled")
 	}
 	return s.stateSync.AddMPTNodes(data.Nodes)
@@ -1396,10 +1392,9 @@ func (s *Server) broadcastTxHashes(hs []util.Uint256) {
 
 // initStaleMemPools initializes mempools for stale tx/payload processing.
 func (s *Server) initStaleMemPools() {
-	cfg := s.chain.GetConfig()
 	threshold := 5
-	if cfg.ValidatorsCount*2 > threshold {
-		threshold = cfg.ValidatorsCount * 2
+	if s.config.ValidatorsCount*2 > threshold {
+		threshold = s.config.ValidatorsCount * 2
 	}
 
 	s.mempool.SetResendThreshold(uint32(threshold), s.broadcastTX)

@@ -813,7 +813,7 @@ func TestHandleMPTData(t *testing.T) {
 	t.Run("good", func(t *testing.T) {
 		expected := [][]byte{{1, 2, 3}, {2, 3, 4}}
 		s := newTestServer(t, ServerConfig{Port: 0, UserAgent: "/test/"})
-		s.chain.(*fakechain.FakeChain).P2PStateExchangeExtensions = true
+		s.config.P2PStateExchangeExtensions = true
 		s.stateSync = &fakechain.FakeStateSync{
 			AddMPTNodesFunc: func(nodes [][]byte) error {
 				require.Equal(t, expected, nodes)

@@ -167,7 +167,7 @@ func (p *TCPPeer) handleConn() {
 	if err == nil {
 		r := io.NewBinReaderFromIO(p.conn)
 		for {
-			msg := &Message{StateRootInHeader: p.server.stateRootInHeader}
+			msg := &Message{StateRootInHeader: p.server.config.StateRootInHeader}
 			err = msg.Decode(r)
 
 			if err == payload.ErrTooManyHeaders {
|
@ -207,7 +207,7 @@ func (p *TCPPeer) handleQueues() {
|
||||||
var p2pSkipCounter uint32
|
var p2pSkipCounter uint32
|
||||||
const p2pSkipDivisor = 4
|
const p2pSkipDivisor = 4
|
||||||
|
|
||||||
var writeTimeout = time.Duration(p.server.chain.GetConfig().SecondsPerBlock) * time.Second
|
var writeTimeout = time.Duration(p.server.config.SecondsPerBlock) * time.Second
|
||||||
for {
|
for {
|
||||||
var msg []byte
|
var msg []byte
|
||||||
|
|
||||||
|
|