network: change server Start() behavior

Previously the user had to start the server in a separate goroutine. Now
the goroutine is created inside Start() itself. For normal server
operation, the caller should wait for Start to finish. Also fixed the
TestTryInitStateSync test, which was exiting before the logs were
written.
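
An illustrative sketch of the calling convention change (identifiers are
those from the diff below; error handling elided):

	go serv.Start()   // before: the caller spawned the goroutine itself
	serv.Start()      // after: Start() launches go s.run() internally and returns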

Close #3112

Signed-off-by: Ekaterina Pavlova <ekt@morphbits.io>
Ekaterina Pavlova 2024-02-21 18:07:28 +03:00
parent 224808975b
commit f8dc5ec44f
5 changed files with 10 additions and 10 deletions


@@ -498,7 +498,7 @@ func startServer(ctx *cli.Context) error {
 	rpcServer := rpcsrv.New(chain, cfg.ApplicationConfiguration.RPC, serv, oracleSrv, log, errChan)
 	serv.AddService(&rpcServer)
-	go serv.Start()
+	serv.Start()
 	if !cfg.ApplicationConfiguration.RPC.StartWhenSynchronized {
 		// Run RPC server in a separate routine. This is necessary to avoid a potential
 		// deadlock: Start() can write errors to errChan which is not yet read in the


@@ -164,7 +164,7 @@ func NewTestChain(t *testing.T, f func(*config.Config), run bool) (*core.Blockch
 	})
 	require.NoError(t, err)
 	netSrv.AddConsensusService(cons, cons.OnPayload, cons.OnTransaction)
-	go netSrv.Start()
+	netSrv.Start()
 	errCh := make(chan error, 2)
 	rpcServer := rpcsrv.New(chain, cfg.ApplicationConfiguration.RPC, netSrv, nil, logger, errCh)
 	rpcServer.Start()


@@ -262,7 +262,7 @@ func (s *Server) ID() uint32 {
 }
 
 // Start will start the server and its underlying transport. Calling it twice
-// is an error.
+// is an error. Caller should wait for Start to finish for normal server operation.
 func (s *Server) Start() {
 	s.log.Info("node started",
 		zap.Uint32("blockHeight", s.chain.BlockHeight()),
@@ -285,7 +285,7 @@ func (s *Server) Start() {
 	setServerAndNodeVersions(s.UserAgent, strconv.FormatUint(uint64(s.id), 10))
 	setNeoGoVersion(config.Version)
 	setSeverID(strconv.FormatUint(uint64(s.id), 10))
-	s.run()
+	go s.run()
 }
 
 // Shutdown disconnects all peers and stops listening. Calling it twice is an error,
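
For illustration, a minimal caller-side sketch of the new contract (runNode is a
hypothetical helper, not code from this change): Start does its synchronous setup
and launches the main loop itself; Shutdown still stops it.

	func runNode(s *network.Server) {
		s.Start()          // returns once go s.run() has been launched internally
		defer s.Shutdown() // disconnects peers and stops listening
		// ... block here until the node should exit ...
	}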


@@ -90,7 +90,7 @@ func TestServerStartAndShutdown(t *testing.T) {
 	t.Run("no consensus", func(t *testing.T) {
 		s := newTestServer(t, ServerConfig{})
-		go s.Start()
+		s.Start()
 		p := newLocalPeer(t, s)
 		s.register <- p
 		require.Eventually(t, func() bool { return 1 == s.PeerCount() }, time.Second, time.Millisecond*10)
@@ -110,7 +110,7 @@ func TestServerStartAndShutdown(t *testing.T) {
 		cons := new(fakeConsensus)
 		s.AddConsensusService(cons, cons.OnPayload, cons.OnTransaction)
-		go s.Start()
+		s.Start()
 		p := newLocalPeer(t, s)
 		s.register <- p
@@ -312,7 +312,7 @@ func TestServerNotSendsVerack(t *testing.T) {
 	s.id = 1
 	finished := make(chan struct{})
 	go func() {
-		s.run()
+		go s.run()
 		close(finished)
 	}()
 	t.Cleanup(func() {
@@ -389,7 +389,7 @@ func startTestServer(t *testing.T, protocolCfg ...func(*config.Blockchain)) *Ser
 }
 
 func startWithCleanup(t *testing.T, s *Server) {
-	go s.Start()
+	s.Start()
 	t.Cleanup(func() {
 		s.Shutdown()
 	})


@@ -99,7 +99,7 @@ func TestSubscriptions(t *testing.T) {
 	defer chain.Close()
 	defer rpcSrv.Shutdown()
-	go rpcSrv.coreServer.Start()
+	rpcSrv.coreServer.Start()
 	defer rpcSrv.coreServer.Shutdown()
 	for _, feed := range subFeeds {
@@ -395,7 +395,7 @@ func TestFilteredNotaryRequestSubscriptions(t *testing.T) {
 	}
 	chain, rpcSrv, c, respMsgs, finishedFlag := initCleanServerAndWSClient(t)
-	go rpcSrv.coreServer.Start()
+	rpcSrv.coreServer.Start()
 	defer chain.Close()
 	defer rpcSrv.Shutdown()