network: change server Start() behavior
Previously the caller had to start the server in a separate goroutine; now that goroutine is created inside Start() itself. For normal server operation, the caller should wait for Start to finish. Also fixed the TestTryInitStateSync test, which was exiting before its logs were emitted.

Close #3112

Signed-off-by: Ekaterina Pavlova <ekt@morphbits.io>
parent 224808975b
commit f8dc5ec44f
5 changed files with 10 additions and 10 deletions
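On the caller side, the change amounts to dropping the `go` keyword; a minimal sketch (here `serv` stands for an already constructed network server, whose setup is not part of this diff):

	// before this commit: the caller had to run the server loop itself
	go serv.Start()

	// after this commit: Start() spawns its own goroutine and returns,
	// so a plain call is enough and the caller simply waits for it to finish
	serv.Start()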
@@ -498,7 +498,7 @@ func startServer(ctx *cli.Context) error {
 	rpcServer := rpcsrv.New(chain, cfg.ApplicationConfiguration.RPC, serv, oracleSrv, log, errChan)
 	serv.AddService(&rpcServer)
 
-	go serv.Start()
+	serv.Start()
 	if !cfg.ApplicationConfiguration.RPC.StartWhenSynchronized {
 		// Run RPC server in a separate routine. This is necessary to avoid a potential
 		// deadlock: Start() can write errors to errChan which is not yet read in the
@@ -164,7 +164,7 @@ func NewTestChain(t *testing.T, f func(*config.Config), run bool) (*core.Blockch
 	})
 	require.NoError(t, err)
 	netSrv.AddConsensusService(cons, cons.OnPayload, cons.OnTransaction)
-	go netSrv.Start()
+	netSrv.Start()
 	errCh := make(chan error, 2)
 	rpcServer := rpcsrv.New(chain, cfg.ApplicationConfiguration.RPC, netSrv, nil, logger, errCh)
 	rpcServer.Start()
@@ -262,7 +262,7 @@ func (s *Server) ID() uint32 {
 }
 
 // Start will start the server and its underlying transport. Calling it twice
-// is an error.
+// is an error. Caller should wait for Start to finish for normal server operation.
 func (s *Server) Start() {
 	s.log.Info("node started",
 		zap.Uint32("blockHeight", s.chain.BlockHeight()),
@@ -285,7 +285,7 @@ func (s *Server) Start() {
 	setServerAndNodeVersions(s.UserAgent, strconv.FormatUint(uint64(s.id), 10))
 	setNeoGoVersion(config.Version)
 	setSeverID(strconv.FormatUint(uint64(s.id), 10))
-	s.run()
+	go s.run()
 }
 
 // Shutdown disconnects all peers and stops listening. Calling it twice is an error,
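Putting the two Server hunks above together, Start() now ends by launching the main loop itself; a rough sketch of the resulting shape (only the lines visible in this diff are confirmed, everything else is elided or assumed):

	func (s *Server) Start() {
		s.log.Info("node started",
			zap.Uint32("blockHeight", s.chain.BlockHeight())) // further log fields not shown in the diff
		// ... transport and discovery startup elided ...
		setServerAndNodeVersions(s.UserAgent, strconv.FormatUint(uint64(s.id), 10))
		setNeoGoVersion(config.Version)
		setSeverID(strconv.FormatUint(uint64(s.id), 10))
		go s.run() // previously a blocking s.run(); the loop is now the server's own goroutine
	}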
@@ -90,7 +90,7 @@ func TestServerStartAndShutdown(t *testing.T) {
 	t.Run("no consensus", func(t *testing.T) {
 		s := newTestServer(t, ServerConfig{})
 
-		go s.Start()
+		s.Start()
 		p := newLocalPeer(t, s)
 		s.register <- p
 		require.Eventually(t, func() bool { return 1 == s.PeerCount() }, time.Second, time.Millisecond*10)
@@ -110,7 +110,7 @@ func TestServerStartAndShutdown(t *testing.T) {
 		cons := new(fakeConsensus)
 		s.AddConsensusService(cons, cons.OnPayload, cons.OnTransaction)
 
-		go s.Start()
+		s.Start()
 		p := newLocalPeer(t, s)
 		s.register <- p
 
@@ -312,7 +312,7 @@ func TestServerNotSendsVerack(t *testing.T) {
 	s.id = 1
 	finished := make(chan struct{})
 	go func() {
-		s.run()
+		go s.run()
 		close(finished)
 	}()
 	t.Cleanup(func() {
@@ -389,7 +389,7 @@ func startTestServer(t *testing.T, protocolCfg ...func(*config.Blockchain)) *Ser
 }
 
 func startWithCleanup(t *testing.T, s *Server) {
-	go s.Start()
+	s.Start()
 	t.Cleanup(func() {
 		s.Shutdown()
 	})
@@ -99,7 +99,7 @@ func TestSubscriptions(t *testing.T) {
 	defer chain.Close()
 	defer rpcSrv.Shutdown()
 
-	go rpcSrv.coreServer.Start()
+	rpcSrv.coreServer.Start()
 	defer rpcSrv.coreServer.Shutdown()
 
 	for _, feed := range subFeeds {
@@ -395,7 +395,7 @@ func TestFilteredNotaryRequestSubscriptions(t *testing.T) {
 	}
 
 	chain, rpcSrv, c, respMsgs, finishedFlag := initCleanServerAndWSClient(t)
-	go rpcSrv.coreServer.Start()
+	rpcSrv.coreServer.Start()
 
 	defer chain.Close()
 	defer rpcSrv.Shutdown()