Mirror of https://github.com/nspcc-dev/neo-go.git
network: broadcast pings instead of sending them to every peer
Follow the general rules of broadcasts. Even though a ping is somewhat different from Inv, we just want some reply from our neighbors to see whether we're behind; we don't strictly need all of them for that.
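To make the resulting flow concrete, here is a minimal, self-contained Go sketch of the idea. All names and values in it (message, peer, server, the two-thirds fan-out, the one-minute timeout) are illustrative assumptions, not the actual neo-go types or constants: the server hands one ping to its generic broadcast path, and the per-peer accounting (pingSent counter plus a timeout timer) happens inside that path, which is what this commit arranges via SetPingTimer.

package main

import (
	"fmt"
	"sync"
	"time"
)

type message struct{ command string }

type peer struct {
	name      string
	lock      sync.Mutex
	pingSent  int
	pingTimer *time.Timer
}

// setPingTimer mirrors the new SetPingTimer contract: count the outstanding
// ping and arm a timer that shuts the connection down if no reply arrives.
func (p *peer) setPingTimer(timeout time.Duration) {
	p.lock.Lock()
	p.pingSent++
	if p.pingTimer == nil {
		p.pingTimer = time.AfterFunc(timeout, func() {
			fmt.Printf("%s: ping timeout, disconnecting\n", p.name)
		})
	}
	p.lock.Unlock()
}

type server struct{ peers []*peer }

// broadcastMessage stands in for the generic broadcast path: like other
// broadcasts it may reach only a subset of peers (an arbitrary two thirds
// here), which is enough for pings since any reply tells us whether we're
// behind. Ping accounting is done per peer on the way out, analogous to
// iteratePeersWithSendMsg calling SetPingTimer for CMDPing in the diff below.
func (s *server) broadcastMessage(msg message) {
	limit := 2*len(s.peers)/3 + 1
	if limit > len(s.peers) {
		limit = len(s.peers)
	}
	for _, p := range s.peers[:limit] {
		if msg.command == "ping" {
			p.setPingTimer(time.Minute) // previously a separate SendPing did this
		}
		fmt.Printf("enqueue %s to %s\n", msg.command, p.name)
	}
}

func main() {
	s := &server{peers: []*peer{{name: "a"}, {name: "b"}, {name: "c"}, {name: "d"}}}
	s.broadcastMessage(message{command: "ping"})
}

The point of the change is visible in broadcastMessage: the ping rides the same best-effort path as other broadcasts, so not every peer necessarily gets one, and any single reply is enough to tell whether we are behind.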
Commit b345581c72 (parent ec4983e88e)
4 changed files with 11 additions and 18 deletions
@@ -159,10 +159,8 @@ func (p *localPeer) HandleVersionAck() error {
 	p.handshaked = true
 	return nil
 }
-func (p *localPeer) SendPing(m *Message) error {
+func (p *localPeer) SetPingTimer() {
 	p.pingSent++
-	_ = p.EnqueueMessage(m)
-	return nil
 }
 func (p *localPeer) HandlePing(ping *payload.Ping) error {
 	p.lastBlockIndex = ping.LastBlockIndex
@@ -59,10 +59,9 @@ type Peer interface {
 	Handshaked() bool
 	IsFullNode() bool
 
-	// SendPing enqueues a ping message to be sent to the peer and does
-	// appropriate protocol handling like timeouts and outstanding pings
-	// management.
-	SendPing(*Message) error
+	// SetPingTimer adds an outgoing ping to the counter and sets a PingTimeout
+	// timer that will shut the connection down in case of no response.
+	SetPingTimer()
 	// SendVersion checks handshake status and sends a version message to
 	// the peer.
 	SendVersion() error
@@ -465,10 +465,7 @@ func (s *Server) runProto() {
 			return
 		case <-pingTimer.C:
 			if s.chain.BlockHeight() == prevHeight {
-				// Get a copy of s.peers to avoid holding a lock while sending.
-				for _, peer := range s.getPeers(nil) {
-					_ = peer.SendPing(NewMessage(CMDPing, payload.NewPing(s.chain.BlockHeight(), s.id)))
-				}
+				s.broadcastMessage(NewMessage(CMDPing, payload.NewPing(s.chain.BlockHeight(), s.id)))
 			}
 			pingTimer.Reset(s.PingInterval)
 		}
@@ -1371,6 +1368,9 @@ func (s *Server) iteratePeersWithSendMsg(msg *Message, send func(Peer, context.C
 			if msg.Command == CMDGetAddr {
 				p.AddGetAddrSent()
 			}
+			if msg.Command == CMDPing {
+				p.SetPingTimer()
+			}
 			replies <- send(p, ctx, pkt)
 		}(peer, ctx, pkt)
 	}
@@ -454,12 +454,9 @@ func (p *TCPPeer) LastBlockIndex() uint32 {
 	return p.lastBlockIndex
 }
 
-// SendPing sends a ping message to the peer and does an appropriate accounting of
-// outstanding pings and timeouts.
-func (p *TCPPeer) SendPing(msg *Message) error {
-	if !p.Handshaked() {
-		return errStateMismatch
-	}
+// SetPingTimer adds an outgoing ping to the counter and sets a PingTimeout timer
+// that will shut the connection down in case of no response.
+func (p *TCPPeer) SetPingTimer() {
 	p.lock.Lock()
 	p.pingSent++
 	if p.pingTimer == nil {
@@ -468,7 +465,6 @@ func (p *TCPPeer) SendPing(msg *Message) error {
 		})
 	}
 	p.lock.Unlock()
-	return p.EnqueueMessage(msg)
 }
 
 // HandlePing handles a ping message received from the peer.