network: add BroadcastFactor to control gossip, fix #2678

This commit is contained in:
Roman Khimov 2022-10-13 22:14:14 +03:00
parent 3ed140abbf
commit c17b2afab5
4 changed files with 20 additions and 3 deletions

View file

@@ -19,6 +19,7 @@ node-related settings described in the table below.
| Address | `string` | `0.0.0.0` | Node address that P2P protocol handler binds to. | | Address | `string` | `0.0.0.0` | Node address that P2P protocol handler binds to. |
| AnnouncedPort | `uint16` | Same as `NodePort` | Node port which should be used to announce node's port on P2P layer, it can differ from the `NodePort` the node is bound to (for example, if your node is behind NAT). | | AnnouncedPort | `uint16` | Same as `NodePort` | Node port which should be used to announce node's port on P2P layer, it can differ from the `NodePort` the node is bound to (for example, if your node is behind NAT). |
| AttemptConnPeers | `int` | `20` | Number of connections to try to establish when the connection count drops below the `MinPeers` value.| | AttemptConnPeers | `int` | `20` | Number of connections to try to establish when the connection count drops below the `MinPeers` value.|
| BroadcastFactor | `int` | `0` | Multiplier that is used to determine the optimal number of gossip fan-out peers for broadcasted messages (0-100). By default it's zero, so the node uses the most optimized value depending on the estimated network size (`2.5×log(size)`); for example, the node may have 20 peers and calculate that it needs to broadcast messages to just 10 of them. With BroadcastFactor set to 100 it will always send messages to all peers; any value in-between 0 and 100 is used for a weighted calculation, for example if it's 30 then 13 neighbors will be used in the previous case. |
| DBConfiguration | [DB Configuration](#DB-Configuration) | | Describes configuration for database. See the [DB Configuration](#DB-Configuration) section for details. | | DBConfiguration | [DB Configuration](#DB-Configuration) | | Describes configuration for database. See the [DB Configuration](#DB-Configuration) section for details. |
| DialTimeout | `int64` | `0` | Maximum duration a single dial may take in seconds. | | DialTimeout | `int64` | `0` | Maximum duration a single dial may take in seconds. |
| ExtensiblePoolSize | `int` | `20` | Maximum amount of the extensible payloads from a single sender stored in a local pool. | | ExtensiblePoolSize | `int` | `20` | Maximum amount of the extensible payloads from a single sender stored in a local pool. |

View file

@@ -9,6 +9,8 @@ type ApplicationConfiguration struct {
Address string `yaml:"Address"` Address string `yaml:"Address"`
AnnouncedNodePort uint16 `yaml:"AnnouncedPort"` AnnouncedNodePort uint16 `yaml:"AnnouncedPort"`
AttemptConnPeers int `yaml:"AttemptConnPeers"` AttemptConnPeers int `yaml:"AttemptConnPeers"`
// BroadcastFactor is the factor (0-100) controlling gossip fan-out number optimization.
BroadcastFactor int `yaml:"BroadcastFactor"`
DBConfiguration dbconfig.DBConfiguration `yaml:"DBConfiguration"` DBConfiguration dbconfig.DBConfiguration `yaml:"DBConfiguration"`
DialTimeout int64 `yaml:"DialTimeout"` DialTimeout int64 `yaml:"DialTimeout"`
LogPath string `yaml:"LogPath"` LogPath string `yaml:"LogPath"`
@@ -36,6 +38,7 @@ func (a *ApplicationConfiguration) EqualsButServices(o *ApplicationConfiguration
if a.Address != o.Address || if a.Address != o.Address ||
a.AnnouncedNodePort != o.AnnouncedNodePort || a.AnnouncedNodePort != o.AnnouncedNodePort ||
a.AttemptConnPeers != o.AttemptConnPeers || a.AttemptConnPeers != o.AttemptConnPeers ||
a.BroadcastFactor != o.BroadcastFactor ||
a.DBConfiguration != o.DBConfiguration || a.DBConfiguration != o.DBConfiguration ||
a.DialTimeout != o.DialTimeout || a.DialTimeout != o.DialTimeout ||
a.ExtensiblePoolSize != o.ExtensiblePoolSize || a.ExtensiblePoolSize != o.ExtensiblePoolSize ||

View file

@@ -35,6 +35,7 @@ const (
defaultAttemptConnPeers = 20 defaultAttemptConnPeers = 20
defaultMaxPeers = 100 defaultMaxPeers = 100
defaultExtensiblePoolSize = 20 defaultExtensiblePoolSize = 20
defaultBroadcastFactor = 0
maxBlockBatch = 200 maxBlockBatch = 200
minPoolCount = 30 minPoolCount = 30
) )
@@ -222,6 +223,13 @@ func newServerFromConstructors(config ServerConfig, chain Ledger, stSync StateSy
s.AttemptConnPeers = defaultAttemptConnPeers s.AttemptConnPeers = defaultAttemptConnPeers
} }
if s.BroadcastFactor < 0 || s.BroadcastFactor > 100 {
s.log.Info("bad BroadcastFactor configured, using the default value",
zap.Int("configured", s.BroadcastFactor),
zap.Int("actual", defaultBroadcastFactor))
s.BroadcastFactor = defaultBroadcastFactor
}
s.transport = newTransport(s) s.transport = newTransport(s)
s.discovery = newDiscovery( s.discovery = newDiscovery(
s.Seeds, s.Seeds,
@@ -1366,6 +1374,7 @@ func (s *Server) iteratePeersWithSendMsg(msg *Message, send func(Peer, context.C
replies = make(chan error, peerN) // Cache is there just to make goroutines exit faster. replies = make(chan error, peerN) // Cache is there just to make goroutines exit faster.
ctx, cancel = context.WithTimeout(context.Background(), s.TimePerBlock/2) ctx, cancel = context.WithTimeout(context.Background(), s.TimePerBlock/2)
) )
enoughN = (enoughN*(100-s.BroadcastFactor) + peerN*s.BroadcastFactor) / 100
for _, peer := range peers { for _, peer := range peers {
go func(p Peer, ctx context.Context, pkt []byte) { go func(p Peer, ctx context.Context, pkt []byte) {
// Do this before packet is sent, reader thread can get the reply before this routine wakes up. // Do this before packet is sent, reader thread can get the reply before this routine wakes up.

View file

@@ -78,6 +78,9 @@ type (
// ExtensiblePoolSize is the size of the pool for extensible payloads from a single sender. // ExtensiblePoolSize is the size of the pool for extensible payloads from a single sender.
ExtensiblePoolSize int ExtensiblePoolSize int
// BroadcastFactor is the factor (0-100) for fan-out optimization.
BroadcastFactor int
} }
) )
@@ -107,5 +110,6 @@ func NewServerConfig(cfg config.Config) ServerConfig {
P2PNotaryCfg: appConfig.P2PNotary, P2PNotaryCfg: appConfig.P2PNotary,
StateRootCfg: appConfig.StateRoot, StateRootCfg: appConfig.StateRoot,
ExtensiblePoolSize: appConfig.ExtensiblePoolSize, ExtensiblePoolSize: appConfig.ExtensiblePoolSize,
BroadcastFactor: appConfig.BroadcastFactor,
} }
} }