[#66] Group pool config parameters

Signed-off-by: Denis Kirillov <denis@nspcc.ru>
Denis Kirillov 2022-10-13 09:41:57 +03:00 committed by Alex Vanin
parent 8d83320120
commit ac5750670f
6 changed files with 131 additions and 100 deletions


@@ -7,6 +7,19 @@ This document outlines major changes between releases.

 ### Added
 - Stop pool dial on SIGINT (#76)

+### Changed
+- Pool configuration parameters (#66)
+
+### Updating from v0.5.0
+All pool config parameters have been moved to the `pool` section, so you need to change:
+* `peers` -> `pool.peers` (`REST_GW_PEERS` -> `REST_GW_POOL_PEERS`)
+* `node-dial-timeout` -> `pool.node-dial-timeout` (`REST_GW_NODE_DIAL_TIMEOUT` -> `REST_GW_POOL_NODE_DIAL_TIMEOUT`)
+* `healthcheck-timeout` -> `pool.healthcheck-timeout` (`REST_GW_HEALTHCHECK_TIMEOUT` -> `REST_GW_POOL_HEALTHCHECK_TIMEOUT`)
+* `rebalance-timer` -> `pool.rebalance-timer` (`REST_GW_REBALANCE_TIMER` -> `REST_GW_POOL_REBALANCE_TIMER`)
+* `pool-error-threshold` -> `pool.error-threshold`
+
 ## [0.5.0] "Undercity" - 2022-10-07

 ### Added
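For example, under this mapping an environment-based deployment migrating from v0.5.0 renames its variables like so (the address is illustrative):

```shell
# before (v0.5.0)
REST_GW_PEERS_0_ADDRESS=grpc://s01.neofs.devenv:8080
REST_GW_NODE_DIAL_TIMEOUT=10s

# after
REST_GW_POOL_PEERS_0_ADDRESS=grpc://s01.neofs.devenv:8080
REST_GW_POOL_NODE_DIAL_TIMEOUT=10s
```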


@@ -90,8 +90,8 @@ Or you can also use a [Docker image](https://hub.docker.com/r/nspccdev/neofs-res

 ## Execution

 REST gateway itself is not a NeoFS node, so to access NeoFS it uses node's gRPC interface and you need to provide some
-node that it will connect to. This can be done either via `-p` parameter or via `REST_GW_PEERS_<N>_ADDRESS` and
-`REST_GW_PEERS_<N>_WEIGHT` environment variables (the gate supports multiple NeoFS nodes with weighted load balancing).
+node that it will connect to. This can be done either via `-p` parameter or via `REST_GW_POOL_PEERS_<N>_ADDRESS` and
+`REST_GW_POOL_PEERS_<N>_WEIGHT` environment variables (the gate supports multiple NeoFS nodes with weighted load balancing).

 If you're launching REST gateway in bundle with [neofs-dev-env](https://github.com/nspcc-dev/neofs-dev-env), you can get
 an IP address of the node in the output of the `make hosts` command
@@ -102,14 +102,14 @@ settings):

 ```shell
 $ neofs-rest-gw -p 192.168.130.72:8080
-$ REST_GW_PEERS_0_ADDRESS=192.168.130.72:8080 neofs-rest-gw
+$ REST_GW_POOL_PEERS_0_ADDRESS=192.168.130.72:8080 neofs-rest-gw
 ```

 It's also possible to specify the URI scheme (grpc or grpcs) when using `-p`:

 ```shell
 $ neofs-rest-gw -p grpc://192.168.130.72:8080
-$ REST_GW_PEERS_0_ADDRESS=grpcs://192.168.130.72:8080 neofs-rest-gw
+$ REST_GW_POOL_PEERS_0_ADDRESS=grpcs://192.168.130.72:8080 neofs-rest-gw
 ```
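The same peer setting also works with the Docker image mentioned above, passed via `-e` (image tag and port publishing are omitted here; adjust them to your deployment):

```shell
$ docker run -e REST_GW_POOL_PEERS_0_ADDRESS=grpc://192.168.130.72:8080 nspccdev/neofs-rest-gw
```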
## Configuration


@@ -35,10 +35,15 @@ const (
     defaultPoolErrorThreshold uint32 = 100

     // Pool config.
-    cfgNodeDialTimeout    = "node-dial-timeout"
-    cfgHealthcheckTimeout = "healthcheck-timeout"
-    cfgRebalance          = "rebalance-timer"
-    cfgPoolErrorThreshold = "pool-error-threshold"
+    cmdNodeDialTimeout    = "node-dial-timeout"
+    cfgNodeDialTimeout    = "pool." + cmdNodeDialTimeout
+    cmdHealthcheckTimeout = "healthcheck-timeout"
+    cfgHealthcheckTimeout = "pool." + cmdHealthcheckTimeout
+    cmdRebalance          = "rebalance-timer"
+    cfgRebalance          = "pool." + cmdRebalance
+    cfgPoolErrorThreshold = "pool.error-threshold"
+    cmdPeers              = "peers"
+    cfgPeers              = "pool." + cmdPeers

     // Metrics / Profiler.
     cfgPrometheusEnabled = "prometheus.enabled"
@@ -54,9 +59,6 @@ const (
     cfgWalletAddress    = "wallet.address"
     cfgWalletPassphrase = "wallet.passphrase"

-    // Peers.
-    cfgPeers = "peers"
-
     // Command line args.
     cmdHelp    = "help"
     cmdVersion = "version"
@@ -104,11 +106,11 @@ func config() *viper.Viper {
     flagSet.StringP(cmdWallet, "w", "", `path to the wallet`)
     flagSet.String(cmdAddress, "", `address of wallet account`)
     config := flagSet.String(cmdConfig, "", "config path")
-    flagSet.Duration(cfgNodeDialTimeout, defaultConnectTimeout, "gRPC node connect timeout")
-    flagSet.Duration(cfgHealthcheckTimeout, defaultHealthcheckTimeout, "gRPC healthcheck timeout")
-    flagSet.Duration(cfgRebalance, defaultRebalanceTimer, "gRPC connection rebalance timer")
+    flagSet.Duration(cmdNodeDialTimeout, defaultConnectTimeout, "gRPC node connect timeout")
+    flagSet.Duration(cmdHealthcheckTimeout, defaultHealthcheckTimeout, "gRPC healthcheck timeout")
+    flagSet.Duration(cmdRebalance, defaultRebalanceTimer, "gRPC connection rebalance timer")

-    peers := flagSet.StringArrayP(cfgPeers, "p", nil, "NeoFS nodes")
+    peers := flagSet.StringArrayP(cmdPeers, "p", nil, "NeoFS nodes")

     // init server flags
     restapi.BindDefaultFlags(flagSet)
@@ -131,6 +133,15 @@ func config() *viper.Viper {
     if err := v.BindPFlag(cfgPrometheusEnabled, flagSet.Lookup(cmdMetrics)); err != nil {
         panic(err)
     }
+    if err := v.BindPFlag(cfgNodeDialTimeout, flagSet.Lookup(cmdNodeDialTimeout)); err != nil {
+        panic(err)
+    }
+    if err := v.BindPFlag(cfgHealthcheckTimeout, flagSet.Lookup(cmdHealthcheckTimeout)); err != nil {
+        panic(err)
+    }
+    if err := v.BindPFlag(cfgRebalance, flagSet.Lookup(cmdRebalance)); err != nil {
+        panic(err)
+    }

     if err := v.BindPFlags(flagSet); err != nil {
         panic(err)
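To see why the constants are split into `cmd*` (flat CLI flag names) and `cfg*` (`pool.`-prefixed config keys), here is a minimal, self-contained sketch of the same `BindPFlag` pattern. It is illustrative only, not the gateway's actual wiring:

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/pflag"
	"github.com/spf13/viper"
)

const (
	cmdNodeDialTimeout = "node-dial-timeout"          // short name the user types on the CLI
	cfgNodeDialTimeout = "pool." + cmdNodeDialTimeout // grouped key used in the config file
)

func main() {
	flagSet := pflag.NewFlagSet("example", pflag.ExitOnError)
	flagSet.Duration(cmdNodeDialTimeout, 10*time.Second, "gRPC node connect timeout")
	_ = flagSet.Parse([]string{"--node-dial-timeout=5s"})

	v := viper.New()
	// Bind the flat flag to the grouped key, so the CLI flag and the
	// `pool.node-dial-timeout` YAML key resolve to the same config path.
	if err := v.BindPFlag(cfgNodeDialTimeout, flagSet.Lookup(cmdNodeDialTimeout)); err != nil {
		panic(err)
	}

	fmt.Println(v.GetDuration(cfgNodeDialTimeout)) // prints 5s
}
```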


@@ -20,28 +20,28 @@ REST_GW_LOGGER_LEVEL=debug
 # while it's healthy. Otherwise, the gateway uses the second node (grpc://s02.neofs.devenv:8080)
 # for 10% of requests and the third node for 90% of requests.
 # Endpoint.
-REST_GW_PEERS_0_ADDRESS=grpc://s01.neofs.devenv:8080
+REST_GW_POOL_PEERS_0_ADDRESS=grpc://s01.neofs.devenv:8080
 # Until nodes with the same priority level are healthy,
 # nodes with other priorities are not used.
 # The lower the value, the higher the priority.
-REST_GW_PEERS_0_PRIORITY=1
+REST_GW_POOL_PEERS_0_PRIORITY=1
 # Load distribution proportion for nodes with the same priority.
-REST_GW_PEERS_0_WEIGHT=1
+REST_GW_POOL_PEERS_0_WEIGHT=1

-REST_GW_PEERS_1_ADDRESS=grpc://s02.neofs.devenv:8080
-REST_GW_PEERS_1_PRIORITY=2
-REST_GW_PEERS_1_WEIGHT=1
+REST_GW_POOL_PEERS_1_ADDRESS=grpc://s02.neofs.devenv:8080
+REST_GW_POOL_PEERS_1_PRIORITY=2
+REST_GW_POOL_PEERS_1_WEIGHT=1

-REST_GW_PEERS_2_ADDRESS=grpc://s03.neofs.devenv:8080
-REST_GW_PEERS_2_PRIORITY=2
-REST_GW_PEERS_3_WEIGHT=9
+REST_GW_POOL_PEERS_2_ADDRESS=grpc://s03.neofs.devenv:8080
+REST_GW_POOL_PEERS_2_PRIORITY=2
+REST_GW_POOL_PEERS_2_WEIGHT=9

 # Timeout to dial node.
-REST_GW_NODE_DIAL_TIMEOUT=10s
+REST_GW_POOL_NODE_DIAL_TIMEOUT=10s
 # Timeout to check node health during rebalance.
-REST_GW_HEALTHCHECK_TIMEOUT=15s
+REST_GW_POOL_HEALTHCHECK_TIMEOUT=15s
 # Interval to check nodes' health.
-REST_GW_REBALANCE_TIMER=60s
+REST_GW_POOL_REBALANCE_TIMER=60s
 # The number of errors on connection after which the node is considered unhealthy.
 REST_GW_POOL_ERROR_THRESHOLD=100
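For instance, assuming the binary is on `PATH`, a minimal two-node run can be configured entirely from the environment (addresses taken from the sample above):

```shell
$ REST_GW_POOL_PEERS_0_ADDRESS=grpc://s01.neofs.devenv:8080 \
  REST_GW_POOL_PEERS_1_ADDRESS=grpc://s02.neofs.devenv:8080 \
  REST_GW_POOL_NODE_DIAL_TIMEOUT=10s \
  neofs-rest-gw
```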


@@ -17,11 +17,21 @@ logger:
   # Log level.
   level: debug

-# Nodes configuration.
-# This configuration make gateway use the first node (grpc://s01.neofs.devenv:8080)
-# while it's healthy. Otherwise, gateway use the second node (grpc://s01.neofs.devenv:8080)
-# for 10% of requests and the third node for 90% of requests.
-peers:
+pool:
+  # Timeout to dial node.
+  node-dial-timeout: 5s
+  # Timeout to check node health during rebalance.
+  healthcheck-timeout: 5s
+  # Interval to check nodes' health.
+  rebalance-timer: 30s
+  # The number of errors on connection after which the node is considered unhealthy.
+  error-threshold: 100
+
+  # Nodes configuration.
+  # This configuration makes the gateway use the first node (grpc://s01.neofs.devenv:8080)
+  # while it's healthy. Otherwise, the gateway uses the second node (grpc://s02.neofs.devenv:8080)
+  # for 10% of requests and the third node for 90% of requests.
+  peers:
     0:
       # Endpoint.
       address: grpc://s01.neofs.devenv:8080
@@ -40,15 +50,6 @@ peers:
       priority: 2
       weight: 9

-# Timeout to dial node.
-node-dial-timeout: 5s
-# Timeout to check node health during rebalance.
-healthcheck-timeout: 5s
-# Interval to check nodes health.
-rebalance-timer: 30s
-# The number of errors on connection after which node is considered as unhealthy.
-pool-error-threshold: 100
-
 # The listeners to enable, this can be repeated and defaults to the schemes in the swagger spec.
 scheme: [ http ]
 # Grace period for which to wait before killing idle connections
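Once the parameters are grouped under `pool`, the file can be passed to the gateway via its config-path flag (the `--config` spelling is assumed from the `cmdConfig` flag defined in the Go config code above; the file path is illustrative):

```shell
$ neofs-rest-gw --config config.yaml
```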


@@ -11,10 +11,10 @@ There are some custom types used for brevity:

 # Structure

 | Section      | Description                                      |
-|-----------------|-------------------------------------------------------|
+|--------------|--------------------------------------------------|
 | no section   | [General parameters](#general-section)           |
 | `wallet`     | [Wallet configuration](#wallet-section)          |
-| `peers`      | [Nodes configuration](#peers-section)            |
+| `pool`       | [Pool configuration](#pool-section)              |
 | `logger`     | [Logger configuration](#logger-section)          |
 | `pprof`      | [Pprof configuration](#pprof-section)            |
 | `prometheus` | [Prometheus configuration](#prometheus-section)  |
@@ -22,11 +22,6 @@ There are some custom types used for brevity:

 # General section

 ```yaml
-node-dial-timeout: 10s
-healthcheck-timeout: 15s
-rebalance-timer: 60s
-pool-error-threshold: 100
 scheme: [ http ]
 cleanup-timeout: 10s
 graceful-timeout: 15s
@@ -50,10 +45,6 @@ tls-write-timeout: 30s

 | Parameter              | Type       | Default value | Description                                                                                     |
 |------------------------|------------|---------------|-------------------------------------------------------------------------------------------------|
-| `node-dial-timeout`    | `duration` | `10s`         | Timeout to connect to a node.                                                                   |
-| `healthcheck-timeout`  | `duration` | `15s`         | Timeout to check node health during rebalance.                                                  |
-| `rebalance-timer`      | `duration` | `60s`         | Interval to check node health.                                                                  |
-| `pool-error-threshold` | `uint32`   | `100`         | The number of errors on connection after which node is considered as unhealthy.                 |
 | `scheme`               | `[]string` | `[http]`      | The listeners to enable, this can be repeated and defaults to the schemes in the swagger spec.  |
 | `cleanup-timeout`      | `duration` | `10s`         | Grace period for which to wait before killing idle connections.                                 |
 | `graceful-timeout`     | `duration` | `15s`         | Grace period for which to wait before shutting down the server.                                 |
@@ -87,17 +78,23 @@ wallet:
 | `address`    | `string` |               | Account address to get from wallet. If omitted, the default one will be used. |
 | `passphrase` | `string` |               | Passphrase to decrypt wallet.                                                 |

-# `peers` section
+# `pool` section

 ```yaml
-# Nodes configuration
-# This configuration makes the gateway use the first node (node1.neofs:8080)
-# while it's healthy. Otherwise, gateway uses the second node (node2.neofs:8080)
-# for 10% of requests and the third node (node3.neofs:8080) for 90% of requests.
-# Until nodes with the same priority level are healthy
-# nodes with other priority are not used.
-# The lower the value, the higher the priority.
-peers:
+pool:
+  node-dial-timeout: 10s
+  healthcheck-timeout: 15s
+  rebalance-timer: 60s
+  error-threshold: 100
+
+  # Nodes configuration
+  # This configuration makes the gateway use the first node (node1.neofs:8080)
+  # while it's healthy. Otherwise, the gateway uses the second node (node2.neofs:8080)
+  # for 10% of requests and the third node (node3.neofs:8080) for 90% of requests.
+  # Until nodes with the same priority level are healthy,
+  # nodes with other priorities are not used.
+  # The lower the value, the higher the priority.
+  peers:
     0:
       address: node1.neofs:8080
      priority: 1
@@ -113,7 +110,16 @@ peers:
 ```

 | Parameter             | Type       | Default value | Description                                                                       |
-|------------|----------|---------------|-----------------------------------------------------------------------------------------------------------------------------------------|
+|-----------------------|------------|---------------|-----------------------------------------------------------------------------------|
+| `node-dial-timeout`   | `duration` | `10s`         | Timeout to connect to a node.                                                     |
+| `healthcheck-timeout` | `duration` | `15s`         | Timeout to check node health during rebalance.                                    |
+| `rebalance-timer`     | `duration` | `60s`         | Interval to check node health.                                                    |
+| `error-threshold`     | `uint32`   | `100`         | The number of errors on connection after which the node is considered unhealthy.  |
+
+## `peers` section
+
+| Parameter  | Type     | Default value | Description                                                                                                                                                    |
+|------------|----------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------|
 | `address`  | `string` |               | Address of the storage node.                                                                                                                                   |
 | `priority` | `int`    | `1`           | Groups nodes: the gateway does not switch to another group until all nodes with the same priority are unhealthy. The lower the value, the higher the priority. |
 | `weight`   | `float`  | `1`           | Weight of the node within its priority group; requests are distributed to nodes proportionally to these values.                                                |
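As a worked example of the `weight` semantics: in the sample above, the two priority-2 nodes have weights 1 and 9, so while that group is active they receive 1/(1+9) = 10% and 9/(1+9) = 90% of requests respectively, which is exactly the split described in the sample config comments.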