[#1770] node: Support logger config rereading

Signed-off-by: Pavel Karpy <carpawell@nspcc.ru>
Pavel Karpy 2022-09-28 12:19:23 +03:00 committed by Pavel Karpy
parent 8c75cb1dad
commit b6806ea6b9
2 changed files with 123 additions and 63 deletions


@@ -54,7 +54,7 @@ func main() {
 	)
 	exitErr(err)
-	log, err := logger.NewLogger(logPrm)
+	log, err := logger.NewLogger(&logPrm)
 	exitErr(err)
 	ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)


@@ -87,6 +87,10 @@ type applicationConfiguration struct {
 	// has already been read
 	_read bool
 
+	LoggerCfg struct {
+		level string
+	}
+
 	EngineCfg struct {
 		errorThreshold uint32
 		shardPoolSize uint32
@@ -182,6 +186,12 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
 	a._read = true
 
+	// Logger
+
+	a.LoggerCfg.level = loggerconfig.Level(c)
+
+	// Storage Engine
+
 	a.EngineCfg.errorThreshold = engineconfig.ShardErrorThreshold(c)
 	a.EngineCfg.shardPoolSize = engineconfig.ShardPoolSize(c)
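
The hunk above caches the configured log level inside applicationConfiguration instead of querying the raw config tree on demand, so a later reload pass can simply call readConfig again and pick up the new value. Below is a minimal sketch of that read-then-cache pattern; the get callback and the "logger.level" key are assumptions made for this example, not the actual neofs-node config API.

```go
// Illustrative stand-in for applicationConfiguration.readConfig.
type exampleAppConfig struct {
	read      bool
	LoggerCfg struct {
		level string
	}
}

func (a *exampleAppConfig) readConfig(get func(key string) string) error {
	if a.read {
		// repeated reads (e.g. on SIGHUP) start from a clean state
		*a = exampleAppConfig{}
	}
	a.read = true

	// Logger
	a.LoggerCfg.level = get("logger.level")

	return nil
}
```
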
@@ -346,10 +356,17 @@ type shared struct {
 	metricsCollector *metrics.NodeMetrics
 }
 
+// dynamicConfiguration stores parameters of the
+// components that support runtime reconfiguration
+type dynamicConfiguration struct {
+	logger *logger.Prm
+}
+
 type cfg struct {
 	applicationConfiguration
 	internals
 	shared
+	dynamicConfiguration
 
 	// configuration of the internal
 	// services
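
dynamicConfiguration keeps the long-lived parameter objects of the components that can be reconfigured while the node is running, and embedding it in cfg makes them reachable from the reload path. The pattern works because the component is built around the very object that is retained: with zap, for instance, a kept atomic level can be flipped after the logger already exists. The sketch below illustrates that idea using zap directly; it is not the neofs-node logger package.

```go
package main

import (
	"fmt"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// examplePrm plays the role of the retained *logger.Prm: the same instance is
// used both to build the logger and to change its level later.
type examplePrm struct {
	level zap.AtomicLevel
}

func newExamplePrm() *examplePrm {
	return &examplePrm{level: zap.NewAtomicLevelAt(zapcore.InfoLevel)}
}

func (p *examplePrm) SetLevelString(s string) error {
	var lvl zapcore.Level
	if err := lvl.UnmarshalText([]byte(s)); err != nil {
		return fmt.Errorf("incorrect log level format %q: %w", s, err)
	}
	p.level.SetLevel(lvl)
	return nil
}

func (p *examplePrm) Build() (*zap.Logger, error) {
	cfg := zap.NewProductionConfig()
	cfg.Level = p.level // the logger follows later SetLevelString calls
	return cfg.Build()
}

func main() {
	prm := newExamplePrm()
	log, err := prm.Build()
	if err != nil {
		panic(err)
	}
	log.Debug("dropped: level is still info")
	_ = prm.SetLevelString("debug") // "reload": no new logger is constructed
	log.Debug("now visible")
}
```
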
@@ -483,16 +500,19 @@ type cfgReputation struct {
 var persistateSideChainLastBlockKey = []byte("side_chain_last_processed_block")
 
 func initCfg(appCfg *config.Config) *cfg {
+	c := &cfg{}
+
+	err := c.readConfig(appCfg)
+	if err != nil {
+		panic(fmt.Errorf("config reading: %w", err))
+	}
+
 	key := nodeconfig.Key(appCfg)
 
-	var logPrm logger.Prm
-	err := logPrm.SetLevelString(
-		loggerconfig.Level(appCfg),
-	)
+	logPrm, err := c.loggerPrm()
 	fatalOnErr(err)
 
-	log, err := logger.NewLogger(&logPrm)
+	log, err := logger.NewLogger(logPrm)
 	fatalOnErr(err)
 
 	var netAddr network.AddressGroup
@@ -519,8 +539,7 @@ func initCfg(appCfg *config.Config) *cfg {
 	reputationWorkerPool, err := ants.NewPool(notificationHandlerPoolSize)
 	fatalOnErr(err)
 
-	c := &cfg{
-		internals: internals{
+	c.internals = internals{
 		ctx: context.Background(),
 		appCfg: appCfg,
 		internalErr: make(chan error),
@@ -528,8 +547,8 @@ func initCfg(appCfg *config.Config) *cfg {
 		wg: new(sync.WaitGroup),
 		apiVersion: version.Current(),
 		healthStatus: atomic.NewInt32(int32(control.HealthStatus_HEALTH_STATUS_UNDEFINED)),
-	},
-	shared: shared{
+	}
+	c.shared = shared{
 		key: key,
 		binPublicKey: key.PublicKey().Bytes(),
 		localAddr: netAddr,
@@ -541,41 +560,34 @@ func initCfg(appCfg *config.Config) *cfg {
 			AllowExternal: apiclientconfig.AllowExternal(appCfg),
 		}),
 		persistate: persistate,
-	},
-	cfgAccounting: cfgAccounting{
+	}
+	c.cfgAccounting = cfgAccounting{
 		scriptHash: contractsconfig.Balance(appCfg),
-	},
-	cfgContainer: cfgContainer{
+	}
+	c.cfgContainer = cfgContainer{
 		scriptHash: contractsconfig.Container(appCfg),
 		workerPool: containerWorkerPool,
-	},
-	cfgNetmap: cfgNetmap{
+	}
+	c.cfgNetmap = cfgNetmap{
 		scriptHash: contractsconfig.Netmap(appCfg),
 		state: netState,
 		workerPool: netmapWorkerPool,
 		needBootstrap: !relayOnly,
 		reBoostrapTurnedOff: atomic.NewBool(relayOnly),
-	},
-	cfgGRPC: cfgGRPC{
+	}
+	c.cfgGRPC = cfgGRPC{
 		maxChunkSize: maxChunkSize,
 		maxAddrAmount: maxAddrAmount,
-	},
-	cfgMorph: cfgMorph{
+	}
+	c.cfgMorph = cfgMorph{
 		proxyScriptHash: contractsconfig.Proxy(appCfg),
-	},
-	cfgObject: cfgObject{
+	}
+	c.cfgObject = cfgObject{
 		pool: initObjectPool(appCfg),
-	},
-	cfgReputation: cfgReputation{
+	}
+	c.cfgReputation = cfgReputation{
 		scriptHash: contractsconfig.Reputation(appCfg),
 		workerPool: reputationWorkerPool,
-		},
-	}
+	}
 
-	// returned err must be nil during first time read
-	err = c.readConfig(appCfg)
-	if err != nil {
-		panic(fmt.Errorf("config reading: %w", err))
-	}
-
 	user.IDFromKey(&c.ownerIDFromKey, key.PrivateKey.PublicKey)
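
The long hunk above is mostly mechanical: cfg used to be assembled in a single composite literal, but readConfig now has to run before loggerPrm() can derive parameters from the cached values, so the struct is allocated first and its sections are assigned one by one. A compressed sketch of that two-phase ordering, with placeholder types rather than the real cfg:

```go
// Placeholder types only; this just shows the allocate-read-derive order.
type miniCfg struct {
	loggerLevel string // stands in for the applicationConfiguration cache
	logPrm      string // stands in for the derived *logger.Prm
}

func initMini(readLevel func() string) *miniCfg {
	c := &miniCfg{}

	// 1. populate the configuration cache first
	c.loggerLevel = readLevel()

	// 2. only now derive component parameters from the cached values
	c.logPrm = "level=" + c.loggerLevel

	return c
}
```
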
@@ -721,6 +733,22 @@ func (c *cfg) shardOpts() []shardOptsWithID {
 	return shards
 }
 
+func (c *cfg) loggerPrm() (*logger.Prm, error) {
+	// check if it has been initialized before
+	if c.dynamicConfiguration.logger == nil {
+		c.dynamicConfiguration.logger = new(logger.Prm)
+	}
+
+	// (re)init with the freshly read configuration
+	err := c.dynamicConfiguration.logger.SetLevelString(c.LoggerCfg.level)
+	if err != nil {
+		// not expected since validation should be performed before
+		panic(fmt.Sprintf("incorrect log level format: %s", c.LoggerCfg.level))
+	}
+
+	return c.dynamicConfiguration.logger, nil
+}
+
 func (c *cfg) LocalAddress() network.AddressGroup {
 	return c.localAddr
 }
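
loggerPrm() allocates the single logger.Prm lazily on first use and, on every call, re-applies the level currently cached in LoggerCfg; the panic mirrors the code's assumption that the level string was validated when the configuration was read. A stripped-down sketch of the same accessor shape, with illustrative names only:

```go
package sketch

import "fmt"

type exampleParams struct{ level string }

func (p *exampleParams) SetLevelString(s string) error {
	switch s {
	case "debug", "info", "warn", "error":
		p.level = s
		return nil
	}
	return fmt.Errorf("unsupported level %q", s)
}

type exampleNode struct {
	loggerPrm   *exampleParams // long-lived, handed to the logger once
	loggerLevel string         // refreshed by every config read
}

func (n *exampleNode) prm() *exampleParams {
	if n.loggerPrm == nil {
		// first call: allocate the object the logger will keep using
		n.loggerPrm = new(exampleParams)
	}
	// every call: push the freshly cached level into the same object
	if err := n.loggerPrm.SetLevelString(n.loggerLevel); err != nil {
		panic(fmt.Sprintf("incorrect log level format: %s", n.loggerLevel))
	}
	return n.loggerPrm
}
```
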
@@ -829,6 +857,13 @@ func (c *cfg) ObjectServiceLoad() float64 {
 	return float64(c.cfgObject.pool.putRemote.Running()) / float64(c.cfgObject.pool.putRemoteCapacity)
 }
 
+type dCfg struct {
+	name string
+	cfg  interface {
+		Reload() error
+	}
+}
+
 func (c *cfg) configWatcher(ctx context.Context) {
 	ch := make(chan os.Signal, 1)
 	signal.Notify(ch, syscall.SIGHUP)
@@ -844,6 +879,22 @@ func (c *cfg) configWatcher(ctx context.Context) {
 				continue
 			}
 
+			// all the components are expected to support
+			// Logger's dynamic reconfiguration approach
+			var components []dCfg
+
+			// Logger
+
+			logPrm, err := c.loggerPrm()
+			if err != nil {
+				c.log.Error("logger configuration preparation", zap.Error(err))
+				continue
+			}
+
+			components = append(components, dCfg{name: "logger", cfg: logPrm})
+
+			// Storage Engine
+
 			var rcfg engine.ReConfiguration
 			for _, optsWithID := range c.shardOpts() {
 				rcfg.AddShard(optsWithID.configID, optsWithID.shOpts)
@@ -855,6 +906,15 @@ func (c *cfg) configWatcher(ctx context.Context) {
 				continue
 			}
 
+			for _, component := range components {
+				err = component.cfg.Reload()
+				if err != nil {
+					c.log.Error("updated configuration applying",
+						zap.String("component", component.name),
+						zap.Error(err))
+				}
+			}
+
 			c.log.Info("configuration has been reloaded successfully")
 		case <-ctx.Done():
 			return
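
Taken together, configWatcher turns SIGHUP into a generic reload pass: re-read the configuration, rebuild the per-component parameter objects, and ask every collected dCfg entry to Reload(). The following self-contained sketch reproduces that loop with a no-op component standing in for the real logger and storage engine.

```go
package main

import (
	"context"
	"log"
	"os"
	"os/signal"
	"syscall"
)

// reloadable mirrors the anonymous interface embedded in dCfg.
type reloadable interface {
	Reload() error
}

type namedComponent struct {
	name string
	cfg  reloadable
}

type noopComponent struct{}

func (noopComponent) Reload() error { return nil }

func watchConfig(ctx context.Context, components func() []namedComponent) {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGHUP)

	for {
		select {
		case <-ch:
			// a real implementation would re-read the configuration here
			for _, comp := range components() {
				if err := comp.cfg.Reload(); err != nil {
					log.Printf("updated configuration applying: component=%s: %v", comp.name, err)
				}
			}
			log.Println("configuration has been reloaded successfully")
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer stop()

	go watchConfig(ctx, func() []namedComponent {
		return []namedComponent{{name: "logger", cfg: noopComponent{}}}
	})

	// keep the process alive so SIGHUP can be sent during a manual test
	<-ctx.Done()
}
```
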