[#168] node: Refactor node config

Resolve containedctx linter for cfg

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
Dmitrii Stepanov 2023-03-23 17:59:14 +03:00
parent 8426d25f4b
commit a7c79c773a
20 changed files with 93 additions and 83 deletions
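The containedctx linter rejects context.Context values stored in struct fields, since a struct-held context hides its lifetime from callers. This commit applies the usual fix: drop the ctx/ctxCancel fields, thread ctx through call sites as an explicit parameter, and keep only a done channel in the struct for shutdown signaling. A minimal sketch of the before/after pattern (the worker and run names here are illustrative, not from this codebase):

// Sketch only: illustrates the pattern this commit applies; the worker
// and run names are hypothetical, not from the codebase.
package main

import (
    "context"
    "fmt"
)

// Before: storing ctx in a struct hides its lifetime from callers and
// trips the containedctx linter:
//
//	type worker struct {
//		ctx context.Context // flagged by containedctx
//	}

// After: the struct keeps only a done channel; ctx is a parameter.
type worker struct {
    done chan struct{}
}

func (w *worker) run(ctx context.Context) {
    select {
    case <-ctx.Done():
        fmt.Println("caller cancelled:", ctx.Err())
    case <-w.done:
        fmt.Println("explicit shutdown signal")
    }
}

func main() {
    w := &worker{done: make(chan struct{})}
    ctx, cancel := context.WithCancel(context.Background())
    cancel() // simulate a termination signal from the caller
    w.run(ctx)
}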


@@ -303,10 +303,8 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
 // the application life cycle.
 // It should not contain any read configuration values, component-specific
 // helpers and fields.
-// nolint: containedctx
 type internals struct {
-    ctx         context.Context
-    ctxCancel   func()
+    done        chan struct{}
     internalErr chan error // channel for internal application errors at runtime
     appCfg      *config.Config
@@ -570,7 +568,7 @@ func initCfg(appCfg *config.Config) *cfg {
     fatalOnErr(err)

     c.internals = internals{
-        ctx:         context.Background(),
+        done:        make(chan struct{}),
         appCfg:      appCfg,
         internalErr: make(chan error),
         log:         log,
@@ -940,7 +938,7 @@ type dCmp struct {
     reloadFunc func() error
 }

-func (c *cfg) signalWatcher() {
+func (c *cfg) signalWatcher(ctx context.Context) {
     ch := make(chan os.Signal, 1)
     signal.Notify(ch, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
@@ -949,7 +947,7 @@ func (c *cfg) signalWatcher() {
         case sig := <-ch:
             switch sig {
             case syscall.SIGHUP:
-                c.reloadConfig()
+                c.reloadConfig(ctx)
             case syscall.SIGTERM, syscall.SIGINT:
                 c.log.Info("termination signal has been received, stopping...")
                 // TODO (@acid-ant): #49 need to cover case when stuck at the middle(node health UNDEFINED or STARTING)
@@ -971,7 +969,7 @@ func (c *cfg) signalWatcher() {
     }
 }

-func (c *cfg) reloadConfig() {
+func (c *cfg) reloadConfig(ctx context.Context) {
     c.log.Info("SIGHUP has been received, rereading configuration...")

     err := c.readConfig(c.appCfg)
@@ -999,10 +997,10 @@ func (c *cfg) reloadConfig() {
         } else {
             cmp.preReload = disableMetricsSvc
         }
-        components = append(components, dCmp{cmp.name, cmp.reload})
+        components = append(components, dCmp{cmp.name, func() error { return cmp.reload(ctx) }})
     }
     if cmp, updated := pprofComponent(c); updated {
-        components = append(components, dCmp{cmp.name, cmp.reload})
+        components = append(components, dCmp{cmp.name, func() error { return cmp.reload(ctx) }})
     }

     // Storage Engine
@@ -1012,7 +1010,7 @@ func (c *cfg) reloadConfig() {
         rcfg.AddShard(optsWithID.configID, optsWithID.shOpts)
     }

-    err = c.cfgObject.cfgLocalStorage.localStorage.Reload(rcfg)
+    err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg)
     if err != nil {
         c.log.Error("storage engine configuration update", zap.Error(err))
         return
@@ -1033,7 +1031,7 @@ func (c *cfg) reloadConfig() {
 func (c *cfg) shutdown() {
     c.setHealthStatus(control.HealthStatus_SHUTTING_DOWN)

-    c.ctxCancel()
+    c.done <- struct{}{}
     for i := range c.closers {
         c.closers[len(c.closers)-1-i].fn()
     }

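Worth noting in the reloadConfig hunk above: dCmp's reloadFunc field keeps its context-free func() error signature, and the new ctx-aware reload methods are adapted to it by capturing ctx in a closure. A hedged sketch of that adapter pattern, with simplified stand-in types:

// Sketch of the closure-adapter pattern used for dCmp above; the types
// and reload function are simplified stand-ins, not the real ones.
package main

import (
    "context"
    "fmt"
)

type dCmp struct {
    name       string
    reloadFunc func() error // field signature stays context-free
}

// reload is a stand-in for the new ctx-aware component reload methods.
func reload(ctx context.Context) error {
    select {
    case <-ctx.Done():
        return ctx.Err()
    default:
        fmt.Println("reloaded")
        return nil
    }
}

func main() {
    ctx := context.Background()
    // The closure captures ctx, so dCmp's field type is unchanged.
    cmp := dCmp{name: "metrics", reloadFunc: func() error { return reload(ctx) }}
    if err := cmp.reloadFunc(); err != nil {
        fmt.Println("reload failed:", err)
    }
}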

@@ -65,7 +65,7 @@ func initControlService(c *cfg) {
     control.RegisterControlServiceServer(c.cfgControlService.server, ctlSvc)

     c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) {
-        runAndLog(c, "control", false, func(c *cfg) {
+        runAndLog(ctx, c, "control", false, func(context.Context, *cfg) {
             fatalOnErr(c.cfgControlService.server.Serve(lis))
         })
     }))


@@ -41,14 +41,14 @@ func (cmp *httpComponent) init(c *cfg) {
     c.workers = append(c.workers, worker{
         cmp.name,
         func(ctx context.Context) {
-            runAndLog(c, cmp.name, false, func(c *cfg) {
+            runAndLog(ctx, c, cmp.name, false, func(context.Context, *cfg) {
                 fatalOnErr(srv.Serve())
             })
         },
     })
 }

-func (cmp *httpComponent) reload() error {
+func (cmp *httpComponent) reload(ctx context.Context) error {
     if cmp.preReload != nil {
         cmp.preReload(cmp.cfg)
     }
@@ -64,7 +64,7 @@ func (cmp *httpComponent) reload() error {
     cmp.init(cmp.cfg)
     // Start worker
     if cmp.enabled {
-        startWorker(cmp.cfg, *getWorker(cmp.cfg, cmp.name))
+        startWorker(ctx, cmp.cfg, *getWorker(cmp.cfg, cmp.name))
     }
     return nil
 }


@@ -56,15 +56,17 @@ func main() {
     c := initCfg(appCfg)

-    initApp(c)
+    ctx, cancel := context.WithCancel(context.Background())
+    initApp(ctx, c)

     c.setHealthStatus(control.HealthStatus_STARTING)

-    bootUp(c)
+    bootUp(ctx, c)

     c.setHealthStatus(control.HealthStatus_READY)

-    wait(c)
+    wait(c, cancel)
 }

 func initAndLog(c *cfg, name string, initializer func(*cfg)) {
@@ -73,12 +75,10 @@ func initAndLog(c *cfg, name string, initializer func(*cfg)) {
     c.log.Info(fmt.Sprintf("%s service has been successfully initialized", name))
 }

-func initApp(c *cfg) {
-    c.ctx, c.ctxCancel = context.WithCancel(context.Background())
-
+func initApp(ctx context.Context, c *cfg) {
     c.wg.Add(1)
     go func() {
-        c.signalWatcher()
+        c.signalWatcher(ctx)
         c.wg.Done()
     }()
@@ -91,7 +91,7 @@ func initApp(c *cfg) {
     initAndLog(c, "storage engine", func(c *cfg) {
         fatalOnErr(c.cfgObject.cfgLocalStorage.localStorage.Open())
-        fatalOnErr(c.cfgObject.cfgLocalStorage.localStorage.Init())
+        fatalOnErr(c.cfgObject.cfgLocalStorage.localStorage.Init(ctx))
     })

     initAndLog(c, "gRPC", initGRPC)
@@ -105,12 +105,12 @@ func initApp(c *cfg) {
     initAndLog(c, "tree", initTreeService)
     initAndLog(c, "control", initControlService)
-    initAndLog(c, "morph notifications", listenMorphNotifications)
+    initAndLog(c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) })
 }

-func runAndLog(c *cfg, name string, logSuccess bool, starter func(*cfg)) {
+func runAndLog(ctx context.Context, c *cfg, name string, logSuccess bool, starter func(context.Context, *cfg)) {
     c.log.Info(fmt.Sprintf("starting %s service...", name))
-    starter(c)
+    starter(ctx, c)

     if logSuccess {
         c.log.Info(fmt.Sprintf("%s service started successfully", name))
@@ -130,20 +130,22 @@ func stopAndLog(c *cfg, name string, stopper func() error) {
     c.log.Debug(fmt.Sprintf("%s service has been stopped", name))
 }

-func bootUp(c *cfg) {
-    runAndLog(c, "NATS", true, connectNats)
-    runAndLog(c, "gRPC", false, serveGRPC)
-    runAndLog(c, "notary", true, makeAndWaitNotaryDeposit)
+func bootUp(ctx context.Context, c *cfg) {
+    runAndLog(ctx, c, "NATS", true, connectNats)
+    runAndLog(ctx, c, "gRPC", false, func(_ context.Context, c *cfg) { serveGRPC(c) })
+    runAndLog(ctx, c, "notary", true, makeAndWaitNotaryDeposit)

     bootstrapNode(c)
-    startWorkers(c)
+    startWorkers(ctx, c)
 }

-func wait(c *cfg) {
+func wait(c *cfg, cancel func()) {
     c.log.Info("application started",
         zap.String("version", misc.Version))

-    <-c.ctx.Done() // graceful shutdown
+    <-c.done // graceful shutdown
+    cancel()

     c.log.Debug("waiting for all processes to stop")
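With these changes, cancellation ownership moves into main: shutdown() now signals c.done instead of calling a struct-held ctxCancel, and wait() receives that signal and invokes the cancel function created in main(). A condensed sketch of the handoff, with bodies trimmed to the relevant lines:

// Condensed sketch of the shutdown handoff after this refactor; the cfg
// struct and function bodies are trimmed to the relevant lines.
package main

import "context"

type cfg struct {
    done chan struct{}
}

func (c *cfg) shutdown() {
    c.done <- struct{}{} // replaces the old c.ctxCancel() call
}

func wait(c *cfg, cancel func()) {
    <-c.done // graceful shutdown signal from shutdown()
    cancel() // the main-owned cancel now stops all ctx-derived workers
}

func main() {
    c := &cfg{done: make(chan struct{})}
    _, cancel := context.WithCancel(context.Background())
    go c.shutdown()
    wait(c, cancel)
}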


@@ -108,7 +108,7 @@ func initMorphComponents(c *cfg) {
     c.cfgNetmap.wrapper = wrap
 }

-func makeAndWaitNotaryDeposit(c *cfg) {
+func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) {
     // skip notary deposit in non-notary environments
     if !c.cfgMorph.notaryEnabled {
         return
@@ -125,7 +125,7 @@ func makeAndWaitNotaryDeposit(c *cfg) {
         return
     }

-    err = waitNotaryDeposit(c, tx)
+    err = waitNotaryDeposit(ctx, c, tx)
     fatalOnErr(err)
 }
@@ -154,11 +154,11 @@ var (
     errNotaryDepositTimeout = errors.New("notary deposit tx has not appeared in the network")
 )

-func waitNotaryDeposit(c *cfg, tx util.Uint256) error {
+func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256) error {
     for i := 0; i < notaryDepositRetriesAmount; i++ {
         select {
-        case <-c.ctx.Done():
-            return c.ctx.Err()
+        case <-ctx.Done():
+            return ctx.Err()
         default:
         }
@@ -171,7 +171,7 @@ func waitNotaryDeposit(c *cfg, tx util.Uint256) error {
             return errNotaryDepositFail
         }

-        err = c.cfgMorph.client.Wait(c.ctx, 1)
+        err = c.cfgMorph.client.Wait(ctx, 1)
         if err != nil {
             return fmt.Errorf("could not wait for one block in chain: %w", err)
         }
@@ -180,7 +180,7 @@ func waitNotaryDeposit(c *cfg, tx util.Uint256) error {
     return errNotaryDepositTimeout
 }

-func listenMorphNotifications(c *cfg) {
+func listenMorphNotifications(ctx context.Context, c *cfg) {
     // listenerPoolCap is a capacity of a
     // worker pool inside the listener. It
     // is used to prevent blocking in neo-go:
@@ -200,7 +200,7 @@ func listenMorphNotifications(c *cfg) {
         c.log.Warn("can't get last processed side chain block number", zap.String("error", err.Error()))
     }

-    subs, err = subscriber.New(c.ctx, &subscriber.Params{
+    subs, err = subscriber.New(ctx, &subscriber.Params{
         Log:            c.log,
         StartFromBlock: fromSideChainBlock,
         Client:         c.cfgMorph.client,
@@ -214,9 +214,9 @@ func listenMorphNotifications(c *cfg) {
     })
     fatalOnErr(err)

-    c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) {
-        runAndLog(c, "morph notification", false, func(c *cfg) {
-            lis.ListenWithError(ctx, c.internalErr)
+    c.workers = append(c.workers, newWorkerFromFunc(func(wCtx context.Context) {
+        runAndLog(wCtx, c, "morph notification", false, func(lCtx context.Context, c *cfg) {
+            lis.ListenWithError(lCtx, c.internalErr)
         })
     }))
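waitNotaryDeposit keeps its bounded retry loop but now polls the caller's context with a non-blocking select at the top of each iteration, so cancellation is observed between attempts. A self-contained sketch of that check (the retry helper and its names are hypothetical, with the chain query stubbed out):

// Sketch of the non-blocking cancellation check used in the retry loop;
// waitWithRetries and its callback are hypothetical stand-ins.
package main

import (
    "context"
    "errors"
    "fmt"
)

var errTimeout = errors.New("operation has not completed in time")

func waitWithRetries(ctx context.Context, retries int, done func() bool) error {
    for i := 0; i < retries; i++ {
        // Non-blocking select: return promptly if the caller cancelled,
        // otherwise fall through to the next attempt.
        select {
        case <-ctx.Done():
            return ctx.Err()
        default:
        }
        if done() {
            return nil
        }
    }
    return errTimeout
}

func main() {
    err := waitWithRetries(context.Background(), 3, func() bool { return false })
    fmt.Println(err) // prints the timeout error after three attempts
}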


@@ -1,6 +1,7 @@
 package main

 import (
+    "context"
     "encoding/hex"
     "fmt"
@@ -155,13 +156,13 @@ func initNotifications(c *cfg) {
     }
 }

-func connectNats(c *cfg) {
+func connectNats(ctx context.Context, c *cfg) {
     if !c.cfgNotifications.enabled {
         return
     }

     endpoint := nodeconfig.Notification(c.appCfg).Endpoint()
-    err := c.cfgNotifications.nw.w.Connect(c.ctx, endpoint)
+    err := c.cfgNotifications.nw.w.Connect(ctx, endpoint)
     if err != nil {
         panic(fmt.Sprintf("could not connect to a nats endpoint %s: %v", endpoint, err))
     }


@@ -15,17 +15,17 @@ func newWorkerFromFunc(fn func(ctx context.Context)) worker {
     }
 }

-func startWorkers(c *cfg) {
+func startWorkers(ctx context.Context, c *cfg) {
     for _, wrk := range c.workers {
-        startWorker(c, wrk)
+        startWorker(ctx, c, wrk)
     }
 }

-func startWorker(c *cfg, wrk worker) {
+func startWorker(ctx context.Context, c *cfg, wrk worker) {
     c.wg.Add(1)
     go func(w worker) {
-        w.fn(c.ctx)
+        w.fn(ctx)
         c.wg.Done()
     }(wrk)
 }
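startWorker now receives the context from main rather than reading c.ctx, so every worker goroutine observes cancellation as soon as wait() calls cancel(). A minimal usage sketch mirroring the shapes above (reimplemented here for illustration, not the actual file):

// Minimal sketch of the new worker wiring; worker and startWorker mirror
// the shapes in the diff but are reimplemented for illustration.
package main

import (
    "context"
    "sync"
)

type worker struct {
    fn func(ctx context.Context)
}

type cfg struct {
    wg      sync.WaitGroup
    workers []worker
}

func startWorker(ctx context.Context, c *cfg, wrk worker) {
    c.wg.Add(1)
    go func(w worker) {
        w.fn(ctx) // ctx now comes from main, not from a struct field
        c.wg.Done()
    }(wrk)
}

func main() {
    c := &cfg{}
    ctx, cancel := context.WithCancel(context.Background())
    startWorker(ctx, c, worker{fn: func(ctx context.Context) { <-ctx.Done() }})
    cancel() // releases the worker
    c.wg.Wait()
}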