[#1619] logger: Simplify logger config reloading
All checks were successful
Vulncheck / Vulncheck (push) Successful in 1m45s
Build / Build Components (push) Successful in 2m22s
Pre-commit hooks / Pre-commit (push) Successful in 2m25s
Tests and linters / gopls check (push) Successful in 4m16s
OCI image / Build container images (push) Successful in 5m26s
Tests and linters / Lint (push) Successful in 6m0s
Tests and linters / Run gofumpt (push) Successful in 6m45s
Tests and linters / Tests (push) Successful in 7m8s
Tests and linters / Tests with -race (push) Successful in 7m6s
Tests and linters / Staticcheck (push) Successful in 7m10s

Change-Id: Ide892b250304b8cdb6c279f5f728c3b35f05df54
Signed-off-by: Anton Nikiforov <an.nikiforov@yadro.com>
Anton Nikiforov 2025-01-28 13:29:10 +03:00 committed by Evgenii Stratonikov
parent 3f4717a37f
commit 049a650b89
5 changed files with 31 additions and 60 deletions
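
In short, logger.Prm stops carrying a hidden back-reference to the Logger it configured: NewLogger now takes Prm by value, Prm.Reload() is removed, and runtime reconfiguration goes through the new Logger.Reload(prm) with a freshly built Prm. Below is a minimal sketch of the resulting call pattern, using only the API visible in this diff; the viper keys match the ones in the hunks further down, everything else is illustrative.

    package main

    import (
        "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
        "github.com/spf13/viper"
    )

    func main() {
        cfg := viper.New()
        cfg.SetDefault("logger.level", "info")
        cfg.SetDefault("logger.timestamp", true)

        // Prm is now a short-lived value rebuilt from the current config.
        var prm logger.Prm
        if err := prm.SetLevelString(cfg.GetString("logger.level")); err != nil {
            panic(err)
        }
        prm.PrependTimestamp = cfg.GetBool("logger.timestamp")

        // NewLogger takes the parameters by value; no pointer is retained.
        log, err := logger.NewLogger(prm)
        if err != nil {
            panic(err)
        }

        // On reconfiguration, callers build another Prm and hand it over.
        var next logger.Prm
        if err := next.SetLevelString("debug"); err != nil {
            panic(err)
        }
        log.Reload(next)
    }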

View file

@@ -9,6 +9,7 @@ import (
     configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
     "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
     control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
+    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
     "github.com/spf13/viper"
     "go.uber.org/zap"
 )
@@ -38,13 +39,14 @@ func reloadConfig() error {
     }
     cmode.Store(cfg.GetBool("node.kludge_compatibility_mode"))
     audit.Store(cfg.GetBool("audit.enabled"))
+    var logPrm logger.Prm
     err = logPrm.SetLevelString(cfg.GetString("logger.level"))
     if err != nil {
         return err
     }
     logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp")
+    log.Reload(logPrm)
 
-    return logPrm.Reload()
+    return nil
 }
 
 func watchForSignal(ctx context.Context, cancel func()) {

View file

@@ -31,7 +31,6 @@ const (
 var (
     wg         = new(sync.WaitGroup)
     intErr     = make(chan error) // internal inner ring errors
-    logPrm     = new(logger.Prm)
     innerRing  *innerring.Server
     pprofCmp   *pprofComponent
     metricsCmp *httpComponent
@@ -70,6 +69,7 @@ func main() {
     metrics := irMetrics.NewInnerRingMetrics()
 
+    var logPrm logger.Prm
     err = logPrm.SetLevelString(
         cfg.GetString("logger.level"),
     )

View file

@@ -473,7 +473,6 @@ type shared struct {
 // dynamicConfiguration stores parameters of the
 // components that supports runtime reconfigurations.
 type dynamicConfiguration struct {
-    logger  *logger.Prm
     pprof   *httpComponent
     metrics *httpComponent
 }
@@ -714,7 +713,8 @@ func initCfg(appCfg *config.Config) *cfg {
     netState.metrics = c.metricsCollector
 
-    logPrm := c.loggerPrm()
+    logPrm, err := c.loggerPrm()
+    fatalOnErr(err)
     logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook()
     log, err := logger.NewLogger(logPrm)
     fatalOnErr(err)
@@ -1076,26 +1076,22 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
     return sh
 }
 
-func (c *cfg) loggerPrm() *logger.Prm {
-    // check if it has been inited before
-    if c.dynamicConfiguration.logger == nil {
-        c.dynamicConfiguration.logger = new(logger.Prm)
-    }
-
+func (c *cfg) loggerPrm() (logger.Prm, error) {
+    var prm logger.Prm
     // (re)init read configuration
-    err := c.dynamicConfiguration.logger.SetLevelString(c.LoggerCfg.level)
+    err := prm.SetLevelString(c.LoggerCfg.level)
     if err != nil {
         // not expected since validation should be performed before
-        panic("incorrect log level format: " + c.LoggerCfg.level)
+        return logger.Prm{}, errors.New("incorrect log level format: " + c.LoggerCfg.level)
     }
-    err = c.dynamicConfiguration.logger.SetDestination(c.LoggerCfg.destination)
+    err = prm.SetDestination(c.LoggerCfg.destination)
     if err != nil {
         // not expected since validation should be performed before
-        panic("incorrect log destination format: " + c.LoggerCfg.destination)
+        return logger.Prm{}, errors.New("incorrect log destination format: " + c.LoggerCfg.destination)
     }
-    c.dynamicConfiguration.logger.PrependTimestamp = c.LoggerCfg.timestamp
+    prm.PrependTimestamp = c.LoggerCfg.timestamp
 
-    return c.dynamicConfiguration.logger
+    return prm, nil
 }
 
 func (c *cfg) LocalAddress() network.AddressGroup {
@@ -1335,11 +1331,7 @@ func (c *cfg) reloadConfig(ctx context.Context) {
     // all the components are expected to support
     // Logger's dynamic reconfiguration approach
-
-    // Logger
-    logPrm := c.loggerPrm()
-
-    components := c.getComponents(ctx, logPrm)
+    components := c.getComponents(ctx)
 
     // Object
     c.cfgObject.tombstoneLifetime.Store(c.ObjectCfg.tombstoneLifetime)
@@ -1377,10 +1369,17 @@ func (c *cfg) reloadConfig(ctx context.Context) {
     c.log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
 }
 
-func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
+func (c *cfg) getComponents(ctx context.Context) []dCmp {
     var components []dCmp
 
-    components = append(components, dCmp{"logger", logPrm.Reload})
+    components = append(components, dCmp{"logger", func() error {
+        prm, err := c.loggerPrm()
+        if err != nil {
+            return err
+        }
+        c.log.Reload(prm)
+        return nil
+    }})
     components = append(components, dCmp{"runtime", func() error {
         setRuntimeParameters(ctx, c)
         return nil
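
The loop that consumes getComponents is not part of this diff. As a rough sketch only (the dCmp shape and the applyReload helper below are assumptions for illustration, not the project's actual definitions), the practical effect of the error-returning loggerPrm is that a bad logger setting now surfaces as a per-component reload error instead of a panic:

    package main

    import "fmt"

    // Hypothetical shape of a dynamically reloadable component; the real dCmp
    // type is defined in the node code and is not shown in this diff.
    type dCmp struct {
        name       string
        reloadFunc func() error
    }

    // applyReload is an illustrative helper: it runs every component's reload
    // function and collects failures instead of aborting the process.
    func applyReload(components []dCmp) []error {
        var errs []error
        for _, cmp := range components {
            if err := cmp.reloadFunc(); err != nil {
                errs = append(errs, fmt.Errorf("reload %s component: %w", cmp.name, err))
            }
        }
        return errs
    }

    func main() {
        components := []dCmp{
            {name: "logger", reloadFunc: func() error {
                return fmt.Errorf("incorrect log level format: %q", "verbose")
            }},
            {name: "runtime", reloadFunc: func() error { return nil }},
        }
        for _, err := range applyReload(components) {
            fmt.Println(err) // reported and logged, the node keeps running
        }
    }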

View file

@@ -130,7 +130,7 @@ func TestECWriter(t *testing.T) {
     nodeKey, err := keys.NewPrivateKey()
     require.NoError(t, err)
 
-    log, err := logger.NewLogger(nil)
+    log, err := logger.NewLogger(logger.Prm{})
     require.NoError(t, err)
 
     var n nmKeys
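
For tests, the zero value of logger.Prm replaces the old nil pointer: with no destination set, NewLogger falls back to the console logger, and the zero zapcore.Level is InfoLevel. A possible helper under those assumptions (newTestLogger and its package are hypothetical, not part of this commit):

    package example

    import (
        "testing"

        "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
        "github.com/stretchr/testify/require"
    )

    // newTestLogger builds a console logger at the default (info) level from
    // the zero-value Prm, which is now safe to pass by value.
    func newTestLogger(t *testing.T) *logger.Logger {
        t.Helper()

        log, err := logger.NewLogger(logger.Prm{})
        require.NoError(t, err)

        return log
    }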

View file

@@ -23,16 +23,8 @@ type Logger struct {
 // Parameters that have been connected to the Logger support its
 // configuration changing.
 //
-// Passing Prm after a successful connection via the NewLogger, connects
-// the Prm to a new instance of the Logger.
-//
-// See also Reload, SetLevelString.
+// See also Logger.Reload, SetLevelString.
 type Prm struct {
-    // link to the created Logger
-    // instance; used for a runtime
-    // reconfiguration
-    _log *Logger
-
     // support runtime rereading
     level zapcore.Level
@@ -73,22 +65,6 @@ func (p *Prm) SetDestination(d string) error {
     return nil
 }
 
-// Reload reloads configuration of a connected instance of the Logger.
-// Returns ErrLoggerNotConnected if no connection has been performed.
-// Returns any reconfiguration error from the Logger directly.
-func (p Prm) Reload() error {
-    if p._log == nil {
-        // incorrect logger usage
-        panic("parameters are not connected to any Logger")
-    }
-
-    return p._log.reload(p)
-}
-
-func defaultPrm() *Prm {
-    return new(Prm)
-}
-
 // NewLogger constructs a new zap logger instance. Constructing with nil
 // parameters is safe: default values will be used then.
 // Passing non-nil parameters after a successful creation (non-error) allows
@@ -100,10 +76,7 @@ func defaultPrm() {
 // - ISO8601 time encoding.
 //
 // Logger records a stack trace for all messages at or above fatal level.
-func NewLogger(prm *Prm) (*Logger, error) {
-    if prm == nil {
-        prm = defaultPrm()
-    }
+func NewLogger(prm Prm) (*Logger, error) {
     switch prm.dest {
     case DestinationUndefined, DestinationStdout:
         return newConsoleLogger(prm)
@@ -114,7 +87,7 @@ func NewLogger(prm *Prm) (*Logger, error) {
     }
 }
 
-func newConsoleLogger(prm *Prm) (*Logger, error) {
+func newConsoleLogger(prm Prm) (*Logger, error) {
     lvl := zap.NewAtomicLevelAt(prm.level)
     c := zap.NewProductionConfig()
@@ -139,12 +112,11 @@ func newConsoleLogger(prm *Prm) (*Logger, error) {
     }
 
     l := &Logger{z: lZap, lvl: lvl}
-    prm._log = l
 
     return l, nil
 }
 
-func newJournaldLogger(prm *Prm) (*Logger, error) {
+func newJournaldLogger(prm Prm) (*Logger, error) {
     lvl := zap.NewAtomicLevelAt(prm.level)
     c := zap.NewProductionConfig()
@@ -181,14 +153,12 @@ func newJournaldLogger(prm *Prm) (*Logger, error) {
     lZap := zap.New(samplingCore, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)), zap.AddCallerSkip(1))
 
     l := &Logger{z: lZap, lvl: lvl}
-    prm._log = l
 
     return l, nil
 }
 
-func (l *Logger) reload(prm Prm) error {
+func (l *Logger) Reload(prm Prm) {
     l.lvl.SetLevel(prm.level)
-    return nil
 }
 
 func (l *Logger) WithOptions(options ...zap.Option) {
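
One behavioral note that follows from the last hunk: Logger.Reload only moves the shared atomic level; the destination and timestamp settings chosen at construction time are not touched. A small sketch under that assumption (bumpToDebug and the package name are illustrative, not part of this change):

    package example

    import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"

    // bumpToDebug is a hypothetical helper: it raises an existing logger's
    // level in place. Per the hunk above, Reload consumes only prm.level;
    // changing the destination or PrependTimestamp needs a new Logger.
    func bumpToDebug(log *logger.Logger) error {
        var prm logger.Prm
        if err := prm.SetLevelString("debug"); err != nil {
            return err
        }
        log.Reload(prm)
        return nil
    }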