[#195] Add tags support #195
|
@ -44,6 +44,7 @@ import (
|
|||||||||||||||||
"github.com/valyala/fasthttp"
|
||||||||||||||||||
"go.opentelemetry.io/otel/trace"
|
||||||||||||||||||
"go.uber.org/zap"
|
||||||||||||||||||
"go.uber.org/zap/zapcore"
|
||||||||||||||||||
"golang.org/x/exp/slices"
|
||||||||||||||||||
)
|
||||||||||||||||||
|
||||||||||||||||||
|
@ -51,7 +52,6 @@ type (
|
|||||||||||||||||
app struct {
|
||||||||||||||||||
ctx context.Context
|
||||||||||||||||||
log *zap.Logger
|
||||||||||||||||||
logLevel zap.AtomicLevel
|
||||||||||||||||||
pool *pool.Pool
|
||||||||||||||||||
dkirillov marked this conversation as resolved
Outdated
|
||||||||||||||||||
treePool *treepool.Pool
|
||||||||||||||||||
key *keys.PrivateKey
|
||||||||||||||||||
|
@ -94,6 +94,7 @@ type (
|
|||||||||||||||||
reconnectInterval time.Duration
|
||||||||||||||||||
dialerSource *internalnet.DialerSource
|
||||||||||||||||||
workerPoolSize int
|
||||||||||||||||||
logLevelConfig *logLevelConfig
|
||||||||||||||||||
|
||||||||||||||||||
mu sync.RWMutex
|
||||||||||||||||||
dkirillov marked this conversation as resolved
Outdated
dkirillov
commented
It seems we don't use `mu` to protect `logLevelConfig`, so this field should be placed above `mu`.
|
||||||||||||||||||
defaultTimestamp bool
|
||||||||||||||||||
|
@ -113,6 +114,15 @@ type (
|
|||||||||||||||||
enableFilepathFallback bool
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
	// tagsConfig holds per-tag log level overrides.
	tagsConfig struct {
		// tagLogs maps tag name (string) -> zapcore.Level.
		// sync.Map is used because updates (SIGHUP reload) are rare
		// while lookups happen on every filtered log write.
		tagLogs sync.Map
	}
|
||||||||||||||||||
|
||||||||||||||||||
	// logLevelConfig bundles the global runtime-adjustable log level
	// with the per-tag level configuration.
	logLevelConfig struct {
		logLevel   zap.AtomicLevel // global level, adjustable at runtime
		tagsConfig *tagsConfig     // per-tag overrides
	}
|
||||||||||||||||||
|
||||||||||||||||||
CORS struct {
|
||||||||||||||||||
AllowOrigin string
|
||||||||||||||||||
AllowMethods []string
|
||||||||||||||||||
|
@ -123,14 +133,91 @@ type (
|
|||||||||||||||||
}
|
||||||||||||||||||
)
|
||||||||||||||||||
|
||||||||||||||||||
func newLogLevel(v *viper.Viper) zap.AtomicLevel {
|
||||||||||||||||||
ll, err := getLogLevel(v)
|
||||||||||||||||||
if err != nil {
|
||||||||||||||||||
panic(err.Error())
|
||||||||||||||||||
}
|
||||||||||||||||||
atomicLogLevel := zap.NewAtomicLevel()
|
||||||||||||||||||
atomicLogLevel.SetLevel(ll)
|
||||||||||||||||||
return atomicLogLevel
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
func newTagsConfig(v *viper.Viper, ll zapcore.Level) *tagsConfig {
|
||||||||||||||||||
var t tagsConfig
|
||||||||||||||||||
if err := t.update(v, ll); err != nil {
|
||||||||||||||||||
// panic here is analogue of the similar panic during common log level initialization.
|
||||||||||||||||||
panic(err.Error())
|
||||||||||||||||||
}
|
||||||||||||||||||
return &t
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
func newLogLevelConfig(lvl zap.AtomicLevel, tagsConfig *tagsConfig) *logLevelConfig {
|
||||||||||||||||||
return &logLevelConfig{
|
||||||||||||||||||
logLevel: lvl,
|
||||||||||||||||||
tagsConfig: tagsConfig,
|
||||||||||||||||||
}
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
func (l *logLevelConfig) update(cfg *viper.Viper, log *zap.Logger) {
|
||||||||||||||||||
if lvl, err := getLogLevel(cfg); err != nil {
|
||||||||||||||||||
log.Warn(logs.LogLevelWontBeUpdated, zap.Error(err), logs.TagField(logs.TagApp))
|
||||||||||||||||||
} else {
|
||||||||||||||||||
l.logLevel.SetLevel(lvl)
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
if err := l.tagsConfig.update(cfg, l.logLevel.Level()); err != nil {
|
||||||||||||||||||
log.Warn(logs.TagsLogConfigWontBeUpdated, zap.Error(err), logs.TagField(logs.TagApp))
|
||||||||||||||||||
}
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
func (t *tagsConfig) LevelEnabled(tag string, tgtLevel zapcore.Level) bool {
|
||||||||||||||||||
lvl, ok := t.tagLogs.Load(tag)
|
||||||||||||||||||
if !ok {
|
||||||||||||||||||
return false
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
return lvl.(zapcore.Level).Enabled(tgtLevel)
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
func (t *tagsConfig) update(cfg *viper.Viper, ll zapcore.Level) error {
|
||||||||||||||||||
tags, err := fetchLogTagsConfig(cfg, ll)
|
||||||||||||||||||
if err != nil {
|
||||||||||||||||||
return err
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
t.tagLogs.Range(func(key, value any) bool {
|
||||||||||||||||||
k := key.(string)
|
||||||||||||||||||
v := value.(zapcore.Level)
|
||||||||||||||||||
|
||||||||||||||||||
if lvl, ok := tags[k]; ok {
|
||||||||||||||||||
if lvl != v {
|
||||||||||||||||||
t.tagLogs.Store(key, lvl)
|
||||||||||||||||||
}
|
||||||||||||||||||
} else {
|
||||||||||||||||||
t.tagLogs.Delete(key)
|
||||||||||||||||||
delete(tags, k)
|
||||||||||||||||||
}
|
||||||||||||||||||
return true
|
||||||||||||||||||
})
|
||||||||||||||||||
|
||||||||||||||||||
for k, v := range tags {
|
||||||||||||||||||
t.tagLogs.Store(k, v)
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
return nil
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
func newApp(ctx context.Context, cfg *appCfg) App {
|
||||||||||||||||||
logSettings := &loggerSettings{}
|
||||||||||||||||||
log := pickLogger(cfg.config(), logSettings)
|
||||||||||||||||||
logLevel := newLogLevel(cfg.config())
|
||||||||||||||||||
tagConfig := newTagsConfig(cfg.config(), logLevel.Level())
|
||||||||||||||||||
logConfig := newLogLevelConfig(logLevel, tagConfig)
|
||||||||||||||||||
log := pickLogger(cfg.config(), logConfig.logLevel, logSettings, tagConfig)
|
||||||||||||||||||
|
||||||||||||||||||
a := &app{
|
||||||||||||||||||
ctx: ctx,
|
||||||||||||||||||
log: log.logger,
|
||||||||||||||||||
logLevel: log.lvl,
|
||||||||||||||||||
cfg: cfg,
|
||||||||||||||||||
loggerSettings: logSettings,
|
||||||||||||||||||
webServer: new(fasthttp.Server),
|
||||||||||||||||||
|
@ -138,7 +225,7 @@ func newApp(ctx context.Context, cfg *appCfg) App {
|
|||||||||||||||||
bucketCache: cache.NewBucketCache(getBucketCacheOptions(cfg.config(), log.logger), cfg.config().GetBool(cfgFeaturesTreePoolNetmapSupport)),
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
a.initAppSettings()
|
||||||||||||||||||
a.initAppSettings(logConfig)
|
||||||||||||||||||
|
||||||||||||||||||
// -- setup FastHTTP server --
|
||||||||||||||||||
a.webServer.Name = "frost-http-gw"
|
||||||||||||||||||
|
@ -172,11 +259,12 @@ func (a *app) config() *viper.Viper {
|
|||||||||||||||||
return a.cfg.config()
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
func (a *app) initAppSettings() {
|
||||||||||||||||||
func (a *app) initAppSettings(lc *logLevelConfig) {
|
||||||||||||||||||
a.settings = &appSettings{
|
||||||||||||||||||
reconnectInterval: fetchReconnectInterval(a.config()),
|
||||||||||||||||||
dialerSource: getDialerSource(a.log, a.config()),
|
||||||||||||||||||
workerPoolSize: a.config().GetInt(cfgWorkerPoolSize),
|
||||||||||||||||||
logLevelConfig: lc,
|
||||||||||||||||||
}
|
||||||||||||||||||
a.settings.update(a.config(), a.log)
|
||||||||||||||||||
}
|
||||||||||||||||||
|
@ -324,7 +412,7 @@ func (a *app) initResolver() {
|
|||||||||||||||||
var err error
|
||||||||||||||||||
a.resolver, err = resolver.NewContainerResolver(a.getResolverConfig())
|
||||||||||||||||||
if err != nil {
|
||||||||||||||||||
a.log.Fatal(logs.FailedToCreateResolver, zap.Error(err))
|
||||||||||||||||||
a.log.Fatal(logs.FailedToCreateResolver, zap.Error(err), logs.TagField(logs.TagApp))
|
||||||||||||||||||
}
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
|
@ -338,11 +426,12 @@ func (a *app) getResolverConfig() ([]string, *resolver.Config) {
|
|||||||||||||||||
order := a.config().GetStringSlice(cfgResolveOrder)
|
||||||||||||||||||
if resolveCfg.RPCAddress == "" {
|
||||||||||||||||||
order = remove(order, resolver.NNSResolver)
|
||||||||||||||||||
a.log.Warn(logs.ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided)
|
||||||||||||||||||
a.log.Warn(logs.ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided, logs.TagField(logs.TagApp))
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
if len(order) == 0 {
|
||||||||||||||||||
a.log.Info(logs.ContainerResolverWillBeDisabledBecauseOfResolversResolverOrderIsEmpty)
|
||||||||||||||||||
a.log.Info(logs.ContainerResolverWillBeDisabledBecauseOfResolversResolverOrderIsEmpty,
|
||||||||||||||||||
logs.TagField(logs.TagApp))
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
return order, resolveCfg
|
||||||||||||||||||
|
@ -357,7 +446,7 @@ func (a *app) initMetrics() {
|
|||||||||||||||||
|
||||||||||||||||||
func newGateMetrics(logger *zap.Logger, provider *metrics.GateMetrics, enabled bool) *gateMetrics {
|
||||||||||||||||||
if !enabled {
|
||||||||||||||||||
logger.Warn(logs.MetricsAreDisabled)
|
||||||||||||||||||
logger.Warn(logs.MetricsAreDisabled, logs.TagField(logs.TagApp))
|
||||||||||||||||||
}
|
||||||||||||||||||
return &gateMetrics{
|
||||||||||||||||||
logger: logger,
|
||||||||||||||||||
|
@ -375,7 +464,7 @@ func (m *gateMetrics) isEnabled() bool {
|
|||||||||||||||||
|
||||||||||||||||||
func (m *gateMetrics) SetEnabled(enabled bool) {
|
||||||||||||||||||
if !enabled {
|
||||||||||||||||||
m.logger.Warn(logs.MetricsAreDisabled)
|
||||||||||||||||||
m.logger.Warn(logs.MetricsAreDisabled, logs.TagField(logs.TagApp))
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
m.mu.Lock()
|
||||||||||||||||||
|
@ -438,7 +527,7 @@ func getFrostFSKey(cfg *viper.Viper, log *zap.Logger) (*keys.PrivateKey, error)
|
|||||||||||||||||
walletPath := cfg.GetString(cfgWalletPath)
|
||||||||||||||||||
|
||||||||||||||||||
if len(walletPath) == 0 {
|
||||||||||||||||||
log.Info(logs.NoWalletPathSpecifiedCreatingEphemeralKeyAutomaticallyForThisRun)
|
||||||||||||||||||
log.Info(logs.NoWalletPathSpecifiedCreatingEphemeralKeyAutomaticallyForThisRun, logs.TagField(logs.TagApp))
|
||||||||||||||||||
key, err := keys.NewPrivateKey()
|
||||||||||||||||||
if err != nil {
|
||||||||||||||||||
return nil, err
|
||||||||||||||||||
|
@ -495,7 +584,10 @@ func getKeyFromWallet(w *wallet.Wallet, addrStr string, password *string) (*keys
|
|||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
func (a *app) Wait() {
|
||||||||||||||||||
a.log.Info(logs.StartingApplication, zap.String("app_name", "frostfs-http-gw"), zap.String("version", Version))
|
||||||||||||||||||
a.log.Info(logs.StartingApplication,
|
||||||||||||||||||
zap.String("app_name", "frostfs-http-gw"),
|
||||||||||||||||||
zap.String("version", Version),
|
||||||||||||||||||
logs.TagField(logs.TagApp))
|
||||||||||||||||||
|
||||||||||||||||||
a.metrics.SetVersion(Version)
|
||||||||||||||||||
a.setHealthStatus()
|
||||||||||||||||||
|
@ -526,10 +618,10 @@ func (a *app) Serve() {
|
|||||||||||||||||
|
||||||||||||||||||
for i := range servs {
|
||||||||||||||||||
go func(i int) {
|
||||||||||||||||||
a.log.Info(logs.StartingServer, zap.String("address", servs[i].Address()))
|
||||||||||||||||||
a.log.Info(logs.StartingServer, zap.String("address", servs[i].Address()), logs.TagField(logs.TagApp))
|
||||||||||||||||||
if err := a.webServer.Serve(servs[i].Listener()); err != nil && err != http.ErrServerClosed {
|
||||||||||||||||||
a.metrics.MarkUnhealthy(servs[i].Address())
|
||||||||||||||||||
a.log.Fatal(logs.ListenAndServe, zap.Error(err))
|
||||||||||||||||||
a.log.Fatal(logs.ListenAndServe, zap.Error(err), logs.TagField(logs.TagApp))
|
||||||||||||||||||
}
|
||||||||||||||||||
}(i)
|
||||||||||||||||||
}
|
||||||||||||||||||
|
@ -551,7 +643,7 @@ LOOP:
|
|||||||||||||||||
}
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
a.log.Info(logs.ShuttingDownWebServer, zap.Error(a.webServer.Shutdown()))
|
||||||||||||||||||
a.log.Info(logs.ShuttingDownWebServer, zap.Error(a.webServer.Shutdown()), logs.TagField(logs.TagApp))
|
||||||||||||||||||
|
||||||||||||||||||
a.metrics.Shutdown()
|
||||||||||||||||||
a.stopServices()
|
||||||||||||||||||
|
@ -561,7 +653,7 @@ LOOP:
|
|||||||||||||||||
func (a *app) initWorkerPool() *ants.Pool {
|
||||||||||||||||||
workerPool, err := ants.NewPool(a.settings.workerPoolSize)
|
||||||||||||||||||
if err != nil {
|
||||||||||||||||||
a.log.Fatal(logs.FailedToCreateWorkerPool, zap.Error(err))
|
||||||||||||||||||
a.log.Fatal(logs.FailedToCreateWorkerPool, zap.Error(err), logs.TagField(logs.TagApp))
|
||||||||||||||||||
}
|
||||||||||||||||||
return workerPool
|
||||||||||||||||||
}
|
||||||||||||||||||
|
@ -572,37 +664,33 @@ func (a *app) shutdownTracing() {
|
|||||||||||||||||
defer cancel()
|
||||||||||||||||||
|
||||||||||||||||||
if err := tracing.Shutdown(shdnCtx); err != nil {
|
||||||||||||||||||
a.log.Warn(logs.FailedToShutdownTracing, zap.Error(err))
|
||||||||||||||||||
a.log.Warn(logs.FailedToShutdownTracing, zap.Error(err), logs.TagField(logs.TagApp))
|
||||||||||||||||||
}
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
func (a *app) configReload(ctx context.Context) {
|
||||||||||||||||||
a.log.Info(logs.SIGHUPConfigReloadStarted)
|
||||||||||||||||||
a.log.Info(logs.SIGHUPConfigReloadStarted, logs.TagField(logs.TagApp))
|
||||||||||||||||||
if !a.config().IsSet(cmdConfig) && !a.config().IsSet(cmdConfigDir) {
|
||||||||||||||||||
a.log.Warn(logs.FailedToReloadConfigBecauseItsMissed)
|
||||||||||||||||||
a.log.Warn(logs.FailedToReloadConfigBecauseItsMissed, logs.TagField(logs.TagApp))
|
||||||||||||||||||
return
|
||||||||||||||||||
}
|
||||||||||||||||||
if err := a.cfg.reload(); err != nil {
|
||||||||||||||||||
dkirillov marked this conversation as resolved
Outdated
dkirillov
commented
Why do we drop this? Why do we drop this? `tagsConfig.update` doesn't reload this param
KurlesHS
commented
We no longer need the "app.logLevel" variable, so there is no point in reading the "logger.level" parameter in this place. However, in "fetchLogTagsConfig", the "logger.level" parameter is used to set the logging level for tags for which a level has not been explicitly set. We no longer need the "app.logLevel" variable, so there is no point in reading the "logger.level" parameter in this place. However, in "fetchLogTagsConfig", the "logger.level" parameter is used to set the logging level for tags for which a level has not been explicitly set.
dkirillov
commented
Well, we still have some untagged logs e.g. Lines 600 to 601 in KurlesHS/frostfs-http-gw@a51358b
So I would expect we still can affect their appearance by SIGHUP Well, we still have some untagged logs e.g.
https://git.frostfs.info/KurlesHS/frostfs-http-gw/src/commit/a51358bb169003dc40a9e79e5ef0bb9834d64914/cmd/http-gw/settings.go#L600-L601
https://git.frostfs.info/TrueCloudLab/frostfs-sdk-go/src/commit/9d7f7bd04f63cf373a50c7e88e815cb332b561b2/pool/pool.go#L1248
So I would expect we still can affect their appearance by SIGHUP
KurlesHS
commented
I decided to rename the `tagsConfig` structure to `logLevelConfig` and extend it with a new `logLevel zap.AtomicLevel` variable that will be responsible for the global logging level. I also updated the code in `tagsConfig.update` to reflect this change.
dkirillov
commented
Probably we should do the same for other repositories to keep this logic consistent Probably we should do the same for other repositories to keep this logic consistent
dkirillov
commented
If something goes wrong with global log level we don't update tags config Lines 151 to 154 in KurlesHS/frostfs-http-gw@22d7962
I suppose we can use something like that:
If something goes wrong with global log level we don't update tags config
https://git.frostfs.info/KurlesHS/frostfs-http-gw/src/commit/22d7962039c67ae2870875b6a5c3d22f2908aa7f/cmd/http-gw/app.go#L151-L154
I suppose we can use something like that:
```golang
type logLevelConfig struct {
logLevel zap.AtomicLevel
tagsConfig *tagsConfig
}
type tagsConfig struct {
tagLogs sync.Map
}
func newLogLevelConfig(lvl zap.AtomicLevel, tagsConfig *tagsConfig) *logLevelConfig {
return &logLevelConfig{
logLevel: lvl,
tagsConfig: tagsConfig,
}
}
func (l *logLevelConfig) update(cfg *viper.Viper, log *zap.Logger) {
if lvl, err := getLogLevel(cfg.GetString(cfgLoggerLevel)); err != nil {
log.Warn(logs.LogLevelWontBeUpdated, zap.Error(err), logs.TagField(logs.TagConfig))
} else {
l.logLevel.SetLevel(lvl)
}
if err := l.tagsConfig.update(cfg); err != nil {
log.Warn(logs.TagsLogConfigWontBeUpdated, zap.Error(err), logs.TagField(logs.TagConfig))
}
}
```
|
||||||||||||||||||
a.log.Warn(logs.FailedToReloadConfig, zap.Error(err))
|
||||||||||||||||||
a.log.Warn(logs.FailedToReloadConfig, zap.Error(err), logs.TagField(logs.TagApp))
|
||||||||||||||||||
return
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
if lvl, err := getLogLevel(a.config()); err != nil {
|
||||||||||||||||||
a.log.Warn(logs.LogLevelWontBeUpdated, zap.Error(err))
|
||||||||||||||||||
} else {
|
||||||||||||||||||
a.logLevel.SetLevel(lvl)
|
||||||||||||||||||
}
|
||||||||||||||||||
a.settings.logLevelConfig.update(a.cfg.settings, a.log)
|
||||||||||||||||||
|
||||||||||||||||||
if err := a.settings.dialerSource.Update(fetchMultinetConfig(a.config(), a.log)); err != nil {
|
||||||||||||||||||
a.log.Warn(logs.MultinetConfigWontBeUpdated, zap.Error(err))
|
||||||||||||||||||
a.log.Warn(logs.MultinetConfigWontBeUpdated, zap.Error(err), logs.TagField(logs.TagApp))
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
if err := a.resolver.UpdateResolvers(a.getResolverConfig()); err != nil {
|
||||||||||||||||||
a.log.Warn(logs.FailedToUpdateResolvers, zap.Error(err))
|
||||||||||||||||||
a.log.Warn(logs.FailedToUpdateResolvers, zap.Error(err), logs.TagField(logs.TagApp))
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
if err := a.updateServers(); err != nil {
|
||||||||||||||||||
a.log.Warn(logs.FailedToReloadServerParameters, zap.Error(err))
|
||||||||||||||||||
a.log.Warn(logs.FailedToReloadServerParameters, zap.Error(err), logs.TagField(logs.TagApp))
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
a.setRuntimeParameters()
|
||||||||||||||||||
|
@ -616,7 +704,7 @@ func (a *app) configReload(ctx context.Context) {
|
|||||||||||||||||
a.initTracing(ctx)
|
||||||||||||||||||
a.setHealthStatus()
|
||||||||||||||||||
|
||||||||||||||||||
a.log.Info(logs.SIGHUPConfigReloadCompleted)
|
||||||||||||||||||
a.log.Info(logs.SIGHUPConfigReloadCompleted, logs.TagField(logs.TagApp))
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
func (a *app) startServices() {
|
||||||||||||||||||
|
@ -654,20 +742,20 @@ func (a *app) configureRouter(h *handler.Handler) {
|
|||||||||||||||||
|
||||||||||||||||||
r.POST("/upload/{cid}", a.addMiddlewares(h.Upload))
|
||||||||||||||||||
r.OPTIONS("/upload/{cid}", a.addPreflight())
|
||||||||||||||||||
a.log.Info(logs.AddedPathUploadCid)
|
||||||||||||||||||
a.log.Info(logs.AddedPathUploadCid, logs.TagField(logs.TagApp))
|
||||||||||||||||||
r.GET("/get/{cid}/{oid:*}", a.addMiddlewares(h.DownloadByAddressOrBucketName))
|
||||||||||||||||||
r.HEAD("/get/{cid}/{oid:*}", a.addMiddlewares(h.HeadByAddressOrBucketName))
|
||||||||||||||||||
r.OPTIONS("/get/{cid}/{oid:*}", a.addPreflight())
|
||||||||||||||||||
a.log.Info(logs.AddedPathGetCidOid)
|
||||||||||||||||||
a.log.Info(logs.AddedPathGetCidOid, logs.TagField(logs.TagApp))
|
||||||||||||||||||
r.GET("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(h.DownloadByAttribute))
|
||||||||||||||||||
r.HEAD("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(h.HeadByAttribute))
|
||||||||||||||||||
r.OPTIONS("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addPreflight())
|
||||||||||||||||||
a.log.Info(logs.AddedPathGetByAttributeCidAttrKeyAttrVal)
|
||||||||||||||||||
a.log.Info(logs.AddedPathGetByAttributeCidAttrKeyAttrVal, logs.TagField(logs.TagApp))
|
||||||||||||||||||
r.GET("/zip/{cid}/{prefix:*}", a.addMiddlewares(h.DownloadZip))
|
||||||||||||||||||
r.OPTIONS("/zip/{cid}/{prefix:*}", a.addPreflight())
|
||||||||||||||||||
r.GET("/tar/{cid}/{prefix:*}", a.addMiddlewares(h.DownloadTar))
|
||||||||||||||||||
r.OPTIONS("/tar/{cid}/{prefix:*}", a.addPreflight())
|
||||||||||||||||||
a.log.Info(logs.AddedPathZipCidPrefix)
|
||||||||||||||||||
a.log.Info(logs.AddedPathZipCidPrefix, logs.TagField(logs.TagApp))
|
||||||||||||||||||
|
||||||||||||||||||
a.webServer.Handler = r.Handler
|
||||||||||||||||||
}
|
||||||||||||||||||
|
@ -756,14 +844,11 @@ func (a *app) logger(h fasthttp.RequestHandler) fasthttp.RequestHandler {
|
|||||||||||||||||
reqCtx = utils.SetReqLog(reqCtx, log)
|
||||||||||||||||||
utils.SetContextToRequest(reqCtx, req)
|
||||||||||||||||||
|
||||||||||||||||||
fields := []zap.Field{
|
||||||||||||||||||
zap.String("remote", req.RemoteAddr().String()),
|
||||||||||||||||||
log.Info(logs.Request, zap.String("remote", req.RemoteAddr().String()),
|
||||||||||||||||||
zap.ByteString("method", req.Method()),
|
||||||||||||||||||
zap.ByteString("path", req.Path()),
|
||||||||||||||||||
zap.ByteString("query", req.QueryArgs().QueryString()),
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
log.Info(logs.Request, fields...)
|
||||||||||||||||||
logs.TagField(logs.TagDatapath))
|
||||||||||||||||||
h(req)
|
||||||||||||||||||
}
|
||||||||||||||||||
}
|
||||||||||||||||||
|
@ -807,7 +892,7 @@ func (a *app) tokenizer(h fasthttp.RequestHandler) fasthttp.RequestHandler {
|
|||||||||||||||||
if err != nil {
|
||||||||||||||||||
log := utils.GetReqLogOrDefault(reqCtx, a.log)
|
||||||||||||||||||
|
||||||||||||||||||
log.Error(logs.CouldNotFetchAndStoreBearerToken, zap.Error(err))
|
||||||||||||||||||
log.Error(logs.CouldNotFetchAndStoreBearerToken, zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||||||||||||||||
handler.ResponseError(req, "could not fetch and store bearer token: "+err.Error(), fasthttp.StatusBadRequest)
|
||||||||||||||||||
return
|
||||||||||||||||||
}
|
||||||||||||||||||
|
@ -866,17 +951,17 @@ func (a *app) initServers(ctx context.Context) {
|
|||||||||||||||||
if err != nil {
|
||||||||||||||||||
a.unbindServers = append(a.unbindServers, serverInfo)
|
||||||||||||||||||
a.metrics.MarkUnhealthy(serverInfo.Address)
|
||||||||||||||||||
a.log.Warn(logs.FailedToAddServer, append(fields, zap.Error(err))...)
|
||||||||||||||||||
a.log.Warn(logs.FailedToAddServer, append(fields, zap.Error(err), logs.TagField(logs.TagApp))...)
|
||||||||||||||||||
continue
|
||||||||||||||||||
}
|
||||||||||||||||||
a.metrics.MarkHealthy(serverInfo.Address)
|
||||||||||||||||||
|
||||||||||||||||||
a.servers = append(a.servers, srv)
|
||||||||||||||||||
a.log.Info(logs.AddServer, fields...)
|
||||||||||||||||||
a.log.Info(logs.AddServer, append(fields, logs.TagField(logs.TagApp))...)
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
if len(a.servers) == 0 {
|
||||||||||||||||||
a.log.Fatal(logs.NoHealthyServers)
|
||||||||||||||||||
a.log.Fatal(logs.NoHealthyServers, logs.TagField(logs.TagApp))
|
||||||||||||||||||
}
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
|
@ -950,13 +1035,14 @@ func (a *app) initTracing(ctx context.Context) {
|
|||||||||||||||||
if trustedCa := a.config().GetString(cfgTracingTrustedCa); trustedCa != "" {
|
||||||||||||||||||
caBytes, err := os.ReadFile(trustedCa)
|
||||||||||||||||||
if err != nil {
|
||||||||||||||||||
a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err))
|
||||||||||||||||||
a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err), logs.TagField(logs.TagApp))
|
||||||||||||||||||
return
|
||||||||||||||||||
}
|
||||||||||||||||||
certPool := x509.NewCertPool()
|
||||||||||||||||||
ok := certPool.AppendCertsFromPEM(caBytes)
|
||||||||||||||||||
if !ok {
|
||||||||||||||||||
a.log.Warn(logs.FailedToInitializeTracing, zap.String("error", "can't fill cert pool by ca cert"))
|
||||||||||||||||||
a.log.Warn(logs.FailedToInitializeTracing, zap.String("error", "can't fill cert pool by ca cert"),
|
||||||||||||||||||
logs.TagField(logs.TagApp))
|
||||||||||||||||||
return
|
||||||||||||||||||
}
|
||||||||||||||||||
cfg.ServerCaCertPool = certPool
|
||||||||||||||||||
|
@ -964,24 +1050,24 @@ func (a *app) initTracing(ctx context.Context) {
|
|||||||||||||||||
|
||||||||||||||||||
attributes, err := fetchTracingAttributes(a.config())
|
||||||||||||||||||
dkirillov marked this conversation as resolved
Outdated
dkirillov
commented
Probably this should have Probably this should have `config` tag
|
||||||||||||||||||
if err != nil {
|
||||||||||||||||||
a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err))
|
||||||||||||||||||
a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err), logs.TagField(logs.TagApp))
|
||||||||||||||||||
return
|
||||||||||||||||||
}
|
||||||||||||||||||
cfg.Attributes = attributes
|
||||||||||||||||||
|
||||||||||||||||||
updated, err := tracing.Setup(ctx, cfg)
|
||||||||||||||||||
if err != nil {
|
||||||||||||||||||
a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err))
|
||||||||||||||||||
a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err), logs.TagField(logs.TagApp))
|
||||||||||||||||||
}
|
||||||||||||||||||
if updated {
|
||||||||||||||||||
a.log.Info(logs.TracingConfigUpdated)
|
||||||||||||||||||
a.log.Info(logs.TracingConfigUpdated, logs.TagField(logs.TagApp))
|
||||||||||||||||||
}
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
func (a *app) setRuntimeParameters() {
|
||||||||||||||||||
if len(os.Getenv("GOMEMLIMIT")) != 0 {
|
||||||||||||||||||
// default limit < yaml limit < app env limit < GOMEMLIMIT
|
||||||||||||||||||
a.log.Warn(logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT)
|
||||||||||||||||||
a.log.Warn(logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT, logs.TagField(logs.TagApp))
|
||||||||||||||||||
return
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
|
@ -990,7 +1076,8 @@ func (a *app) setRuntimeParameters() {
|
|||||||||||||||||
if softMemoryLimit != previous {
|
||||||||||||||||||
a.log.Info(logs.RuntimeSoftMemoryLimitUpdated,
|
||||||||||||||||||
zap.Int64("new_value", softMemoryLimit),
|
||||||||||||||||||
zap.Int64("old_value", previous))
|
||||||||||||||||||
zap.Int64("old_value", previous),
|
||||||||||||||||||
logs.TagField(logs.TagApp))
|
||||||||||||||||||
}
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
|
@ -1016,34 +1103,32 @@ func (a *app) tryReconnect(ctx context.Context, sr *fasthttp.Server) bool {
|
|||||||||||||||||
a.mu.Lock()
|
||||||||||||||||||
defer a.mu.Unlock()
|
||||||||||||||||||
|
||||||||||||||||||
a.log.Info(logs.ServerReconnecting)
|
||||||||||||||||||
a.log.Info(logs.ServerReconnecting, logs.TagField(logs.TagApp))
|
||||||||||||||||||
var failedServers []ServerInfo
|
||||||||||||||||||
|
||||||||||||||||||
for _, serverInfo := range a.unbindServers {
|
||||||||||||||||||
fields := []zap.Field{
|
||||||||||||||||||
zap.String("address", serverInfo.Address), zap.Bool("tls enabled", serverInfo.TLS.Enabled),
|
||||||||||||||||||
zap.String("tls cert", serverInfo.TLS.CertFile), zap.String("tls key", serverInfo.TLS.KeyFile),
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
srv, err := newServer(ctx, serverInfo)
|
||||||||||||||||||
if err != nil {
|
||||||||||||||||||
a.log.Warn(logs.ServerReconnectFailed, zap.Error(err))
|
||||||||||||||||||
a.log.Warn(logs.ServerReconnectFailed, zap.Error(err), logs.TagField(logs.TagApp))
|
||||||||||||||||||
failedServers = append(failedServers, serverInfo)
|
||||||||||||||||||
a.metrics.MarkUnhealthy(serverInfo.Address)
|
||||||||||||||||||
continue
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
go func() {
|
||||||||||||||||||
a.log.Info(logs.StartingServer, zap.String("address", srv.Address()))
|
||||||||||||||||||
a.log.Info(logs.StartingServer, zap.String("address", srv.Address()), logs.TagField(logs.TagApp))
|
||||||||||||||||||
a.metrics.MarkHealthy(serverInfo.Address)
|
||||||||||||||||||
if err = sr.Serve(srv.Listener()); err != nil && !errors.Is(err, http.ErrServerClosed) {
|
||||||||||||||||||
a.log.Warn(logs.ListenAndServe, zap.Error(err))
|
||||||||||||||||||
a.log.Warn(logs.ListenAndServe, zap.Error(err), logs.TagField(logs.TagApp))
|
||||||||||||||||||
a.metrics.MarkUnhealthy(serverInfo.Address)
|
||||||||||||||||||
}
|
||||||||||||||||||
}()
|
||||||||||||||||||
|
||||||||||||||||||
a.servers = append(a.servers, srv)
|
||||||||||||||||||
a.log.Info(logs.ServerReconnectedSuccessfully, fields...)
|
||||||||||||||||||
a.log.Info(logs.ServerReconnectedSuccessfully,
|
||||||||||||||||||
zap.String("address", serverInfo.Address), zap.Bool("tls enabled", serverInfo.TLS.Enabled),
|
||||||||||||||||||
zap.String("tls cert", serverInfo.TLS.CertFile), zap.String("tls key", serverInfo.TLS.KeyFile),
|
||||||||||||||||||
logs.TagField(logs.TagApp))
|
||||||||||||||||||
}
|
||||||||||||||||||
|
||||||||||||||||||
a.unbindServers = failedServers
|
||||||||||||||||||
|
|
174
cmd/http-gw/logger.go
Normal file
|
@ -0,0 +1,174 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/zapjournald"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/ssgreg/journald"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zapcore"
|
||||
)
|
||||
|
||||
func getLogLevel(v *viper.Viper) (zapcore.Level, error) {
|
||||
var lvl zapcore.Level
|
||||
lvlStr := v.GetString(cfgLoggerLevel)
|
||||
err := lvl.UnmarshalText([]byte(lvlStr))
|
||||
if err != nil {
|
||||
return lvl, fmt.Errorf("incorrect logger level configuration %s (%v), "+
|
||||
"value should be one of %v", lvlStr, err, [...]zapcore.Level{
|
||||
zapcore.DebugLevel,
|
||||
zapcore.InfoLevel,
|
||||
zapcore.WarnLevel,
|
||||
zapcore.ErrorLevel,
|
||||
zapcore.DPanicLevel,
|
||||
zapcore.PanicLevel,
|
||||
zapcore.FatalLevel,
|
||||
})
|
||||
}
|
||||
return lvl, nil
|
||||
}
|
||||
|
||||
// Compile-time check that zapCoreTagFilterWrapper implements zapcore.Core.
var _ zapcore.Core = (*zapCoreTagFilterWrapper)(nil)

// zapCoreTagFilterWrapper is a zapcore.Core decorator that drops log
// entries whose tag field (logs.TagFieldName) is not enabled for the
// entry's level according to settings.
type zapCoreTagFilterWrapper struct {
	core     zapcore.Core      // wrapped core performing the actual writes
	settings TagFilterSettings // per-tag level policy
	extra    []zap.Field       // fields accumulated via With; also scanned for tags in Write
}

// TagFilterSettings reports whether logging at lvl is enabled for the
// given tag.
type TagFilterSettings interface {
	LevelEnabled(tag string, lvl zapcore.Level) bool
}
||||
|
||||
// Enabled implements zapcore.Core by delegating the level check to the
// wrapped core; tag-based filtering happens later, in Write.
func (c *zapCoreTagFilterWrapper) Enabled(level zapcore.Level) bool {
	return c.core.Enabled(level)
}
|
||||
|
||||
func (c *zapCoreTagFilterWrapper) With(fields []zapcore.Field) zapcore.Core {
|
||||
return &zapCoreTagFilterWrapper{
|
||||
core: c.core.With(fields),
|
||||
settings: c.settings,
|
||||
extra: append(c.extra, fields...),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *zapCoreTagFilterWrapper) Check(entry zapcore.Entry, checked *zapcore.CheckedEntry) *zapcore.CheckedEntry {
|
||||
if c.core.Enabled(entry.Level) {
|
||||
return checked.AddCore(entry, c)
|
||||
}
|
||||
return checked
|
||||
}
|
||||
|
||||
func (c *zapCoreTagFilterWrapper) Write(entry zapcore.Entry, fields []zapcore.Field) error {
|
||||
if c.shouldSkip(entry, fields) || c.shouldSkip(entry, c.extra) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return c.core.Write(entry, fields)
|
||||
}
|
||||
|
||||
func (c *zapCoreTagFilterWrapper) shouldSkip(entry zapcore.Entry, fields []zap.Field) bool {
|
||||
for _, field := range fields {
|
||||
if field.Key == logs.TagFieldName && field.Type == zapcore.StringType {
|
||||
if !c.settings.LevelEnabled(field.String, entry.Level) {
|
||||
return true
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Sync implements zapcore.Core by flushing the wrapped core.
func (c *zapCoreTagFilterWrapper) Sync() error {
	return c.core.Sync()
}
|
||||
|
||||
func applyZapCoreMiddlewares(core zapcore.Core, v *viper.Viper, loggerSettings LoggerAppSettings, tagSetting TagFilterSettings) zapcore.Core {
|
||||
core = &zapCoreTagFilterWrapper{
|
||||
core: core,
|
||||
settings: tagSetting,
|
||||
}
|
||||
|
||||
if v.GetBool(cfgLoggerSamplingEnabled) {
|
||||
core = zapcore.NewSamplerWithOptions(core,
|
||||
v.GetDuration(cfgLoggerSamplingInterval),
|
||||
v.GetInt(cfgLoggerSamplingInitial),
|
||||
v.GetInt(cfgLoggerSamplingThereafter),
|
||||
zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
|
||||
if dec&zapcore.LogDropped > 0 {
|
||||
loggerSettings.DroppedLogsInc()
|
||||
}
|
||||
}))
|
||||
}
|
||||
|
||||
return core
|
||||
}
|
||||
|
||||
func newLogEncoder() zapcore.Encoder {
|
||||
c := zap.NewProductionEncoderConfig()
|
||||
c.EncodeTime = zapcore.ISO8601TimeEncoder
|
||||
|
||||
return zapcore.NewConsoleEncoder(c)
|
||||
}
|
||||
|
||||
// newStdoutLogger constructs a zap.Logger instance for current application.
|
||||
// Panics on failure.
|
||||
//
|
||||
// Logger is built from zap's production logging configuration with:
|
||||
// - parameterized level (debug by default)
|
||||
// - console encoding
|
||||
// - ISO8601 time encoding
|
||||
//
|
||||
// Logger records a stack trace for all messages at or above fatal level.
|
||||
//
|
||||
// See also zapcore.Level, zap.NewProductionConfig, zap.AddStacktrace.
|
||||
func newStdoutLogger(v *viper.Viper, lvl zap.AtomicLevel, loggerSettings LoggerAppSettings, tagSetting TagFilterSettings) *Logger {
|
||||
stdout := zapcore.AddSync(os.Stderr)
|
||||
|
||||
consoleOutCore := zapcore.NewCore(newLogEncoder(), stdout, lvl)
|
||||
consoleOutCore = applyZapCoreMiddlewares(consoleOutCore, v, loggerSettings, tagSetting)
|
||||
|
||||
return &Logger{
|
||||
logger: zap.New(consoleOutCore, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))),
|
||||
lvl: lvl,
|
||||
}
|
||||
}
|
||||
|
||||
func newJournaldLogger(v *viper.Viper, lvl zap.AtomicLevel, loggerSettings LoggerAppSettings, tagSetting TagFilterSettings) *Logger {
|
||||
encoder := zapjournald.NewPartialEncoder(newLogEncoder(), zapjournald.SyslogFields)
|
||||
|
||||
core := zapjournald.NewCore(lvl, encoder, &journald.Journal{}, zapjournald.SyslogFields)
|
||||
coreWithContext := core.With([]zapcore.Field{
|
||||
zapjournald.SyslogFacility(zapjournald.LogDaemon),
|
||||
zapjournald.SyslogIdentifier(),
|
||||
zapjournald.SyslogPid(),
|
||||
})
|
||||
|
||||
coreWithContext = applyZapCoreMiddlewares(coreWithContext, v, loggerSettings, tagSetting)
|
||||
|
||||
return &Logger{
|
||||
logger: zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))),
|
||||
lvl: lvl,
|
||||
}
|
||||
}
|
||||
|
||||
// LoggerAppSettings is the application-side hook the logger uses to
// report entries dropped by sampling.
type LoggerAppSettings interface {
	DroppedLogsInc()
}
|
||||
|
||||
func pickLogger(v *viper.Viper, lvl zap.AtomicLevel, loggerSettings LoggerAppSettings, tagSettings TagFilterSettings) *Logger {
|
||||
dest := v.GetString(cfgLoggerDestination)
|
||||
|
||||
switch dest {
|
||||
case destinationStdout:
|
||||
return newStdoutLogger(v, lvl, loggerSettings, tagSettings)
|
||||
case destinationJournald:
|
||||
return newJournaldLogger(v, lvl, loggerSettings, tagSettings)
|
||||
default:
|
||||
panic(fmt.Sprintf("wrong destination for logger: %s", dest))
|
||||
}
|
||||
}
|
|
@ -23,10 +23,8 @@ import (
|
|||
grpctracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
||||
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
|
||||
"git.frostfs.info/TrueCloudLab/zapjournald"
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/ssgreg/journald"
|
||||
"github.com/valyala/fasthttp"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zapcore"
|
||||
|
@ -111,6 +109,11 @@ const (
|
|||
cfgLoggerSamplingThereafter = "logger.sampling.thereafter"
|
||||
cfgLoggerSamplingInterval = "logger.sampling.interval"
|
||||
|
||||
cfgLoggerTags = "logger.tags"
|
||||
cfgLoggerTagsPrefixTmpl = cfgLoggerTags + ".%d."
|
||||
cfgLoggerTagsNameTmpl = cfgLoggerTagsPrefixTmpl + "name"
|
||||
cfgLoggerTagsLevelTmpl = cfgLoggerTagsPrefixTmpl + "level"
|
||||
|
||||
// Wallet.
|
||||
cfgWalletPassphrase = "wallet.passphrase"
|
||||
cfgWalletPath = "wallet.path"
|
||||
|
@ -193,6 +196,8 @@ var ignore = map[string]struct{}{
|
|||
cmdVersion: {},
|
||||
}
|
||||
|
||||
var defaultTags = []string{logs.TagApp, logs.TagDatapath, logs.TagExternalStorage, logs.TagExternalStorageTree}
|
||||
|
||||
type Logger struct {
|
||||
logger *zap.Logger
|
||||
lvl zap.AtomicLevel
|
||||
|
@ -499,112 +504,33 @@ func mergeConfig(v *viper.Viper, fileName string) error {
|
|||
return v.MergeConfig(cfgFile)
|
||||
}
|
||||
|
||||
type LoggerAppSettings interface {
|
||||
DroppedLogsInc()
|
||||
}
|
||||
func fetchLogTagsConfig(v *viper.Viper, defaultLvl zapcore.Level) (map[string]zapcore.Level, error) {
|
||||
res := make(map[string]zapcore.Level)
|
||||
|
||||
func pickLogger(v *viper.Viper, settings LoggerAppSettings) *Logger {
|
||||
lvl, err := getLogLevel(v)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
for i := 0; ; i++ {
|
||||
name := v.GetString(fmt.Sprintf(cfgLoggerTagsNameTmpl, i))
|
||||
if name == "" {
|
||||
break
|
||||
}
|
||||
|
||||
lvl := defaultLvl
|
||||
level := v.GetString(fmt.Sprintf(cfgLoggerTagsLevelTmpl, i))
|
||||
if level != "" {
|
||||
if err := lvl.Set(level); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse log tags config, unknown level: '%s'", level)
|
||||
}
|
||||
}
|
||||
|
||||
res[name] = lvl
|
||||
}
|
||||
|
||||
dest := v.GetString(cfgLoggerDestination)
|
||||
|
||||
switch dest {
|
||||
case destinationStdout:
|
||||
return newStdoutLogger(v, lvl, settings)
|
||||
case destinationJournald:
|
||||
return newJournaldLogger(v, lvl, settings)
|
||||
default:
|
||||
panic(fmt.Sprintf("wrong destination for logger: %s", dest))
|
||||
}
|
||||
}
|
||||
|
||||
// newStdoutLogger constructs a zap.Logger instance for current application.
|
||||
// Panics on failure.
|
||||
//
|
||||
// Logger is built from zap's production logging configuration with:
|
||||
// - parameterized level (debug by default)
|
||||
// - console encoding
|
||||
// - ISO8601 time encoding
|
||||
//
|
||||
// Logger records a stack trace for all messages at or above fatal level.
|
||||
//
|
||||
// See also zapcore.Level, zap.NewProductionConfig, zap.AddStacktrace.
|
||||
func newStdoutLogger(v *viper.Viper, lvl zapcore.Level, settings LoggerAppSettings) *Logger {
|
||||
stdout := zapcore.AddSync(os.Stderr)
|
||||
level := zap.NewAtomicLevelAt(lvl)
|
||||
|
||||
consoleOutCore := zapcore.NewCore(newLogEncoder(), stdout, level)
|
||||
consoleOutCore = applyZapCoreMiddlewares(consoleOutCore, v, settings)
|
||||
|
||||
return &Logger{
|
||||
logger: zap.New(consoleOutCore, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))),
|
||||
lvl: level,
|
||||
}
|
||||
}
|
||||
|
||||
func newJournaldLogger(v *viper.Viper, lvl zapcore.Level, settings LoggerAppSettings) *Logger {
|
||||
level := zap.NewAtomicLevelAt(lvl)
|
||||
|
||||
encoder := zapjournald.NewPartialEncoder(newLogEncoder(), zapjournald.SyslogFields)
|
||||
|
||||
core := zapjournald.NewCore(level, encoder, &journald.Journal{}, zapjournald.SyslogFields)
|
||||
coreWithContext := core.With([]zapcore.Field{
|
||||
zapjournald.SyslogFacility(zapjournald.LogDaemon),
|
||||
zapjournald.SyslogIdentifier(),
|
||||
zapjournald.SyslogPid(),
|
||||
})
|
||||
|
||||
coreWithContext = applyZapCoreMiddlewares(coreWithContext, v, settings)
|
||||
|
||||
return &Logger{
|
||||
logger: zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))),
|
||||
lvl: level,
|
||||
}
|
||||
}
|
||||
|
||||
func newLogEncoder() zapcore.Encoder {
|
||||
c := zap.NewProductionEncoderConfig()
|
||||
c.EncodeTime = zapcore.ISO8601TimeEncoder
|
||||
|
||||
return zapcore.NewConsoleEncoder(c)
|
||||
}
|
||||
|
||||
func applyZapCoreMiddlewares(core zapcore.Core, v *viper.Viper, settings LoggerAppSettings) zapcore.Core {
|
||||
if v.GetBool(cfgLoggerSamplingEnabled) {
|
||||
core = zapcore.NewSamplerWithOptions(core,
|
||||
v.GetDuration(cfgLoggerSamplingInterval),
|
||||
v.GetInt(cfgLoggerSamplingInitial),
|
||||
v.GetInt(cfgLoggerSamplingThereafter),
|
||||
zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
|
||||
if dec&zapcore.LogDropped > 0 {
|
||||
settings.DroppedLogsInc()
|
||||
}
|
||||
}))
|
||||
if len(res) == 0 && !v.IsSet(cfgLoggerTags) {
|
||||
for _, tag := range defaultTags {
|
||||
res[tag] = defaultLvl
|
||||
}
|
||||
}
|
||||
|
||||
return core
|
||||
}
|
||||
|
||||
func getLogLevel(v *viper.Viper) (zapcore.Level, error) {
|
||||
var lvl zapcore.Level
|
||||
lvlStr := v.GetString(cfgLoggerLevel)
|
||||
err := lvl.UnmarshalText([]byte(lvlStr))
|
||||
if err != nil {
|
||||
return lvl, fmt.Errorf("incorrect logger level configuration %s (%v), "+
|
||||
"value should be one of %v", lvlStr, err, [...]zapcore.Level{
|
||||
zapcore.DebugLevel,
|
||||
zapcore.InfoLevel,
|
||||
zapcore.WarnLevel,
|
||||
zapcore.ErrorLevel,
|
||||
zapcore.DPanicLevel,
|
||||
zapcore.PanicLevel,
|
||||
zapcore.FatalLevel,
|
||||
})
|
||||
}
|
||||
return lvl, nil
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func fetchReconnectInterval(cfg *viper.Viper) time.Duration {
|
||||
|
@ -620,20 +546,19 @@ func fetchIndexPageTemplate(v *viper.Viper, l *zap.Logger) (string, bool) {
|
|||
if !v.GetBool(cfgIndexPageEnabled) {
|
||||
return "", false
|
||||
}
|
||||
|
||||
reader, err := os.Open(v.GetString(cfgIndexPageTemplatePath))
|
||||
if err != nil {
|
||||
l.Warn(logs.FailedToReadIndexPageTemplate, zap.Error(err))
|
||||
l.Warn(logs.FailedToReadIndexPageTemplate, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
return "", true
|
||||
}
|
||||
|
||||
tmpl, err := io.ReadAll(reader)
|
||||
if err != nil {
|
||||
l.Warn(logs.FailedToReadIndexPageTemplate, zap.Error(err))
|
||||
l.Warn(logs.FailedToReadIndexPageTemplate, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
return "", true
|
||||
}
|
||||
|
||||
l.Info(logs.SetCustomIndexPageTemplate)
|
||||
l.Info(logs.SetCustomIndexPageTemplate, logs.TagField(logs.TagApp))
|
||||
return string(tmpl), true
|
||||
}
|
||||
|
||||
|
@ -674,7 +599,7 @@ func fetchServers(v *viper.Viper, log *zap.Logger) []ServerInfo {
|
|||
}
|
||||
|
||||
if _, ok := seen[serverInfo.Address]; ok {
|
||||
log.Warn(logs.WarnDuplicateAddress, zap.String("address", serverInfo.Address))
|
||||
log.Warn(logs.WarnDuplicateAddress, zap.String("address", serverInfo.Address), logs.TagField(logs.TagApp))
|
||||
continue
|
||||
}
|
||||
seen[serverInfo.Address] = struct{}{}
|
||||
|
@ -687,7 +612,7 @@ func fetchServers(v *viper.Viper, log *zap.Logger) []ServerInfo {
|
|||
func (a *app) initPools(ctx context.Context) {
|
||||
key, err := getFrostFSKey(a.config(), a.log)
|
||||
if err != nil {
|
||||
a.log.Fatal(logs.CouldNotLoadFrostFSPrivateKey, zap.Error(err))
|
||||
a.log.Fatal(logs.CouldNotLoadFrostFSPrivateKey, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
}
|
||||
|
||||
var prm pool.InitParameters
|
||||
|
@ -695,7 +620,8 @@ func (a *app) initPools(ctx context.Context) {
|
|||
|
||||
prm.SetKey(&key.PrivateKey)
|
||||
prmTree.SetKey(key)
|
||||
a.log.Info(logs.UsingCredentials, zap.String("FrostFS", hex.EncodeToString(key.PublicKey().Bytes())))
|
||||
a.log.Info(logs.UsingCredentials, zap.String("FrostFS", hex.EncodeToString(key.PublicKey().Bytes())),
|
||||
logs.TagField(logs.TagApp))
|
||||
|
||||
for _, peer := range fetchPeers(a.log, a.config()) {
|
||||
prm.AddNode(peer)
|
||||
|
@ -750,11 +676,11 @@ func (a *app) initPools(ctx context.Context) {
|
|||
|
||||
p, err := pool.NewPool(prm)
|
||||
if err != nil {
|
||||
a.log.Fatal(logs.FailedToCreateConnectionPool, zap.Error(err))
|
||||
a.log.Fatal(logs.FailedToCreateConnectionPool, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
}
|
||||
|
||||
if err = p.Dial(ctx); err != nil {
|
||||
a.log.Fatal(logs.FailedToDialConnectionPool, zap.Error(err))
|
||||
a.log.Fatal(logs.FailedToDialConnectionPool, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
}
|
||||
|
||||
if a.config().GetBool(cfgFeaturesTreePoolNetmapSupport) {
|
||||
|
@ -763,10 +689,10 @@ func (a *app) initPools(ctx context.Context) {
|
|||
|
||||
treePool, err := treepool.NewPool(prmTree)
|
||||
if err != nil {
|
||||
a.log.Fatal(logs.FailedToCreateTreePool, zap.Error(err))
|
||||
a.log.Fatal(logs.FailedToCreateTreePool, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
}
|
||||
if err = treePool.Dial(ctx); err != nil {
|
||||
a.log.Fatal(logs.FailedToDialTreePool, zap.Error(err))
|
||||
a.log.Fatal(logs.FailedToDialTreePool, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
}
|
||||
|
||||
a.pool = p
|
||||
|
@ -797,7 +723,8 @@ func fetchPeers(l *zap.Logger, v *viper.Viper) []pool.NodeParam {
|
|||
l.Info(logs.AddedStoragePeer,
|
||||
zap.Int("priority", priority),
|
||||
zap.String("address", address),
|
||||
zap.Float64("weight", weight))
|
||||
zap.Float64("weight", weight),
|
||||
logs.TagField(logs.TagApp))
|
||||
}
|
||||
|
||||
return nodes
|
||||
|
@ -836,7 +763,8 @@ func fetchCacheLifetime(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultV
|
|||
l.Error(logs.InvalidLifetimeUsingDefaultValue,
|
||||
zap.String("parameter", cfgEntry),
|
||||
zap.Duration("value in config", lifetime),
|
||||
zap.Duration("default", defaultValue))
|
||||
zap.Duration("default", defaultValue),
|
||||
logs.TagField(logs.TagApp))
|
||||
} else {
|
||||
return lifetime
|
||||
}
|
||||
|
@ -852,7 +780,8 @@ func fetchCacheSize(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue
|
|||
l.Error(logs.InvalidCacheSizeUsingDefaultValue,
|
||||
zap.String("parameter", cfgEntry),
|
||||
zap.Int("value in config", size),
|
||||
zap.Int("default", defaultValue))
|
||||
zap.Int("default", defaultValue),
|
||||
logs.TagField(logs.TagApp))
|
||||
} else {
|
||||
return size
|
||||
}
|
||||
|
@ -864,7 +793,7 @@ func fetchCacheSize(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue
|
|||
func getDialerSource(logger *zap.Logger, cfg *viper.Viper) *internalnet.DialerSource {
|
||||
source, err := internalnet.NewDialerSource(fetchMultinetConfig(cfg, logger))
|
||||
if err != nil {
|
||||
logger.Fatal(logs.FailedToLoadMultinetConfig, zap.Error(err))
|
||||
logger.Fatal(logs.FailedToLoadMultinetConfig, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
}
|
||||
return source
|
||||
}
|
||||
|
|
|
@ -20,6 +20,8 @@ HTTP_GW_LOGGER_SAMPLING_ENABLED=false
|
|||
HTTP_GW_LOGGER_SAMPLING_INITIAL=100
|
||||
HTTP_GW_LOGGER_SAMPLING_THEREAFTER=100
|
||||
HTTP_GW_LOGGER_SAMPLING_INTERVAL=1s
|
||||
HTTP_GW_LOGGER_TAGS_0_NAME=app
|
||||
HTTP_GW_LOGGER_TAGS_1_NAME=datapath
|
||||
|
||||
HTTP_GW_SERVER_0_ADDRESS=0.0.0.0:443
|
||||
HTTP_GW_SERVER_0_TLS_ENABLED=false
|
||||
|
|
|
@ -29,6 +29,10 @@ logger:
|
|||
initial: 100
|
||||
thereafter: 100
|
||||
interval: 1s
|
||||
tags:
|
||||
- name: app
|
||||
dkirillov marked this conversation as resolved
Outdated
dkirillov
commented
Can we add the same examples to `config.env` also?
|
||||
- name: datapath
|
||||
dkirillov marked this conversation as resolved
Outdated
dkirillov
commented
We don't have this section anymore.
|
||||
level: debug
|
||||
|
||||
server:
|
||||
- address: 0.0.0.0:8080
|
||||
|
|
|
@ -174,6 +174,11 @@ logger:
|
|||
initial: 100
|
||||
thereafter: 100
|
||||
interval: 1s
|
||||
tags:
|
||||
- name: "app"
|
||||
level: info
|
||||
- name: "datapath"
|
||||
- name: "external_storage_tree"
|
||||
```
|
||||
|
||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
||||
|
@ -184,6 +189,30 @@ logger:
|
|||
| `sampling.initial` | `int` | no | '100' | Sampling count of first log entries. |
|
||||
| `sampling.thereafter` | `int` | no | '100' | Sampling count of entries after an `interval`. |
|
||||
| `sampling.interval` | `duration` | no | '1s' | Sampling interval of messaging similar entries. |
|
||||
| `tags` | `[]Tag` | yes | | Tagged log entries that should be additionally logged (available tags see in the next section). |
|
||||
|
||||
## Tags
|
||||
|
||||
There are additional log entries that can hurt performance and can be additionally logged by using `logger.tags`
|
||||
parameter. Available tags:
|
||||
|
||||
```yaml
|
||||
tags:
|
||||
- name: "app"
|
||||
level: info
|
||||
```
|
||||
|
||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
||||
|-----------------------|------------|---------------|---------------------------|-------------------------------------------------------------------------------------------------------|
|
||||
| `name` | `string` | yes | | Tag name. Possible values see below in `Tag values` section. |
|
||||
| `level` | `string` | yes | Value from `logger.level` | Logging level for specific tag. Possible values: `debug`, `info`, `warn`, `dpanic`, `panic`, `fatal`. |
|
||||
|
||||
### Tag values
|
||||
|
||||
* `app` - common application logs (enabled by default).
|
||||
* `datapath` - main logic of application (enabled by default).
|
||||
* `external_storage` - external interaction with storage node (enabled by default).
|
||||
* `external_storage_tree` - external interaction with tree service in storage node (enabled by default).
|
||||
dkirillov marked this conversation as resolved
Outdated
dkirillov
commented
We should also mention the default tags here We should also mention the default tags here
|
||||
|
||||
# `web` section
|
||||
|
||||
|
|
4
internal/cache/buckets.go
vendored
|
@ -72,7 +72,7 @@ func (o *BucketCache) GetByCID(cnrID cid.ID) *data.BucketInfo {
|
|||
key, ok := entry.(string)
|
||||
if !ok {
|
||||
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
||||
zap.String("expected", fmt.Sprintf("%T", key)))
|
||||
zap.String("expected", fmt.Sprintf("%T", key)), logs.TagField(logs.TagDatapath))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -88,7 +88,7 @@ func (o *BucketCache) get(key string) *data.BucketInfo {
|
|||
result, ok := entry.(*data.BucketInfo)
|
||||
if !ok {
|
||||
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
||||
zap.String("expected", fmt.Sprintf("%T", result)))
|
||||
zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
2
internal/cache/netmap.go
vendored
|
@ -53,7 +53,7 @@ func (c *NetmapCache) Get() *netmap.NetMap {
|
|||
result, ok := entry.(netmap.NetMap)
|
||||
if !ok {
|
||||
c.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
||||
zap.String("expected", fmt.Sprintf("%T", result)))
|
||||
zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -230,7 +230,7 @@ func (h *Handler) getDirObjectsNative(ctx context.Context, bucketInfo *data.Buck
|
|||
}
|
||||
for objExt := range resp {
|
||||
if objExt.Error != nil {
|
||||
log.Error(logs.FailedToHeadObject, zap.Error(objExt.Error))
|
||||
log.Error(logs.FailedToHeadObject, zap.Error(objExt.Error), logs.TagField(logs.TagExternalStorage))
|
||||
result.hasErrors = true
|
||||
continue
|
||||
}
|
||||
|
@ -273,7 +273,7 @@ func (h *Handler) headDirObjects(ctx context.Context, cnrID cid.ID, objectIDs Re
|
|||
})
|
||||
if err != nil {
|
||||
wg.Done()
|
||||
log.Warn(logs.FailedToSumbitTaskToPool, zap.Error(err))
|
||||
log.Warn(logs.FailedToSumbitTaskToPool, zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
dkirillov marked this conversation as resolved
Outdated
dkirillov
commented
Probably this can have Probably this can have `app` tag. Anyway we should be consistent across other components
KurlesHS
commented
In the frostfs-s3-lifecycler service, such logic has been marked with the `runtime` tag. Maybe, in order to be consistent with other components, we should add this tag here despite the fact that there are no other messages for this tag? I've tagged this message as an `app` for now.
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
|
@ -283,7 +283,7 @@ func (h *Handler) headDirObjects(ctx context.Context, cnrID cid.ID, objectIDs Re
|
|||
}
|
||||
})
|
||||
if err != nil {
|
||||
log.Error(logs.FailedToIterateOverResponse, zap.Error(err))
|
||||
log.Error(logs.FailedToIterateOverResponse, zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
}
|
||||
wg.Wait()
|
||||
}()
|
||||
|
|
|
@ -43,6 +43,8 @@ func (h *Handler) DownloadByAddressOrBucketName(c *fasthttp.RequestCtx) {
|
|||
|
||||
checkS3Err := h.tree.CheckSettingsNodeExists(ctx, bktInfo)
|
||||
if checkS3Err != nil && !errors.Is(checkS3Err, layer.ErrNodeNotFound) {
|
||||
log.Error(logs.FailedToCheckIfSettingsNodeExist, zap.String("cid", bktInfo.CID.String()),
|
||||
zap.Error(checkS3Err), logs.TagField(logs.TagExternalStorageTree))
|
||||
logAndSendBucketError(c, log, checkS3Err)
|
||||
return
|
||||
}
|
||||
|
@ -121,13 +123,13 @@ func (h *Handler) getZipResponseWriter(ctx context.Context, log *zap.Logger, res
|
|||
}),
|
||||
)
|
||||
if errIter != nil {
|
||||
log.Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter))
|
||||
log.Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter), logs.TagField(logs.TagDatapath))
|
||||
return
|
||||
} else if objectsWritten == 0 {
|
||||
log.Warn(logs.ObjectsNotFound)
|
||||
log.Warn(logs.ObjectsNotFound, logs.TagField(logs.TagDatapath))
|
||||
}
|
||||
if err := zipWriter.Close(); err != nil {
|
||||
log.Error(logs.CloseZipWriter, zap.Error(err))
|
||||
log.Error(logs.CloseZipWriter, zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -187,10 +189,10 @@ func (h *Handler) getTarResponseWriter(ctx context.Context, log *zap.Logger, res
|
|||
|
||||
defer func() {
|
||||
if err := tarWriter.Close(); err != nil {
|
||||
log.Error(logs.CloseTarWriter, zap.Error(err))
|
||||
log.Error(logs.CloseTarWriter, zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
}
|
||||
if err := gzipWriter.Close(); err != nil {
|
||||
log.Error(logs.CloseGzipWriter, zap.Error(err))
|
||||
log.Error(logs.CloseGzipWriter, zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
}
|
||||
}()
|
||||
|
||||
|
@ -204,9 +206,9 @@ func (h *Handler) getTarResponseWriter(ctx context.Context, log *zap.Logger, res
|
|||
}),
|
||||
)
|
||||
if errIter != nil {
|
||||
log.Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter))
|
||||
log.Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter), logs.TagField(logs.TagDatapath))
|
||||
} else if objectsWritten == 0 {
|
||||
log.Warn(logs.ObjectsNotFound)
|
||||
log.Warn(logs.ObjectsNotFound, logs.TagField(logs.TagDatapath))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -237,18 +239,18 @@ func (h *Handler) putObjectToArchive(ctx context.Context, log *zap.Logger, cnrID
|
|||
|
||||
resGet, err := h.frostfs.GetObject(ctx, prm)
|
||||
if err != nil {
|
||||
log.Error(logs.FailedToGetObject, zap.Error(err))
|
||||
log.Error(logs.FailedToGetObject, zap.Error(err), logs.TagField(logs.TagExternalStorage))
|
||||
return false
|
||||
}
|
||||
|
||||
fileWriter, err := createArchiveHeader(&resGet.Header)
|
||||
if err != nil {
|
||||
log.Error(logs.FailedToAddObjectToArchive, zap.Error(err))
|
||||
log.Error(logs.FailedToAddObjectToArchive, zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
return false
|
||||
}
|
||||
|
||||
if err = writeToArchive(resGet, fileWriter, buf); err != nil {
|
||||
log.Error(logs.FailedToAddObjectToArchive, zap.Error(err))
|
||||
log.Error(logs.FailedToAddObjectToArchive, zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
return false
|
||||
}
|
||||
|
||||
|
@ -264,7 +266,8 @@ func (h *Handler) searchObjectsByPrefix(c *fasthttp.RequestCtx, log *zap.Logger,
|
|||
|
||||
prefix, err := url.QueryUnescape(prefix)
|
||||
if err != nil {
|
||||
log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("prefix", prefix), zap.Error(err))
|
||||
log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("prefix", prefix),
|
||||
zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
ResponseError(c, "could not unescape prefix: "+err.Error(), fasthttp.StatusBadRequest)
|
||||
return nil, err
|
||||
}
|
||||
|
@ -273,7 +276,7 @@ func (h *Handler) searchObjectsByPrefix(c *fasthttp.RequestCtx, log *zap.Logger,
|
|||
|
||||
resSearch, err := h.search(ctx, cnrID, object.AttributeFilePath, prefix, object.MatchCommonPrefix)
|
||||
if err != nil {
|
||||
log.Error(logs.CouldNotSearchForObjects, zap.Error(err))
|
||||
log.Error(logs.CouldNotSearchForObjects, zap.Error(err), logs.TagField(logs.TagExternalStorage))
|
||||
ResponseError(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -50,7 +50,8 @@ func filterHeaders(l *zap.Logger, header *fasthttp.RequestHeader) (map[string]st
|
|||
|
||||
l.Debug(logs.AddAttributeToResultObject,
|
||||
zap.String("key", k),
|
||||
zap.String("val", v))
|
||||
zap.String("val", v),
|
||||
logs.TagField(logs.TagDatapath))
|
||||
})
|
||||
|
||||
return result, err
|
||||
|
|
|
@ -206,11 +206,13 @@ func (h *Handler) byS3Path(ctx context.Context, req request, cnrID cid.ID, path
|
|||
|
||||
foundOID, err := h.tree.GetLatestVersion(ctx, &cnrID, path)
|
||||
if err != nil {
|
||||
log.Error(logs.FailedToGetLatestVersionOfObject, zap.Error(err), zap.String("cid", cnrID.String()),
|
||||
zap.String("path", path), logs.TagField(logs.TagExternalStorageTree))
|
||||
logAndSendBucketError(c, log, err)
|
||||
return
|
||||
}
|
||||
if foundOID.IsDeleteMarker {
|
||||
log.Error(logs.ObjectWasDeleted)
|
||||
log.Error(logs.ObjectWasDeleted, logs.TagField(logs.TagExternalStorageTree))
|
||||
ResponseError(c, "object deleted", fasthttp.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
@ -230,14 +232,16 @@ func (h *Handler) byAttribute(c *fasthttp.RequestCtx, handler func(context.Conte
|
|||
|
||||
key, err := url.QueryUnescape(key)
|
||||
if err != nil {
|
||||
log.Error(logs.FailedToUnescapeQuery, zap.String("cid", cidParam), zap.String("attr_key", key), zap.Error(err))
|
||||
log.Error(logs.FailedToUnescapeQuery, zap.String("cid", cidParam), zap.String("attr_key", key),
|
||||
zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
ResponseError(c, "could not unescape attr_key: "+err.Error(), fasthttp.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
val, err = url.QueryUnescape(val)
|
||||
if err != nil {
|
||||
log.Error(logs.FailedToUnescapeQuery, zap.String("cid", cidParam), zap.String("attr_val", val), zap.Error(err))
|
||||
log.Error(logs.FailedToUnescapeQuery, zap.String("cid", cidParam), zap.String("attr_val", val),
|
||||
zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
ResponseError(c, "could not unescape attr_val: "+err.Error(), fasthttp.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
@ -271,7 +275,7 @@ func (h *Handler) byAttribute(c *fasthttp.RequestCtx, handler func(context.Conte
|
|||
func (h *Handler) findObjectByAttribute(ctx context.Context, log *zap.Logger, cnrID cid.ID, attrKey, attrVal string) (oid.ID, error) {
|
||||
res, err := h.search(ctx, cnrID, attrKey, attrVal, object.MatchStringEqual)
|
||||
if err != nil {
|
||||
log.Error(logs.CouldNotSearchForObjects, zap.Error(err))
|
||||
log.Error(logs.CouldNotSearchForObjects, zap.Error(err), logs.TagField(logs.TagExternalStorage))
|
||||
return oid.ID{}, fmt.Errorf("could not search for objects: %w", err)
|
||||
}
|
||||
defer res.Close()
|
||||
|
@ -282,13 +286,13 @@ func (h *Handler) findObjectByAttribute(ctx context.Context, log *zap.Logger, cn
|
|||
if n == 0 {
|
||||
switch {
|
||||
case errors.Is(err, io.EOF) && h.needSearchByFileName(attrKey, attrVal):
|
||||
log.Debug(logs.ObjectNotFoundByFilePathTrySearchByFileName)
|
||||
log.Debug(logs.ObjectNotFoundByFilePathTrySearchByFileName, logs.TagField(logs.TagExternalStorage))
|
||||
return h.findObjectByAttribute(ctx, log, cnrID, attrFileName, attrVal)
|
||||
case errors.Is(err, io.EOF):
|
||||
log.Error(logs.ObjectNotFound, zap.Error(err))
|
||||
log.Error(logs.ObjectNotFound, zap.Error(err), logs.TagField(logs.TagExternalStorage))
|
||||
return oid.ID{}, fmt.Errorf("object not found: %w", err)
|
||||
default:
|
||||
log.Error(logs.ReadObjectListFailed, zap.Error(err))
|
||||
log.Error(logs.ReadObjectListFailed, zap.Error(err), logs.TagField(logs.TagExternalStorage))
|
||||
return oid.ID{}, fmt.Errorf("read object list failed: %w", err)
|
||||
}
|
||||
}
|
||||
|
@ -330,11 +334,16 @@ func (h *Handler) getBucketInfo(ctx context.Context, containerName string, log *
|
|||
|
||||
cnrID, err := h.resolveContainer(ctx, containerName)
|
||||
if err != nil {
|
||||
log.Error(logs.CouldNotResolveContainerID, zap.Error(err), zap.String("cnrName", containerName),
|
||||
logs.TagField(logs.TagDatapath))
|
||||
KurlesHS
commented
The `datapath` tag is used because both `DNS` and `NNS` resolvers can be used here.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
bktInfo, err := h.readContainer(ctx, *cnrID)
|
||||
if err != nil {
|
||||
log.Error(logs.CouldNotGetContainerInfo, zap.Error(err), zap.String("cnrName", containerName),
|
||||
zap.String("cnrName", cnrID.String()),
|
||||
logs.TagField(logs.TagExternalStorage))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
@ -342,7 +351,8 @@ func (h *Handler) getBucketInfo(ctx context.Context, containerName string, log *
|
|||
log.Warn(logs.CouldntPutBucketIntoCache,
|
||||
zap.String("bucket name", bktInfo.Name),
|
||||
zap.Stringer("bucket cid", bktInfo.CID),
|
||||
zap.Error(err))
|
||||
zap.Error(err),
|
||||
logs.TagField(logs.TagDatapath))
|
||||
}
|
||||
|
||||
return bktInfo, nil
|
||||
|
|
|
@ -67,7 +67,8 @@ func (h *Handler) headObject(ctx context.Context, req request, objectAddress oid
|
|||
req.log.Info(logs.CouldntParseCreationDate,
|
||||
zap.String("key", key),
|
||||
zap.String("val", val),
|
||||
zap.Error(err))
|
||||
zap.Error(err),
|
||||
logs.TagField(logs.TagDatapath))
|
||||
continue
|
||||
}
|
||||
req.Response.Header.Set(fasthttp.HeaderLastModified, time.Unix(value, 0).UTC().Format(http.TimeFormat))
|
||||
|
@ -131,6 +132,8 @@ func (h *Handler) HeadByAddressOrBucketName(c *fasthttp.RequestCtx) {
|
|||
}
|
||||
checkS3Err := h.tree.CheckSettingsNodeExists(ctx, bktInfo)
|
||||
if checkS3Err != nil && !errors.Is(checkS3Err, layer.ErrNodeNotFound) {
|
||||
log.Error(logs.FailedToCheckIfSettingsNodeExist, zap.String("cid", bktInfo.CID.String()),
|
||||
zap.Error(checkS3Err), logs.TagField(logs.TagExternalStorageTree))
|
||||
logAndSendBucketError(c, log, checkS3Err)
|
||||
return
|
||||
}
|
||||
|
@ -144,7 +147,6 @@ func (h *Handler) HeadByAddressOrBucketName(c *fasthttp.RequestCtx) {
|
|||
h.byNativeAddress(ctx, req, bktInfo.CID, objID, h.headObject)
|
||||
} else {
|
||||
logAndSendBucketError(c, log, checkS3Err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -33,7 +33,7 @@ func fetchMultipartFile(l *zap.Logger, r io.Reader, boundary string) (MultipartF
|
|||
|
||||
name := part.FormName()
|
||||
if name == "" {
|
||||
l.Debug(logs.IgnorePartEmptyFormName)
|
||||
l.Debug(logs.IgnorePartEmptyFormName, logs.TagField(logs.TagDatapath))
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -41,9 +41,9 @@ func fetchMultipartFile(l *zap.Logger, r io.Reader, boundary string) (MultipartF
|
|||
|
||||
// ignore multipart/form-data values
|
||||
if filename == "" {
|
||||
l.Debug(logs.IgnorePartEmptyFilename, zap.String("form", name))
|
||||
l.Debug(logs.IgnorePartEmptyFilename, zap.String("form", name), logs.TagField(logs.TagDatapath))
|
||||
if err = part.Close(); err != nil {
|
||||
l.Warn(logs.FailedToCloseReader, zap.Error(err))
|
||||
l.Warn(logs.FailedToCloseReader, zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
|
|
@ -112,7 +112,7 @@ func fetchMultipartFileDefault(l *zap.Logger, r io.Reader, boundary string) (Mul
|
|||
|
||||
name := part.FormName()
|
||||
if name == "" {
|
||||
l.Debug(logs.IgnorePartEmptyFormName)
|
||||
l.Debug(logs.IgnorePartEmptyFormName, logs.TagField(logs.TagDatapath))
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -120,8 +120,7 @@ func fetchMultipartFileDefault(l *zap.Logger, r io.Reader, boundary string) (Mul
|
|||
|
||||
// ignore multipart/form-data values
|
||||
if filename == "" {
|
||||
l.Debug(logs.IgnorePartEmptyFilename, zap.String("form", name))
|
||||
|
||||
l.Debug(logs.IgnorePartEmptyFilename, zap.String("form", name), logs.TagField(logs.TagDatapath))
|
||||
continue
|
||||
}
|
||||
|
||||
|
|
|
@ -110,7 +110,8 @@ func (h *Handler) receiveFile(ctx context.Context, req request, objAddress oid.A
|
|||
if err = req.setTimestamp(val); err != nil {
|
||||
req.log.Error(logs.CouldntParseCreationDate,
|
||||
zap.String("val", val),
|
||||
zap.Error(err))
|
||||
zap.Error(err),
|
||||
logs.TagField(logs.TagDatapath))
|
||||
}
|
||||
case object.AttributeContentType:
|
||||
contentType = val
|
||||
|
@ -144,7 +145,7 @@ func (h *Handler) receiveFile(ctx context.Context, req request, objAddress oid.A
|
|||
return payload, nil
|
||||
}, filename)
|
||||
if err != nil && err != io.EOF {
|
||||
req.log.Error(logs.CouldNotDetectContentTypeFromPayload, zap.Error(err))
|
||||
req.log.Error(logs.CouldNotDetectContentTypeFromPayload, zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
ResponseError(req.RequestCtx, "could not detect Content-Type from payload: "+err.Error(), fasthttp.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
|
|
@ -68,14 +68,14 @@ func (h *Handler) Upload(c *fasthttp.RequestCtx) {
|
|||
|
||||
boundary := string(c.Request.Header.MultipartFormBoundary())
|
||||
if file, err = fetchMultipartFile(log, bodyStream, boundary); err != nil {
|
||||
log.Error(logs.CouldNotReceiveMultipartForm, zap.Error(err))
|
||||
log.Error(logs.CouldNotReceiveMultipartForm, zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
ResponseError(c, "could not receive multipart/form: "+err.Error(), fasthttp.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
filtered, err := filterHeaders(log, &c.Request.Header)
|
||||
if err != nil {
|
||||
log.Error(logs.FailedToFilterHeaders, zap.Error(err))
|
||||
log.Error(logs.FailedToFilterHeaders, zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
ResponseError(c, err.Error(), fasthttp.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
@ -106,7 +106,7 @@ func (h *Handler) uploadSingleObject(req request, bkt *data.BucketInfo, file Mul
|
|||
|
||||
attributes, err := h.extractAttributes(c, log, filtered)
|
||||
if err != nil {
|
||||
log.Error(logs.FailedToGetAttributes, zap.Error(err))
|
||||
log.Error(logs.FailedToGetAttributes, zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
ResponseError(c, "could not extract attributes: "+err.Error(), fasthttp.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
@ -119,13 +119,14 @@ func (h *Handler) uploadSingleObject(req request, bkt *data.BucketInfo, file Mul
|
|||
log.Debug(logs.ObjectUploaded,
|
||||
zap.String("oid", idObj.EncodeToString()),
|
||||
zap.String("FileName", file.FileName()),
|
||||
logs.TagField(logs.TagExternalStorage),
|
||||
)
|
||||
|
||||
addr := newAddress(bkt.CID, idObj)
|
||||
c.Response.Header.SetContentType(jsonHeader)
|
||||
// Try to return the response, otherwise, if something went wrong, throw an error.
|
||||
if err = newPutResponse(addr).encode(c); err != nil {
|
||||
log.Error(logs.CouldNotEncodeResponse, zap.Error(err))
|
||||
log.Error(logs.CouldNotEncodeResponse, zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
ResponseError(c, "could not encode response", fasthttp.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
@ -162,13 +163,14 @@ func (h *Handler) extractAttributes(c *fasthttp.RequestCtx, log *zap.Logger, fil
|
|||
now := time.Now()
|
||||
if rawHeader := c.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil {
|
||||
if parsed, err := time.Parse(http.TimeFormat, string(rawHeader)); err != nil {
|
||||
log.Warn(logs.CouldNotParseClientTime, zap.String("Date header", string(rawHeader)), zap.Error(err))
|
||||
log.Warn(logs.CouldNotParseClientTime, zap.String("Date header", string(rawHeader)), zap.Error(err),
|
||||
logs.TagField(logs.TagDatapath))
|
||||
} else {
|
||||
now = parsed
|
||||
}
|
||||
}
|
||||
if err := utils.PrepareExpirationHeader(c, h.frostfs, filtered, now); err != nil {
|
||||
log.Error(logs.CouldNotPrepareExpirationHeader, zap.Error(err))
|
||||
log.Error(logs.CouldNotPrepareExpirationHeader, zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
return nil, err
|
||||
}
|
||||
attributes := make([]object.Attribute, 0, len(filtered))
|
||||
|
@ -205,7 +207,7 @@ func (h *Handler) explodeArchive(req request, bkt *data.BucketInfo, file io.Read
|
|||
|
||||
commonAttributes, err := h.extractAttributes(c, log, filtered)
|
||||
if err != nil {
|
||||
log.Error(logs.FailedToGetAttributes, zap.Error(err))
|
||||
log.Error(logs.FailedToGetAttributes, zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
ResponseError(c, "could not extract attributes: "+err.Error(), fasthttp.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
@ -213,16 +215,16 @@ func (h *Handler) explodeArchive(req request, bkt *data.BucketInfo, file io.Read
|
|||
|
||||
reader := file
|
||||
if bytes.EqualFold(c.Request.Header.Peek(fasthttp.HeaderContentEncoding), []byte("gzip")) {
|
||||
log.Debug(logs.GzipReaderSelected)
|
||||
log.Debug(logs.GzipReaderSelected, logs.TagField(logs.TagDatapath))
|
||||
gzipReader, err := gzip.NewReader(file)
|
||||
if err != nil {
|
||||
log.Error(logs.FailedToCreateGzipReader, zap.Error(err))
|
||||
log.Error(logs.FailedToCreateGzipReader, zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
ResponseError(c, "could read gzip file: "+err.Error(), fasthttp.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
if err := gzipReader.Close(); err != nil {
|
||||
log.Warn(logs.FailedToCloseReader, zap.Error(err))
|
||||
log.Warn(logs.FailedToCloseReader, zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
}
|
||||
}()
|
||||
reader = gzipReader
|
||||
|
@ -234,7 +236,7 @@ func (h *Handler) explodeArchive(req request, bkt *data.BucketInfo, file io.Read
|
|||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
} else if err != nil {
|
||||
log.Error(logs.FailedToReadFileFromTar, zap.Error(err))
|
||||
log.Error(logs.FailedToReadFileFromTar, zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
ResponseError(c, "could not get next entry: "+err.Error(), fasthttp.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
@ -258,6 +260,7 @@ func (h *Handler) explodeArchive(req request, bkt *data.BucketInfo, file io.Read
|
|||
log.Debug(logs.ObjectUploaded,
|
||||
zap.String("oid", idObj.EncodeToString()),
|
||||
zap.String("FileName", fileName),
|
||||
logs.TagField(logs.TagExternalStorage),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
@ -266,7 +269,7 @@ func (h *Handler) handlePutFrostFSErr(r *fasthttp.RequestCtx, err error, log *za
|
|||
statusCode, msg, additionalFields := formErrorResponse("could not store file in frostfs", err)
|
||||
logFields := append([]zap.Field{zap.Error(err)}, additionalFields...)
|
||||
|
||||
log.Error(logs.CouldNotStoreFileInFrostfs, logFields...)
|
||||
log.Error(logs.CouldNotStoreFileInFrostfs, append(logFields, logs.TagField(logs.TagExternalStorage))...)
|
||||
ResponseError(r, msg, statusCode)
|
||||
}
|
||||
|
||||
|
|
|
@ -39,7 +39,7 @@ func (r *request) handleFrostFSErr(err error, start time.Time) {
|
|||
statusCode, msg, additionalFields := formErrorResponse("could not receive object", err)
|
||||
logFields = append(logFields, additionalFields...)
|
||||
|
||||
r.log.Error(logs.CouldNotReceiveObject, logFields...)
|
||||
r.log.Error(logs.CouldNotReceiveObject, append(logFields, logs.TagField(logs.TagExternalStorage))...)
|
||||
ResponseError(r.RequestCtx, msg, statusCode)
|
||||
}
|
||||
|
||||
|
@ -85,7 +85,7 @@ func isValidValue(s string) bool {
|
|||
}
|
||||
|
||||
func logAndSendBucketError(c *fasthttp.RequestCtx, log *zap.Logger, err error) {
|
||||
log.Error(logs.CouldntGetBucket, zap.Error(err))
|
||||
log.Error(logs.CouldNotGetBucket, zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
|
||||
if client.IsErrContainerNotFound(err) {
|
||||
ResponseError(c, "Not Found", fasthttp.StatusNotFound)
|
||||
|
|
|
@ -1,63 +1,43 @@
|
|||
package logs
|
||||
|
||||
import "go.uber.org/zap"
|
||||
|
||||
const (
|
||||
TagFieldName = "tag"
|
||||
|
||||
TagApp = "app"
|
||||
TagDatapath = "datapath"
|
||||
TagExternalStorage = "external_storage"
|
||||
TagExternalStorageTree = "external_storage_tree"
|
||||
)
|
||||
|
||||
func TagField(tag string) zap.Field {
|
||||
return zap.String(TagFieldName, tag)
|
||||
}
|
||||
|
||||
// Log messages with the "app" tag.
|
||||
const (
|
||||
dkirillov marked this conversation as resolved
Outdated
dkirillov
commented
Let's group logs by their tags. See https://git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/src/branch/master/internal/logs/logs.go Let's group logs by their tags. See https://git.frostfs.info/TrueCloudLab/frostfs-s3-lifecycler/src/branch/master/internal/logs/logs.go
|
||||
CouldntParseCreationDate = "couldn't parse creation date"
|
||||
CouldNotDetectContentTypeFromPayload = "could not detect Content-Type from payload"
|
||||
CouldNotReceiveObject = "could not receive object"
|
||||
ObjectWasDeleted = "object was deleted"
|
||||
CouldNotSearchForObjects = "could not search for objects"
|
||||
ObjectNotFound = "object not found"
|
||||
ReadObjectListFailed = "read object list failed"
|
||||
FailedToAddObjectToArchive = "failed to add object to archive"
|
||||
FailedToGetObject = "failed to get object"
|
||||
IteratingOverSelectedObjectsFailed = "iterating over selected objects failed"
|
||||
ObjectsNotFound = "objects not found"
|
||||
CloseZipWriter = "close zip writer"
|
||||
ServiceIsRunning = "service is running"
|
||||
dkirillov marked this conversation as resolved
Outdated
dkirillov
commented
Let's drop all comments Let's drop all comments
dkirillov
commented
Probably we can add other comments (just to mention for which tag every group of logs belong) Probably we can add other comments (just to mention for which tag every group of logs belong)
|
||||
ServiceCouldntStartOnConfiguredPort = "service couldn't start on configured port"
|
||||
ServiceHasntStartedSinceItsDisabled = "service hasn't started since it's disabled"
|
||||
ShuttingDownService = "shutting down service"
|
||||
CantShutDownService = "can't shut down service"
|
||||
CantGracefullyShutDownService = "can't gracefully shut down service, force stop"
|
||||
IgnorePartEmptyFormName = "ignore part, empty form name"
|
||||
IgnorePartEmptyFilename = "ignore part, empty filename"
|
||||
CouldNotReceiveMultipartForm = "could not receive multipart/form"
|
||||
CouldNotParseClientTime = "could not parse client time"
|
||||
CouldNotPrepareExpirationHeader = "could not prepare expiration header"
|
||||
CouldNotEncodeResponse = "could not encode response"
|
||||
CouldNotStoreFileInFrostfs = "could not store file in frostfs"
|
||||
AddAttributeToResultObject = "add attribute to result object"
|
||||
FailedToCreateResolver = "failed to create resolver"
|
||||
FailedToCreateWorkerPool = "failed to create worker pool"
|
||||
FailedToReadIndexPageTemplate = "failed to read index page template"
|
||||
SetCustomIndexPageTemplate = "set custom index page template"
|
||||
ContainerResolverWillBeDisabledBecauseOfResolversResolverOrderIsEmpty = "container resolver will be disabled because of resolvers 'resolver_order' is empty"
|
||||
MetricsAreDisabled = "metrics are disabled"
|
||||
NoWalletPathSpecifiedCreatingEphemeralKeyAutomaticallyForThisRun = "no wallet path specified, creating ephemeral key automatically for this run"
|
||||
StartingApplication = "starting application"
|
||||
StartingServer = "starting server"
|
||||
ListenAndServe = "listen and serve"
|
||||
ShuttingDownWebServer = "shutting down web server"
|
||||
FailedToShutdownTracing = "failed to shutdown tracing"
|
||||
SIGHUPConfigReloadStarted = "SIGHUP config reload started"
|
||||
FailedToReloadConfigBecauseItsMissed = "failed to reload config because it's missed"
|
||||
FailedToReloadConfig = "failed to reload config"
|
||||
LogLevelWontBeUpdated = "log level won't be updated"
|
||||
FailedToUpdateResolvers = "failed to update resolvers"
|
||||
FailedToReloadServerParameters = "failed to reload server parameters"
|
||||
SIGHUPConfigReloadCompleted = "SIGHUP config reload completed"
|
||||
AddedPathUploadCid = "added path /upload/{cid}"
|
||||
AddedPathGetCidOid = "added path /get/{cid}/{oid}"
|
||||
AddedPathGetByAttributeCidAttrKeyAttrVal = "added path /get_by_attribute/{cid}/{attr_key}/{attr_val:*}"
|
||||
AddedPathZipCidPrefix = "added path /zip/{cid}/{prefix}"
|
||||
Request = "request"
|
||||
CouldNotFetchAndStoreBearerToken = "could not fetch and store bearer token"
|
||||
FailedToAddServer = "failed to add server"
|
||||
AddServer = "add server"
|
||||
NoHealthyServers = "no healthy servers"
|
||||
FailedToInitializeTracing = "failed to initialize tracing"
|
||||
TracingConfigUpdated = "tracing config updated"
|
||||
ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided = "resolver nns won't be used since rpc_endpoint isn't provided"
|
||||
RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped"
|
||||
RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated"
|
||||
CouldNotLoadFrostFSPrivateKey = "could not load FrostFS private key"
|
||||
|
@ -66,33 +46,86 @@ const (
|
|||
FailedToDialConnectionPool = "failed to dial connection pool"
|
||||
FailedToCreateTreePool = "failed to create tree pool"
|
||||
FailedToDialTreePool = "failed to dial tree pool"
|
||||
AddedStoragePeer = "added storage peer"
|
||||
CouldntGetBucket = "could not get bucket"
|
||||
CouldntPutBucketIntoCache = "couldn't put bucket info into cache"
|
||||
FailedToSumbitTaskToPool = "failed to submit task to pool"
|
||||
FailedToHeadObject = "failed to head object"
|
||||
FailedToIterateOverResponse = "failed to iterate over search response"
|
||||
InvalidCacheEntryType = "invalid cache entry type"
|
||||
InvalidLifetimeUsingDefaultValue = "invalid lifetime, using default value (in seconds)"
|
||||
InvalidCacheSizeUsingDefaultValue = "invalid cache size, using default value"
|
||||
FailedToUnescapeQuery = "failed to unescape query"
|
||||
ServerReconnecting = "reconnecting server..."
|
||||
ServerReconnectedSuccessfully = "server reconnected successfully"
|
||||
ServerReconnectFailed = "failed to reconnect server"
|
||||
WarnDuplicateAddress = "duplicate address"
|
||||
MultinetDialSuccess = "multinet dial successful"
|
||||
MultinetDialFail = "multinet dial failed"
|
||||
ContainerResolverWillBeDisabledBecauseOfResolversResolverOrderIsEmpty = "container resolver will be disabled because of resolvers 'resolver_order' is empty"
|
||||
MetricsAreDisabled = "metrics are disabled"
|
||||
NoWalletPathSpecifiedCreatingEphemeralKeyAutomaticallyForThisRun = "no wallet path specified, creating ephemeral key automatically for this run"
|
||||
SIGHUPConfigReloadStarted = "SIGHUP config reload started"
|
||||
FailedToReloadConfigBecauseItsMissed = "failed to reload config because it's missed"
|
||||
FailedToReloadConfig = "failed to reload config"
|
||||
FailedToUpdateResolvers = "failed to update resolvers"
|
||||
FailedToReloadServerParameters = "failed to reload server parameters"
|
||||
SIGHUPConfigReloadCompleted = "SIGHUP config reload completed"
|
||||
TracingConfigUpdated = "tracing config updated"
|
||||
ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided = "resolver nns won't be used since rpc_endpoint isn't provided"
|
||||
AddedStoragePeer = "added storage peer"
|
||||
InvalidLifetimeUsingDefaultValue = "invalid lifetime, using default value (in seconds)"
|
||||
InvalidCacheSizeUsingDefaultValue = "invalid cache size, using default value"
|
||||
WarnDuplicateAddress = "duplicate address"
|
||||
FailedToLoadMultinetConfig = "failed to load multinet config"
|
||||
MultinetConfigWontBeUpdated = "multinet config won't be updated"
|
||||
ObjectNotFoundByFilePathTrySearchByFileName = "object not found by filePath attribute, try search by fileName"
|
||||
CouldntCacheNetmap = "couldn't cache netmap"
|
||||
FailedToFilterHeaders = "failed to filter headers"
|
||||
FailedToReadFileFromTar = "failed to read file from tar"
|
||||
FailedToGetAttributes = "failed to get attributes"
|
||||
ObjectUploaded = "object uploaded"
|
||||
CloseGzipWriter = "close gzip writer"
|
||||
CloseTarWriter = "close tar writer"
|
||||
FailedToCloseReader = "failed to close reader"
|
||||
FailedToCreateGzipReader = "failed to create gzip reader"
|
||||
GzipReaderSelected = "gzip reader selected"
|
||||
LogLevelWontBeUpdated = "log level won't be updated"
|
||||
TagsLogConfigWontBeUpdated = "tags log config won't be updated"
|
||||
FailedToReadIndexPageTemplate = "failed to read index page template"
|
||||
SetCustomIndexPageTemplate = "set custom index page template"
|
||||
)
|
||||
|
||||
// Log messages with the "datapath" tag.
|
||||
const (
|
||||
CouldntParseCreationDate = "couldn't parse creation date"
|
||||
CouldNotDetectContentTypeFromPayload = "could not detect Content-Type from payload"
|
||||
FailedToAddObjectToArchive = "failed to add object to archive"
|
||||
CloseZipWriter = "close zip writer"
|
||||
IgnorePartEmptyFormName = "ignore part, empty form name"
|
||||
IgnorePartEmptyFilename = "ignore part, empty filename"
|
||||
CouldNotParseClientTime = "could not parse client time"
|
||||
CouldNotPrepareExpirationHeader = "could not prepare expiration header"
|
||||
CouldNotEncodeResponse = "could not encode response"
|
||||
AddAttributeToResultObject = "add attribute to result object"
|
||||
Request = "request"
|
||||
CouldNotFetchAndStoreBearerToken = "could not fetch and store bearer token"
|
||||
CouldntPutBucketIntoCache = "couldn't put bucket info into cache"
|
||||
FailedToIterateOverResponse = "failed to iterate over search response"
|
||||
InvalidCacheEntryType = "invalid cache entry type"
|
||||
FailedToUnescapeQuery = "failed to unescape query"
|
||||
CouldntCacheNetmap = "couldn't cache netmap"
|
||||
FailedToCloseReader = "failed to close reader"
|
||||
FailedToFilterHeaders = "failed to filter headers"
|
||||
FailedToReadFileFromTar = "failed to read file from tar"
|
||||
FailedToGetAttributes = "failed to get attributes"
|
||||
CloseGzipWriter = "close gzip writer"
|
||||
CloseTarWriter = "close tar writer"
|
||||
FailedToCreateGzipReader = "failed to create gzip reader"
|
||||
GzipReaderSelected = "gzip reader selected"
|
||||
CouldNotReceiveMultipartForm = "could not receive multipart/form"
|
||||
ObjectsNotFound = "objects not found"
|
||||
IteratingOverSelectedObjectsFailed = "iterating over selected objects failed"
|
||||
CouldNotGetBucket = "could not get bucket"
|
||||
CouldNotResolveContainerID = "could not resolve container id"
|
||||
FailedToSumbitTaskToPool = "failed to submit task to pool"
|
||||
)
|
||||
|
||||
// Log messages with the "external_storage" tag.
|
||||
const (
|
||||
CouldNotReceiveObject = "could not receive object"
|
||||
CouldNotSearchForObjects = "could not search for objects"
|
||||
ObjectNotFound = "object not found"
|
||||
ReadObjectListFailed = "read object list failed"
|
||||
CouldNotStoreFileInFrostfs = "could not store file in frostfs"
|
||||
FailedToHeadObject = "failed to head object"
|
||||
ObjectNotFoundByFilePathTrySearchByFileName = "object not found by filePath attribute, try search by fileName"
|
||||
FailedToGetObject = "failed to get object"
|
||||
dkirillov marked this conversation as resolved
Outdated
dkirillov
commented
If this messages are unused we should drop it If this messages are unused we should drop it
|
||||
ObjectUploaded = "object uploaded"
|
||||
CouldNotGetContainerInfo = "could not get container info"
|
||||
)
|
||||
|
||||
// Log messages with the "external_storage_tree" tag.
|
||||
const (
|
||||
ObjectWasDeleted = "object was deleted"
|
||||
FailedToGetLatestVersionOfObject = "failed to get latest version of object"
|
||||
FailedToCheckIfSettingsNodeExist = "Failed to check if settings node exists"
|
||||
)
|
||||
|
|
|
@ -17,9 +17,11 @@ func (l LogEventHandler) DialPerformed(sourceIP net.Addr, _, address string, err
|
|||
sourceIPString = sourceIP.Network() + "://" + sourceIP.String()
|
||||
}
|
||||
if err == nil {
|
||||
l.logger.Debug(logs.MultinetDialSuccess, zap.String("source", sourceIPString), zap.String("destination", address))
|
||||
l.logger.Debug(logs.MultinetDialSuccess, zap.String("source", sourceIPString),
|
||||
zap.String("destination", address), logs.TagField(logs.TagApp))
|
||||
} else {
|
||||
l.logger.Debug(logs.MultinetDialFail, zap.String("source", sourceIPString), zap.String("destination", address), zap.Error(err))
|
||||
l.logger.Debug(logs.MultinetDialFail, zap.String("source", sourceIPString),
|
||||
zap.String("destination", address), logs.TagField(logs.TagApp))
|
||||
dkirillov marked this conversation as resolved
Outdated
dkirillov
commented
These logs should have These logs should have `app` tag
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -40,7 +40,7 @@ func (s *Source) NetMapSnapshot(ctx context.Context) (netmap.NetMap, error) {
|
|||
}
|
||||
|
||||
if err = s.netmapCache.Put(netmapSnapshot); err != nil {
|
||||
s.log.Warn(logs.CouldntCacheNetmap, zap.Error(err))
|
||||
s.log.Warn(logs.CouldntCacheNetmap, zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
}
|
||||
|
||||
return netmapSnapshot, nil
|
||||
|
|
|
@ -25,24 +25,24 @@ type Config struct {
|
|||
// Start runs http service with the exposed endpoint on the configured port.
|
||||
func (ms *Service) Start() {
|
||||
if ms.enabled {
|
||||
ms.log.Info(logs.ServiceIsRunning, zap.String("endpoint", ms.Addr))
|
||||
ms.log.Info(logs.ServiceIsRunning, zap.String("endpoint", ms.Addr), logs.TagField(logs.TagApp))
|
||||
err := ms.ListenAndServe()
|
||||
if err != nil && err != http.ErrServerClosed {
|
||||
ms.log.Warn(logs.ServiceCouldntStartOnConfiguredPort)
|
||||
ms.log.Warn(logs.ServiceCouldntStartOnConfiguredPort, logs.TagField(logs.TagApp))
|
||||
}
|
||||
} else {
|
||||
ms.log.Info(logs.ServiceHasntStartedSinceItsDisabled)
|
||||
ms.log.Info(logs.ServiceHasntStartedSinceItsDisabled, logs.TagField(logs.TagApp))
|
||||
}
|
||||
}
|
||||
|
||||
// ShutDown stops the service.
|
||||
func (ms *Service) ShutDown(ctx context.Context) {
|
||||
ms.log.Info(logs.ShuttingDownService, zap.String("endpoint", ms.Addr))
|
||||
ms.log.Info(logs.ShuttingDownService, zap.String("endpoint", ms.Addr), logs.TagField(logs.TagApp))
|
||||
err := ms.Shutdown(ctx)
|
||||
if err != nil {
|
||||
ms.log.Error(logs.CantGracefullyShutDownService, zap.Error(err))
|
||||
ms.log.Error(logs.CantGracefullyShutDownService, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
if err = ms.Close(); err != nil {
|
||||
ms.log.Panic(logs.CantShutDownService, zap.Error(err))
|
||||
ms.log.Panic(logs.CantShutDownService, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
Can we move this filed to
appSettings
?