[#20] Support logging with tags
Some checks failed
/ DCO (pull_request) Successful in 48s
/ Vulncheck (pull_request) Successful in 1m17s
/ Builds (pull_request) Failing after 1m5s
/ Lint (pull_request) Successful in 2m32s
/ Tests (pull_request) Successful in 1m7s

Allow logging additional "tagged" log entries.

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
This commit is contained in:
Denis Kirillov 2024-09-23 17:38:40 +03:00
parent 4f250eba42
commit 2f7a73ee36
5 changed files with 80 additions and 18 deletions

View file

@ -2,8 +2,10 @@ package main
import (
"fmt"
"strings"
"git.frostfs.info/TrueCloudLab/zapjournald"
"github.com/nspcc-dev/neo-go/cli/options"
"github.com/spf13/viper"
"github.com/ssgreg/journald"
"go.uber.org/zap"
@ -33,18 +35,32 @@ func pickLogger(v *viper.Viper) *Logger {
func newStdoutLogger(v *viper.Viper) *Logger {
c := newZapLogConfig(v)
c.Encoding = "console"
l, err := c.Build(
zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)),
)
out, errSink, err := openZapSinks(c)
if err != nil {
panic(fmt.Sprintf("build zap logger instance: %v", err))
panic(fmt.Sprintf("open zap sinks: %v", err.Error()))
}
core := zapcore.NewCore(zapcore.NewConsoleEncoder(c.EncoderConfig), out, c.Level)
core = options.NewFilteringCore(core, filteringLogOption(v))
l := zap.New(core, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)), zap.ErrorOutput(errSink))
return &Logger{logger: l, lvl: c.Level}
}
// openZapSinks opens the write syncers for cfg.OutputPaths and
// cfg.ErrorOutputPaths. If the error sink fails to open, the already-opened
// output sink is closed before returning so no descriptor is leaked.
func openZapSinks(cfg zap.Config) (zapcore.WriteSyncer, zapcore.WriteSyncer, error) {
	out, closeOut, err := zap.Open(cfg.OutputPaths...)
	if err != nil {
		return nil, nil, err
	}
	errOut, _, errOpen := zap.Open(cfg.ErrorOutputPaths...)
	if errOpen != nil {
		// Roll back the first open; callers get no sinks on failure.
		closeOut()
		return nil, nil, errOpen
	}
	return out, errOut, nil
}
func newJournaldLogger(v *viper.Viper) *Logger {
c := newZapLogConfig(v)
@ -53,7 +69,8 @@ func newJournaldLogger(v *viper.Viper) *Logger {
encoder := zapjournald.NewPartialEncoder(zapcore.NewConsoleEncoder(c.EncoderConfig), zapjournald.SyslogFields)
core := zapjournald.NewCore(c.Level, encoder, &journald.Journal{}, zapjournald.SyslogFields)
coreWithContext := core.With([]zapcore.Field{
filteringCore := options.NewFilteringCore(core, filteringLogOption(v))
coreWithContext := filteringCore.With([]zapcore.Field{
zapjournald.SyslogFacility(zapjournald.LogDaemon),
zapjournald.SyslogIdentifier(),
zapjournald.SyslogPid(),
@ -84,6 +101,25 @@ func newZapLogConfig(v *viper.Viper) zap.Config {
return c
}
// filteringLogOption returns a FilterFunc deciding whether a log entry is
// emitted. Messages without the "tag:" prefix are always logged; tagged
// messages are logged only when the tag (the part after "tag:") is listed
// in the logger.tags configuration setting.
func filteringLogOption(v *viper.Viper) options.FilterFunc {
	// Build the allowed-tag set once: the returned func runs on every log
	// entry, so per-entry work must be a cheap map lookup rather than a
	// linear scan of the configured slice.
	allowed := make(map[string]struct{})
	for _, tag := range v.GetStringSlice(cfgLoggerTags) {
		allowed[tag] = struct{}{}
	}
	const tagPrefix = "tag:"
	return func(entry zapcore.Entry) bool {
		if !strings.HasPrefix(entry.Message, tagPrefix) {
			return true
		}
		_, ok := allowed[strings.TrimPrefix(entry.Message, tagPrefix)]
		return ok
	}
}
func getLogLevel(lvlStr string) (zapcore.Level, error) {
var lvl zapcore.Level
err := lvl.UnmarshalText([]byte(lvlStr))

View file

@ -41,6 +41,7 @@ const (
cfgLoggerSamplingEnabled = "logger.sampling.enabled"
cfgLoggerSamplingInitial = "logger.sampling.initial"
cfgLoggerSamplingThereafter = "logger.sampling.thereafter"
cfgLoggerTags = "logger.tags"
// Morph.
cfgMorphRPCEndpointPrefixTmpl = "morph.rpc_endpoint.%d."

View file

@ -53,15 +53,30 @@ logger:
enabled: false
initial: 100
thereafter: 100
tags:
- "expiration_delete_object"
- "multipart_delete_object"
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|-----------------------|----------|---------------|---------------|--------------------------------------------------------------------------------------------------|
|-----------------------|------------|---------------|---------------|--------------------------------------------------------------------------------------------------|
| `level` | `string` | yes | `info` | Logging level. Possible values: `debug`, `info`, `warn`, `dpanic`, `panic`, `fatal`. |
| `destination` | `string` | no | `stdout` | Destination for logger: `stdout` or `journald` |
| `sampling.enabled` | `bool` | no | `false` | Enable sampling. |
| `sampling.initial` | `int` | no | `100` | Logs first N of the same (level and message) log entries each second. |
| `sampling.thereafter` | `int` | no | `100` | Logs every Mth of the same (level and message) log entries after first N entries in that second. |
| `tags` | `[]string` | no | | Tagged log entries that should additionally be logged (see the available tags in the "Tags" section below). |
## Tags
There are additional log entries that may hurt performance; they are logged only when enabled via the `logger.tags`
parameter. Available tags:
* `expiration_delete_object`
* `expiration_process_version`
* `expiration_remove_version`
* `multipart_delete_object`
* `multipart_process_upload`
# `pprof` section

View file

@ -179,6 +179,7 @@ func (e *Executor) abortMultiparts(ctx context.Context, ni *netmap.NetworkInfo,
}
for _, multipart := range multiparts {
e.log.Debug(logs.TagMultipartProcessUpload, zap.String("key", multipart.Key), zap.String("upload_id", multipart.UploadID), zap.Uint64("node_id", multipart.ID))
if !matcherFn(multipart) {
continue
}
@ -298,6 +299,9 @@ func (e *Executor) expireObjects(ctx context.Context, ni *netmap.NetworkInfo, ru
return fmt.Errorf("get node version from stream: %w", err)
}
e.log.Debug(logs.TagExpirationProcessVersion, zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", nodeVersion.OID),
zap.String("filepath", nodeVersion.FilePath), zap.Uint64("node_id", nodeVersion.ID))
if nodeVersion.FilePath != latestObjName {
if err = e.expireObject(ctx, versions, bktInfo, ni, matcherFn, settings); err != nil {
e.log.Warn(logs.FailedToExpireObject, zap.String("object", latestObjName), zap.Error(err))
@ -579,7 +583,7 @@ func (e *Executor) deleteObject(ctx context.Context, version *data.NodeVersion,
addr.SetContainer(bktInfo.CID)
addr.SetObject(version.OID)
e.log.Debug(logs.DeleteObject, zap.String("address", addr.EncodeToString()), zap.String("filepath", version.FilePath))
e.log.Debug(logs.TagExpirationDeleteObject, zap.Stringer("address", addr), zap.String("filepath", version.FilePath))
if err := e.frostfs.DeleteObject(ctx, addr); err != nil && !isNotFound(err) {
e.log.Warn(logs.DeleteObjectVersionFromStorage, zap.String("key", version.FilePath),
zap.String("address", addr.EncodeToString()), zap.Error(err))
@ -620,7 +624,7 @@ func (e *Executor) addDeleteMarker(ctx context.Context, version *data.NodeVersio
}
func (e *Executor) removeVersion(ctx context.Context, version *data.NodeVersion, bktInfo *data.BucketInfo) {
e.log.Debug(logs.RemoveVersion, zap.String("cid", bktInfo.CID.EncodeToString()), zap.String("oid", version.OID.EncodeToString()),
e.log.Debug(logs.TagExpirationRemoveVersion, zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", version.OID),
zap.String("filepath", version.FilePath), zap.Uint64("node_id", version.ID))
if err := e.tree.RemoveVersion(ctx, bktInfo, version.ID); err != nil {
e.log.Warn(logs.DeleteObjectVersionFromTree, zap.String("key", version.FilePath),
@ -706,7 +710,7 @@ func (e *Executor) abortMultipart(ctx context.Context, bktInfo *data.BucketInfo,
addr.SetContainer(bktInfo.CID)
for _, part := range parts {
addr.SetObject(part.OID)
e.log.Debug(logs.DeleteObject, zap.String("address", addr.EncodeToString()), zap.Int("part", part.Number))
e.log.Debug(logs.TagMultipartDeleteObject, zap.Stringer("address", addr), zap.Int("part", part.Number))
if err = e.frostfs.DeleteObject(ctx, addr); err != nil {
return fmt.Errorf("delete object '%s': %w", addr.EncodeToString(), err)
}

View file

@ -65,7 +65,13 @@ const (
EpochMismatched = "epoch mismatched"
UnknownCredentialSource = "unknown credential source to use"
AbortMultipart = "abort multipart"
DeleteObject = "delete object"
RemoveVersion = "remove version"
ProcessingJob = "processing job"
)
// Tagged log messages. Entries using these messages are suppressed by default
// and emitted only when the corresponding tag (the part after the "tag:"
// prefix) is listed in the logger.tags configuration setting; see
// filteringLogOption, which strips the "tag:" prefix before matching.
const (
TagExpirationDeleteObject = "tag:expiration_delete_object"
TagExpirationProcessVersion = "tag:expiration_process_version"
TagExpirationRemoveVersion = "tag:expiration_remove_version"
TagMultipartDeleteObject = "tag:multipart_delete_object"
TagMultipartProcessUpload = "tag:multipart_process_upload"
)