[#369] Write log to file

Signed-off-by: Nikita Zinkevich <n.zinkevich@yadro.com>
Nikita Zinkevich 2024-07-16 15:19:47 +03:00
parent 0a6bf3a5b0
commit c71d47164d
11 changed files with 168 additions and 26 deletions


@@ -106,3 +106,9 @@ const (
PartNumberQuery = "partNumber"
LegalHoldQuery = "legal-hold"
)
const (
StdoutPath = "stdout"
StderrPath = "stderr"
SinkName = "lumberjack"
)


@@ -1,27 +1,85 @@
package middleware
import (
"bytes"
"fmt"
"io"
"net/http"
"net/url"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"gopkg.in/natefinch/lumberjack.v2"
)
type LogHTTPSettings struct {
Enabled bool
MaxBody int64
type LogHTTPConfig struct {
Enabled bool
MaxBody int64
MaxLogSize int
OutputPath string
UseGzip bool
}
// getFileLogger returns a file logger for HTTP requests, creating a new one if it does not exist.
func (h *LogHTTPConfig) getFileLogger() (*zap.Logger, error) {
c := newLoggerConfig()
c.OutputPaths = []string{h.OutputPath}
if h.OutputPath != StdoutPath && h.OutputPath != StderrPath && h.OutputPath != "" {
err := h.registerOutputSink()
if err != nil {
return nil, err
}
c.OutputPaths[0] = SinkName + ":" + h.OutputPath
}
return c.Build()
}
// registerOutputSink registers a sink for logger file output.
func (h *LogHTTPConfig) registerOutputSink() error {
logRoller := &lumberjack.Logger{
Filename: h.OutputPath,
MaxSize: h.MaxLogSize,
Compress: h.UseGzip,
}
err := zap.RegisterSink(SinkName, func(_ *url.URL) (zap.Sink, error) {
return lumberjackSink{
Logger: logRoller,
}, nil
})
if err != nil {
return err
}
return nil
}
type lumberjackSink struct {
*lumberjack.Logger
}
func (lumberjackSink) Sync() error {
return nil
}
// LogHTTP logs HTTP parameters of the S3 request.
func LogHTTP(l *zap.Logger, settings LogHTTPSettings) Func {
func LogHTTP(l *zap.Logger, config *LogHTTPConfig) Func {
fileLogger, err := config.getFileLogger()
if err != nil {
return func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
h.ServeHTTP(w, r)
})
}
}
return func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if !settings.Enabled {
if !config.Enabled {
h.ServeHTTP(w, r)
return
}
var httplog = l.With(
var httplog = fileLogger.With(
zap.String("from", r.RemoteAddr),
zap.String("URI", r.RequestURI),
zap.String("method", r.Method),
@@ -29,11 +87,11 @@ func LogHTTP(l *zap.Logger, settings LogHTTPSettings) Func {
httplog = withFieldIfExist(httplog, "query", r.URL.Query())
httplog = withFieldIfExist(httplog, "headers", r.Header)
if r.ContentLength != 0 && r.ContentLength <= settings.MaxBody {
if r.ContentLength > 0 && r.ContentLength <= config.MaxBody {
var err error
httplog, err = withBody(httplog, r)
if err != nil {
l.Error("read body error")
l.Error("read body error", zap.Error(err))
}
}
httplog.Info(logs.RequestHTTP)
@@ -42,17 +100,34 @@ func LogHTTP(l *zap.Logger, settings LogHTTPSettings) Func {
}
}
// newLoggerConfig creates a new zap.Config with base fields disabled.
func newLoggerConfig() zap.Config {
c := zap.NewProductionConfig()
c.DisableCaller = true
c.DisableStacktrace = true
c.EncoderConfig.MessageKey = zapcore.OmitKey
c.EncoderConfig.LevelKey = zapcore.OmitKey
c.EncoderConfig.TimeKey = zapcore.OmitKey
c.EncoderConfig.FunctionKey = zapcore.OmitKey
return c
}
// withBody reads the request body and attaches it to the log output.
func withBody(httplog *zap.Logger, r *http.Request) (*zap.Logger, error) {
var body = make([]byte, r.ContentLength)
_, err := r.Body.Read(body)
if err != nil && err != io.EOF {
return nil, err
body, err := io.ReadAll(r.Body)
if err != nil {
return nil, fmt.Errorf("read body error: %w", err)
}
defer r.Body.Close()
r.Body = io.NopCloser(bytes.NewBuffer(body))
httplog = httplog.With(zap.String("body", string(body)))
return httplog, nil
}
// withFieldIfExist attaches data to the log output if it is not empty.
func withFieldIfExist(log *zap.Logger, label string, data map[string][]string) *zap.Logger {
if len(data) != 0 {
log = log.With(zap.Any(label, data))
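For reference, below is a minimal standalone sketch of the zap sink mechanism this middleware relies on: a lumberjack-backed sink is registered under a custom URL scheme, and the logger's output path is routed through that scheme. The scheme name ("rotating") and the file path are illustrative only and are not part of this commit.

```go
package main

import (
	"net/url"

	"go.uber.org/zap"
	"gopkg.in/natefinch/lumberjack.v2"
)

// rotatingSink adapts *lumberjack.Logger (which already implements Write and
// Close) to zap.Sink by adding a no-op Sync.
type rotatingSink struct {
	*lumberjack.Logger
}

func (rotatingSink) Sync() error { return nil }

func main() {
	roller := &lumberjack.Logger{
		Filename: "/tmp/http.log", // illustrative path
		MaxSize:  50,              // rotate after 50 MB
		Compress: true,            // gzip rotated files
	}

	// Register a factory for the custom "rotating" scheme; zap routes any
	// output path using this scheme to the sink returned here.
	if err := zap.RegisterSink("rotating", func(_ *url.URL) (zap.Sink, error) {
		return rotatingSink{Logger: roller}, nil
	}); err != nil {
		panic(err)
	}

	cfg := zap.NewProductionConfig()
	cfg.OutputPaths = []string{"rotating:/tmp/http.log"}

	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	logger.Info("written to the rotated log file")
}
```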


@@ -110,7 +110,7 @@ type Config struct {
Handler Handler
Center s3middleware.Center
Log *zap.Logger
LogHTTP s3middleware.LogHTTPSettings
LogHTTP *s3middleware.LogHTTPConfig
Metrics *metrics.AppMetrics
MiddlewareSettings Settings
@@ -127,8 +127,12 @@ type Config struct {
func NewRouter(cfg Config) *chi.Mux {
api := chi.NewRouter()
if cfg.LogHTTP.Enabled {
api.Use(s3middleware.LogHTTP(cfg.Log, cfg.LogHTTP))
}
api.Use(
s3middleware.LogHTTP(cfg.Log, cfg.LogHTTP),
s3middleware.Request(cfg.Log, cfg.MiddlewareSettings),
middleware.ThrottleWithOpts(cfg.Throttle),
middleware.Recoverer,


@@ -75,6 +75,7 @@ func prepareRouter(t *testing.T, opts ...option) *routerMock {
Handler: handlerTestMock,
Center: &centerMock{t: t},
Log: logger,
LogHTTP: new(s3middleware.LogHTTPConfig),
Metrics: metrics.NewAppMetrics(metricsConfig),
MiddlewareSettings: middlewareSettings,
PolicyChecker: policyChecker,


@@ -86,8 +86,7 @@ type (
appSettings struct {
logLevel zap.AtomicLevel
httpLoggingEnabled bool
maxHTTPLogBody int64
httpLogging *s3middleware.LogHTTPConfig
maxClient maxClientsConfig
defaultMaxAge int
reconnectInterval time.Duration
@@ -218,12 +217,11 @@ func (a *App) initLayer(ctx context.Context) {
func newAppSettings(log *Logger, v *viper.Viper) *appSettings {
settings := &appSettings{
logLevel: log.lvl,
httpLogging: new(s3middleware.LogHTTPConfig),
maxClient: newMaxClients(v),
defaultMaxAge: fetchDefaultMaxAge(v, log.logger),
reconnectInterval: fetchReconnectInterval(v),
frostfsidValidation: v.GetBool(cfgFrostfsIDValidationEnabled),
httpLoggingEnabled: v.GetBool(cfgLoggerRequestEnabled),
maxHTTPLogBody: v.GetInt64(cfgLoggerRequestMaxBody),
}
settings.resolveZoneList = v.GetStringSlice(cfgResolveBucketAllow)
@@ -250,6 +248,7 @@ func (s *appSettings) update(v *viper.Viper, log *zap.Logger) {
s.setRetryMaxBackoff(fetchRetryMaxBackoff(v))
s.setRetryStrategy(fetchRetryStrategy(v))
s.setVHSSettings(v, log)
s.updateHTTPLoggingSettings(v)
}
func (s *appSettings) updateNamespacesSettings(v *viper.Viper, log *zap.Logger) {
@@ -645,6 +644,17 @@ func newMaxClients(cfg *viper.Viper) maxClientsConfig {
return config
}
func (s *appSettings) updateHTTPLoggingSettings(cfg *viper.Viper) {
s.mu.Lock()
defer s.mu.Unlock()
s.httpLogging.Enabled = cfg.GetBool(cfgHTTPLoggingEnabled)
s.httpLogging.MaxBody = cfg.GetInt64(cfgHTTPLoggingMaxBody)
s.httpLogging.MaxLogSize = cfg.GetInt(cfgHTTPLoggingMaxLogSize)
s.httpLogging.OutputPath = cfg.GetString(cfgHTTPLoggingDestination)
s.httpLogging.UseGzip = cfg.GetBool(cfgHTTPLoggingGzip)
}
func getPools(ctx context.Context, logger *zap.Logger, cfg *viper.Viper) (*pool.Pool, *treepool.Pool, *keys.PrivateKey) {
var prm pool.InitParameters
var prmTree treepool.InitParameters
@@ -762,10 +772,7 @@ func (a *App) Serve(ctx context.Context) {
Handler: a.api,
Center: a.ctr,
Log: a.log,
LogHTTP: s3middleware.LogHTTPSettings{
Enabled: a.settings.httpLoggingEnabled,
MaxBody: a.settings.maxHTTPLogBody,
},
LogHTTP: a.settings.httpLogging,
Metrics: a.metrics,
MiddlewareSettings: a.settings,


@@ -79,8 +79,12 @@ const ( // Settings.
cfgLoggerLevel = "logger.level"
cfgLoggerDestination = "logger.destination"
cfgLoggerRequestEnabled = "http_logging.enabled"
cfgLoggerRequestMaxBody = "http_logging.max_body"
// HttpLogging.
cfgHTTPLoggingEnabled = "http_logging.enabled"
cfgHTTPLoggingMaxBody = "http_logging.max_body"
cfgHTTPLoggingMaxLogSize = "http_logging.max_log_size"
cfgHTTPLoggingDestination = "http_logging.destination"
cfgHTTPLoggingGzip = "http_logging.gzip"
// Wallet.
cfgWalletPath = "wallet.path"
@@ -768,8 +772,11 @@ func newSettings() *viper.Viper {
v.SetDefault(cfgLoggerDestination, "stdout")
// http logger
v.SetDefault(cfgLoggerRequestEnabled, false)
v.SetDefault(cfgLoggerRequestMaxBody, 1024)
v.SetDefault(cfgHTTPLoggingEnabled, false)
v.SetDefault(cfgHTTPLoggingMaxBody, 1024)
v.SetDefault(cfgHTTPLoggingMaxLogSize, 50)
v.SetDefault(cfgHTTPLoggingDestination, "stdout")
v.SetDefault(cfgHTTPLoggingGzip, false)
// pool:
v.SetDefault(cfgPoolErrorThreshold, defaultPoolErrorThreshold)


@@ -52,6 +52,17 @@ S3_GW_CONFIG=/path/to/config/yaml
# Logger
S3_GW_LOGGER_LEVEL=debug
# HTTP logger
S3_GW_HTTP_LOGGING_ENABLED=false
# max body size to log
S3_GW_HTTP_LOGGING_MAX_BODY=1024
# max log size in MB
S3_GW_HTTP_LOGGING_MAX_LOG_SIZE=20
# use log compression
S3_GW_HTTP_LOGGING_GZIP=true
# possible destination output values: filesystem path, url, "stdout", "stderr"
S3_GW_HTTP_LOGGING_DESTINATION=stdout
# RPC endpoint and order of resolving of bucket names
S3_GW_RPC_ENDPOINT=http://morph-chain.frostfs.devenv:30333/
S3_GW_RESOLVE_ORDER="nns dns"


@@ -61,6 +61,12 @@ http_logging:
enabled: false
# max body size to log
max_body: 1024
# max log size in MB
max_log_size: 20
# use log compression
gzip: true
# possible output values: filesystem path, url, "stdout", "stderr"
destination: stdout
# RPC endpoint and order of resolving of bucket names
rpc_endpoint: http://morph-chain.frostfs.devenv:30333


@@ -176,6 +176,7 @@ There are some custom types used for brevity:
| `placement_policy` | [Placement policy configuration](#placement_policy-section) |
| `server` | [Server configuration](#server-section) |
| `logger` | [Logger configuration](#logger-section) |
| `http_logging` | [HTTP request logger configuration](#http_logging-section) |
| `cache` | [Cache configuration](#cache-section) |
| `cors` | [CORS configuration](#cors-section) |
| `pprof` | [Pprof configuration](#pprof-section) |
@@ -376,6 +377,27 @@ logger:
| `level` | `string` | yes | `debug` | Logging level.<br/>Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. |
| `destination` | `string` | no | `stdout` | Destination for logger: `stdout` or `journald` |
### `http_logging` section
```yaml
http_logging:
enabled: false
max_body: 1024
max_log_size: 20
gzip: true
destination: stdout
```
| Parameter      | Type   | SIGHUP reload | Default value | Description                                                                                                                                                                       |
|----------------|--------|---------------|---------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `enabled`      | bool   | yes           | `false`       | Flag to enable the logger.                                                                                                                                                        |
| `max_body`     | int    | yes           | `1024`        | Max body size to log, in bytes.                                                                                                                                                   |
| `max_log_size` | int    | no            | `50`          | Log file size threshold (in megabytes) at which the file is rotated: the current file is renamed with a timestamp suffix and a new empty file with the original name is created. |
| `gzip`         | bool   | no            | `false`       | Whether to compress rotated log files with gzip.                                                                                                                                  |
| `destination`  | string | no            | `stdout`      | Output path for the log: a file path, or the reserved values `stdout`/`stderr` to write to the corresponding stream.                                                             |
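For example, to write request logs to a rotated file instead of stdout (the file path below is illustrative):

```yaml
http_logging:
  enabled: true
  max_body: 1024
  max_log_size: 50                       # rotate after 50 MB
  gzip: true                             # compress rotated files
  destination: /var/log/s3-gw/http.log   # illustrative path
```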
### `cache` section
```yaml

go.mod

@@ -35,6 +35,7 @@ require (
golang.org/x/text v0.14.0
google.golang.org/grpc v1.62.0
google.golang.org/protobuf v1.33.0
gopkg.in/natefinch/lumberjack.v2 v2.2.1
)
require (

go.sum

@@ -705,6 +705,8 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=