[#369] Enhanced http requests logging #426
@@ -106,3 +106,9 @@ const (
 	PartNumberQuery = "partNumber"
 	LegalHoldQuery = "legal-hold"
 )
+
+const (
+	StdoutPath = "stdout"
+	StderrPath = "stderr"
+	SinkName   = "lumberjack"
+)
@@ -1,27 +1,85 @@
 package middleware

 import (
+	"bytes"
+	"fmt"
 	"io"
 	"net/http"
+	"net/url"

 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
 	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+	"gopkg.in/natefinch/lumberjack.v2"
 )

-type LogHTTPSettings struct {
+type LogHTTPConfig struct {
 	Enabled bool
 	MaxBody int64
+	MaxLogSize int
+	OutputPath string
+	UseGzip bool
+}
+
+// getFileLogger returns file logger for http requests or creates new if not exists.
+func (h *LogHTTPConfig) getFileLogger() (*zap.Logger, error) {
+	c := newLoggerConfig()
+	c.OutputPaths = []string{h.OutputPath}
+	if h.OutputPath != StdoutPath && h.OutputPath != StderrPath && h.OutputPath != "" {
+		err := h.registerOutputSink()
+		if err != nil {
+			return nil, err
+		}
+		c.OutputPaths[0] = SinkName + ":" + h.OutputPath
+	}
+
+	return c.Build()
+}
+
+// registerOutputSink registers sink for logger file output.
+func (h *LogHTTPConfig) registerOutputSink() error {
+	logRoller := &lumberjack.Logger{
+		Filename: h.OutputPath,
+		MaxSize:  h.MaxLogSize,
+		Compress: h.UseGzip,
+	}
alexvanin marked this conversation as resolved
Outdated
alexvanin commented:
It looks quite odd for me to see a `config` field initialized by the function that reads the config as a parameter. Either `fileLogger` should not be a part of the config structure, or `getFileLogger` should be defined as a function on the config struct. Prefer making [pure functions](https://en.wikipedia.org/wiki/Pure_function) when it is possible and reasonable.
I suggest to do something like
```go
func LogHTTP() Func {
	return func(h http.Handler) http.Handler {
		fileLogger, err := getFileLogger(config)
		if err != nil {
			// log the error and skip this middleware by returning a handler
			// with a single h.ServeHTTP(w, r) call
		}
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			log := fileLogger.With(...)
			...
			h.ServeHTTP(w, r)
		})
	}
}
```
+	err := zap.RegisterSink(SinkName, func(_ *url.URL) (zap.Sink, error) {
+		return lumberjackSink{
dkirillov marked this conversation as resolved
Outdated
dkirillov commented:
We need to close the body after reading.
Besides, we must preserve the initial body in the request: after the read the body doesn't contain anything and in the handlers we get errors. For example:
```
2024-07-15T11:11:06.778+0300 error handler/util.go:50 request failed {"request_id": "07e96f1e-13a7-423c-91f7-b69513209ebd", "method": "PutObject", "bucket": "test", "object": "tmp", "description": "could not upload object", "user": "NbUgTSFvPmsRxmGeWpuuGeJUoRoi6PErcM", "error": "InvalidDigest: 400 => The Content-Md5 you specified is not valid.", "body close errors": [], "status": 400}
```
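A minimal sketch of the pattern this comment asks for (assuming the `bytes`, `fmt`, `io` and `net/http` imports added in this file); the updated `withBody` below takes the same approach:
```go
// captureBody is a hypothetical helper: it reads the body once, closes the
// original reader and puts a re-readable copy back into the request, so the
// payload can be logged without breaking downstream handlers.
func captureBody(r *http.Request) ([]byte, error) {
	body, err := io.ReadAll(r.Body)
	if err != nil {
		return nil, fmt.Errorf("read body: %w", err)
	}
	if err = r.Body.Close(); err != nil {
		return nil, fmt.Errorf("close body: %w", err)
	}
	// Restore the body so handlers still receive the full payload.
	r.Body = io.NopCloser(bytes.NewReader(body))
	return body, nil
}
```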
+			Logger: logRoller,
+		}, nil
+	})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+type lumberjackSink struct {
+	*lumberjack.Logger
+}
+
+func (lumberjackSink) Sync() error {
+	return nil
 }

 // LogHTTP logs http parameters from s3 request.
-func LogHTTP(l *zap.Logger, settings LogHTTPSettings) Func {
+func LogHTTP(l *zap.Logger, config *LogHTTPConfig) Func {
+	fileLogger, err := config.getFileLogger()
+	if err != nil {
+		return func(h http.Handler) http.Handler {
+			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+				h.ServeHTTP(w, r)
+			})
+		}
+	}
 	return func(h http.Handler) http.Handler {
 		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-			if !settings.Enabled {
+			if !config.Enabled {
 				h.ServeHTTP(w, r)
 				return
 			}
-			var httplog = l.With(
+			var httplog = fileLogger.With(
 				zap.String("from", r.RemoteAddr),
 				zap.String("URI", r.RequestURI),
 				zap.String("method", r.Method),
@@ -29,11 +87,11 @@ func LogHTTP(l *zap.Logger, settings LogHTTPSettings) Func {

 			httplog = withFieldIfExist(httplog, "query", r.URL.Query())
 			httplog = withFieldIfExist(httplog, "headers", r.Header)
-			if r.ContentLength != 0 && r.ContentLength <= settings.MaxBody {
+			if r.ContentLength > 0 && r.ContentLength <= config.MaxBody {
 				var err error
 				httplog, err = withBody(httplog, r)
 				if err != nil {
-					l.Error("read body error")
+					l.Error("read body error", zap.Error(err))
 				}
 			}
 			httplog.Info(logs.RequestHTTP)
@@ -42,17 +100,34 @@ func LogHTTP(l *zap.Logger, settings LogHTTPSettings) Func {
 	}
 }

+// newLoggerConfig creates new zap.Config with disabled base fields.
+func newLoggerConfig() zap.Config {
+	c := zap.NewProductionConfig()
+	c.DisableCaller = true
+	c.DisableStacktrace = true
+	c.EncoderConfig.MessageKey = zapcore.OmitKey
+	c.EncoderConfig.LevelKey = zapcore.OmitKey
+	c.EncoderConfig.TimeKey = zapcore.OmitKey
+	c.EncoderConfig.FunctionKey = zapcore.OmitKey
+
+	return c
+}
+
alexvanin marked this conversation as resolved
Outdated
alexvanin commented:
As the previous comment mentioned, try to make this function pure by returning `sinkPath` instead of modifying the config. This pattern is valid overall, but it doesn't make sense here.
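A possible pure variant along the lines of this suggestion (a sketch, not what the PR implements): compute only the output path, using the `StdoutPath`, `StderrPath` and `SinkName` constants added above, and leave the `zap.Config` untouched:
```go
// sinkPath is a hypothetical pure helper: it returns the output path to use,
// prefixing plain file paths with the registered sink scheme, and never
// mutates any logger configuration.
func sinkPath(outputPath string) string {
	if outputPath == "" || outputPath == StdoutPath || outputPath == StderrPath {
		return outputPath
	}
	return SinkName + ":" + outputPath
}
```
The caller could then build the config with `c.OutputPaths = []string{sinkPath(h.OutputPath)}`, keeping `getFileLogger` free of side effects.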
+// withBody reads body and attach it to log output.
 func withBody(httplog *zap.Logger, r *http.Request) (*zap.Logger, error) {
-	var body = make([]byte, r.ContentLength)
-	_, err := r.Body.Read(body)
-	if err != nil && err != io.EOF {
-		return nil, err
+	body, err := io.ReadAll(r.Body)
+	if err != nil {
+		return nil, fmt.Errorf("read body error: %w", err)
 	}
+	defer r.Body.Close()
+	r.Body = io.NopCloser(bytes.NewBuffer(body))

 	httplog = httplog.With(zap.String("body", string(body)))

 	return httplog, nil
 }

+// withFieldIfExist checks whether data is not empty and attach it to log output.
 func withFieldIfExist(log *zap.Logger, label string, data map[string][]string) *zap.Logger {
 	if len(data) != 0 {
 		log = log.With(zap.Any(label, data))
@@ -109,7 +109,7 @@ type Config struct {
 	Handler Handler
 	Center s3middleware.Center
 	Log *zap.Logger
-	LogHTTP s3middleware.LogHTTPSettings
+	LogHTTP *s3middleware.LogHTTPConfig
 	Metrics *metrics.AppMetrics

 	MiddlewareSettings Settings
@@ -129,8 +129,12 @@ type Config struct {
 func NewRouter(cfg Config) *chi.Mux {
 	api := chi.NewRouter()

+	if cfg.LogHTTP.Enabled {
dkirillov commented:
If enabling this middleware is configured only once (and we don't support SIGHUP reload), we can add it conditionally (like `s3middleware.FrostfsIDValidation`).
+		api.Use(s3middleware.LogHTTP(cfg.Log, cfg.LogHTTP))
+	}
+
 	api.Use(
-		s3middleware.LogHTTP(cfg.Log, cfg.LogHTTP),
 		s3middleware.Request(cfg.Log, cfg.MiddlewareSettings),
 		middleware.ThrottleWithOpts(cfg.Throttle),
 		middleware.Recoverer,
@@ -75,6 +75,7 @@ func prepareRouter(t *testing.T, opts ...option) *routerMock {
 		Handler: handlerTestMock,
 		Center: &centerMock{t: t},
 		Log: logger,
+		LogHTTP: new(s3middleware.LogHTTPConfig),
 		Metrics: metrics.NewAppMetrics(metricsConfig),
 		MiddlewareSettings: middlewareSettings,
 		PolicyChecker: policyChecker,
@@ -83,8 +83,7 @@ type (

 	appSettings struct {
 		logLevel zap.AtomicLevel
-		httpLoggingEnabled bool
-		maxHTTPLogBody int64
+		httpLogging *s3middleware.LogHTTPConfig
 		maxClient maxClientsConfig
 		defaultMaxAge int
 		reconnectInterval time.Duration
@@ -191,12 +190,11 @@ func (a *App) initLayer() {
 func newAppSettings(log *Logger, v *viper.Viper) *appSettings {
 	settings := &appSettings{
 		logLevel: log.lvl,
+		httpLogging: new(s3middleware.LogHTTPConfig),
 		maxClient: newMaxClients(v),
 		defaultMaxAge: fetchDefaultMaxAge(v, log.logger),
 		reconnectInterval: fetchReconnectInterval(v),
 		frostfsidValidation: v.GetBool(cfgFrostfsIDValidationEnabled),
-		httpLoggingEnabled: v.GetBool(cfgLoggerRequestEnabled),
-		maxHTTPLogBody: v.GetInt64(cfgLoggerRequestMaxBody),
 	}

 	settings.resolveZoneList = v.GetStringSlice(cfgResolveBucketAllow)
@@ -222,6 +220,7 @@ func (s *appSettings) update(v *viper.Viper, log *zap.Logger) {
 	s.setRetryMaxAttempts(fetchRetryMaxAttempts(v))
 	s.setRetryMaxBackoff(fetchRetryMaxBackoff(v))
 	s.setRetryStrategy(fetchRetryStrategy(v))
+	s.updateHTTPLoggingSettings(v)
 }

 func (s *appSettings) updateNamespacesSettings(v *viper.Viper, log *zap.Logger) {
@@ -565,6 +564,17 @@ func newMaxClients(cfg *viper.Viper) maxClientsConfig {
 	return config
 }

+func (s *appSettings) updateHTTPLoggingSettings(cfg *viper.Viper) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	s.httpLogging.Enabled = cfg.GetBool(cfgHTTPLoggingEnabled)
+	s.httpLogging.MaxBody = cfg.GetInt64(cfgHTTPLoggingMaxBody)
+	s.httpLogging.MaxLogSize = cfg.GetInt(cfgHTTPLoggingMaxLogSize)
+	s.httpLogging.OutputPath = cfg.GetString(cfgHTTPLoggingDestination)
+	s.httpLogging.UseGzip = cfg.GetBool(cfgHTTPLoggingGzip)
+}
+
 func getPools(ctx context.Context, logger *zap.Logger, cfg *viper.Viper) (*pool.Pool, *treepool.Pool, *keys.PrivateKey) {
 	var prm pool.InitParameters
 	var prmTree treepool.InitParameters
@@ -686,10 +696,7 @@ func (a *App) Serve(ctx context.Context) {
 		Handler: a.api,
 		Center: a.ctr,
 		Log: a.log,
-		LogHTTP: s3middleware.LogHTTPSettings{
-			Enabled: a.settings.httpLoggingEnabled,
-			MaxBody: a.settings.maxHTTPLogBody,
-		},
+		LogHTTP: a.settings.httpLogging,
 		Metrics: a.metrics,
 		Domains: domains,
@@ -75,8 +75,12 @@ const ( // Settings.
 	cfgLoggerLevel = "logger.level"
 	cfgLoggerDestination = "logger.destination"

-	cfgLoggerRequestEnabled = "http_logging.enabled"
+	// HttpLogging.
dkirillov marked this conversation as resolved
Outdated
dkirillov commented:
We should mention these params in `config/config.env` and `docs/configuration.md`.
-	cfgLoggerRequestMaxBody = "http_logging.max_body"
+	cfgHTTPLoggingEnabled = "http_logging.enabled"
+	cfgHTTPLoggingMaxBody = "http_logging.max_body"
+	cfgHTTPLoggingMaxLogSize = "http_logging.max_log_size"
+	cfgHTTPLoggingDestination = "http_logging.destination"
+	cfgHTTPLoggingGzip = "http_logging.gzip"

 	// Wallet.
 	cfgWalletPath = "wallet.path"
@@ -719,8 +723,11 @@ func newSettings() *viper.Viper {
 	v.SetDefault(cfgLoggerDestination, "stdout")

 	// http logger
-	v.SetDefault(cfgLoggerRequestEnabled, false)
-	v.SetDefault(cfgLoggerRequestMaxBody, 1024)
+	v.SetDefault(cfgHTTPLoggingEnabled, false)
+	v.SetDefault(cfgHTTPLoggingMaxBody, 1024)
+	v.SetDefault(cfgHTTPLoggingMaxLogSize, 50)
+	v.SetDefault(cfgHTTPLoggingDestination, "stdout")
+	v.SetDefault(cfgHTTPLoggingGzip, false)

 	// pool:
 	v.SetDefault(cfgPoolErrorThreshold, defaultPoolErrorThreshold)
@@ -45,6 +45,17 @@ S3_GW_CONFIG=/path/to/config/yaml
 # Logger
 S3_GW_LOGGER_LEVEL=debug

+# HTTP logger
+S3_GW_HTTP_LOGGING_ENABLED=false
+# max body size to log
+S3_GW_HTTP_LOGGING_MAX_BODY=1024
+# max log size in Mb
+S3_GW_HTTP_LOGGING_MAX_LOG_SIZE=20
+# use log compression
+S3_GW_HTTP_LOGGING_GZIP=true
+# possible destination output values: filesystem path, url, "stdout", "stderr"
+S3_GW_HTTP_LOGGING_DESTINATION=stdout

 # RPC endpoint and order of resolving of bucket names
 S3_GW_RPC_ENDPOINT=http://morph-chain.frostfs.devenv:30333/
 S3_GW_RESOLVE_ORDER="nns dns"
@@ -52,6 +52,12 @@ http_logging:
   enabled: false
   # max body size to log
   max_body: 1024
+  # max log size in Mb
+  max_log_size: 20
+  # use log compression
+  gzip: true
+  # possible output values: filesystem path, url, "stdout", "stderr"
+  destination: stdout

 # RPC endpoint and order of resolving of bucket names
 rpc_endpoint: http://morph-chain.frostfs.devenv:30333
@@ -176,6 +176,7 @@ There are some custom types used for brevity:
 | `placement_policy` | [Placement policy configuration](#placement_policy-section) |
 | `server` | [Server configuration](#server-section) |
 | `logger` | [Logger configuration](#logger-section) |
+| `http_logging` | [HTTP Request logger configuration](#http_logging-section) |
 | `cache` | [Cache configuration](#cache-section) |
 | `cors` | [CORS configuration](#cors-section) |
 | `pprof` | [Pprof configuration](#pprof-section) |
@@ -373,6 +374,27 @@ logger:
 | `level` | `string` | yes | `debug` | Logging level.<br/>Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. |
 | `destination` | `string` | no | `stdout` | Destination for logger: `stdout` or `journald` |

+### `http_logging` section
+
+```yaml
+http_logging:
+  enabled: false
+  max_body: 1024
+  max_log_size: 20
+  gzip: true
+  destination: stdout
+```
+
+| Parameter | Type | SIGHUP reload | Default value | Description |
+|----------------|--------|---------------|---------------|-------------|
+| `enabled` | bool | yes | false | Flag to enable the logger. |
+| `max_body` | int | yes | 1024 | Max body size for log output in bytes. |
alexvanin marked this conversation as resolved
Outdated
alexvanin commented:
Is it bytes, kilobytes, megabytes?
+| `max_log_size` | int | no | 50 | Log file size threshold (in megabytes) at which the log is moved to a backup file: the initial filename is appended with a timestamp and a new empty file with the initial name is created. |
alexvanin marked this conversation as resolved
Outdated
alexvanin commented:
Where are backup files stored? Is it in the destination dir?
+| `gzip` | bool | no | false | Whether to enable Gzip compression for backup log files. |
+| `destination` | string | no | stdout | Specify path for log output. Accepts a log file path, or the "stdout" and "stderr" reserved words to print to the output streams. |
alexvanin marked this conversation as resolved
Outdated
alexvanin commented:
Do not mention URL if we didn't test it. 😃 Also, can we test it? What do we need to do that?
Also, I suppose `destination` is a path to a dir, not a file, am I right?
nzinkevich commented:
`destination` specifies a path with a filename, e.g. /var/log/http.log. During backup the filename is appended with a timestamp, and a new file with the initial name is created.
I will append this to configuration.md
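A rough illustration of the rotation behaviour described here, as a sketch against `gopkg.in/natefinch/lumberjack.v2` (the path and sizes are hypothetical):
```go
package main

import "gopkg.in/natefinch/lumberjack.v2"

func main() {
	logRoller := &lumberjack.Logger{
		Filename: "/var/log/http.log", // hypothetical destination: a path with a filename
		MaxSize:  20,                  // megabytes, corresponds to max_log_size
		Compress: true,                // corresponds to gzip: true
	}
	defer logRoller.Close()

	// Once http.log exceeds MaxSize, lumberjack renames it to something like
	// /var/log/http-2024-07-15T11-11-06.778.log (gzipped when Compress is set)
	// and keeps writing to a fresh /var/log/http.log.
	_, _ = logRoller.Write([]byte("http request log line\n"))
}
```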

 ### `cache` section

 ```yaml
go.mod
@@ -34,6 +34,7 @@ require (
 	golang.org/x/text v0.14.0
 	google.golang.org/grpc v1.59.0
 	google.golang.org/protobuf v1.33.0
+	gopkg.in/natefinch/lumberjack.v2 v2.2.1
 )

 require (
go.sum
@@ -696,6 +696,8 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
 gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
 gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -100,48 +100,47 @@ const (
 	FailedToPassAuthentication = "failed to pass authentication" // Error in ../../api/middleware/auth.go
 	FailedToResolveCID = "failed to resolve CID" // Debug in ../../api/middleware/metrics.go
 	RequestStart = "request start" // Info in ../../api/middleware/reqinfo.go
+	RequestHTTP = "http request"
dkirillov marked this conversation as resolved
Outdated
dkirillov commented:
Empty line
 	FailedToUnescapeObjectName = "failed to unescape object name" // Warn in ../../api/middleware/reqinfo.go
 	InvalidDefaultMaxAge = "invalid defaultMaxAge" // Fatal in ../../cmd/s3-gw/app_settings.go
 	CantShutDownService = "can't shut down service" // Panic in ../../cmd/s3-gw/service.go
 	CouldntGenerateRandomKey = "couldn't generate random key" // Fatal in ../../cmd/s3-gw/app.go
 	FailedToCreateResolver = "failed to create resolver" // Fatal in ../../cmd/s3-gw/app.go
 	CouldNotLoadFrostFSPrivateKey = "could not load FrostFS private key" // Fatal in ../../cmd/s3-gw/app.go
 	FailedToCreateConnectionPool = "failed to create connection pool" // Fatal in ../../cmd/s3-gw/app.go
 	FailedToDialConnectionPool = "failed to dial connection pool" // Fatal in ../../cmd/s3-gw/app.go
 	FailedToCreateTreePool = "failed to create tree pool" // Fatal in ../../cmd/s3-gw/app.go
 	FailedToDialTreePool = "failed to dial tree pool" // Fatal in ../../cmd/s3-gw/app.go
 	ListenAndServe = "listen and serve" // Fatal in ../../cmd/s3-gw/app.go
 	NoHealthyServers = "no healthy servers" // Fatal in ../../cmd/s3-gw/app.go
 	CouldNotInitializeAPIHandler = "could not initialize API handler" // Fatal in ../../cmd/s3-gw/app.go
 	RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped" // Warn in ../../cmd/s3-gw/app.go
 	RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated" // Info in ../../cmd/s3-gw/app.go
 	AnonRequestSkipFrostfsIDValidation = "anon request, skip FrostfsID validation" // Debug in ../../api/middleware/auth.go
 	FrostfsIDValidationFailed = "FrostfsID validation failed" // Error in ../../api/middleware/auth.go
 	InitFrostfsIDContractFailed = "init frostfsid contract failed" // Fatal in ../../cmd/s3-gw/app.go
 	InitPolicyContractFailed = "init policy contract failed" // Fatal in ../../cmd/s3-gw/app.go
 	PolicyValidationFailed = "policy validation failed"
 	ServerReconnecting = "reconnecting server..."
 	ServerReconnectedSuccessfully = "server reconnected successfully"
 	ServerReconnectFailed = "failed to reconnect server"
 	ParseTreeNode = "parse tree node"
 	FailedToGetRealObjectSize = "failed to get real object size"
 	CouldntDeleteObjectFromStorageContinueDeleting = "couldn't delete object from storage, continue deleting from tree"
 	CouldntPutAccessBoxIntoCache = "couldn't put accessbox into cache"
 	InvalidAccessBoxCacheRemovingCheckInterval = "invalid accessbox check removing interval, using default value"
 	CouldNotCloseRequestBody = "could not close request body"
 	BucketOwnerKeyIsMissing = "bucket owner key is missing"
 	SettingsNodeInvalidOwnerKey = "settings node: invalid owner key"
 	SuccessfulAuth = "successful auth"
 	PolicyRequest = "policy request"
 	FailedToGenerateRequestID = "failed to generate request id"
 	InvalidBucketObjectLockEnabledHeader = "invalid X-Amz-Bucket-Object-Lock-Enabled header"
 	InvalidTreeKV = "invalid tree service meta KV"
 	FailedToWriteResponse = "failed to write response"
 	WarnDuplicateAddress = "duplicate address"
 	PolicyCouldntBeConvertedToNativeRules = "policy couldn't be converted to native rules, only s3 rules be applied"
 	CouldntCacheSubject = "couldn't cache subject info"
 	UserGroupsListIsEmpty = "user groups list is empty, subject not found"
 	CouldntCacheUserKey = "couldn't cache user key"
 )
Sometimes `r.ContentLength` can be `-1`, which means unknown length. Now we will try to read the body and get a panic in `withBody` when trying to allocate the new buffer with `var body = make([]byte, r.ContentLength)`. Should we ignore such a body or log the first `settings.MaxBody` bytes anyway?
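The updated check in this diff (`r.ContentLength > 0`) simply skips bodies of unknown length. If logging the first `MaxBody` bytes were wanted instead, one option (a sketch under that assumption, reusing the imports of `log_http.go`) is to cap the read and stitch the body back together:
```go
// readBodyPrefix is a hypothetical helper: it reads at most maxBody bytes even
// when Content-Length is -1 (unknown), then restores the request body so that
// downstream handlers still see the complete payload.
func readBodyPrefix(r *http.Request, maxBody int64) ([]byte, error) {
	prefix, err := io.ReadAll(io.LimitReader(r.Body, maxBody))
	if err != nil {
		return nil, fmt.Errorf("read body: %w", err)
	}
	// Put the already-read prefix back in front of the unread remainder.
	r.Body = struct {
		io.Reader
		io.Closer
	}{io.MultiReader(bytes.NewReader(prefix), r.Body), r.Body}
	return prefix, nil
}
```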