[#369] Enhanced http requests logging #426

Merged
alexvanin merged 1 commit from nzinkevich/frostfs-s3-gw:feature/9613-enhanced_log into feature/369 2024-09-04 19:51:14 +00:00
12 changed files with 211 additions and 70 deletions

View file

@@ -106,3 +106,9 @@ const (
PartNumberQuery = "partNumber"
LegalHoldQuery = "legal-hold"
)
const (
StdoutPath = "stdout"
StderrPath = "stderr"
SinkName = "lumberjack"
)

View file

@@ -1,27 +1,85 @@
package middleware
import (
"bytes"
"fmt"
"io"
"net/http"
"net/url"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"gopkg.in/natefinch/lumberjack.v2"
)
type LogHTTPSettings struct {
Enabled bool
MaxBody int64
type LogHTTPConfig struct {
Enabled bool
MaxBody int64
MaxLogSize int
OutputPath string
UseGzip bool
}
// getFileLogger returns a file logger for HTTP requests or creates a new one if it does not exist.
func (h *LogHTTPConfig) getFileLogger() (*zap.Logger, error) {
c := newLoggerConfig()
c.OutputPaths = []string{h.OutputPath}
if h.OutputPath != StdoutPath && h.OutputPath != StderrPath && h.OutputPath != "" {
err := h.registerOutputSink()
if err != nil {
return nil, err
}
dkirillov marked this conversation as resolved Outdated

Sometimes `r.ContentLength` can be `-1`, which means the length is unknown. In that case we will try to read the body and panic in `withBody` when allocating the new buffer `var body = make([]byte, r.ContentLength)`.

Should we ignore such a body, or log the first `settings.MaxBody` bytes anyway?
c.OutputPaths[0] = SinkName + ":" + h.OutputPath
}
return c.Build()
}
// registerOutputSink registers sink for logger file output.
func (h *LogHTTPConfig) registerOutputSink() error {
logRoller := &lumberjack.Logger{
Filename: h.OutputPath,
MaxSize: h.MaxLogSize,
Compress: h.UseGzip,
}
alexvanin marked this conversation as resolved Outdated

It looks quite odd to me to see a `config` field initialized by a function that takes the config as a parameter. Either `fileLogger` should not be part of the config structure, or `getFileLogger` should be defined as a method on the config struct. Prefer [pure functions](https://en.wikipedia.org/wiki/Pure_function) when it is possible and reasonable.

I suggest doing something like:

```go
func LogHTTP() Func {
	return func(h http.Handler) http.Handler {
		fileLogger, err := getFileLogger(config)
		if err != nil {
			// log the error and skip this middleware by returning a handler
			// with a single h.ServeHTTP(w, r) call
		}
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			log := fileLogger.With(...)
			...
			h.ServeHTTP(w, r)
		})
	}
}
```
err := zap.RegisterSink(SinkName, func(_ *url.URL) (zap.Sink, error) {
return lumberjackSink{
dkirillov marked this conversation as resolved Outdated

We need to close the body after reading it.

Besides, we must preserve the initial body in the request. After reading, the body doesn't contain anything and we get errors in the handlers. For example:

```
2024-07-15T11:11:06.778+0300    error   handler/util.go:50      request failed  {"request_id": "07e96f1e-13a7-423c-91f7-b69513209ebd", "method": "PutObject", "bucket": "test", "object": "tmp", "description": "could not upload object", "user": "NbUgTSFvPmsRxmGeWpuuGeJUoRoi6PErcM", "error": "InvalidDigest: 400 => The Content-Md5 you specified is not valid.", "body close errors": [], "status": 400}
```
Logger: logRoller,
}, nil
})
if err != nil {
return err
}
return nil
}
type lumberjackSink struct {
*lumberjack.Logger
}
func (lumberjackSink) Sync() error {
return nil
}
// LogHTTP logs http parameters from s3 request.
func LogHTTP(l *zap.Logger, settings LogHTTPSettings) Func {
func LogHTTP(l *zap.Logger, config *LogHTTPConfig) Func {
fileLogger, err := config.getFileLogger()
if err != nil {
return func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
h.ServeHTTP(w, r)
})
}
}
return func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if !settings.Enabled {
if !config.Enabled {
h.ServeHTTP(w, r)
return
}
var httplog = l.With(
var httplog = fileLogger.With(
zap.String("from", r.RemoteAddr),
zap.String("URI", r.RequestURI),
zap.String("method", r.Method),
@@ -29,11 +87,11 @@ func LogHTTP(l *zap.Logger, settings LogHTTPSettings) Func {
httplog = withFieldIfExist(httplog, "query", r.URL.Query())
httplog = withFieldIfExist(httplog, "headers", r.Header)
if r.ContentLength != 0 && r.ContentLength <= settings.MaxBody {
if r.ContentLength > 0 && r.ContentLength <= config.MaxBody {
var err error
httplog, err = withBody(httplog, r)
if err != nil {
l.Error("read body error")
l.Error("read body error", zap.Error(err))
}
}
httplog.Info(logs.RequestHTTP)
@@ -42,17 +100,34 @@ func LogHTTP(l *zap.Logger, settings LogHTTPSettings) Func {
}
}
// newLoggerConfig creates new zap.Config with disabled base fields.
func newLoggerConfig() zap.Config {
c := zap.NewProductionConfig()
c.DisableCaller = true
c.DisableStacktrace = true
c.EncoderConfig.MessageKey = zapcore.OmitKey
c.EncoderConfig.LevelKey = zapcore.OmitKey
c.EncoderConfig.TimeKey = zapcore.OmitKey
c.EncoderConfig.FunctionKey = zapcore.OmitKey
return c
}
alexvanin marked this conversation as resolved Outdated

As the previous comment mentioned, try to make this function pure by returning `sinkPath` instead of modifying the config. This pattern is valid overall, but it doesn't make sense here.
// withBody reads the body and attaches it to the log output.
func withBody(httplog *zap.Logger, r *http.Request) (*zap.Logger, error) {
var body = make([]byte, r.ContentLength)
_, err := r.Body.Read(body)
if err != nil && err != io.EOF {
return nil, err
body, err := io.ReadAll(r.Body)
if err != nil {
return nil, fmt.Errorf("read body error: %w", err)
}
defer r.Body.Close()
r.Body = io.NopCloser(bytes.NewBuffer(body))
httplog = httplog.With(zap.String("body", string(body)))
return httplog, nil
}
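The review thread above about `r.ContentLength == -1` is resolved in this PR by logging the body only when `0 < r.ContentLength <= config.MaxBody`. A possible alternative, sketched below purely for illustration (the helper `withBodyCapped` and its signature are hypothetical, not part of this change), would cap the logged body at `MaxBody` bytes regardless of the declared length and then re-assemble the body for downstream handlers:

```go
package middleware

import (
	"bytes"
	"fmt"
	"io"
	"net/http"

	"go.uber.org/zap"
)

// withBodyCapped is a hypothetical variant of withBody: it reads at most maxBody
// bytes so an unknown ContentLength (-1) cannot cause an oversized allocation,
// and it re-assembles the request body so downstream handlers can read it in full.
func withBodyCapped(httplog *zap.Logger, r *http.Request, maxBody int64) (*zap.Logger, error) {
	prefix, err := io.ReadAll(io.LimitReader(r.Body, maxBody))
	if err != nil {
		return nil, fmt.Errorf("read body error: %w", err)
	}

	// Logged prefix first, then whatever part of the body has not been read yet;
	// the original Body is kept as the Closer so it is still closed as before.
	r.Body = struct {
		io.Reader
		io.Closer
	}{io.MultiReader(bytes.NewReader(prefix), r.Body), r.Body}

	return httplog.With(zap.ByteString("body", prefix)), nil
}
```

The trade-off of such a variant is that requests with an unknown length (e.g. chunked encoding) would still have a prefix of their body logged, at the cost of always touching the body stream.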
// withFieldIfExist checks whether data is not empty and attaches it to the log output.
func withFieldIfExist(log *zap.Logger, label string, data map[string][]string) *zap.Logger {
if len(data) != 0 {
log = log.With(zap.Any(label, data))

View file

@@ -109,7 +109,7 @@ type Config struct {
Handler Handler
Center s3middleware.Center
Log *zap.Logger
LogHTTP s3middleware.LogHTTPSettings
LogHTTP *s3middleware.LogHTTPConfig
Metrics *metrics.AppMetrics
MiddlewareSettings Settings
@@ -129,8 +129,12 @@ type Config struct {
func NewRouter(cfg Config) *chi.Mux {
api := chi.NewRouter()
if cfg.LogHTTP.Enabled {

If enabling this middleware is configured only once (and doesn't support SIGHUP reload), we can add it conditionally (like `s3middleware.FrostfsIDValidation`).
api.Use(s3middleware.LogHTTP(cfg.Log, cfg.LogHTTP))
}
api.Use(
s3middleware.LogHTTP(cfg.Log, cfg.LogHTTP),
s3middleware.Request(cfg.Log, cfg.MiddlewareSettings),
middleware.ThrottleWithOpts(cfg.Throttle),
middleware.Recoverer,

View file

@@ -75,6 +75,7 @@ func prepareRouter(t *testing.T, opts ...option) *routerMock {
Handler: handlerTestMock,
Center: &centerMock{t: t},
Log: logger,
LogHTTP: new(s3middleware.LogHTTPConfig),
Metrics: metrics.NewAppMetrics(metricsConfig),
MiddlewareSettings: middlewareSettings,
PolicyChecker: policyChecker,

View file

@@ -83,8 +83,7 @@ type (
appSettings struct {
logLevel zap.AtomicLevel
httpLoggingEnabled bool
maxHTTPLogBody int64
httpLogging *s3middleware.LogHTTPConfig
maxClient maxClientsConfig
defaultMaxAge int
reconnectInterval time.Duration
@@ -191,12 +190,11 @@ func (a *App) initLayer() {
func newAppSettings(log *Logger, v *viper.Viper) *appSettings {
settings := &appSettings{
logLevel: log.lvl,
httpLogging: new(s3middleware.LogHTTPConfig),
maxClient: newMaxClients(v),
defaultMaxAge: fetchDefaultMaxAge(v, log.logger),
reconnectInterval: fetchReconnectInterval(v),
frostfsidValidation: v.GetBool(cfgFrostfsIDValidationEnabled),
httpLoggingEnabled: v.GetBool(cfgLoggerRequestEnabled),
maxHTTPLogBody: v.GetInt64(cfgLoggerRequestMaxBody),
}
settings.resolveZoneList = v.GetStringSlice(cfgResolveBucketAllow)
@@ -222,6 +220,7 @@ func (s *appSettings) update(v *viper.Viper, log *zap.Logger) {
s.setRetryMaxAttempts(fetchRetryMaxAttempts(v))
s.setRetryMaxBackoff(fetchRetryMaxBackoff(v))
s.setRetryStrategy(fetchRetryStrategy(v))
s.updateHTTPLoggingSettings(v)
}
func (s *appSettings) updateNamespacesSettings(v *viper.Viper, log *zap.Logger) {
@@ -565,6 +564,17 @@ func newMaxClients(cfg *viper.Viper) maxClientsConfig {
return config
}
func (s *appSettings) updateHTTPLoggingSettings(cfg *viper.Viper) {
s.mu.Lock()
defer s.mu.Unlock()
s.httpLogging.Enabled = cfg.GetBool(cfgHTTPLoggingEnabled)
s.httpLogging.MaxBody = cfg.GetInt64(cfgHTTPLoggingMaxBody)
s.httpLogging.MaxLogSize = cfg.GetInt(cfgHTTPLoggingMaxLogSize)
s.httpLogging.OutputPath = cfg.GetString(cfgHTTPLoggingDestination)
s.httpLogging.UseGzip = cfg.GetBool(cfgHTTPLoggingGzip)
}
func getPools(ctx context.Context, logger *zap.Logger, cfg *viper.Viper) (*pool.Pool, *treepool.Pool, *keys.PrivateKey) {
var prm pool.InitParameters
var prmTree treepool.InitParameters
@@ -686,10 +696,7 @@ func (a *App) Serve(ctx context.Context) {
Handler: a.api,
Center: a.ctr,
Log: a.log,
LogHTTP: s3middleware.LogHTTPSettings{
Enabled: a.settings.httpLoggingEnabled,
MaxBody: a.settings.maxHTTPLogBody,
},
LogHTTP: a.settings.httpLogging,
Metrics: a.metrics,
Domains: domains,

View file

@@ -75,8 +75,12 @@ const ( // Settings.
cfgLoggerLevel = "logger.level"
cfgLoggerDestination = "logger.destination"
cfgLoggerRequestEnabled = "http_logging.enabled"
cfgLoggerRequestMaxBody = "http_logging.max_body"
// HttpLogging.
dkirillov marked this conversation as resolved Outdated

We should mention these params in `config/config.env` and `docs/configuration.md`.
cfgHTTPLoggingEnabled = "http_logging.enabled"
dkirillov marked this conversation as resolved Outdated

We should mention these params in `config/config.env` and `docs/configuration.md`.
cfgHTTPLoggingMaxBody = "http_logging.max_body"
cfgHTTPLoggingMaxLogSize = "http_logging.max_log_size"
cfgHTTPLoggingDestination = "http_logging.destination"
cfgHTTPLoggingGzip = "http_logging.gzip"
// Wallet.
cfgWalletPath = "wallet.path"
@@ -719,8 +723,11 @@ func newSettings() *viper.Viper {
v.SetDefault(cfgLoggerDestination, "stdout")
// http logger
v.SetDefault(cfgLoggerRequestEnabled, false)
v.SetDefault(cfgLoggerRequestMaxBody, 1024)
v.SetDefault(cfgHTTPLoggingEnabled, false)
v.SetDefault(cfgHTTPLoggingMaxBody, 1024)
v.SetDefault(cfgHTTPLoggingMaxLogSize, 50)
v.SetDefault(cfgHTTPLoggingDestination, "stdout")
v.SetDefault(cfgHTTPLoggingGzip, false)
// pool:
v.SetDefault(cfgPoolErrorThreshold, defaultPoolErrorThreshold)

View file

@@ -45,6 +45,17 @@ S3_GW_CONFIG=/path/to/config/yaml
# Logger
S3_GW_LOGGER_LEVEL=debug
# HTTP logger
S3_GW_HTTP_LOGGING_ENABLED=false
# max body size to log
S3_GW_HTTP_LOGGING_MAX_BODY=1024
# max log size in megabytes
S3_GW_HTTP_LOGGING_MAX_LOG_SIZE=20
# use log compression
S3_GW_HTTP_LOGGING_GZIP=true
# possible destination output values: filesystem path, url, "stdout", "stderr"
S3_GW_HTTP_LOGGING_DESTINATION=stdout
# RPC endpoint and order of resolving of bucket names
S3_GW_RPC_ENDPOINT=http://morph-chain.frostfs.devenv:30333/
S3_GW_RESOLVE_ORDER="nns dns"

View file

@@ -52,6 +52,12 @@ http_logging:
enabled: false
# max body size to log
max_body: 1024
# max log size in megabytes
max_log_size: 20
# use log compression
gzip: true
# possible output values: filesystem path, url, "stdout", "stderr"
destination: stdout
# RPC endpoint and order of resolving of bucket names
rpc_endpoint: http://morph-chain.frostfs.devenv:30333

View file

@@ -176,6 +176,7 @@ There are some custom types used for brevity:
| `placement_policy` | [Placement policy configuration](#placement_policy-section) |
| `server` | [Server configuration](#server-section) |
| `logger` | [Logger configuration](#logger-section) |
| `http_logging` | [HTTP Request logger configuration](#http_logging-section) |
| `cache` | [Cache configuration](#cache-section) |
| `cors` | [CORS configuration](#cors-section) |
| `pprof` | [Pprof configuration](#pprof-section) |
@@ -373,6 +374,27 @@
| `level` | `string` | yes | `debug` | Logging level.<br/>Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. |
| `destination` | `string` | no | `stdout` | Destination for logger: `stdout` or `journald` |
### `http_logging` section
```yaml
http_logging:
enabled: false
max_body: 1024
max_log_size: 20
gzip: true
destination: stdout
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|----------------|--------|---------------|---------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `enabled` | bool | yes | false | Flag to enable the logger. |
| `max_body` | int | yes | 1024 | Max body size for log output in bytes. |
alexvanin marked this conversation as resolved Outdated

Is it bytes, kilobytes, megabytes?
| `max_log_size` | int | no | 50 | Log file size threshold (in megabytes); when it is reached, the current log file is moved to a backup file whose name is the initial filename appended with a timestamp, and a new empty file with the initial name is created. |
alexvanin marked this conversation as resolved Outdated

Where are the backup files stored? Is it in the destination dir?
| `gzip` | bool | no | false | Whether to enable Gzip compression for backup log files. |
| `destination` | string | no | stdout | Path for the log output. Accepts a log file path, or the reserved words "stdout" and "stderr" to print to the standard output streams. |
alexvanin marked this conversation as resolved Outdated

Do not mention URL if we didn't test it. 😃 Also, can we test it? What do we need to do for that? Also, I suppose `destination` is a path to a dir, not a file, am I right?

`destination` specifies a path with a filename, e.g. /var/log/http.log. During backup, the filename is appended with a timestamp and a new file with the initial name is created. I will append this to configuration.md.
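For reference, here is a minimal, self-contained sketch (not taken from this PR) of how `destination`, `max_log_size`, and `gzip` map onto the `lumberjack` rotation backend used by the middleware; the field names come from `gopkg.in/natefinch/lumberjack.v2`, and the concrete values are just the documented default (50 MB) plus the example path from the discussion above:

```go
package main

import (
	"log"

	"gopkg.in/natefinch/lumberjack.v2"
)

func main() {
	// destination -> Filename, max_log_size -> MaxSize (megabytes), gzip -> Compress.
	// When http.log reaches MaxSize it is renamed with a timestamp suffix
	// (e.g. http-2024-09-04T19-51-14.000.log) and a new empty http.log is created.
	roller := &lumberjack.Logger{
		Filename: "/var/log/http.log", // example path from the discussion above
		MaxSize:  50,                  // megabytes, matches the max_log_size default
		Compress: false,               // gzip rotated backups when true
	}
	defer roller.Close()

	log.SetOutput(roller)
	log.Println("http request log entry")
}
```

lumberjack keeps the rotated backups in the same directory as `Filename`, which answers the question above about where the backup files are stored.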
### `cache` section
```yaml

go.mod
View file

@@ -34,6 +34,7 @@ require (
golang.org/x/text v0.14.0
google.golang.org/grpc v1.59.0
google.golang.org/protobuf v1.33.0
gopkg.in/natefinch/lumberjack.v2 v2.2.1
)
require (

go.sum
View file

@@ -696,6 +696,8 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

View file

@@ -100,48 +100,47 @@ const (
FailedToPassAuthentication = "failed to pass authentication" // Error in ../../api/middleware/auth.go
FailedToResolveCID = "failed to resolve CID" // Debug in ../../api/middleware/metrics.go
RequestStart = "request start" // Info in ../../api/middleware/reqinfo.go
RequestHTTP = "http request"
FailedToUnescapeObjectName = "failed to unescape object name" // Warn in ../../api/middleware/reqinfo.go
InvalidDefaultMaxAge = "invalid defaultMaxAge" // Fatal in ../../cmd/s3-gw/app_settings.go
CantShutDownService = "can't shut down service" // Panic in ../../cmd/s3-gw/service.go
CouldntGenerateRandomKey = "couldn't generate random key" // Fatal in ../../cmd/s3-gw/app.go
FailedToCreateResolver = "failed to create resolver" // Fatal in ../../cmd/s3-gw/app.go
CouldNotLoadFrostFSPrivateKey = "could not load FrostFS private key" // Fatal in ../../cmd/s3-gw/app.go
FailedToCreateConnectionPool = "failed to create connection pool" // Fatal in ../../cmd/s3-gw/app.go
FailedToDialConnectionPool = "failed to dial connection pool" // Fatal in ../../cmd/s3-gw/app.go
FailedToCreateTreePool = "failed to create tree pool" // Fatal in ../../cmd/s3-gw/app.go
FailedToDialTreePool = "failed to dial tree pool" // Fatal in ../../cmd/s3-gw/app.go
ListenAndServe = "listen and serve" // Fatal in ../../cmd/s3-gw/app.go
NoHealthyServers = "no healthy servers" // Fatal in ../../cmd/s3-gw/app.go
CouldNotInitializeAPIHandler = "could not initialize API handler" // Fatal in ../../cmd/s3-gw/app.go
RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped" // Warn in ../../cmd/s3-gw/app.go
RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated" // Info in ../../cmd/s3-gw/app.go
AnonRequestSkipFrostfsIDValidation = "anon request, skip FrostfsID validation" // Debug in ../../api/middleware/auth.go
FrostfsIDValidationFailed = "FrostfsID validation failed" // Error in ../../api/middleware/auth.go
InitFrostfsIDContractFailed = "init frostfsid contract failed" // Fatal in ../../cmd/s3-gw/app.go
InitPolicyContractFailed = "init policy contract failed" // Fatal in ../../cmd/s3-gw/app.go
PolicyValidationFailed = "policy validation failed"
ServerReconnecting = "reconnecting server..."
ServerReconnectedSuccessfully = "server reconnected successfully"
ServerReconnectFailed = "failed to reconnect server"
ParseTreeNode = "parse tree node"
FailedToGetRealObjectSize = "failed to get real object size"
CouldntDeleteObjectFromStorageContinueDeleting = "couldn't delete object from storage, continue deleting from tree"
CouldntPutAccessBoxIntoCache = "couldn't put accessbox into cache"
InvalidAccessBoxCacheRemovingCheckInterval = "invalid accessbox check removing interval, using default value"
CouldNotCloseRequestBody = "could not close request body"
BucketOwnerKeyIsMissing = "bucket owner key is missing"
SettingsNodeInvalidOwnerKey = "settings node: invalid owner key"
SuccessfulAuth = "successful auth"
PolicyRequest = "policy request"
FailedToGenerateRequestID = "failed to generate request id"
InvalidBucketObjectLockEnabledHeader = "invalid X-Amz-Bucket-Object-Lock-Enabled header"
InvalidTreeKV = "invalid tree service meta KV"
FailedToWriteResponse = "failed to write response"
WarnDuplicateAddress = "duplicate address"
PolicyCouldntBeConvertedToNativeRules = "policy couldn't be converted to native rules, only s3 rules be applied"
CouldntCacheSubject = "couldn't cache subject info"
UserGroupsListIsEmpty = "user groups list is empty, subject not found"
CouldntCacheUserKey = "couldn't cache user key"
RequestHTTP = "http request"
dkirillov marked this conversation as resolved Outdated

Empty line
FailedToUnescapeObjectName = "failed to unescape object name" // Warn in ../../api/middleware/reqinfo.go
InvalidDefaultMaxAge = "invalid defaultMaxAge" // Fatal in ../../cmd/s3-gw/app_settings.go
CantShutDownService = "can't shut down service" // Panic in ../../cmd/s3-gw/service.go
CouldntGenerateRandomKey = "couldn't generate random key" // Fatal in ../../cmd/s3-gw/app.go
FailedToCreateResolver = "failed to create resolver" // Fatal in ../../cmd/s3-gw/app.go
CouldNotLoadFrostFSPrivateKey = "could not load FrostFS private key" // Fatal in ../../cmd/s3-gw/app.go
FailedToCreateConnectionPool = "failed to create connection pool" // Fatal in ../../cmd/s3-gw/app.go
FailedToDialConnectionPool = "failed to dial connection pool" // Fatal in ../../cmd/s3-gw/app.go
FailedToCreateTreePool = "failed to create tree pool" // Fatal in ../../cmd/s3-gw/app.go
FailedToDialTreePool = "failed to dial tree pool" // Fatal in ../../cmd/s3-gw/app.go
ListenAndServe = "listen and serve" // Fatal in ../../cmd/s3-gw/app.go
NoHealthyServers = "no healthy servers" // Fatal in ../../cmd/s3-gw/app.go
CouldNotInitializeAPIHandler = "could not initialize API handler" // Fatal in ../../cmd/s3-gw/app.go
RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped" // Warn in ../../cmd/s3-gw/app.go
RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated" // Info in ../../cmd/s3-gw/app.go
AnonRequestSkipFrostfsIDValidation = "anon request, skip FrostfsID validation" // Debug in ../../api/middleware/auth.go
FrostfsIDValidationFailed = "FrostfsID validation failed" // Error in ../../api/middleware/auth.go
InitFrostfsIDContractFailed = "init frostfsid contract failed" // Fatal in ../../cmd/s3-gw/app.go
InitPolicyContractFailed = "init policy contract failed" // Fatal in ../../cmd/s3-gw/app.go
PolicyValidationFailed = "policy validation failed"
ServerReconnecting = "reconnecting server..."
ServerReconnectedSuccessfully = "server reconnected successfully"
ServerReconnectFailed = "failed to reconnect server"
ParseTreeNode = "parse tree node"
FailedToGetRealObjectSize = "failed to get real object size"
CouldntDeleteObjectFromStorageContinueDeleting = "couldn't delete object from storage, continue deleting from tree"
CouldntPutAccessBoxIntoCache = "couldn't put accessbox into cache"
InvalidAccessBoxCacheRemovingCheckInterval = "invalid accessbox check removing interval, using default value"
CouldNotCloseRequestBody = "could not close request body"
BucketOwnerKeyIsMissing = "bucket owner key is missing"
SettingsNodeInvalidOwnerKey = "settings node: invalid owner key"
SuccessfulAuth = "successful auth"
PolicyRequest = "policy request"
FailedToGenerateRequestID = "failed to generate request id"
InvalidBucketObjectLockEnabledHeader = "invalid X-Amz-Bucket-Object-Lock-Enabled header"
InvalidTreeKV = "invalid tree service meta KV"
FailedToWriteResponse = "failed to write response"
WarnDuplicateAddress = "duplicate address"
PolicyCouldntBeConvertedToNativeRules = "policy couldn't be converted to native rules, only s3 rules be applied"
CouldntCacheSubject = "couldn't cache subject info"
UserGroupsListIsEmpty = "user groups list is empty, subject not found"
CouldntCacheUserKey = "couldn't cache user key"
)