Compare commits
No commits in common. "master" and "master" have entirely different histories.
50 changed files with 839 additions and 2844 deletions
@@ -1,4 +1,4 @@
-FROM golang:1.24-alpine AS basebuilder
+FROM golang:1.22-alpine AS basebuilder
 RUN apk add --update make bash ca-certificates

 FROM basebuilder AS builder

@@ -10,7 +10,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        go_versions: [ '1.23', '1.24' ]
+        go_versions: [ '1.22', '1.23' ]
       fail-fast: false
     steps:
       - uses: actions/checkout@v3

@@ -14,7 +14,7 @@ jobs:
       - name: Set up Go
        uses: actions/setup-go@v3
        with:
-          go-version: '1.24'
+          go-version: '1.23'
          cache: true

      - name: Install linters

@@ -28,7 +28,7 @@ jobs:
    runs-on: ubuntu-latest
    strategy:
      matrix:
-        go_versions: [ '1.23', '1.24' ]
+        go_versions: [ '1.22', '1.23' ]
      fail-fast: false
    steps:
      - uses: actions/checkout@v3

@@ -53,7 +53,7 @@ jobs:
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
-          go-version: '1.24'
+          go-version: '1.23'

      - name: Run integration tests
        run: |-

@@ -16,8 +16,7 @@ jobs:
      - name: Setup Go
        uses: actions/setup-go@v3
        with:
-          go-version: '1.23'
-          check-latest: true
+          go-version: '1.22.11'

      - name: Install govulncheck
        run: go install golang.org/x/vuln/cmd/govulncheck@latest

@@ -22,6 +22,9 @@ linters-settings:
    # 'default' case is present, even if all enum members aren't listed in the
    # switch
    default-signifies-exhaustive: true
+  govet:
+    # report about shadowed variables
+    check-shadowing: false
  custom:
    truecloudlab-linters:
      path: bin/external_linters.so

CHANGELOG.md

@@ -4,17 +4,8 @@ This document outlines major changes between releases.
 ## [Unreleased]

-- Update Go to 1.23 (#228)
-
 ### Added
 - Add handling quota limit reached error (#187)
-- Add new format of tag names config
-
-## [0.32.3] - 2025-02-05
-
-### Added
-- Add slash clipping for FileName attribute (#174)
+- Add slash clipping for FileName attribute (#174)

 ## [0.32.2] - 2025-02-03

@@ -208,5 +199,4 @@ To see CHANGELOG for older versions, refer to https://github.com/nspcc-dev/neofs
 [0.32.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.31.0...v0.32.0
 [0.32.1]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.32.0...v0.32.1
 [0.32.2]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.32.1...v0.32.2
-[0.32.3]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.32.2...v0.32.3
-[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.32.3...master
+[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.32.2...master

Makefile

@@ -2,9 +2,9 @@

 REPO ?= $(shell go list -m)
 VERSION ?= $(shell git describe --tags --match "v*" --dirty --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
-GO_VERSION ?= 1.23
-LINT_VERSION ?= 1.64.8
-TRUECLOUDLAB_LINT_VERSION ?= 0.0.10
+GO_VERSION ?= 1.22
+LINT_VERSION ?= 1.60.3
+TRUECLOUDLAB_LINT_VERSION ?= 0.0.6
 BUILD ?= $(shell date -u --iso=seconds)

 HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs-http-gw

@@ -30,10 +30,9 @@ PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \
 	sed "s/-/~/")-${OS_RELEASE}
 .PHONY: debpackage debclean

-FUZZING_DIR = $(shell pwd)/tests/fuzzing/files
-NGFUZZ_REPO = https://gitflic.ru/project/yadro/ngfuzz.git
+FUZZ_NGFUZZ_DIR ?= ""
 FUZZ_TIMEOUT ?= 30
-FUZZ_FUNCTIONS ?= ""
+FUZZ_FUNCTIONS ?= "all"
 FUZZ_AUX ?= ""

 # Make all binaries

@@ -100,22 +99,18 @@ check-ngfuzz:
 		exit 1; \
 	fi

-.PHONY: install-ngfuzz
-install-ngfuzz:
-ifeq (,$(wildcard $(FUZZING_DIR)/ngfuzz))
-	@rm -rf $(FUZZING_DIR)/ngfuzz
-	@git clone $(NGFUZZ_REPO) $(FUZZING_DIR)/ngfuzz
-	@cd $(FUZZING_DIR)/ngfuzz && make
-endif
+.PHONY: install-fuzzing-deps
+install-fuzzing-deps: check-clang check-ngfuzz
+
 .PHONY: fuzz
-fuzz: check-clang install-ngfuzz
+fuzz: install-fuzzing-deps
 	@START_PATH=$$(pwd); \
-	ROOT_PATH=$$(realpath --relative-to=$(FUZZING_DIR)/ngfuzz $$START_PATH) ; \
-	cd $(FUZZING_DIR)/ngfuzz && \
-	./bin/ngfuzz clean && \
-	env CGO_ENABLED=1 ./bin/ngfuzz fuzz --funcs $(FUZZ_FUNCTIONS) --rootdir $$ROOT_PATH --timeout $(FUZZ_TIMEOUT) $(FUZZ_AUX) && \
-	./bin/ngfuzz coverage --rootdir $$ROOT_PATH
+	ROOT_PATH=$$(realpath --relative-to=$(FUZZ_NGFUZZ_DIR) $$START_PATH) ; \
+	cd $(FUZZ_NGFUZZ_DIR) && \
+	./ngfuzz -clean && \
+	./ngfuzz -fuzz $(FUZZ_FUNCTIONS) -rootdir $$ROOT_PATH -timeout $(FUZZ_TIMEOUT) $(FUZZ_AUX) && \
+	./ngfuzz -report


 # Reformat code
 fmt:

@@ -155,7 +150,7 @@ dirty-image:
 	@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
 	@rm -rf $(TMP_DIR)/linters
 	@rmdir $(TMP_DIR) 2>/dev/null || true
-	@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
+	@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)

 # Run linters
 lint:

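For orientation, the entry points that either variant of the `fuzz` target hands to ngfuzz are, at bottom, ordinary Go fuzz functions selected by FUZZ_FUNCTIONS. The sketch below is illustrative only: `clipFileName` is a hypothetical stand-in rather than a function from this repository, and it assumes the harness drives standard `Fuzz*` targets of this shape.

package handler

import "testing"

// clipFileName is a hypothetical helper standing in for whatever function a
// real fuzz target in this repository would exercise.
func clipFileName(s string) string {
	for len(s) > 0 && s[0] == '/' {
		s = s[1:]
	}
	return s
}

// FuzzClipFileName is an ordinary Go fuzz target; a fuzzing harness (or
// `go test -fuzz`) discovers functions with this shape and feeds them inputs.
func FuzzClipFileName(f *testing.F) {
	f.Add("/foo/bar")
	f.Fuzz(func(t *testing.T, s string) {
		out := clipFileName(s)
		if len(out) > len(s) {
			t.Fatalf("clipped name %q longer than input %q", out, s)
		}
	})
}
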
VERSION

@@ -1 +1 @@
-v0.32.3
+v0.32.2

@ -17,7 +17,6 @@ import (
|
|||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
||||
|
@ -31,7 +30,6 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
|
||||
v2container "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
||||
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||
|
@ -46,7 +44,6 @@ import (
|
|||
"github.com/valyala/fasthttp"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zapcore"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
|
@ -54,6 +51,7 @@ type (
|
|||
app struct {
|
||||
ctx context.Context
|
||||
log *zap.Logger
|
||||
logLevel zap.AtomicLevel
|
||||
pool *pool.Pool
|
||||
treePool *treepool.Pool
|
||||
key *keys.PrivateKey
|
||||
|
@ -67,8 +65,6 @@ type (
|
|||
settings *appSettings
|
||||
loggerSettings *loggerSettings
|
||||
bucketCache *cache.BucketCache
|
||||
handle *handler.Handler
|
||||
corsCnrID cid.ID
|
||||
|
||||
servers []Server
|
||||
unbindServers []ServerInfo
|
||||
|
@ -98,7 +94,6 @@ type (
|
|||
reconnectInterval time.Duration
|
||||
dialerSource *internalnet.DialerSource
|
||||
workerPoolSize int
|
||||
logLevelConfig *logLevelConfig
|
||||
|
||||
mu sync.RWMutex
|
||||
defaultTimestamp bool
|
||||
|
@ -109,122 +104,33 @@ type (
|
|||
bufferMaxSizeForPut uint64
|
||||
namespaceHeader string
|
||||
defaultNamespaces []string
|
||||
cors *data.CORSRule
|
||||
corsAllowOrigin string
|
||||
corsAllowMethods []string
|
||||
corsAllowHeaders []string
|
||||
corsExposeHeaders []string
|
||||
corsAllowCredentials bool
|
||||
corsMaxAge int
|
||||
enableFilepathFallback bool
|
||||
}
|
||||
|
||||
tagsConfig struct {
|
||||
tagLogs sync.Map
|
||||
defaultLvl zap.AtomicLevel
|
||||
}
|
||||
|
||||
logLevelConfig struct {
|
||||
logLevel zap.AtomicLevel
|
||||
tagsConfig *tagsConfig
|
||||
CORS struct {
|
||||
AllowOrigin string
|
||||
AllowMethods []string
|
||||
AllowHeaders []string
|
||||
ExposeHeaders []string
|
||||
AllowCredentials bool
|
||||
MaxAge int
|
||||
}
|
||||
)
|
||||
|
||||
func newLogLevel(v *viper.Viper) zap.AtomicLevel {
|
||||
ll, err := getLogLevel(v)
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
atomicLogLevel := zap.NewAtomicLevel()
|
||||
atomicLogLevel.SetLevel(ll)
|
||||
return atomicLogLevel
|
||||
}
|
||||
|
||||
func newTagsConfig(v *viper.Viper, ll zapcore.Level) *tagsConfig {
|
||||
t := tagsConfig{defaultLvl: zap.NewAtomicLevelAt(ll)}
|
||||
if err := t.update(v, ll); err != nil {
|
||||
// panic here is analogue of the similar panic during common log level initialization.
|
||||
panic(err.Error())
|
||||
}
|
||||
|
||||
return &t
|
||||
}
|
||||
|
||||
func newLogLevelConfig(lvl zap.AtomicLevel, tagsConfig *tagsConfig) *logLevelConfig {
|
||||
cfg := &logLevelConfig{
|
||||
logLevel: lvl,
|
||||
tagsConfig: tagsConfig,
|
||||
}
|
||||
|
||||
cfg.setMinLogLevel()
|
||||
|
||||
return cfg
|
||||
}
|
||||
|
||||
func (l *logLevelConfig) setMinLogLevel() {
|
||||
l.tagsConfig.tagLogs.Range(func(_, value any) bool {
|
||||
v := value.(zapcore.Level)
|
||||
if v < l.logLevel.Level() {
|
||||
l.logLevel.SetLevel(v)
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
func (l *logLevelConfig) update(cfg *viper.Viper, log *zap.Logger) {
|
||||
if lvl, err := getLogLevel(cfg); err != nil {
|
||||
log.Warn(logs.LogLevelWontBeUpdated, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
} else {
|
||||
l.logLevel.SetLevel(lvl)
|
||||
}
|
||||
|
||||
if err := l.tagsConfig.update(cfg, l.logLevel.Level()); err != nil {
|
||||
log.Warn(logs.TagsLogConfigWontBeUpdated, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
}
|
||||
|
||||
l.setMinLogLevel()
|
||||
}
|
||||
|
||||
func (t *tagsConfig) LevelEnabled(tag string, tgtLevel zapcore.Level) bool {
|
||||
lvl, ok := t.tagLogs.Load(tag)
|
||||
if !ok {
|
||||
return t.defaultLvl.Enabled(tgtLevel)
|
||||
}
|
||||
|
||||
return lvl.(zapcore.Level).Enabled(tgtLevel)
|
||||
}
|
||||
|
||||
func (t *tagsConfig) DefaultEnabled(lvl zapcore.Level) bool {
|
||||
return t.defaultLvl.Enabled(lvl)
|
||||
}
|
||||
|
||||
func (t *tagsConfig) update(cfg *viper.Viper, ll zapcore.Level) error {
|
||||
tags, err := fetchLogTagsConfig(cfg, ll)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
t.tagLogs.Range(func(key, _ any) bool {
|
||||
k := key.(string)
|
||||
|
||||
if _, ok := tags[k]; !ok {
|
||||
t.tagLogs.Delete(key)
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
for k, v := range tags {
|
||||
t.tagLogs.Store(k, v)
|
||||
}
|
||||
t.defaultLvl.SetLevel(ll)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func newApp(ctx context.Context, cfg *appCfg) App {
|
||||
logSettings := &loggerSettings{}
|
||||
logLevel := newLogLevel(cfg.config())
|
||||
tagConfig := newTagsConfig(cfg.config(), logLevel.Level())
|
||||
logConfig := newLogLevelConfig(logLevel, tagConfig)
|
||||
log := pickLogger(cfg.config(), logConfig.logLevel, logSettings, tagConfig)
|
||||
log := pickLogger(cfg.config(), logSettings)
|
||||
|
||||
a := &app{
|
||||
ctx: ctx,
|
||||
log: log.logger,
|
||||
logLevel: log.lvl,
|
||||
cfg: cfg,
|
||||
loggerSettings: logSettings,
|
||||
webServer: new(fasthttp.Server),
|
||||
|
@ -232,7 +138,7 @@ func newApp(ctx context.Context, cfg *appCfg) App {
|
|||
bucketCache: cache.NewBucketCache(getBucketCacheOptions(cfg.config(), log.logger), cfg.config().GetBool(cfgFeaturesTreePoolNetmapSupport)),
|
||||
}
|
||||
|
||||
a.initAppSettings(logConfig)
|
||||
a.initAppSettings()
|
||||
|
||||
// -- setup FastHTTP server --
|
||||
a.webServer.Name = "frost-http-gw"
|
||||
|
@ -258,7 +164,6 @@ func newApp(ctx context.Context, cfg *appCfg) App {
|
|||
a.initResolver()
|
||||
a.initMetrics()
|
||||
a.initTracing(ctx)
|
||||
a.initContainers(ctx)
|
||||
|
||||
return a
|
||||
}
|
||||
|
@ -267,20 +172,11 @@ func (a *app) config() *viper.Viper {
|
|||
return a.cfg.config()
|
||||
}
|
||||
|
||||
func (a *app) initContainers(ctx context.Context) {
|
||||
corsCnrID, err := a.fetchContainerID(ctx, cfgContainersCORS)
|
||||
if err != nil {
|
||||
a.log.Fatal(logs.CouldNotFetchCORSContainerInfo, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
}
|
||||
a.corsCnrID = *corsCnrID
|
||||
}
|
||||
|
||||
func (a *app) initAppSettings(lc *logLevelConfig) {
|
||||
func (a *app) initAppSettings() {
|
||||
a.settings = &appSettings{
|
||||
reconnectInterval: fetchReconnectInterval(a.config()),
|
||||
dialerSource: getDialerSource(a.log, a.config()),
|
||||
workerPoolSize: a.config().GetInt(cfgWorkerPoolSize),
|
||||
logLevelConfig: lc,
|
||||
}
|
||||
a.settings.update(a.config(), a.log)
|
||||
}
|
||||
|
@ -294,7 +190,12 @@ func (s *appSettings) update(v *viper.Viper, l *zap.Logger) {
|
|||
namespaceHeader := v.GetString(cfgResolveNamespaceHeader)
|
||||
defaultNamespaces := fetchDefaultNamespaces(v)
|
||||
indexPage, indexEnabled := fetchIndexPageTemplate(v, l)
|
||||
cors := fetchCORSConfig(v)
|
||||
corsAllowOrigin := v.GetString(cfgCORSAllowOrigin)
|
||||
corsAllowMethods := v.GetStringSlice(cfgCORSAllowMethods)
|
||||
corsAllowHeaders := v.GetStringSlice(cfgCORSAllowHeaders)
|
||||
corsExposeHeaders := v.GetStringSlice(cfgCORSExposeHeaders)
|
||||
corsAllowCredentials := v.GetBool(cfgCORSAllowCredentials)
|
||||
corsMaxAge := fetchCORSMaxAge(v)
|
||||
enableFilepathFallback := v.GetBool(cfgFeaturesEnableFilepathFallback)
|
||||
|
||||
s.mu.Lock()
|
||||
|
@ -309,7 +210,12 @@ func (s *appSettings) update(v *viper.Viper, l *zap.Logger) {
|
|||
s.defaultNamespaces = defaultNamespaces
|
||||
s.returnIndexPage = indexEnabled
|
||||
s.indexPageTemplate = indexPage
|
||||
s.cors = cors
|
||||
s.corsAllowOrigin = corsAllowOrigin
|
||||
s.corsAllowMethods = corsAllowMethods
|
||||
s.corsAllowHeaders = corsAllowHeaders
|
||||
s.corsExposeHeaders = corsExposeHeaders
|
||||
s.corsAllowCredentials = corsAllowCredentials
|
||||
s.corsMaxAge = corsMaxAge
|
||||
s.enableFilepathFallback = enableFilepathFallback
|
||||
}
|
||||
|
||||
|
@@ -356,33 +262,26 @@ func (s *appSettings) IndexPageTemplate() string {
 	return s.indexPageTemplate
 }

-func (s *appSettings) CORS() *data.CORSRule {
+func (s *appSettings) CORS() CORS {
 	s.mu.RLock()
 	defer s.mu.RUnlock()

-	if s.cors == nil {
-		return nil
-	}
-
-	allowMethods := make([]string, len(s.cors.AllowedMethods))
-	copy(allowMethods, s.cors.AllowedMethods)
-
-	allowHeaders := make([]string, len(s.cors.AllowedHeaders))
-	copy(allowHeaders, s.cors.AllowedHeaders)
-
-	exposeHeaders := make([]string, len(s.cors.ExposeHeaders))
-	copy(exposeHeaders, s.cors.ExposeHeaders)
-
-	allowOrigins := make([]string, len(s.cors.AllowedOrigins))
-	copy(allowOrigins, s.cors.AllowedOrigins)
-
-	return &data.CORSRule{
-		AllowedOrigins:     allowOrigins,
-		AllowedMethods:     allowMethods,
-		AllowedHeaders:     allowHeaders,
-		ExposeHeaders:      exposeHeaders,
-		AllowedCredentials: s.cors.AllowedCredentials,
-		MaxAgeSeconds:      s.cors.MaxAgeSeconds,
+	allowMethods := make([]string, len(s.corsAllowMethods))
+	copy(allowMethods, s.corsAllowMethods)
+
+	allowHeaders := make([]string, len(s.corsAllowHeaders))
+	copy(allowHeaders, s.corsAllowHeaders)
+
+	exposeHeaders := make([]string, len(s.corsExposeHeaders))
+	copy(exposeHeaders, s.corsExposeHeaders)
+
+	return CORS{
+		AllowOrigin:      s.corsAllowOrigin,
+		AllowMethods:     allowMethods,
+		AllowHeaders:     allowHeaders,
+		ExposeHeaders:    exposeHeaders,
+		AllowCredentials: s.corsAllowCredentials,
+		MaxAge:           s.corsMaxAge,
 	}
 }

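Both variants of CORS() copy the header slices before returning them because the settings struct is rewritten on SIGHUP reload while request handlers keep reading it. A minimal sketch of that defensive-copy pattern, independent of the gateway's own types:

package main

import (
	"fmt"
	"sync"
)

// settings mimics the appSettings pattern: a mutex-guarded slice that a
// config reload may replace while request handlers are reading it.
type settings struct {
	mu      sync.RWMutex
	methods []string
}

// Methods returns a defensive copy so callers never alias the slice that
// update() is about to overwrite.
func (s *settings) Methods() []string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	out := make([]string, len(s.methods))
	copy(out, s.methods)
	return out
}

func (s *settings) update(m []string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.methods = m
}

func main() {
	s := &settings{methods: []string{"GET", "HEAD"}}
	snapshot := s.Methods()
	s.update([]string{"GET", "HEAD", "OPTIONS"})
	fmt.Println(snapshot, s.Methods()) // [GET HEAD] [GET HEAD OPTIONS]
}
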
@@ -404,15 +303,15 @@ func (s *appSettings) NamespaceHeader() string {
 	return s.namespaceHeader
 }

-func (s *appSettings) FormContainerZone(ns string) string {
+func (s *appSettings) FormContainerZone(ns string) (zone string, isDefault bool) {
 	s.mu.RLock()
 	namespaces := s.defaultNamespaces
 	s.mu.RUnlock()
 	if slices.Contains(namespaces, ns) {
-		return v2container.SysAttributeZoneDefault
+		return v2container.SysAttributeZoneDefault, true
 	}

-	return ns + ".ns"
+	return ns + ".ns", false
 }

 func (s *appSettings) EnableFilepathFallback() bool {

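Both variants map a request namespace to a container zone the same way: namespaces listed in resolve_bucket.default_namespaces resolve to the system default zone, everything else to `<namespace>.ns`. The stand-alone sketch below mirrors that logic; the literal "container" and the example default namespaces are assumptions for illustration, not values taken from this diff:

package main

import (
	"fmt"
	"slices"
)

// formContainerZone mirrors the logic of both variants above; "container"
// stands in for v2container.SysAttributeZoneDefault and is assumed here.
func formContainerZone(ns string, defaultNamespaces []string) (zone string, isDefault bool) {
	if slices.Contains(defaultNamespaces, ns) {
		return "container", true
	}
	return ns + ".ns", false
}

func main() {
	defaults := []string{"", "root"} // illustrative default namespaces
	for _, ns := range []string{"", "root", "tenant1"} {
		zone, isDefault := formContainerZone(ns, defaults)
		fmt.Printf("ns=%q -> zone=%q (default=%v)\n", ns, zone, isDefault)
	}
}
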
@ -425,7 +324,7 @@ func (a *app) initResolver() {
|
|||
var err error
|
||||
a.resolver, err = resolver.NewContainerResolver(a.getResolverConfig())
|
||||
if err != nil {
|
||||
a.log.Fatal(logs.FailedToCreateResolver, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
a.log.Fatal(logs.FailedToCreateResolver, zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -433,17 +332,17 @@ func (a *app) getResolverConfig() ([]string, *resolver.Config) {
|
|||
resolveCfg := &resolver.Config{
|
||||
FrostFS: frostfs.NewResolverFrostFS(a.pool),
|
||||
RPCAddress: a.config().GetString(cfgRPCEndpoint),
|
||||
Settings: a.settings,
|
||||
}
|
||||
|
||||
order := a.config().GetStringSlice(cfgResolveOrder)
|
||||
if resolveCfg.RPCAddress == "" {
|
||||
order = remove(order, resolver.NNSResolver)
|
||||
a.log.Warn(logs.ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided, logs.TagField(logs.TagApp))
|
||||
a.log.Warn(logs.ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided)
|
||||
}
|
||||
|
||||
if len(order) == 0 {
|
||||
a.log.Info(logs.ContainerResolverWillBeDisabledBecauseOfResolversResolverOrderIsEmpty,
|
||||
logs.TagField(logs.TagApp))
|
||||
a.log.Info(logs.ContainerResolverWillBeDisabledBecauseOfResolversResolverOrderIsEmpty)
|
||||
}
|
||||
|
||||
return order, resolveCfg
|
||||
|
@ -458,7 +357,7 @@ func (a *app) initMetrics() {
|
|||
|
||||
func newGateMetrics(logger *zap.Logger, provider *metrics.GateMetrics, enabled bool) *gateMetrics {
|
||||
if !enabled {
|
||||
logger.Warn(logs.MetricsAreDisabled, logs.TagField(logs.TagApp))
|
||||
logger.Warn(logs.MetricsAreDisabled)
|
||||
}
|
||||
return &gateMetrics{
|
||||
logger: logger,
|
||||
|
@ -476,7 +375,7 @@ func (m *gateMetrics) isEnabled() bool {
|
|||
|
||||
func (m *gateMetrics) SetEnabled(enabled bool) {
|
||||
if !enabled {
|
||||
m.logger.Warn(logs.MetricsAreDisabled, logs.TagField(logs.TagApp))
|
||||
m.logger.Warn(logs.MetricsAreDisabled)
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
|
@ -539,7 +438,7 @@ func getFrostFSKey(cfg *viper.Viper, log *zap.Logger) (*keys.PrivateKey, error)
|
|||
walletPath := cfg.GetString(cfgWalletPath)
|
||||
|
||||
if len(walletPath) == 0 {
|
||||
log.Info(logs.NoWalletPathSpecifiedCreatingEphemeralKeyAutomaticallyForThisRun, logs.TagField(logs.TagApp))
|
||||
log.Info(logs.NoWalletPathSpecifiedCreatingEphemeralKeyAutomaticallyForThisRun)
|
||||
key, err := keys.NewPrivateKey()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -596,10 +495,7 @@ func getKeyFromWallet(w *wallet.Wallet, addrStr string, password *string) (*keys
|
|||
}
|
||||
|
||||
func (a *app) Wait() {
|
||||
a.log.Info(logs.StartingApplication,
|
||||
zap.String("app_name", "frostfs-http-gw"),
|
||||
zap.String("version", Version),
|
||||
logs.TagField(logs.TagApp))
|
||||
a.log.Info(logs.StartingApplication, zap.String("app_name", "frostfs-http-gw"), zap.String("version", Version))
|
||||
|
||||
a.metrics.SetVersion(Version)
|
||||
a.setHealthStatus()
|
||||
|
@ -618,8 +514,10 @@ func (a *app) Serve() {
|
|||
close(a.webDone)
|
||||
}()
|
||||
|
||||
handle := handler.New(a.AppParams(), a.settings, tree.NewTree(frostfs.NewPoolWrapper(a.treePool)), workerPool)
|
||||
|
||||
// Configure router.
|
||||
a.configureRouter(workerPool)
|
||||
a.configureRouter(handle)
|
||||
|
||||
a.startServices()
|
||||
a.initServers(a.ctx)
|
||||
|
@ -628,10 +526,10 @@ func (a *app) Serve() {
|
|||
|
||||
for i := range servs {
|
||||
go func(i int) {
|
||||
a.log.Info(logs.StartingServer, zap.String("address", servs[i].Address()), logs.TagField(logs.TagApp))
|
||||
a.log.Info(logs.StartingServer, zap.String("address", servs[i].Address()))
|
||||
if err := a.webServer.Serve(servs[i].Listener()); err != nil && err != http.ErrServerClosed {
|
||||
a.metrics.MarkUnhealthy(servs[i].Address())
|
||||
a.log.Fatal(logs.ListenAndServe, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
a.log.Fatal(logs.ListenAndServe, zap.Error(err))
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
@ -653,7 +551,7 @@ LOOP:
|
|||
}
|
||||
}
|
||||
|
||||
a.log.Info(logs.ShuttingDownWebServer, zap.Error(a.webServer.Shutdown()), logs.TagField(logs.TagApp))
|
||||
a.log.Info(logs.ShuttingDownWebServer, zap.Error(a.webServer.Shutdown()))
|
||||
|
||||
a.metrics.Shutdown()
|
||||
a.stopServices()
|
||||
|
@ -663,7 +561,7 @@ LOOP:
|
|||
func (a *app) initWorkerPool() *ants.Pool {
|
||||
workerPool, err := ants.NewPool(a.settings.workerPoolSize)
|
||||
if err != nil {
|
||||
a.log.Fatal(logs.FailedToCreateWorkerPool, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
a.log.Fatal(logs.FailedToCreateWorkerPool, zap.Error(err))
|
||||
}
|
||||
return workerPool
|
||||
}
|
||||
|
@ -674,33 +572,37 @@ func (a *app) shutdownTracing() {
|
|||
defer cancel()
|
||||
|
||||
if err := tracing.Shutdown(shdnCtx); err != nil {
|
||||
a.log.Warn(logs.FailedToShutdownTracing, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
a.log.Warn(logs.FailedToShutdownTracing, zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
func (a *app) configReload(ctx context.Context) {
|
||||
a.log.Info(logs.SIGHUPConfigReloadStarted, logs.TagField(logs.TagApp))
|
||||
a.log.Info(logs.SIGHUPConfigReloadStarted)
|
||||
if !a.config().IsSet(cmdConfig) && !a.config().IsSet(cmdConfigDir) {
|
||||
a.log.Warn(logs.FailedToReloadConfigBecauseItsMissed, logs.TagField(logs.TagApp))
|
||||
a.log.Warn(logs.FailedToReloadConfigBecauseItsMissed)
|
||||
return
|
||||
}
|
||||
if err := a.cfg.reload(); err != nil {
|
||||
a.log.Warn(logs.FailedToReloadConfig, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
a.log.Warn(logs.FailedToReloadConfig, zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
a.settings.logLevelConfig.update(a.cfg.config(), a.log)
|
||||
if lvl, err := getLogLevel(a.config()); err != nil {
|
||||
a.log.Warn(logs.LogLevelWontBeUpdated, zap.Error(err))
|
||||
} else {
|
||||
a.logLevel.SetLevel(lvl)
|
||||
}
|
||||
|
||||
if err := a.settings.dialerSource.Update(fetchMultinetConfig(a.config(), a.log)); err != nil {
|
||||
a.log.Warn(logs.MultinetConfigWontBeUpdated, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
a.log.Warn(logs.MultinetConfigWontBeUpdated, zap.Error(err))
|
||||
}
|
||||
|
||||
if err := a.resolver.UpdateResolvers(a.getResolverConfig()); err != nil {
|
||||
a.log.Warn(logs.FailedToUpdateResolvers, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
a.log.Warn(logs.FailedToUpdateResolvers, zap.Error(err))
|
||||
}
|
||||
|
||||
if err := a.updateServers(); err != nil {
|
||||
a.log.Warn(logs.FailedToReloadServerParameters, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
a.log.Warn(logs.FailedToReloadServerParameters, zap.Error(err))
|
||||
}
|
||||
|
||||
a.setRuntimeParameters()
|
||||
|
@ -714,7 +616,7 @@ func (a *app) configReload(ctx context.Context) {
|
|||
a.initTracing(ctx)
|
||||
a.setHealthStatus()
|
||||
|
||||
a.log.Info(logs.SIGHUPConfigReloadCompleted, logs.TagField(logs.TagApp))
|
||||
a.log.Info(logs.SIGHUPConfigReloadCompleted)
|
||||
}
|
||||
|
||||
func (a *app) startServices() {
|
||||
|
@ -740,34 +642,32 @@ func (a *app) stopServices() {
|
|||
}
|
||||
}
|
||||
|
||||
func (a *app) configureRouter(workerPool *ants.Pool) {
|
||||
a.handle = handler.New(a.AppParams(), a.settings, tree.NewTree(frostfs.NewPoolWrapper(a.treePool), a.log), workerPool)
|
||||
|
||||
func (a *app) configureRouter(h *handler.Handler) {
|
||||
r := router.New()
|
||||
r.RedirectTrailingSlash = true
|
||||
r.NotFound = func(r *fasthttp.RequestCtx) {
|
||||
handler.ResponseError(r, "Route Not found", fasthttp.StatusNotFound)
|
||||
handler.ResponseError(r, "Not found", fasthttp.StatusNotFound)
|
||||
}
|
||||
r.MethodNotAllowed = func(r *fasthttp.RequestCtx) {
|
||||
handler.ResponseError(r, "Method Not Allowed", fasthttp.StatusMethodNotAllowed)
|
||||
}
|
||||
|
||||
r.POST("/upload/{cid}", a.addMiddlewares(a.handle.Upload))
|
||||
r.OPTIONS("/upload/{cid}", a.addPreflight(a.handle.Preflight))
|
||||
a.log.Info(logs.AddedPathUploadCid, logs.TagField(logs.TagApp))
|
||||
r.GET("/get/{cid}/{oid:*}", a.addMiddlewares(a.handle.DownloadByAddressOrBucketName))
|
||||
r.HEAD("/get/{cid}/{oid:*}", a.addMiddlewares(a.handle.HeadByAddressOrBucketName))
|
||||
r.OPTIONS("/get/{cid}/{oid:*}", a.addPreflight(a.handle.Preflight))
|
||||
a.log.Info(logs.AddedPathGetCidOid, logs.TagField(logs.TagApp))
|
||||
r.GET("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(a.handle.DownloadByAttribute))
|
||||
r.HEAD("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(a.handle.HeadByAttribute))
|
||||
r.OPTIONS("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addPreflight(a.handle.Preflight))
|
||||
a.log.Info(logs.AddedPathGetByAttributeCidAttrKeyAttrVal, logs.TagField(logs.TagApp))
|
||||
r.GET("/zip/{cid}/{prefix:*}", a.addMiddlewares(a.handle.DownloadZip))
|
||||
r.OPTIONS("/zip/{cid}/{prefix:*}", a.addPreflight(a.handle.Preflight))
|
||||
r.GET("/tar/{cid}/{prefix:*}", a.addMiddlewares(a.handle.DownloadTar))
|
||||
r.OPTIONS("/tar/{cid}/{prefix:*}", a.addPreflight(a.handle.Preflight))
|
||||
a.log.Info(logs.AddedPathZipCidPrefix, logs.TagField(logs.TagApp))
|
||||
r.POST("/upload/{cid}", a.addMiddlewares(h.Upload))
|
||||
r.OPTIONS("/upload/{cid}", a.addPreflight())
|
||||
a.log.Info(logs.AddedPathUploadCid)
|
||||
r.GET("/get/{cid}/{oid:*}", a.addMiddlewares(h.DownloadByAddressOrBucketName))
|
||||
r.HEAD("/get/{cid}/{oid:*}", a.addMiddlewares(h.HeadByAddressOrBucketName))
|
||||
r.OPTIONS("/get/{cid}/{oid:*}", a.addPreflight())
|
||||
a.log.Info(logs.AddedPathGetCidOid)
|
||||
r.GET("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(h.DownloadByAttribute))
|
||||
r.HEAD("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(h.HeadByAttribute))
|
||||
r.OPTIONS("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addPreflight())
|
||||
a.log.Info(logs.AddedPathGetByAttributeCidAttrKeyAttrVal)
|
||||
r.GET("/zip/{cid}/{prefix:*}", a.addMiddlewares(h.DownloadZip))
|
||||
r.OPTIONS("/zip/{cid}/{prefix:*}", a.addPreflight())
|
||||
r.GET("/tar/{cid}/{prefix:*}", a.addMiddlewares(h.DownloadTar))
|
||||
r.OPTIONS("/tar/{cid}/{prefix:*}", a.addPreflight())
|
||||
a.log.Info(logs.AddedPathZipCidPrefix)
|
||||
|
||||
a.webServer.Handler = r.Handler
|
||||
}
|
||||
|
@ -789,14 +689,14 @@ func (a *app) addMiddlewares(h fasthttp.RequestHandler) fasthttp.RequestHandler
|
|||
return h
|
||||
}
|
||||
|
||||
func (a *app) addPreflight(h fasthttp.RequestHandler) fasthttp.RequestHandler {
|
||||
func (a *app) addPreflight() fasthttp.RequestHandler {
|
||||
list := []func(fasthttp.RequestHandler) fasthttp.RequestHandler{
|
||||
a.tracer,
|
||||
a.logger,
|
||||
a.canonicalizer,
|
||||
a.reqNamespace,
|
||||
}
|
||||
|
||||
h := a.preflightHandler
|
||||
for i := len(list) - 1; i >= 0; i-- {
|
||||
h = list[i](h)
|
||||
}
|
||||
|
@@ -804,16 +704,46 @@ func (a *app) addPreflight(h fasthttp.RequestHandler) fasthttp.RequestHandler {
 	return h
 }

+func (a *app) preflightHandler(c *fasthttp.RequestCtx) {
+	cors := a.settings.CORS()
+	setCORSHeaders(c, cors)
+}
+
 func (a *app) cors(h fasthttp.RequestHandler) fasthttp.RequestHandler {
 	return func(c *fasthttp.RequestCtx) {
 		h(c)
 		code := c.Response.StatusCode()
 		if code >= fasthttp.StatusOK && code < fasthttp.StatusMultipleChoices {
-			a.handle.SetCORSHeaders(c)
+			cors := a.settings.CORS()
+			setCORSHeaders(c, cors)
 		}
 	}
 }

+func setCORSHeaders(c *fasthttp.RequestCtx, cors CORS) {
+	c.Response.Header.Set(fasthttp.HeaderAccessControlMaxAge, strconv.Itoa(cors.MaxAge))
+
+	if len(cors.AllowOrigin) != 0 {
+		c.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, cors.AllowOrigin)
+	}
+
+	if len(cors.AllowMethods) != 0 {
+		c.Response.Header.Set(fasthttp.HeaderAccessControlAllowMethods, strings.Join(cors.AllowMethods, ","))
+	}
+
+	if len(cors.AllowHeaders) != 0 {
+		c.Response.Header.Set(fasthttp.HeaderAccessControlAllowHeaders, strings.Join(cors.AllowHeaders, ","))
+	}
+
+	if len(cors.ExposeHeaders) != 0 {
+		c.Response.Header.Set(fasthttp.HeaderAccessControlExposeHeaders, strings.Join(cors.ExposeHeaders, ","))
+	}
+
+	if cors.AllowCredentials {
+		c.Response.Header.Set(fasthttp.HeaderAccessControlAllowCredentials, "true")
+	}
+}
+
 func (a *app) logger(h fasthttp.RequestHandler) fasthttp.RequestHandler {
 	return func(req *fasthttp.RequestCtx) {
 		requiredFields := []zap.Field{zap.Uint64("id", req.ID())}

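As a usage sketch, a helper like setCORSHeaders ends up being called from an OPTIONS route (the preflight handler) and from the post-handler hook above. The self-contained example below mirrors that wiring with a trimmed-down rule type; the route, port and field set are illustrative, not the gateway's actual configuration:

package main

import (
	"strconv"
	"strings"

	"github.com/valyala/fasthttp"
)

// corsRule is a stand-in for the CORS value returned by the settings getter.
type corsRule struct {
	AllowOrigin  string
	AllowMethods []string
	MaxAge       int
}

// preflight writes the same kind of headers as setCORSHeaders above,
// with the field set trimmed for brevity.
func preflight(c *fasthttp.RequestCtx, cors corsRule) {
	c.Response.Header.Set(fasthttp.HeaderAccessControlMaxAge, strconv.Itoa(cors.MaxAge))
	if cors.AllowOrigin != "" {
		c.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, cors.AllowOrigin)
	}
	if len(cors.AllowMethods) != 0 {
		c.Response.Header.Set(fasthttp.HeaderAccessControlAllowMethods, strings.Join(cors.AllowMethods, ","))
	}
}

func main() {
	rule := corsRule{AllowOrigin: "*", AllowMethods: []string{"GET", "HEAD"}, MaxAge: 600}
	_ = fasthttp.ListenAndServe(":8080", func(c *fasthttp.RequestCtx) {
		if string(c.Method()) == fasthttp.MethodOptions {
			preflight(c, rule) // answer the preflight request
			return
		}
		c.SetBodyString("hello")
	})
}
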
@ -826,11 +756,14 @@ func (a *app) logger(h fasthttp.RequestHandler) fasthttp.RequestHandler {
|
|||
reqCtx = utils.SetReqLog(reqCtx, log)
|
||||
utils.SetContextToRequest(reqCtx, req)
|
||||
|
||||
log.Info(logs.Request, zap.String("remote", req.RemoteAddr().String()),
|
||||
fields := []zap.Field{
|
||||
zap.String("remote", req.RemoteAddr().String()),
|
||||
zap.ByteString("method", req.Method()),
|
||||
zap.ByteString("path", req.Path()),
|
||||
zap.ByteString("query", req.QueryArgs().QueryString()),
|
||||
logs.TagField(logs.TagDatapath))
|
||||
}
|
||||
|
||||
log.Info(logs.Request, fields...)
|
||||
h(req)
|
||||
}
|
||||
}
|
||||
|
@ -874,7 +807,7 @@ func (a *app) tokenizer(h fasthttp.RequestHandler) fasthttp.RequestHandler {
|
|||
if err != nil {
|
||||
log := utils.GetReqLogOrDefault(reqCtx, a.log)
|
||||
|
||||
log.Error(logs.CouldNotFetchAndStoreBearerToken, zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
log.Error(logs.CouldNotFetchAndStoreBearerToken, zap.Error(err))
|
||||
handler.ResponseError(req, "could not fetch and store bearer token: "+err.Error(), fasthttp.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
@ -912,13 +845,11 @@ func (a *app) reqNamespace(h fasthttp.RequestHandler) fasthttp.RequestHandler {
|
|||
|
||||
func (a *app) AppParams() *handler.AppParams {
|
||||
return &handler.AppParams{
|
||||
Logger: a.log,
|
||||
FrostFS: frostfs.NewFrostFS(a.pool),
|
||||
Owner: a.owner,
|
||||
Resolver: a.resolver,
|
||||
Cache: a.bucketCache,
|
||||
CORSCnrID: a.corsCnrID,
|
||||
CORSCache: cache.NewCORSCache(getCORSCacheOptions(a.config(), a.log)),
|
||||
Logger: a.log,
|
||||
FrostFS: frostfs.NewFrostFS(a.pool),
|
||||
Owner: a.owner,
|
||||
Resolver: a.resolver,
|
||||
Cache: a.bucketCache,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -935,17 +866,17 @@ func (a *app) initServers(ctx context.Context) {
|
|||
if err != nil {
|
||||
a.unbindServers = append(a.unbindServers, serverInfo)
|
||||
a.metrics.MarkUnhealthy(serverInfo.Address)
|
||||
a.log.Warn(logs.FailedToAddServer, append(fields, zap.Error(err), logs.TagField(logs.TagApp))...)
|
||||
a.log.Warn(logs.FailedToAddServer, append(fields, zap.Error(err))...)
|
||||
continue
|
||||
}
|
||||
a.metrics.MarkHealthy(serverInfo.Address)
|
||||
|
||||
a.servers = append(a.servers, srv)
|
||||
a.log.Info(logs.AddServer, append(fields, logs.TagField(logs.TagApp))...)
|
||||
a.log.Info(logs.AddServer, fields...)
|
||||
}
|
||||
|
||||
if len(a.servers) == 0 {
|
||||
a.log.Fatal(logs.NoHealthyServers, logs.TagField(logs.TagApp))
|
||||
a.log.Fatal(logs.NoHealthyServers)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1019,14 +950,13 @@ func (a *app) initTracing(ctx context.Context) {
|
|||
if trustedCa := a.config().GetString(cfgTracingTrustedCa); trustedCa != "" {
|
||||
caBytes, err := os.ReadFile(trustedCa)
|
||||
if err != nil {
|
||||
a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err))
|
||||
return
|
||||
}
|
||||
certPool := x509.NewCertPool()
|
||||
ok := certPool.AppendCertsFromPEM(caBytes)
|
||||
if !ok {
|
||||
a.log.Warn(logs.FailedToInitializeTracing, zap.String("error", "can't fill cert pool by ca cert"),
|
||||
logs.TagField(logs.TagApp))
|
||||
a.log.Warn(logs.FailedToInitializeTracing, zap.String("error", "can't fill cert pool by ca cert"))
|
||||
return
|
||||
}
|
||||
cfg.ServerCaCertPool = certPool
|
||||
|
@ -1034,24 +964,24 @@ func (a *app) initTracing(ctx context.Context) {
|
|||
|
||||
attributes, err := fetchTracingAttributes(a.config())
|
||||
if err != nil {
|
||||
a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err))
|
||||
return
|
||||
}
|
||||
cfg.Attributes = attributes
|
||||
|
||||
updated, err := tracing.Setup(ctx, cfg)
|
||||
if err != nil {
|
||||
a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err))
|
||||
}
|
||||
if updated {
|
||||
a.log.Info(logs.TracingConfigUpdated, logs.TagField(logs.TagApp))
|
||||
a.log.Info(logs.TracingConfigUpdated)
|
||||
}
|
||||
}
|
||||
|
||||
func (a *app) setRuntimeParameters() {
|
||||
if len(os.Getenv("GOMEMLIMIT")) != 0 {
|
||||
// default limit < yaml limit < app env limit < GOMEMLIMIT
|
||||
a.log.Warn(logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT, logs.TagField(logs.TagApp))
|
||||
a.log.Warn(logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -1060,8 +990,7 @@ func (a *app) setRuntimeParameters() {
|
|||
if softMemoryLimit != previous {
|
||||
a.log.Info(logs.RuntimeSoftMemoryLimitUpdated,
|
||||
zap.Int64("new_value", softMemoryLimit),
|
||||
zap.Int64("old_value", previous),
|
||||
logs.TagField(logs.TagApp))
|
||||
zap.Int64("old_value", previous))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1087,76 +1016,37 @@ func (a *app) tryReconnect(ctx context.Context, sr *fasthttp.Server) bool {
|
|||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
|
||||
a.log.Info(logs.ServerReconnecting, logs.TagField(logs.TagApp))
|
||||
a.log.Info(logs.ServerReconnecting)
|
||||
var failedServers []ServerInfo
|
||||
|
||||
for _, serverInfo := range a.unbindServers {
|
||||
fields := []zap.Field{
|
||||
zap.String("address", serverInfo.Address), zap.Bool("tls enabled", serverInfo.TLS.Enabled),
|
||||
zap.String("tls cert", serverInfo.TLS.CertFile), zap.String("tls key", serverInfo.TLS.KeyFile),
|
||||
}
|
||||
|
||||
srv, err := newServer(ctx, serverInfo)
|
||||
if err != nil {
|
||||
a.log.Warn(logs.ServerReconnectFailed, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
a.log.Warn(logs.ServerReconnectFailed, zap.Error(err))
|
||||
failedServers = append(failedServers, serverInfo)
|
||||
a.metrics.MarkUnhealthy(serverInfo.Address)
|
||||
continue
|
||||
}
|
||||
|
||||
go func() {
|
||||
a.log.Info(logs.StartingServer, zap.String("address", srv.Address()), logs.TagField(logs.TagApp))
|
||||
a.log.Info(logs.StartingServer, zap.String("address", srv.Address()))
|
||||
a.metrics.MarkHealthy(serverInfo.Address)
|
||||
if err = sr.Serve(srv.Listener()); err != nil && !errors.Is(err, http.ErrServerClosed) {
|
||||
a.log.Warn(logs.ListenAndServe, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
a.log.Warn(logs.ListenAndServe, zap.Error(err))
|
||||
a.metrics.MarkUnhealthy(serverInfo.Address)
|
||||
}
|
||||
}()
|
||||
|
||||
a.servers = append(a.servers, srv)
|
||||
a.log.Info(logs.ServerReconnectedSuccessfully,
|
||||
zap.String("address", serverInfo.Address), zap.Bool("tls enabled", serverInfo.TLS.Enabled),
|
||||
zap.String("tls cert", serverInfo.TLS.CertFile), zap.String("tls key", serverInfo.TLS.KeyFile),
|
||||
logs.TagField(logs.TagApp))
|
||||
a.log.Info(logs.ServerReconnectedSuccessfully, fields...)
|
||||
}
|
||||
|
||||
a.unbindServers = failedServers
|
||||
|
||||
return len(a.unbindServers) == 0
|
||||
}
|
||||
|
||||
-func (a *app) fetchContainerID(ctx context.Context, cfgKey string) (id *cid.ID, err error) {
-	cnrID, err := a.resolveContainerID(ctx, cfgKey)
-	if err != nil {
-		return nil, err
-	}
-
-	err = checkContainerExists(ctx, *cnrID, a.pool)
-	if err != nil {
-		return nil, err
-	}
-
-	return cnrID, nil
-}
-
-func (a *app) resolveContainerID(ctx context.Context, cfgKey string) (*cid.ID, error) {
-	containerString := a.config().GetString(cfgKey)
-
-	id := new(cid.ID)
-	if err := id.DecodeString(containerString); err != nil {
-		i := strings.Index(containerString, ".")
-		if i < 0 {
-			return nil, fmt.Errorf("invalid container address: %s", containerString)
-		}
-
-		if id, err = a.resolver.Resolve(ctx, containerString[i+1:], containerString[:i]); err != nil {
-			return nil, fmt.Errorf("resolve container address %s: %w", containerString, err)
-		}
-	}
-
-	return id, nil
-}
-
-func checkContainerExists(ctx context.Context, id cid.ID, frostFSPool *pool.Pool) error {
-	prm := pool.PrmContainerGet{
-		ContainerID: id,
-	}
-
-	_, err := frostFSPool.GetContainer(ctx, prm)
-	return err
-}
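For reference, the value this block resolves (containers.cors) may be either a bare container ID or a `<name>.<zone>` string that is handed to the NNS resolver; the integration test composes it from the test container name and the default zone attribute. The stand-alone sketch below mirrors only the string-splitting step; which half the resolver treats as name and which as zone is left to resolver.Resolve in the real code:

package main

import (
	"fmt"
	"strings"
)

// splitContainerRef mirrors the string handling in resolveContainerID above:
// if the configured value does not parse as a container ID, it is split at
// the first dot before being passed to the resolver.
func splitContainerRef(ref string) (beforeDot, afterDot string, ok bool) {
	i := strings.Index(ref, ".")
	if i < 0 {
		return "", "", false // treated as a bare container ID
	}
	return ref[:i], ref[i+1:], true
}

func main() {
	before, after, ok := splitContainerRef("cors.container")
	fmt.Println(before, after, ok) // cors container true
}
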
|
|
|
@ -20,11 +20,9 @@ import (
|
|||
|
||||
containerv2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||
|
@ -45,10 +43,9 @@ type putResponse struct {
|
|||
}
|
||||
|
||||
const (
|
||||
testContainerName = "friendly"
|
||||
testListenAddress = "localhost:8082"
|
||||
testHost = "http://" + testListenAddress
|
||||
testCORSContainerName = "cors"
|
||||
testContainerName = "friendly"
|
||||
testListenAddress = "localhost:8082"
|
||||
testHost = "http://" + testListenAddress
|
||||
)
|
||||
|
||||
func TestIntegration(t *testing.T) {
|
||||
|
@ -79,14 +76,10 @@ func TestIntegration(t *testing.T) {
|
|||
registerUser(t, ctx, aioContainer, file.Name())
|
||||
}
|
||||
|
||||
// Creating CORS container
|
||||
clientPool := getPool(ctx, t, key)
|
||||
_, err = createContainer(ctx, t, clientPool, ownerID, testCORSContainerName)
|
||||
require.NoError(t, err, version)
|
||||
|
||||
// See the logs from the command execution.
|
||||
server, cancel := runServer(file.Name())
|
||||
CID, err := createContainer(ctx, t, clientPool, ownerID, testContainerName)
|
||||
clientPool := getPool(ctx, t, key)
|
||||
CID, err := createContainer(ctx, t, clientPool, ownerID)
|
||||
require.NoError(t, err, version)
|
||||
|
||||
jsonToken, binaryToken := makeBearerTokens(t, key, ownerID, version)
|
||||
|
@ -101,7 +94,6 @@ func TestIntegration(t *testing.T) {
|
|||
t.Run("get by attribute "+version, func(t *testing.T) { getByAttr(ctx, t, clientPool, ownerID, CID) })
|
||||
t.Run("get zip "+version, func(t *testing.T) { getZip(ctx, t, clientPool, ownerID, CID) })
|
||||
t.Run("test namespaces "+version, func(t *testing.T) { checkNamespaces(ctx, t, clientPool, ownerID, CID) })
|
||||
t.Run("test status codes "+version, func(t *testing.T) { checkStatusCodes(ctx, t, clientPool, ownerID, version) })
|
||||
|
||||
cancel()
|
||||
server.Wait()
|
||||
|
@ -118,8 +110,6 @@ func runServer(pathToWallet string) (App, context.CancelFunc) {
|
|||
v.config().Set(cfgWalletPath, pathToWallet)
|
||||
v.config().Set(cfgWalletPassphrase, "")
|
||||
|
||||
v.config().Set(cfgContainersCORS, testCORSContainerName+"."+containerv2.SysAttributeZoneDefault)
|
||||
|
||||
application := newApp(cancelCtx, v)
|
||||
go application.Serve()
|
||||
|
||||
|
@ -270,7 +260,7 @@ func putWithDuplicateKeys(t *testing.T, CID cid.ID) {
|
|||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
require.NoError(t, err)
|
||||
require.Contains(t, string(body), "key duplication error: "+attr+"\n")
|
||||
require.Equal(t, "key duplication error: "+attr+"\n", string(body))
|
||||
require.Equal(t, http.StatusBadRequest, resp.StatusCode)
|
||||
}
|
||||
|
||||
|
@ -439,80 +429,7 @@ func checkNamespaces(ctx context.Context, t *testing.T, clientPool *pool.Pool, o
|
|||
resp, err = http.DefaultClient.Do(req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, http.StatusNotFound, resp.StatusCode)
|
||||
}
|
||||
|
||||
func checkStatusCodes(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, version string) {
|
||||
cli := http.Client{Timeout: 30 * time.Second}
|
||||
|
||||
t.Run("container not found by name", func(t *testing.T) {
|
||||
resp, err := cli.Get(testHost + "/get/unknown/object")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, http.StatusNotFound, resp.StatusCode)
|
||||
requireBodyContains(t, resp, "container not found")
|
||||
})
|
||||
|
||||
t.Run("container not found by cid", func(t *testing.T) {
|
||||
cnrIDTest := cidtest.ID()
|
||||
resp, err := cli.Get(testHost + "/get/" + cnrIDTest.EncodeToString() + "/object")
|
||||
require.NoError(t, err)
|
||||
requireBodyContains(t, resp, "container not found")
|
||||
require.Equal(t, http.StatusNotFound, resp.StatusCode)
|
||||
})
|
||||
|
||||
t.Run("object not found in storage", func(t *testing.T) {
|
||||
resp, err := cli.Get(testHost + "/get_by_attribute/" + testContainerName + "/FilePath/object2")
|
||||
require.NoError(t, err)
|
||||
requireBodyContains(t, resp, "object not found")
|
||||
require.Equal(t, http.StatusNotFound, resp.StatusCode)
|
||||
})
|
||||
|
||||
t.Run("access denied", func(t *testing.T) {
|
||||
basicACL := acl.Private
|
||||
var recs []*eacl.Record
|
||||
if version == "1.2.7" {
|
||||
basicACL = acl.PublicRWExtended
|
||||
rec := eacl.NewRecord()
|
||||
rec.SetAction(eacl.ActionDeny)
|
||||
rec.SetOperation(eacl.OperationGet)
|
||||
recs = append(recs, rec)
|
||||
}
|
||||
|
||||
cnrID, err := createContainerBase(ctx, t, clientPool, ownerID, basicACL, "")
|
||||
require.NoError(t, err)
|
||||
|
||||
key, err := keys.NewPrivateKey()
|
||||
require.NoError(t, err)
|
||||
jsonToken, _ := makeBearerTokens(t, key, ownerID, version, recs...)
|
||||
|
||||
t.Run("get", func(t *testing.T) {
|
||||
request, err := http.NewRequest(http.MethodGet, testHost+"/get/"+cnrID.EncodeToString()+"/object", nil)
|
||||
require.NoError(t, err)
|
||||
request.Header.Set("Authorization", "Bearer "+jsonToken)
|
||||
|
||||
resp, err := cli.Do(request)
|
||||
require.NoError(t, err)
|
||||
requireBodyContains(t, resp, "access denied")
|
||||
require.Equal(t, http.StatusForbidden, resp.StatusCode)
|
||||
})
|
||||
|
||||
t.Run("upload", func(t *testing.T) {
|
||||
request, _, _ := makePutRequest(t, testHost+"/upload/"+cnrID.EncodeToString())
|
||||
request.Header.Set("Authorization", "Bearer "+jsonToken)
|
||||
|
||||
resp, err := cli.Do(request)
|
||||
require.NoError(t, err)
|
||||
requireBodyContains(t, resp, "access denied")
|
||||
require.Equal(t, http.StatusForbidden, resp.StatusCode)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func requireBodyContains(t *testing.T, resp *http.Response, msg string) {
|
||||
data, err := io.ReadAll(resp.Body)
|
||||
require.NoError(t, err)
|
||||
defer resp.Body.Close()
|
||||
|
||||
require.Contains(t, strings.ToLower(string(data)), strings.ToLower(msg))
|
||||
}
|
||||
|
||||
func createDockerContainer(ctx context.Context, t *testing.T, image string) testcontainers.Container {
|
||||
|
@ -560,11 +477,7 @@ func getPool(ctx context.Context, t *testing.T, key *keys.PrivateKey) *pool.Pool
|
|||
return clientPool
|
||||
}
|
||||
|
||||
func createContainer(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, name string) (cid.ID, error) {
|
||||
return createContainerBase(ctx, t, clientPool, ownerID, acl.PublicRWExtended, name)
|
||||
}
|
||||
|
||||
func createContainerBase(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, basicACL acl.Basic, name string) (cid.ID, error) {
|
||||
func createContainer(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID) (cid.ID, error) {
|
||||
var policy netmap.PlacementPolicy
|
||||
err := policy.DecodeString("REP 1")
|
||||
require.NoError(t, err)
|
||||
|
@ -572,28 +485,24 @@ func createContainerBase(ctx context.Context, t *testing.T, clientPool *pool.Poo
|
|||
var cnr container.Container
|
||||
cnr.Init()
|
||||
cnr.SetPlacementPolicy(policy)
|
||||
cnr.SetBasicACL(basicACL)
|
||||
cnr.SetBasicACL(acl.PublicRWExtended)
|
||||
cnr.SetOwner(ownerID)
|
||||
|
||||
container.SetCreationTime(&cnr, time.Now())
|
||||
|
||||
if name != "" {
|
||||
var domain container.Domain
|
||||
domain.SetName(name)
|
||||
var domain container.Domain
|
||||
domain.SetName(testContainerName)
|
||||
|
||||
cnr.SetAttribute(containerv2.SysAttributeName, domain.Name())
|
||||
cnr.SetAttribute(containerv2.SysAttributeZone, domain.Zone())
|
||||
}
|
||||
cnr.SetAttribute(containerv2.SysAttributeName, domain.Name())
|
||||
cnr.SetAttribute(containerv2.SysAttributeZone, domain.Zone())
|
||||
|
||||
prm := pool.PrmContainerPut{
|
||||
ClientParams: client.PrmContainerPut{
|
||||
Container: &cnr,
|
||||
},
|
||||
WaitParams: &pool.WaitParams{
|
||||
Timeout: 15 * time.Second,
|
||||
PollInterval: 3 * time.Second,
|
||||
},
|
||||
}
|
||||
var waitPrm pool.WaitParams
|
||||
waitPrm.SetTimeout(15 * time.Second)
|
||||
waitPrm.SetPollInterval(3 * time.Second)
|
||||
|
||||
var prm pool.PrmContainerPut
|
||||
prm.SetContainer(cnr)
|
||||
prm.SetWaitParams(waitPrm)
|
||||
|
||||
CID, err := clientPool.PutContainer(ctx, prm)
|
||||
if err != nil {
|
||||
|
@ -640,18 +549,13 @@ func registerUser(t *testing.T, ctx context.Context, aioContainer testcontainers
|
|||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func makeBearerTokens(t *testing.T, key *keys.PrivateKey, ownerID user.ID, version string, records ...*eacl.Record) (jsonTokenBase64, binaryTokenBase64 string) {
|
||||
func makeBearerTokens(t *testing.T, key *keys.PrivateKey, ownerID user.ID, version string) (jsonTokenBase64, binaryTokenBase64 string) {
|
||||
tkn := new(bearer.Token)
|
||||
tkn.ForUser(ownerID)
|
||||
tkn.SetExp(10000)
|
||||
|
||||
if version == "1.2.7" {
|
||||
table := eacl.NewTable()
|
||||
for i := range records {
|
||||
table.AddRecord(records[i])
|
||||
}
|
||||
|
||||
tkn.SetEACLTable(*table)
|
||||
tkn.SetEACLTable(*eacl.NewTable())
|
||||
} else {
|
||||
tkn.SetImpersonate(true)
|
||||
}
|
||||
|
|
|
@ -1,175 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/zapjournald"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/ssgreg/journald"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zapcore"
|
||||
)
|
||||
|
||||
func getLogLevel(v *viper.Viper) (zapcore.Level, error) {
|
||||
var lvl zapcore.Level
|
||||
lvlStr := v.GetString(cfgLoggerLevel)
|
||||
err := lvl.UnmarshalText([]byte(lvlStr))
|
||||
if err != nil {
|
||||
return lvl, fmt.Errorf("incorrect logger level configuration %s (%v), "+
|
||||
"value should be one of %v", lvlStr, err, [...]zapcore.Level{
|
||||
zapcore.DebugLevel,
|
||||
zapcore.InfoLevel,
|
||||
zapcore.WarnLevel,
|
||||
zapcore.ErrorLevel,
|
||||
zapcore.DPanicLevel,
|
||||
zapcore.PanicLevel,
|
||||
zapcore.FatalLevel,
|
||||
})
|
||||
}
|
||||
return lvl, nil
|
||||
}
|
||||
|
||||
var _ zapcore.Core = (*zapCoreTagFilterWrapper)(nil)
|
||||
|
||||
type zapCoreTagFilterWrapper struct {
|
||||
core zapcore.Core
|
||||
settings TagFilterSettings
|
||||
extra []zap.Field
|
||||
}
|
||||
|
||||
type TagFilterSettings interface {
|
||||
LevelEnabled(tag string, lvl zapcore.Level) bool
|
||||
DefaultEnabled(lvl zapcore.Level) bool
|
||||
}
|
||||
|
||||
func (c *zapCoreTagFilterWrapper) Enabled(level zapcore.Level) bool {
|
||||
return c.core.Enabled(level)
|
||||
}
|
||||
|
||||
func (c *zapCoreTagFilterWrapper) With(fields []zapcore.Field) zapcore.Core {
|
||||
return &zapCoreTagFilterWrapper{
|
||||
core: c.core.With(fields),
|
||||
settings: c.settings,
|
||||
extra: append(c.extra, fields...),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *zapCoreTagFilterWrapper) Check(entry zapcore.Entry, checked *zapcore.CheckedEntry) *zapcore.CheckedEntry {
|
||||
if c.core.Enabled(entry.Level) {
|
||||
return checked.AddCore(entry, c)
|
||||
}
|
||||
return checked
|
||||
}
|
||||
|
||||
func (c *zapCoreTagFilterWrapper) Write(entry zapcore.Entry, fields []zapcore.Field) error {
|
||||
if c.shouldSkip(entry, fields, c.extra) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return c.core.Write(entry, fields)
|
||||
}
|
||||
|
||||
func (c *zapCoreTagFilterWrapper) shouldSkip(entry zapcore.Entry, fields []zap.Field, extra []zap.Field) bool {
|
||||
for _, field := range fields {
|
||||
if field.Key == logs.TagFieldName && field.Type == zapcore.StringType {
|
||||
return !c.settings.LevelEnabled(field.String, entry.Level)
|
||||
}
|
||||
}
|
||||
for _, field := range extra {
|
||||
if field.Key == logs.TagFieldName && field.Type == zapcore.StringType {
|
||||
return !c.settings.LevelEnabled(field.String, entry.Level)
|
||||
}
|
||||
}
|
||||
|
||||
return !c.settings.DefaultEnabled(entry.Level)
|
||||
}
|
||||
|
||||
func (c *zapCoreTagFilterWrapper) Sync() error {
|
||||
return c.core.Sync()
|
||||
}
|
||||
|
||||
func applyZapCoreMiddlewares(core zapcore.Core, v *viper.Viper, loggerSettings LoggerAppSettings, tagSetting TagFilterSettings) zapcore.Core {
|
||||
core = &zapCoreTagFilterWrapper{
|
||||
core: core,
|
||||
settings: tagSetting,
|
||||
}
|
||||
|
||||
if v.GetBool(cfgLoggerSamplingEnabled) {
|
||||
core = zapcore.NewSamplerWithOptions(core,
|
||||
v.GetDuration(cfgLoggerSamplingInterval),
|
||||
v.GetInt(cfgLoggerSamplingInitial),
|
||||
v.GetInt(cfgLoggerSamplingThereafter),
|
||||
zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
|
||||
if dec&zapcore.LogDropped > 0 {
|
||||
loggerSettings.DroppedLogsInc()
|
||||
}
|
||||
}))
|
||||
}
|
||||
|
||||
return core
|
||||
}
|
||||
|
||||
func newLogEncoder() zapcore.Encoder {
|
||||
c := zap.NewProductionEncoderConfig()
|
||||
c.EncodeTime = zapcore.ISO8601TimeEncoder
|
||||
|
||||
return zapcore.NewConsoleEncoder(c)
|
||||
}
|
||||
|
||||
// newStdoutLogger constructs a zap.Logger instance for current application.
|
||||
// Panics on failure.
|
||||
//
|
||||
// Logger is built from zap's production logging configuration with:
|
||||
// - parameterized level (debug by default)
|
||||
// - console encoding
|
||||
// - ISO8601 time encoding
|
||||
//
|
||||
// Logger records a stack trace for all messages at or above fatal level.
|
||||
//
|
||||
// See also zapcore.Level, zap.NewProductionConfig, zap.AddStacktrace.
|
||||
func newStdoutLogger(v *viper.Viper, lvl zap.AtomicLevel, loggerSettings LoggerAppSettings, tagSetting TagFilterSettings) *Logger {
|
||||
stdout := zapcore.AddSync(os.Stdout)
|
||||
|
||||
consoleOutCore := zapcore.NewCore(newLogEncoder(), stdout, lvl)
|
||||
consoleOutCore = applyZapCoreMiddlewares(consoleOutCore, v, loggerSettings, tagSetting)
|
||||
|
||||
return &Logger{
|
||||
logger: zap.New(consoleOutCore, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))),
|
||||
}
|
||||
}
|
||||
|
||||
func newJournaldLogger(v *viper.Viper, lvl zap.AtomicLevel, loggerSettings LoggerAppSettings, tagSetting TagFilterSettings) *Logger {
|
||||
encoder := zapjournald.NewPartialEncoder(newLogEncoder(), zapjournald.SyslogFields)
|
||||
|
||||
core := zapjournald.NewCore(lvl, encoder, &journald.Journal{}, zapjournald.SyslogFields)
|
||||
coreWithContext := core.With([]zapcore.Field{
|
||||
zapjournald.SyslogFacility(zapjournald.LogDaemon),
|
||||
zapjournald.SyslogIdentifier(),
|
||||
zapjournald.SyslogPid(),
|
||||
})
|
||||
|
||||
coreWithContext = applyZapCoreMiddlewares(coreWithContext, v, loggerSettings, tagSetting)
|
||||
|
||||
return &Logger{
|
||||
logger: zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))),
|
||||
}
|
||||
}
|
||||
|
||||
type LoggerAppSettings interface {
|
||||
DroppedLogsInc()
|
||||
}
|
||||
|
||||
func pickLogger(v *viper.Viper, lvl zap.AtomicLevel, loggerSettings LoggerAppSettings, tagSettings TagFilterSettings) *Logger {
|
||||
dest := v.GetString(cfgLoggerDestination)
|
||||
|
||||
switch dest {
|
||||
case destinationStdout:
|
||||
return newStdoutLogger(v, lvl, loggerSettings, tagSettings)
|
||||
case destinationJournald:
|
||||
return newJournaldLogger(v, lvl, loggerSettings, tagSettings)
|
||||
default:
|
||||
panic(fmt.Sprintf("wrong destination for logger: %s", dest))
|
||||
}
|
||||
}
|
|
@@ -74,6 +74,7 @@ func newServer(ctx context.Context, serverInfo ServerInfo) (*server, error) {

 		ln = tls.NewListener(ln, &tls.Config{
 			GetCertificate: tlsProvider.GetCertificate,
+			NextProtos:     []string{"h2"}, // required to enable HTTP/2 requests in `http.Serve`
 		})
 	}

@@ -18,7 +18,7 @@ import (
 	"time"

 	"github.com/stretchr/testify/require"
-	"github.com/valyala/fasthttp"
+	"golang.org/x/net/http2"
 )

 const (

@@ -26,10 +26,14 @@ const (
 	expHeaderValue = "Bar"
 )

-func TestHTTP_TLS(t *testing.T) {
+func TestHTTP2TLS(t *testing.T) {
 	ctx := context.Background()
 	certPath, keyPath := prepareTestCerts(t)

+	srv := &http.Server{
+		Handler: http.HandlerFunc(testHandler),
+	}
+
 	tlsListener, err := newServer(ctx, ServerInfo{
 		Address: ":0",
 		TLS: ServerTLSInfo{

@@ -43,34 +47,37 @@
 	addr := fmt.Sprintf("https://localhost:%d", port)

 	go func() {
-		_ = fasthttp.Serve(tlsListener.Listener(), testHandler)
+		_ = srv.Serve(tlsListener.Listener())
 	}()

+	// Server is running, now send HTTP/2 request
+
 	tlsClientConfig := &tls.Config{
 		InsecureSkipVerify: true,
 	}

-	cliHTTP := http.Client{Transport: &http.Transport{}}
-	cliHTTPS := http.Client{Transport: &http.Transport{TLSClientConfig: tlsClientConfig}}
+	cliHTTP1 := http.Client{Transport: &http.Transport{TLSClientConfig: tlsClientConfig}}
+	cliHTTP2 := http.Client{Transport: &http2.Transport{TLSClientConfig: tlsClientConfig}}

 	req, err := http.NewRequest("GET", addr, nil)
 	require.NoError(t, err)
 	req.Header[expHeaderKey] = []string{expHeaderValue}

-	resp, err := cliHTTPS.Do(req)
+	resp, err := cliHTTP1.Do(req)
 	require.NoError(t, err)
 	require.Equal(t, http.StatusOK, resp.StatusCode)

-	_, err = cliHTTP.Do(req)
-	require.ErrorContains(t, err, "failed to verify certificate")
+	resp, err = cliHTTP2.Do(req)
+	require.NoError(t, err)
+	require.Equal(t, http.StatusOK, resp.StatusCode)
 }

-func testHandler(ctx *fasthttp.RequestCtx) {
-	hdr := ctx.Request.Header.Peek(expHeaderKey)
-	if len(hdr) == 0 || string(hdr) != expHeaderValue {
-		ctx.Response.SetStatusCode(http.StatusBadRequest)
+func testHandler(resp http.ResponseWriter, req *http.Request) {
+	hdr, ok := req.Header[expHeaderKey]
+	if !ok || len(hdr) != 1 || hdr[0] != expHeaderValue {
+		resp.WriteHeader(http.StatusBadRequest)
 	} else {
-		ctx.Response.SetStatusCode(http.StatusOK)
+		resp.WriteHeader(http.StatusOK)
 	}
 }

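prepareTestCerts is referenced by both test variants but not shown in this diff; a helper like it typically just writes a throwaway self-signed localhost certificate for the TLS listener under test. A hypothetical sketch with crypto/x509 (file names, key type and validity are illustrative):

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"fmt"
	"math/big"
	"os"
	"time"
)

// writeSelfSignedCert emits a short-lived self-signed certificate and key,
// the kind of fixture a helper such as prepareTestCerts would produce.
func writeSelfSignedCert(certPath, keyPath string) error {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return err
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "localhost"},
		DNSNames:     []string{"localhost"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(time.Hour),
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		return err
	}
	keyDER, err := x509.MarshalECPrivateKey(key)
	if err != nil {
		return err
	}
	if err := os.WriteFile(certPath, pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der}), 0o600); err != nil {
		return err
	}
	return os.WriteFile(keyPath, pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: keyDER}), 0o600)
}

func main() {
	if err := writeSelfSignedCert("cert.pem", "key.pem"); err != nil {
		fmt.Println("generate cert:", err)
	}
}
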
|
@ -16,17 +16,17 @@ import (
|
|||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
||||
internalnet "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/net"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/service/frostfs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
|
||||
grpctracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
|
||||
qostagging "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
||||
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
|
||||
"git.frostfs.info/TrueCloudLab/zapjournald"
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/ssgreg/journald"
|
||||
"github.com/valyala/fasthttp"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zapcore"
|
||||
|
@ -111,11 +111,6 @@ const (
|
|||
cfgLoggerSamplingThereafter = "logger.sampling.thereafter"
|
||||
cfgLoggerSamplingInterval = "logger.sampling.interval"
|
||||
|
||||
cfgLoggerTags = "logger.tags"
|
||||
cfgLoggerTagsPrefixTmpl = cfgLoggerTags + ".%d."
|
||||
cfgLoggerTagsNameTmpl = cfgLoggerTagsPrefixTmpl + "names"
|
||||
cfgLoggerTagsLevelTmpl = cfgLoggerTagsPrefixTmpl + "level"
|
||||
|
||||
// Wallet.
|
||||
cfgWalletPassphrase = "wallet.passphrase"
|
||||
cfgWalletPath = "wallet.path"
|
||||
|
@ -156,21 +151,18 @@ const (
|
|||
cfgBucketsCacheLifetime = "cache.buckets.lifetime"
|
||||
cfgBucketsCacheSize = "cache.buckets.size"
|
||||
cfgNetmapCacheLifetime = "cache.netmap.lifetime"
|
||||
cfgCORSCacheLifetime = "cache.cors.lifetime"
|
||||
cfgCORSCacheSize = "cache.cors.size"
|
||||
|
||||
// Bucket resolving options.
|
||||
cfgResolveNamespaceHeader = "resolve_bucket.namespace_header"
|
||||
cfgResolveDefaultNamespaces = "resolve_bucket.default_namespaces"
|
||||
|
||||
// CORS.
|
||||
cfgCORS = "cors"
|
||||
cfgCORSAllowOrigin = cfgCORS + ".allow_origin"
|
||||
cfgCORSAllowMethods = cfgCORS + ".allow_methods"
|
||||
cfgCORSAllowHeaders = cfgCORS + ".allow_headers"
|
||||
cfgCORSExposeHeaders = cfgCORS + ".expose_headers"
|
||||
cfgCORSAllowCredentials = cfgCORS + ".allow_credentials"
|
||||
cfgCORSMaxAge = cfgCORS + ".max_age"
|
||||
cfgCORSAllowOrigin = "cors.allow_origin"
|
||||
cfgCORSAllowMethods = "cors.allow_methods"
|
||||
cfgCORSAllowHeaders = "cors.allow_headers"
|
||||
cfgCORSExposeHeaders = "cors.expose_headers"
|
||||
cfgCORSAllowCredentials = "cors.allow_credentials"
|
||||
cfgCORSMaxAge = "cors.max_age"
|
||||
|
||||
// Multinet.
|
||||
cfgMultinetEnabled = "multinet.enabled"
|
||||
|
@ -183,9 +175,6 @@ const (
|
|||
cfgFeaturesEnableFilepathFallback = "features.enable_filepath_fallback"
|
||||
cfgFeaturesTreePoolNetmapSupport = "features.tree_pool_netmap_support"
|
||||
|
||||
// Containers.
|
||||
cfgContainersCORS = "containers.cors"
|
||||
|
||||
// Command line args.
|
||||
cmdHelp = "help"
|
||||
cmdVersion = "version"
|
||||
|
@ -204,10 +193,9 @@ var ignore = map[string]struct{}{
|
|||
cmdVersion: {},
|
||||
}
|
||||
|
||||
var defaultTags = []string{logs.TagApp, logs.TagDatapath, logs.TagExternalStorage, logs.TagExternalStorageTree}
|
||||
|
||||
type Logger struct {
|
||||
logger *zap.Logger
|
||||
lvl zap.AtomicLevel
|
||||
}
|
||||
|
||||
type appCfg struct {
|
||||
|
@ -511,38 +499,112 @@ func mergeConfig(v *viper.Viper, fileName string) error {
|
|||
return v.MergeConfig(cfgFile)
|
||||
}
|
||||
|
||||
func fetchLogTagsConfig(v *viper.Viper, defaultLvl zapcore.Level) (map[string]zapcore.Level, error) {
|
||||
res := make(map[string]zapcore.Level)
|
||||
type LoggerAppSettings interface {
|
||||
DroppedLogsInc()
|
||||
}
|
||||
|
||||
for i := 0; ; i++ {
|
||||
tagNames := v.GetString(fmt.Sprintf(cfgLoggerTagsNameTmpl, i))
|
||||
if tagNames == "" {
|
||||
break
|
||||
}
|
||||
|
||||
lvl := defaultLvl
|
||||
level := v.GetString(fmt.Sprintf(cfgLoggerTagsLevelTmpl, i))
|
||||
if level != "" {
|
||||
if err := lvl.Set(level); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse log tags config, unknown level: '%s'", level)
|
||||
}
|
||||
}
|
||||
|
||||
for _, tagName := range strings.Split(tagNames, ",") {
|
||||
tagName = strings.TrimSpace(tagName)
|
||||
if len(tagName) != 0 {
|
||||
res[tagName] = lvl
|
||||
}
|
||||
}
|
||||
func pickLogger(v *viper.Viper, settings LoggerAppSettings) *Logger {
|
||||
lvl, err := getLogLevel(v)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
if len(res) == 0 && !v.IsSet(cfgLoggerTags) {
|
||||
for _, tag := range defaultTags {
|
||||
res[tag] = defaultLvl
|
||||
}
|
||||
dest := v.GetString(cfgLoggerDestination)
|
||||
|
||||
switch dest {
|
||||
case destinationStdout:
|
||||
return newStdoutLogger(v, lvl, settings)
|
||||
case destinationJournald:
|
||||
return newJournaldLogger(v, lvl, settings)
|
||||
default:
|
||||
panic(fmt.Sprintf("wrong destination for logger: %s", dest))
|
||||
}
|
||||
}
|
||||
|
||||
// newStdoutLogger constructs a zap.Logger instance for current application.
|
||||
// Panics on failure.
|
||||
//
|
||||
// Logger is built from zap's production logging configuration with:
|
||||
// - parameterized level (debug by default)
|
||||
// - console encoding
|
||||
// - ISO8601 time encoding
|
||||
//
|
||||
// Logger records a stack trace for all messages at or above fatal level.
|
||||
//
|
||||
// See also zapcore.Level, zap.NewProductionConfig, zap.AddStacktrace.
|
||||
func newStdoutLogger(v *viper.Viper, lvl zapcore.Level, settings LoggerAppSettings) *Logger {
|
||||
stdout := zapcore.AddSync(os.Stderr)
|
||||
level := zap.NewAtomicLevelAt(lvl)
|
||||
|
||||
consoleOutCore := zapcore.NewCore(newLogEncoder(), stdout, level)
|
||||
consoleOutCore = applyZapCoreMiddlewares(consoleOutCore, v, settings)
|
||||
|
||||
return &Logger{
|
||||
logger: zap.New(consoleOutCore, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))),
|
||||
lvl: level,
|
||||
}
|
||||
}
|
||||
|
||||
func newJournaldLogger(v *viper.Viper, lvl zapcore.Level, settings LoggerAppSettings) *Logger {
|
||||
level := zap.NewAtomicLevelAt(lvl)
|
||||
|
||||
encoder := zapjournald.NewPartialEncoder(newLogEncoder(), zapjournald.SyslogFields)
|
||||
|
||||
core := zapjournald.NewCore(level, encoder, &journald.Journal{}, zapjournald.SyslogFields)
|
||||
coreWithContext := core.With([]zapcore.Field{
|
||||
zapjournald.SyslogFacility(zapjournald.LogDaemon),
|
||||
zapjournald.SyslogIdentifier(),
|
||||
zapjournald.SyslogPid(),
|
||||
})
|
||||
|
||||
coreWithContext = applyZapCoreMiddlewares(coreWithContext, v, settings)
|
||||
|
||||
return &Logger{
|
||||
logger: zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))),
|
||||
lvl: level,
|
||||
}
|
||||
}
|
||||
|
||||
func newLogEncoder() zapcore.Encoder {
|
||||
c := zap.NewProductionEncoderConfig()
|
||||
c.EncodeTime = zapcore.ISO8601TimeEncoder
|
||||
|
||||
return zapcore.NewConsoleEncoder(c)
|
||||
}
|
||||
|
||||
func applyZapCoreMiddlewares(core zapcore.Core, v *viper.Viper, settings LoggerAppSettings) zapcore.Core {
|
||||
if v.GetBool(cfgLoggerSamplingEnabled) {
|
||||
core = zapcore.NewSamplerWithOptions(core,
|
||||
v.GetDuration(cfgLoggerSamplingInterval),
|
||||
v.GetInt(cfgLoggerSamplingInitial),
|
||||
v.GetInt(cfgLoggerSamplingThereafter),
|
||||
zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
|
||||
if dec&zapcore.LogDropped > 0 {
|
||||
settings.DroppedLogsInc()
|
||||
}
|
||||
}))
|
||||
}
|
||||
|
||||
return res, nil
|
||||
return core
|
||||
}
|
||||
|
||||
func getLogLevel(v *viper.Viper) (zapcore.Level, error) {
|
||||
var lvl zapcore.Level
|
||||
lvlStr := v.GetString(cfgLoggerLevel)
|
||||
err := lvl.UnmarshalText([]byte(lvlStr))
|
||||
if err != nil {
|
||||
return lvl, fmt.Errorf("incorrect logger level configuration %s (%v), "+
|
||||
"value should be one of %v", lvlStr, err, [...]zapcore.Level{
|
||||
zapcore.DebugLevel,
|
||||
zapcore.InfoLevel,
|
||||
zapcore.WarnLevel,
|
||||
zapcore.ErrorLevel,
|
||||
zapcore.DPanicLevel,
|
||||
zapcore.PanicLevel,
|
||||
zapcore.FatalLevel,
|
||||
})
|
||||
}
|
||||
return lvl, nil
|
||||
}
|
||||
|
||||
func fetchReconnectInterval(cfg *viper.Viper) time.Duration {
|
||||
|
@ -558,19 +620,20 @@ func fetchIndexPageTemplate(v *viper.Viper, l *zap.Logger) (string, bool) {
|
|||
if !v.GetBool(cfgIndexPageEnabled) {
|
||||
return "", false
|
||||
}
|
||||
|
||||
reader, err := os.Open(v.GetString(cfgIndexPageTemplatePath))
|
||||
if err != nil {
|
||||
l.Warn(logs.FailedToReadIndexPageTemplate, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
l.Warn(logs.FailedToReadIndexPageTemplate, zap.Error(err))
|
||||
return "", true
|
||||
}
|
||||
|
||||
tmpl, err := io.ReadAll(reader)
|
||||
if err != nil {
|
||||
l.Warn(logs.FailedToReadIndexPageTemplate, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
l.Warn(logs.FailedToReadIndexPageTemplate, zap.Error(err))
|
||||
return "", true
|
||||
}
|
||||
|
||||
l.Info(logs.SetCustomIndexPageTemplate, logs.TagField(logs.TagApp))
|
||||
l.Info(logs.SetCustomIndexPageTemplate)
|
||||
return string(tmpl), true
|
||||
}
|
||||
|
||||
|
@ -611,7 +674,7 @@ func fetchServers(v *viper.Viper, log *zap.Logger) []ServerInfo {
|
|||
}
|
||||
|
||||
if _, ok := seen[serverInfo.Address]; ok {
|
||||
log.Warn(logs.WarnDuplicateAddress, zap.String("address", serverInfo.Address), logs.TagField(logs.TagApp))
|
||||
log.Warn(logs.WarnDuplicateAddress, zap.String("address", serverInfo.Address))
|
||||
continue
|
||||
}
|
||||
seen[serverInfo.Address] = struct{}{}
|
||||
|
@ -624,7 +687,7 @@ func fetchServers(v *viper.Viper, log *zap.Logger) []ServerInfo {
|
|||
func (a *app) initPools(ctx context.Context) {
|
||||
key, err := getFrostFSKey(a.config(), a.log)
|
||||
if err != nil {
|
||||
a.log.Fatal(logs.CouldNotLoadFrostFSPrivateKey, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
a.log.Fatal(logs.CouldNotLoadFrostFSPrivateKey, zap.Error(err))
|
||||
}
|
||||
|
||||
var prm pool.InitParameters
|
||||
|
@ -632,8 +695,7 @@ func (a *app) initPools(ctx context.Context) {
|
|||
|
||||
prm.SetKey(&key.PrivateKey)
|
||||
prmTree.SetKey(key)
|
||||
a.log.Info(logs.UsingCredentials, zap.String("FrostFS", hex.EncodeToString(key.PublicKey().Bytes())),
|
||||
logs.TagField(logs.TagApp))
|
||||
a.log.Info(logs.UsingCredentials, zap.String("FrostFS", hex.EncodeToString(key.PublicKey().Bytes())))
|
||||
|
||||
for _, peer := range fetchPeers(a.log, a.config()) {
|
||||
prm.AddNode(peer)
|
||||
|
@ -673,8 +735,8 @@ func (a *app) initPools(ctx context.Context) {
|
|||
errorThreshold = defaultPoolErrorThreshold
|
||||
}
|
||||
prm.SetErrorThreshold(errorThreshold)
|
||||
prm.SetLogger(a.log.With(logs.TagField(logs.TagDatapath)))
|
||||
prmTree.SetLogger(a.log.With(logs.TagField(logs.TagDatapath)))
|
||||
prm.SetLogger(a.log)
|
||||
prmTree.SetLogger(a.log)
|
||||
|
||||
prmTree.SetMaxRequestAttempts(a.config().GetInt(cfgTreePoolMaxAttempts))
|
||||
|
||||
|
@ -682,19 +744,17 @@ func (a *app) initPools(ctx context.Context) {
|
|||
grpc.WithUnaryInterceptor(grpctracing.NewUnaryClientInteceptor()),
|
||||
grpc.WithStreamInterceptor(grpctracing.NewStreamClientInterceptor()),
|
||||
grpc.WithContextDialer(a.settings.dialerSource.GrpcContextDialer()),
|
||||
grpc.WithChainUnaryInterceptor(qostagging.NewUnaryClientInteceptor()),
|
||||
grpc.WithChainStreamInterceptor(qostagging.NewStreamClientInterceptor()),
|
||||
}
|
||||
prm.SetGRPCDialOptions(interceptors...)
|
||||
prmTree.SetGRPCDialOptions(interceptors...)
|
||||
|
||||
p, err := pool.NewPool(prm)
|
||||
if err != nil {
|
||||
a.log.Fatal(logs.FailedToCreateConnectionPool, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
a.log.Fatal(logs.FailedToCreateConnectionPool, zap.Error(err))
|
||||
}
|
||||
|
||||
if err = p.Dial(ctx); err != nil {
|
||||
a.log.Fatal(logs.FailedToDialConnectionPool, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
a.log.Fatal(logs.FailedToDialConnectionPool, zap.Error(err))
|
||||
}
|
||||
|
||||
if a.config().GetBool(cfgFeaturesTreePoolNetmapSupport) {
|
||||
|
@ -703,10 +763,10 @@ func (a *app) initPools(ctx context.Context) {
|
|||
|
||||
treePool, err := treepool.NewPool(prmTree)
|
||||
if err != nil {
|
||||
a.log.Fatal(logs.FailedToCreateTreePool, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
a.log.Fatal(logs.FailedToCreateTreePool, zap.Error(err))
|
||||
}
|
||||
if err = treePool.Dial(ctx); err != nil {
|
||||
a.log.Fatal(logs.FailedToDialTreePool, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
a.log.Fatal(logs.FailedToDialTreePool, zap.Error(err))
|
||||
}
|
||||
|
||||
a.pool = p
|
||||
|
@ -737,8 +797,7 @@ func fetchPeers(l *zap.Logger, v *viper.Viper) []pool.NodeParam {
|
|||
l.Info(logs.AddedStoragePeer,
|
||||
zap.Int("priority", priority),
|
||||
zap.String("address", address),
|
||||
zap.Float64("weight", weight),
|
||||
logs.TagField(logs.TagApp))
|
||||
zap.Float64("weight", weight))
|
||||
}
|
||||
|
||||
return nodes
|
||||
|
@ -770,15 +829,6 @@ func getNetmapCacheOptions(v *viper.Viper, l *zap.Logger) *cache.NetmapCacheConf
|
|||
return cacheCfg
|
||||
}
|
||||
|
||||
func getCORSCacheOptions(v *viper.Viper, l *zap.Logger) *cache.Config {
|
||||
cacheCfg := cache.DefaultCORSConfig(l)
|
||||
|
||||
cacheCfg.Lifetime = fetchCacheLifetime(v, l, cfgCORSCacheLifetime, cacheCfg.Lifetime)
|
||||
cacheCfg.Size = fetchCacheSize(v, l, cfgCORSCacheSize, cacheCfg.Size)
|
||||
|
||||
return cacheCfg
|
||||
}
|
||||
|
||||
func fetchCacheLifetime(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue time.Duration) time.Duration {
|
||||
if v.IsSet(cfgEntry) {
|
||||
lifetime := v.GetDuration(cfgEntry)
|
||||
|
@ -786,8 +836,7 @@ func fetchCacheLifetime(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultV
|
|||
l.Error(logs.InvalidLifetimeUsingDefaultValue,
|
||||
zap.String("parameter", cfgEntry),
|
||||
zap.Duration("value in config", lifetime),
|
||||
zap.Duration("default", defaultValue),
|
||||
logs.TagField(logs.TagApp))
|
||||
zap.Duration("default", defaultValue))
|
||||
} else {
|
||||
return lifetime
|
||||
}
|
||||
|
@ -803,8 +852,7 @@ func fetchCacheSize(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue
|
|||
l.Error(logs.InvalidCacheSizeUsingDefaultValue,
|
||||
zap.String("parameter", cfgEntry),
|
||||
zap.Int("value in config", size),
|
||||
zap.Int("default", defaultValue),
|
||||
logs.TagField(logs.TagApp))
|
||||
zap.Int("default", defaultValue))
|
||||
} else {
|
||||
return size
|
||||
}
|
||||
|
@ -816,7 +864,7 @@ func fetchCacheSize(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue
|
|||
func getDialerSource(logger *zap.Logger, cfg *viper.Viper) *internalnet.DialerSource {
|
||||
source, err := internalnet.NewDialerSource(fetchMultinetConfig(cfg, logger))
|
||||
if err != nil {
|
||||
logger.Fatal(logs.FailedToLoadMultinetConfig, zap.Error(err), logs.TagField(logs.TagApp))
|
||||
logger.Fatal(logs.FailedToLoadMultinetConfig, zap.Error(err))
|
||||
}
|
||||
return source
|
||||
}
|
||||
|
@ -874,18 +922,3 @@ func fetchArchiveCompression(v *viper.Viper) bool {
|
|||
}
|
||||
return v.GetBool(cfgArchiveCompression)
|
||||
}
|
||||
|
||||
func fetchCORSConfig(v *viper.Viper) *data.CORSRule {
|
||||
if !v.IsSet(cfgCORS) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &data.CORSRule{
|
||||
AllowedOrigins: []string{v.GetString(cfgCORSAllowOrigin)},
|
||||
AllowedMethods: v.GetStringSlice(cfgCORSAllowMethods),
|
||||
AllowedHeaders: v.GetStringSlice(cfgCORSAllowHeaders),
|
||||
ExposeHeaders: v.GetStringSlice(cfgCORSExposeHeaders),
|
||||
AllowedCredentials: v.GetBool(cfgCORSAllowCredentials),
|
||||
MaxAgeSeconds: fetchCORSMaxAge(v),
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,9 +20,6 @@ HTTP_GW_LOGGER_SAMPLING_ENABLED=false
|
|||
HTTP_GW_LOGGER_SAMPLING_INITIAL=100
|
||||
HTTP_GW_LOGGER_SAMPLING_THEREAFTER=100
|
||||
HTTP_GW_LOGGER_SAMPLING_INTERVAL=1s
|
||||
HTTP_GW_LOGGER_TAGS_0_NAMES=app,datapath
|
||||
HTTP_GW_LOGGER_TAGS_0_LEVEL=level
|
||||
HTTP_GW_LOGGER_TAGS_1_NAME=external_storage_tree
|
||||
|
||||
HTTP_GW_SERVER_0_ADDRESS=0.0.0.0:443
|
||||
HTTP_GW_SERVER_0_TLS_ENABLED=false
|
||||
|
@ -130,9 +127,6 @@ HTTP_GW_CACHE_BUCKETS_LIFETIME=1m
|
|||
HTTP_GW_CACHE_BUCKETS_SIZE=1000
|
||||
# Cache which stores netmap
|
||||
HTTP_GW_CACHE_NETMAP_LIFETIME=1m
|
||||
# Cache which stores container CORS configurations
|
||||
HTTP_GW_CACHE_CORS_LIFETIME=5m
|
||||
HTTP_GW_CACHE_CORS_SIZE=1000
|
||||
|
||||
# Header to determine zone to resolve bucket name
|
||||
HTTP_GW_RESOLVE_BUCKET_NAMESPACE_HEADER=X-Frostfs-Namespace
|
||||
|
@ -176,6 +170,3 @@ HTTP_GW_INDEX_PAGE_TEMPLATE_PATH=internal/handler/templates/index.gotmpl
|
|||
HTTP_GW_FEATURES_ENABLE_FILEPATH_FALLBACK=false
|
||||
# Enable using new version of tree pool, which uses netmap to select nodes, for requests to tree service
|
||||
HTTP_GW_FEATURES_TREE_POOL_NETMAP_SUPPORT=true
|
||||
|
||||
# Containers properties
|
||||
HTTP_GW_CONTAINERS_CORS=AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj
|
||||
|
|
|
@ -29,9 +29,6 @@ logger:
|
|||
initial: 100
|
||||
thereafter: 100
|
||||
interval: 1s
|
||||
tags:
|
||||
- names: app,datapath
|
||||
level: debug
|
||||
|
||||
server:
|
||||
- address: 0.0.0.0:8080
|
||||
|
@ -155,10 +152,6 @@ cache:
|
|||
# Cache which stores netmap
|
||||
netmap:
|
||||
lifetime: 1m
|
||||
# Cache which stores container CORS configurations
|
||||
cors:
|
||||
lifetime: 5m
|
||||
size: 1000
|
||||
|
||||
resolve_bucket:
|
||||
namespace_header: X-Frostfs-Namespace
|
||||
|
@ -194,6 +187,3 @@ features:
|
|||
enable_filepath_fallback: false
|
||||
# Enable using new version of tree pool, which uses netmap to select nodes, for requests to tree service
|
||||
tree_pool_netmap_support: true
|
||||
|
||||
containers:
|
||||
cors: AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj
|
||||
|
|
26 docs/api.md

@@ -94,8 +94,6 @@ The `filename` field from the multipart form will be set as `FileName` attribute
|--------|--------------------------------------------------|
| 200    | Object created successfully.                      |
| 400    | Some error occurred during object uploading.      |
| 403    | Access denied.                                    |
| 409    | Cannot upload object: quota limit reached.        |

## Get object

@@ -143,7 +141,6 @@ Get an object (payload and attributes) by an address.
|--------|------------------------------------------------|
| 200    | Object retrieved successfully.                  |
| 400    | Some error occurred during object downloading.  |
| 403    | Access denied.                                  |
| 404    | Container or object not found.                  |

###### Body

@@ -186,7 +183,6 @@ Get an object attributes by an address.
|--------|---------------------------------------------------|
| 200    | Object head retrieved successfully.                |
| 400    | Some error occurred during object HEAD operation.  |
| 403    | Access denied.                                     |
| 404    | Container or object not found.                     |

## Search object

@@ -237,7 +233,6 @@ If more than one object is found, an arbitrary one will be returned.
|--------|------------------------------------------------|
| 200    | Object retrieved successfully.                  |
| 400    | Some error occurred during object downloading.  |
| 403    | Access denied.                                  |
| 404    | Container or object not found.                  |

#### HEAD

@@ -274,7 +269,6 @@ If more than one object is found, an arbitrary one will be used to get attribute
|--------|---------------------------------------|
| 200    | Object head retrieved successfully.   |
| 400    | Some error occurred during operation. |
| 403    | Access denied.                        |
| 404    | Container or object not found.        |

## Download archive

@@ -310,16 +304,16 @@ Archive can be compressed (see http-gw [configuration](gate-configuration.md#arc

###### Headers

| Header                | Description                                                                                     |
|-----------------------|-------------------------------------------------------------------------------------------------|
| `Content-Disposition` | Indicates how browsers should treat the file (`attachment`). Sets `filename` to `archive.zip`.  |
| `Content-Type`        | Indicates the content type of the object. Set to `application/zip`.                             |

###### Status codes

| Status | Description                                    |
|--------|------------------------------------------------|
| 200    | Object retrieved successfully.                 |
| 400    | Some error occurred during object downloading. |
| 403    | Access denied.                                 |
| 404    | Container or objects not found.                |

| Status | Description                                         |
|--------|-----------------------------------------------------|
| 200    | Object retrieved successfully.                      |
| 400    | Some error occurred during object downloading.      |
| 404    | Container or objects not found.                     |
| 500    | Some inner error (e.g. error on streaming objects). |
@@ -60,7 +60,6 @@ $ cat http.log
| `index_page` | [Index page configuration](#index_page-section) |
| `multinet`   | [Multinet configuration](#multinet-section)     |
| `features`   | [Features configuration](#features-section)     |
| `containers` | [Containers configuration](#containers-section) |

# General section

@@ -175,10 +174,6 @@ logger:
    initial: 100
    thereafter: 100
    interval: 1s
  tags:
    - names: "app,datapath"
      level: info
    - names: "external_storage_tree"
```

| Parameter | Type | SIGHUP reload | Default value | Description |

@@ -189,30 +184,6 @@ logger:
| `sampling.initial`    | `int`      | no  | '100' | Sampling count of first log entries.            |
| `sampling.thereafter` | `int`      | no  | '100' | Sampling count of entries after an `interval`.  |
| `sampling.interval`   | `duration` | no  | '1s'  | Sampling interval of messaging similar entries. |
| `tags`                | `[]Tag`    | yes |       | Tagged log entries that should be additionally logged (see the next section for available tags). |

## Tags

Some log entries can hurt performance, so they are only emitted when explicitly enabled with the `logger.tags`
parameter (a sketch of how these entries are resolved is shown after the tag list below). Available tags:

```yaml
tags:
  - names: "app,datapath"
    level: info
```

| Parameter | Type       | SIGHUP reload | Default value             | Description                                                                                                |
|-----------|------------|---------------|---------------------------|------------------------------------------------------------------------------------------------------------|
| `names`   | `[]string` | yes           |                           | Tag names separated by `,`. See the `Tag values` section below for possible values.                        |
| `level`   | `string`   | yes           | Value from `logger.level` | Logging level for the specific tag. Possible values: `debug`, `info`, `warn`, `dpanic`, `panic`, `fatal`.  |

### Tag values

* `app` - common application logs (enabled by default).
* `datapath` - main logic of application (enabled by default).
* `external_storage` - external interaction with storage node (enabled by default).
* `external_storage_tree` - external interaction with tree service in storage node (enabled by default).
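For reference, the `logger.tags.<N>.names` / `logger.tags.<N>.level` entries are read by index until an entry with an empty `names` value is found, mirroring `fetchLogTagsConfig` elsewhere in this change set. A minimal sketch of that resolution; the viper instance and the default level are assumptions supplied by the caller:

```go
package config

import (
	"fmt"
	"strings"

	"github.com/spf13/viper"
	"go.uber.org/zap/zapcore"
)

// tagLevels walks logger.tags.<i>.names / logger.tags.<i>.level until an entry
// with an empty names value is found and returns the resulting tag -> level map.
func tagLevels(v *viper.Viper, defaultLvl zapcore.Level) (map[string]zapcore.Level, error) {
	res := make(map[string]zapcore.Level)
	for i := 0; ; i++ {
		names := v.GetString(fmt.Sprintf("logger.tags.%d.names", i))
		if names == "" {
			break
		}
		lvl := defaultLvl
		if s := v.GetString(fmt.Sprintf("logger.tags.%d.level", i)); s != "" {
			if err := lvl.Set(s); err != nil {
				return nil, fmt.Errorf("unknown level %q for tag entry %d", s, i)
			}
		}
		for _, name := range strings.Split(names, ",") {
			if name = strings.TrimSpace(name); name != "" {
				res[name] = lvl
			}
		}
	}
	return res, nil
}
```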
# `web` section

@@ -382,16 +353,12 @@ cache:
    size: 1000
  netmap:
    lifetime: 1m
  cors:
    lifetime: 5m
    size: 1000
```

| Parameter | Type                              | Default value                   | Description                                                               |
|-----------|-----------------------------------|---------------------------------|---------------------------------------------------------------------------|
| `buckets` | [Cache config](#cache-subsection) | `lifetime: 60s`<br>`size: 1000` | Cache which contains mapping of bucket name to bucket info.               |
| `netmap`  | [Cache config](#cache-subsection) | `lifetime: 1m`                  | Cache which stores netmap. `netmap.size` isn't applicable for this cache. |
| `cors`    | [Cache config](#cache-subsection) | `lifetime: 5m`<br>`size: 1000`  | Cache which stores container CORS configurations.                         |

#### `cache` subsection

@@ -445,7 +412,7 @@ index_page:
# `cors` section

Parameters for CORS (used in OPTIONS requests and responses in all handlers).
If values are not set, settings from the CORS container will be used.
If values are not set, headers will not be included in the response.

```yaml
cors:

@@ -519,16 +486,3 @@ features:
|-------------------------------------|--------|---------------|---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `features.enable_filepath_fallback` | `bool` | yes           | `false`       | Enable using a fallback path to search for an object by attribute. If the value of the `FilePath` attribute in the request contains no `/` symbols or a single leading `/` symbol and the object was not found, then an attempt is made to search for the object by the `FileName` attribute. |
| `features.tree_pool_netmap_support` | `bool` | no            | `false`       | Enable using the new version of the tree pool, which uses netmap to select nodes, for requests to the tree service.                                                                                                                                                                           |

# `containers` section

Section for well-known containers to store data and settings (a sketch of how the CORS container is used follows the table below).

```yaml
containers:
  cors: AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj
```

| Parameter | Type     | SIGHUP reload | Default value | Description                              |
|-----------|----------|---------------|---------------|------------------------------------------|
| `cors`    | `string` | no            |               | Container name for CORS configurations.  |
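For context, the container referenced by `containers.cors` holds one `/<container-id>.cors` object per user container; the gateway picks the newest version and XML-decodes it, as implemented by `getCORSConfig`/`getLastCORSObject` in this change set. Below is a simplified sketch of that lookup; the `corsReader` interface is a hypothetical stand-in for the gateway's own FrostFS client and `PrmObject*` types, and version ordering is assumed to be handled behind that interface:

```go
package cors

import (
	"context"
	"encoding/xml"
	"fmt"
	"io"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// corsReader is a hypothetical, minimal view of the FrostFS client used in this sketch.
type corsReader interface {
	// SearchByFilePath returns IDs of objects in cnr whose FilePath attribute equals
	// filePath, newest version last (ordering is assumed here).
	SearchByFilePath(ctx context.Context, cnr cid.ID, filePath string) ([]oid.ID, error)
	// GetPayload returns the payload of the object at addr.
	GetPayload(ctx context.Context, addr oid.Address) (io.ReadCloser, error)
}

// loadCORS fetches and decodes the CORS configuration stored for container target
// inside the well-known CORS container corsCnr.
func loadCORS(ctx context.Context, r corsReader, corsCnr, target cid.ID) (*data.CORSConfiguration, error) {
	ids, err := r.SearchByFilePath(ctx, corsCnr, fmt.Sprintf("/%s.cors", target))
	if err != nil {
		return nil, fmt.Errorf("search cors versions: %w", err)
	}
	if len(ids) == 0 {
		return nil, fmt.Errorf("no CORS objects found for %s", target)
	}

	var addr oid.Address
	addr.SetContainer(corsCnr)
	addr.SetObject(ids[len(ids)-1]) // newest version, per the interface contract above

	payload, err := r.GetPayload(ctx, addr)
	if err != nil {
		return nil, fmt.Errorf("get cors object '%s': %w", addr.EncodeToString(), err)
	}
	defer payload.Close()

	cfg := &data.CORSConfiguration{}
	if err := xml.NewDecoder(payload).Decode(cfg); err != nil {
		return nil, fmt.Errorf("decode cors object: %w", err)
	}
	return cfg, nil
}
```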
9 go.mod
@ -1,11 +1,10 @@
|
|||
module git.frostfs.info/TrueCloudLab/frostfs-http-gw
|
||||
|
||||
go 1.23
|
||||
go 1.22
|
||||
|
||||
require (
|
||||
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241125133852-37bd75821121
|
||||
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250317082814-87bb55f992dc
|
||||
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250130095343-593dd77d841a
|
||||
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
|
||||
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
|
||||
github.com/bluele/gcache v0.0.2
|
||||
|
@ -27,6 +26,7 @@ require (
|
|||
go.opentelemetry.io/otel/trace v1.31.0
|
||||
go.uber.org/zap v1.27.0
|
||||
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
|
||||
golang.org/x/net v0.30.0
|
||||
golang.org/x/sys v0.28.0
|
||||
google.golang.org/grpc v1.69.2
|
||||
)
|
||||
|
@ -125,7 +125,6 @@ require (
|
|||
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/crypto v0.31.0 // indirect
|
||||
golang.org/x/net v0.30.0 // indirect
|
||||
golang.org/x/sync v0.10.0 // indirect
|
||||
golang.org/x/term v0.27.0 // indirect
|
||||
golang.org/x/text v0.21.0 // indirect
|
||||
|
|
10 go.sum
@ -42,12 +42,10 @@ git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f
|
|||
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241125133852-37bd75821121 h1:/Z8DfbLZXp7exUQWUKoG/9tbFdI9d5lV1qSReaYoG8I=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241125133852-37bd75821121/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe h1:81gDNdWNLP24oMQukRiCE9R1wGSh0l0dRq3F1W+Oesc=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250317082814-87bb55f992dc h1:fS6Yp4GvI+C22UrWz9oqJXwvQw5Q6SmADIY4H9eIQsc=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250317082814-87bb55f992dc/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 h1:9bvBDLApbbO5sXBKdODpE9tzy3HV99nXxkDWNn22rdI=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250130095343-593dd77d841a h1:Ud+3zz4WP9HPxEQxDPJZPpiPdm30nDNSKucsWP9L54M=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250130095343-593dd77d841a/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
|
||||
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
|
||||
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
|
||||
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8=
|
||||
|
|
4 internal/cache/buckets.go (vendored)
@ -72,7 +72,7 @@ func (o *BucketCache) GetByCID(cnrID cid.ID) *data.BucketInfo {
|
|||
key, ok := entry.(string)
|
||||
if !ok {
|
||||
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
||||
zap.String("expected", fmt.Sprintf("%T", key)), logs.TagField(logs.TagDatapath))
|
||||
zap.String("expected", fmt.Sprintf("%T", key)))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -88,7 +88,7 @@ func (o *BucketCache) get(key string) *data.BucketInfo {
|
|||
result, ok := entry.(*data.BucketInfo)
|
||||
if !ok {
|
||||
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
||||
zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
|
||||
zap.String("expected", fmt.Sprintf("%T", result)))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
62 internal/cache/cors.go (vendored)
@ -1,62 +0,0 @@
|
|||
package cache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
"github.com/bluele/gcache"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// CORSCache contains cache with CORS objects.
|
||||
type CORSCache struct {
|
||||
cache gcache.Cache
|
||||
logger *zap.Logger
|
||||
}
|
||||
|
||||
const (
|
||||
// DefaultCORSCacheSize is a default maximum number of entries in cache.
|
||||
DefaultCORSCacheSize = 1e3
|
||||
// DefaultCORSCacheLifetime is a default lifetime of entries in cache.
|
||||
DefaultCORSCacheLifetime = 5 * time.Minute
|
||||
)
|
||||
|
||||
// DefaultCORSConfig returns new default cache expiration values.
|
||||
func DefaultCORSConfig(logger *zap.Logger) *Config {
|
||||
return &Config{
|
||||
Size: DefaultCORSCacheSize,
|
||||
Lifetime: DefaultCORSCacheLifetime,
|
||||
Logger: logger,
|
||||
}
|
||||
}
|
||||
|
||||
// NewCORSCache creates an object of CORSCache.
|
||||
func NewCORSCache(config *Config) *CORSCache {
|
||||
gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
|
||||
return &CORSCache{cache: gc, logger: config.Logger}
|
||||
}
|
||||
|
||||
// Get returns a cached object.
|
||||
func (o *CORSCache) Get(cnrID cid.ID) *data.CORSConfiguration {
|
||||
entry, err := o.cache.Get(cnrID)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
result, ok := entry.(*data.CORSConfiguration)
|
||||
if !ok {
|
||||
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
||||
zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
|
||||
return nil
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// Put puts an object to cache.
|
||||
func (o *CORSCache) Put(cnrID cid.ID, cors *data.CORSConfiguration) error {
|
||||
return o.cache.Set(cnrID, cors)
|
||||
}
|
2 internal/cache/netmap.go (vendored)
@ -53,7 +53,7 @@ func (c *NetmapCache) Get() *netmap.NetMap {
|
|||
result, ok := entry.(netmap.NetMap)
|
||||
if !ok {
|
||||
c.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
||||
zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
|
||||
zap.String("expected", fmt.Sprintf("%T", result)))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -1,18 +0,0 @@
|
|||
package data
|
||||
|
||||
type (
|
||||
// CORSConfiguration stores CORS configuration of a request.
|
||||
CORSConfiguration struct {
|
||||
CORSRules []CORSRule `xml:"CORSRule" json:"CORSRules"`
|
||||
}
|
||||
|
||||
// CORSRule stores rules for CORS configuration.
|
||||
CORSRule struct {
|
||||
AllowedHeaders []string `xml:"AllowedHeader" json:"AllowedHeaders"`
|
||||
AllowedMethods []string `xml:"AllowedMethod" json:"AllowedMethods"`
|
||||
AllowedOrigins []string `xml:"AllowedOrigin" json:"AllowedOrigins"`
|
||||
ExposeHeaders []string `xml:"ExposeHeader" json:"ExposeHeaders"`
|
||||
MaxAgeSeconds int `xml:"MaxAgeSeconds,omitempty" json:"MaxAgeSeconds,omitempty"`
|
||||
AllowedCredentials bool `xml:"AllowedCredentials,omitempty" json:"AllowedCredentials,omitempty"`
|
||||
}
|
||||
)
|
|
@ -223,14 +223,14 @@ func (h *Handler) getDirObjectsNative(ctx context.Context, bucketInfo *data.Buck
|
|||
return nil, err
|
||||
}
|
||||
|
||||
log := h.reqLogger(ctx)
|
||||
log := utils.GetReqLogOrDefault(ctx, h.log)
|
||||
dirs := make(map[string]struct{})
|
||||
result := &GetObjectsResponse{
|
||||
objects: make([]ResponseObject, 0, 100),
|
||||
}
|
||||
for objExt := range resp {
|
||||
if objExt.Error != nil {
|
||||
log.Error(logs.FailedToHeadObject, zap.Error(objExt.Error), logs.TagField(logs.TagExternalStorage))
|
||||
log.Error(logs.FailedToHeadObject, zap.Error(objExt.Error))
|
||||
result.hasErrors = true
|
||||
continue
|
||||
}
|
||||
|
@ -258,7 +258,7 @@ func (h *Handler) headDirObjects(ctx context.Context, cnrID cid.ID, objectIDs Re
|
|||
|
||||
go func() {
|
||||
defer close(res)
|
||||
log := h.reqLogger(ctx).With(
|
||||
log := utils.GetReqLogOrDefault(ctx, h.log).With(
|
||||
zap.String("cid", cnrID.EncodeToString()),
|
||||
zap.String("path", basePath),
|
||||
)
|
||||
|
@ -273,7 +273,7 @@ func (h *Handler) headDirObjects(ctx context.Context, cnrID cid.ID, objectIDs Re
|
|||
})
|
||||
if err != nil {
|
||||
wg.Done()
|
||||
log.Warn(logs.FailedToSubmitTaskToPool, zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
log.Warn(logs.FailedToSumbitTaskToPool, zap.Error(err))
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
|
@ -283,7 +283,7 @@ func (h *Handler) headDirObjects(ctx context.Context, cnrID cid.ID, objectIDs Re
|
|||
}
|
||||
})
|
||||
if err != nil {
|
||||
log.Error(logs.FailedToIterateOverResponse, zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
log.Error(logs.FailedToIterateOverResponse, zap.Error(err))
|
||||
}
|
||||
wg.Wait()
|
||||
}()
|
||||
|
@ -328,18 +328,20 @@ type browseParams struct {
|
|||
listObjects func(ctx context.Context, bucketName *data.BucketInfo, prefix string) (*GetObjectsResponse, error)
|
||||
}
|
||||
|
||||
func (h *Handler) browseObjects(ctx context.Context, req *fasthttp.RequestCtx, p browseParams) {
|
||||
func (h *Handler) browseObjects(c *fasthttp.RequestCtx, p browseParams) {
|
||||
const S3Protocol = "s3"
|
||||
const FrostfsProtocol = "frostfs"
|
||||
|
||||
ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(
|
||||
ctx := utils.GetContextFromRequest(c)
|
||||
reqLog := utils.GetReqLogOrDefault(ctx, h.log)
|
||||
log := reqLog.With(
|
||||
zap.String("bucket", p.bucketInfo.Name),
|
||||
zap.String("container", p.bucketInfo.CID.EncodeToString()),
|
||||
zap.String("prefix", p.prefix),
|
||||
))
|
||||
)
|
||||
resp, err := p.listObjects(ctx, p.bucketInfo, p.prefix)
|
||||
if err != nil {
|
||||
h.logAndSendError(ctx, req, logs.FailedToListObjects, err)
|
||||
logAndSendBucketError(c, log, err)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -358,7 +360,7 @@ func (h *Handler) browseObjects(ctx context.Context, req *fasthttp.RequestCtx, p
|
|||
"parentDir": parentDir,
|
||||
}).Parse(h.config.IndexPageTemplate())
|
||||
if err != nil {
|
||||
h.logAndSendError(ctx, req, logs.FailedToParseTemplate, err)
|
||||
logAndSendBucketError(c, log, err)
|
||||
return
|
||||
}
|
||||
bucketName := p.bucketInfo.Name
|
||||
|
@ -367,14 +369,14 @@ func (h *Handler) browseObjects(ctx context.Context, req *fasthttp.RequestCtx, p
|
|||
bucketName = p.bucketInfo.CID.EncodeToString()
|
||||
protocol = FrostfsProtocol
|
||||
}
|
||||
if err = tmpl.Execute(req, &BrowsePageData{
|
||||
if err = tmpl.Execute(c, &BrowsePageData{
|
||||
Container: bucketName,
|
||||
Prefix: p.prefix,
|
||||
Objects: objects,
|
||||
Protocol: protocol,
|
||||
HasErrors: resp.hasErrors,
|
||||
}); err != nil {
|
||||
h.logAndSendError(ctx, req, logs.FailedToExecuteTemplate, err)
|
||||
logAndSendBucketError(c, log, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,353 +0,0 @@
|
|||
package handler
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"slices"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
|
||||
qostagging "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"github.com/valyala/fasthttp"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const (
|
||||
internalIOTag = "internal"
|
||||
corsFilePathTemplate = "/%s.cors"
|
||||
wildcard = "*"
|
||||
)
|
||||
|
||||
var errNoCORS = errors.New("no CORS objects found")
|
||||
|
||||
func (h *Handler) Preflight(req *fasthttp.RequestCtx) {
|
||||
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.Preflight")
|
||||
defer span.End()
|
||||
|
||||
ctx = qostagging.ContextWithIOTag(ctx, internalIOTag)
|
||||
cidParam, _ := req.UserValue("cid").(string)
|
||||
reqLog := h.reqLogger(ctx)
|
||||
log := reqLog.With(zap.String("cid", cidParam))
|
||||
|
||||
origin := req.Request.Header.Peek(fasthttp.HeaderOrigin)
|
||||
if len(origin) == 0 {
|
||||
log.Error(logs.EmptyOriginRequestHeader, logs.TagField(logs.TagDatapath))
|
||||
ResponseError(req, "Origin request header needed", fasthttp.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
method := req.Request.Header.Peek(fasthttp.HeaderAccessControlRequestMethod)
|
||||
if len(method) == 0 {
|
||||
log.Error(logs.EmptyAccessControlRequestMethodHeader, logs.TagField(logs.TagDatapath))
|
||||
ResponseError(req, "Access-Control-Request-Method request header needed", fasthttp.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
corsRule := h.config.CORS()
|
||||
if corsRule != nil {
|
||||
setCORSHeadersFromRule(req, corsRule)
|
||||
return
|
||||
}
|
||||
|
||||
corsConfig, err := h.getCORSConfig(ctx, log, cidParam)
|
||||
if err != nil {
|
||||
log.Error(logs.CouldNotGetCORSConfiguration, zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
status := fasthttp.StatusInternalServerError
|
||||
if errors.Is(err, errNoCORS) {
|
||||
status = fasthttp.StatusNotFound
|
||||
}
|
||||
ResponseError(req, "could not get CORS configuration: "+err.Error(), status)
|
||||
return
|
||||
}
|
||||
|
||||
var headers []string
|
||||
requestHeaders := req.Request.Header.Peek(fasthttp.HeaderAccessControlRequestHeaders)
|
||||
if len(requestHeaders) > 0 {
|
||||
headers = strings.Split(string(requestHeaders), ", ")
|
||||
}
|
||||
|
||||
for _, rule := range corsConfig.CORSRules {
|
||||
for _, o := range rule.AllowedOrigins {
|
||||
if o == string(origin) || o == wildcard || (strings.Contains(o, "*") && match(o, string(origin))) {
|
||||
for _, m := range rule.AllowedMethods {
|
||||
if m == string(method) {
|
||||
if !checkSubslice(rule.AllowedHeaders, headers) {
|
||||
continue
|
||||
}
|
||||
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, string(origin))
|
||||
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowMethods, strings.Join(rule.AllowedMethods, ", "))
|
||||
if headers != nil {
|
||||
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowHeaders, string(requestHeaders))
|
||||
}
|
||||
if rule.ExposeHeaders != nil {
|
||||
req.Response.Header.Set(fasthttp.HeaderAccessControlExposeHeaders, strings.Join(rule.ExposeHeaders, ", "))
|
||||
}
|
||||
if rule.MaxAgeSeconds > 0 || rule.MaxAgeSeconds == -1 {
|
||||
req.Response.Header.Set(fasthttp.HeaderAccessControlMaxAge, strconv.Itoa(rule.MaxAgeSeconds))
|
||||
}
|
||||
if o != wildcard {
|
||||
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowCredentials, "true")
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
log.Error(logs.CORSRuleWasNotMatched, logs.TagField(logs.TagDatapath))
|
||||
ResponseError(req, "Forbidden", fasthttp.StatusForbidden)
|
||||
}
|
||||
|
||||
func (h *Handler) SetCORSHeaders(req *fasthttp.RequestCtx) {
|
||||
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.SetCORSHeaders")
|
||||
defer span.End()
|
||||
|
||||
origin := req.Request.Header.Peek(fasthttp.HeaderOrigin)
|
||||
if len(origin) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
method := req.Request.Header.Peek(fasthttp.HeaderAccessControlRequestMethod)
|
||||
if len(method) == 0 {
|
||||
method = req.Method()
|
||||
}
|
||||
|
||||
ctx = qostagging.ContextWithIOTag(ctx, internalIOTag)
|
||||
cidParam, _ := req.UserValue("cid").(string)
|
||||
reqLog := h.reqLogger(ctx)
|
||||
log := reqLog.With(zap.String("cid", cidParam))
|
||||
|
||||
corsRule := h.config.CORS()
|
||||
if corsRule != nil {
|
||||
setCORSHeadersFromRule(req, corsRule)
|
||||
return
|
||||
}
|
||||
|
||||
corsConfig, err := h.getCORSConfig(ctx, log, cidParam)
|
||||
if err != nil {
|
||||
log.Error(logs.CouldNotGetCORSConfiguration, zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
return
|
||||
}
|
||||
|
||||
var withCredentials bool
|
||||
if tkn, err := tokens.LoadBearerToken(ctx); err == nil && tkn != nil {
|
||||
withCredentials = true
|
||||
}
|
||||
|
||||
for _, rule := range corsConfig.CORSRules {
|
||||
for _, o := range rule.AllowedOrigins {
|
||||
if o == string(origin) || (strings.Contains(o, "*") && len(o) > 1 && match(o, string(origin))) {
|
||||
for _, m := range rule.AllowedMethods {
|
||||
if m == string(method) {
|
||||
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, string(origin))
|
||||
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowMethods, strings.Join(rule.AllowedMethods, ", "))
|
||||
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowCredentials, "true")
|
||||
req.Response.Header.Set(fasthttp.HeaderVary, fasthttp.HeaderOrigin)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
if o == wildcard {
|
||||
for _, m := range rule.AllowedMethods {
|
||||
if m == string(method) {
|
||||
if withCredentials {
|
||||
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, string(origin))
|
||||
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowCredentials, "true")
|
||||
req.Response.Header.Set(fasthttp.HeaderVary, fasthttp.HeaderOrigin)
|
||||
} else {
|
||||
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, o)
|
||||
}
|
||||
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowMethods, strings.Join(rule.AllowedMethods, ", "))
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Handler) getCORSConfig(ctx context.Context, log *zap.Logger, cidStr string) (*data.CORSConfiguration, error) {
|
||||
cnrID, err := h.resolveContainer(ctx, cidStr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("resolve container '%s': %w", cidStr, err)
|
||||
}
|
||||
|
||||
if cors := h.corsCache.Get(*cnrID); cors != nil {
|
||||
return cors, nil
|
||||
}
|
||||
|
||||
objID, err := h.getLastCORSObject(ctx, *cnrID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get last cors object: %w", err)
|
||||
}
|
||||
|
||||
var addr oid.Address
|
||||
addr.SetContainer(h.corsCnrID)
|
||||
addr.SetObject(objID)
|
||||
corsObj, err := h.frostfs.GetObject(ctx, PrmObjectGet{
|
||||
PrmAuth: PrmAuth{
|
||||
BearerToken: bearerToken(ctx),
|
||||
},
|
||||
Address: addr,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get cors object '%s': %w", addr.EncodeToString(), err)
|
||||
}
|
||||
|
||||
corsConfig := &data.CORSConfiguration{}
|
||||
if err = xml.NewDecoder(corsObj.Payload).Decode(corsConfig); err != nil {
|
||||
return nil, fmt.Errorf("decode cors object: %w", err)
|
||||
}
|
||||
|
||||
if err = h.corsCache.Put(*cnrID, corsConfig); err != nil {
|
||||
log.Warn(logs.CouldntCacheCors, zap.Error(err), logs.TagField(logs.TagDatapath))
|
||||
}
|
||||
|
||||
return corsConfig, nil
|
||||
}
|
||||
|
||||
func (h *Handler) getLastCORSObject(ctx context.Context, cnrID cid.ID) (oid.ID, error) {
|
||||
filters := object.NewSearchFilters()
|
||||
filters.AddRootFilter()
|
||||
filters.AddFilter(object.AttributeFilePath, fmt.Sprintf(corsFilePathTemplate, cnrID), object.MatchStringEqual)
|
||||
|
||||
prmAuth := PrmAuth{
|
||||
BearerToken: bearerToken(ctx),
|
||||
}
|
||||
res, err := h.frostfs.SearchObjects(ctx, PrmObjectSearch{
|
||||
PrmAuth: prmAuth,
|
||||
Container: h.corsCnrID,
|
||||
Filters: filters,
|
||||
})
|
||||
if err != nil {
|
||||
return oid.ID{}, fmt.Errorf("search cors versions: %w", err)
|
||||
}
|
||||
defer res.Close()
|
||||
|
||||
var (
|
||||
addr oid.Address
|
||||
obj *object.Object
|
||||
headErr error
|
||||
objs = make([]*object.Object, 0)
|
||||
)
|
||||
addr.SetContainer(h.corsCnrID)
|
||||
err = res.Iterate(func(id oid.ID) bool {
|
||||
addr.SetObject(id)
|
||||
obj, headErr = h.frostfs.HeadObject(ctx, PrmObjectHead{
|
||||
PrmAuth: prmAuth,
|
||||
Address: addr,
|
||||
})
|
||||
if headErr != nil {
|
||||
headErr = fmt.Errorf("head cors object '%s': %w", addr.EncodeToString(), headErr)
|
||||
return true
|
||||
}
|
||||
|
||||
objs = append(objs, obj)
|
||||
return false
|
||||
})
|
||||
if err != nil {
|
||||
return oid.ID{}, fmt.Errorf("iterate cors objects: %w", err)
|
||||
}
|
||||
|
||||
if headErr != nil {
|
||||
return oid.ID{}, headErr
|
||||
}
|
||||
|
||||
if len(objs) == 0 {
|
||||
return oid.ID{}, errNoCORS
|
||||
}
|
||||
|
||||
sort.Slice(objs, func(i, j int) bool {
|
||||
versionID1, _ := objs[i].ID()
|
||||
versionID2, _ := objs[j].ID()
|
||||
timestamp1 := utils.GetAttributeValue(objs[i].Attributes(), object.AttributeTimestamp)
|
||||
timestamp2 := utils.GetAttributeValue(objs[j].Attributes(), object.AttributeTimestamp)
|
||||
|
||||
if objs[i].CreationEpoch() != objs[j].CreationEpoch() {
|
||||
return objs[i].CreationEpoch() < objs[j].CreationEpoch()
|
||||
}
|
||||
|
||||
if len(timestamp1) > 0 && len(timestamp2) > 0 && timestamp1 != timestamp2 {
|
||||
unixTime1, err := strconv.ParseInt(timestamp1, 10, 64)
|
||||
if err != nil {
|
||||
return versionID1.EncodeToString() < versionID2.EncodeToString()
|
||||
}
|
||||
|
||||
unixTime2, err := strconv.ParseInt(timestamp2, 10, 64)
|
||||
if err != nil {
|
||||
return versionID1.EncodeToString() < versionID2.EncodeToString()
|
||||
}
|
||||
|
||||
return unixTime1 < unixTime2
|
||||
}
|
||||
|
||||
return versionID1.EncodeToString() < versionID2.EncodeToString()
|
||||
})
|
||||
|
||||
objID, _ := objs[len(objs)-1].ID()
|
||||
return objID, nil
|
||||
}
|
||||
|
||||
func setCORSHeadersFromRule(c *fasthttp.RequestCtx, cors *data.CORSRule) {
|
||||
c.Response.Header.Set(fasthttp.HeaderAccessControlMaxAge, strconv.Itoa(cors.MaxAgeSeconds))
|
||||
|
||||
if len(cors.AllowedOrigins) != 0 {
|
||||
c.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, cors.AllowedOrigins[0])
|
||||
}
|
||||
|
||||
if len(cors.AllowedMethods) != 0 {
|
||||
c.Response.Header.Set(fasthttp.HeaderAccessControlAllowMethods, strings.Join(cors.AllowedMethods, ", "))
|
||||
}
|
||||
|
||||
if len(cors.AllowedHeaders) != 0 {
|
||||
c.Response.Header.Set(fasthttp.HeaderAccessControlAllowHeaders, strings.Join(cors.AllowedHeaders, ", "))
|
||||
}
|
||||
|
||||
if len(cors.ExposeHeaders) != 0 {
|
||||
c.Response.Header.Set(fasthttp.HeaderAccessControlExposeHeaders, strings.Join(cors.ExposeHeaders, ", "))
|
||||
}
|
||||
|
||||
if cors.AllowedCredentials {
|
||||
c.Response.Header.Set(fasthttp.HeaderAccessControlAllowCredentials, "true")
|
||||
}
|
||||
}
|
||||
|
||||
func checkSubslice(slice []string, subSlice []string) bool {
|
||||
if slices.Contains(slice, wildcard) {
|
||||
return true
|
||||
}
|
||||
for _, r := range subSlice {
|
||||
if !sliceContains(slice, r) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func sliceContains(slice []string, str string) bool {
|
||||
for _, s := range slice {
|
||||
if s == str || (strings.Contains(s, "*") && match(s, str)) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func match(tmpl, str string) bool {
|
||||
regexpStr := "^" + regexp.QuoteMeta(tmpl) + "$"
|
||||
regexpStr = regexpStr[:strings.Index(regexpStr, "*")-1] + "." + regexpStr[strings.Index(regexpStr, "*"):]
|
||||
reg := regexp.MustCompile(regexpStr)
|
||||
return reg.Match([]byte(str))
|
||||
}
|
|
@ -1,930 +0,0 @@
|
|||
package handler
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/valyala/fasthttp"
|
||||
)
|
||||
|
||||
func TestPreflight(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
|
||||
bktName := "bucket-preflight"
|
||||
cnrID, cnr, err := hc.prepareContainer(bktName, acl.Private)
|
||||
require.NoError(t, err)
|
||||
hc.frostfs.SetContainer(cnrID, cnr)
|
||||
|
||||
var epoch uint64
|
||||
|
||||
t.Run("CORS object", func(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
corsConfig *data.CORSConfiguration
|
||||
requestHeaders map[string]string
|
||||
expectedHeaders map[string]string
|
||||
status int
|
||||
}{
|
||||
{
|
||||
name: "no CORS configuration",
|
||||
expectedHeaders: map[string]string{
|
||||
fasthttp.HeaderAccessControlAllowOrigin: "",
|
||||
fasthttp.HeaderAccessControlAllowMethods: "",
|
||||
fasthttp.HeaderAccessControlAllowHeaders: "",
|
||||
fasthttp.HeaderAccessControlExposeHeaders: "",
|
||||
fasthttp.HeaderAccessControlMaxAge: "",
|
||||
fasthttp.HeaderAccessControlAllowCredentials: "",
|
||||
},
|
||||
requestHeaders: map[string]string{
|
||||
fasthttp.HeaderOrigin: "http://example.com",
|
||||
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
|
||||
},
|
||||
status: fasthttp.StatusNotFound,
|
||||
},
|
||||
{
|
||||
name: "specific allowed origin",
|
||||
corsConfig: &data.CORSConfiguration{
|
||||
CORSRules: []data.CORSRule{
|
||||
{
|
||||
AllowedOrigins: []string{"http://example.com"},
|
||||
AllowedMethods: []string{"GET", "HEAD"},
|
||||
AllowedHeaders: []string{"Content-Type"},
|
||||
ExposeHeaders: []string{"x-amz-*", "X-Amz-*"},
|
||||
MaxAgeSeconds: 900,
|
||||
},
|
||||
},
|
||||
},
|
||||
requestHeaders: map[string]string{
|
||||
fasthttp.HeaderOrigin: "http://example.com",
|
||||
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
|
||||
fasthttp.HeaderAccessControlRequestHeaders: "Content-Type",
|
||||
},
|
||||
expectedHeaders: map[string]string{
|
||||
fasthttp.HeaderAccessControlAllowOrigin: "http://example.com",
|
||||
fasthttp.HeaderAccessControlAllowMethods: "GET, HEAD",
|
||||
fasthttp.HeaderAccessControlAllowHeaders: "Content-Type",
|
||||
fasthttp.HeaderAccessControlExposeHeaders: "x-amz-*, X-Amz-*",
|
||||
					fasthttp.HeaderAccessControlMaxAge: "900",
					fasthttp.HeaderAccessControlAllowCredentials: "true",
				},
				status: fasthttp.StatusOK,
			},
			{
				name: "wildcard allowed origin",
				corsConfig: &data.CORSConfiguration{
					CORSRules: []data.CORSRule{
						{
							AllowedOrigins: []string{"*"},
							AllowedMethods: []string{"GET", "HEAD"},
							AllowedHeaders: []string{"Content-Type"},
							ExposeHeaders: []string{"x-amz-*", "X-Amz-*"},
							MaxAgeSeconds: 900,
						},
					},
				},
				requestHeaders: map[string]string{
					fasthttp.HeaderOrigin: "http://example.com",
					fasthttp.HeaderAccessControlRequestMethod: "HEAD",
				},
				expectedHeaders: map[string]string{
					fasthttp.HeaderAccessControlAllowOrigin: "http://example.com",
					fasthttp.HeaderAccessControlAllowMethods: "GET, HEAD",
					fasthttp.HeaderAccessControlAllowHeaders: "",
					fasthttp.HeaderAccessControlExposeHeaders: "x-amz-*, X-Amz-*",
					fasthttp.HeaderAccessControlMaxAge: "900",
					fasthttp.HeaderAccessControlAllowCredentials: "",
				},
				status: fasthttp.StatusOK,
			},
			{
				name: "not allowed header",
				corsConfig: &data.CORSConfiguration{
					CORSRules: []data.CORSRule{
						{
							AllowedOrigins: []string{"*"},
							AllowedMethods: []string{"GET", "HEAD"},
							AllowedHeaders: []string{"Content-Type"},
						},
					},
				},
				requestHeaders: map[string]string{
					fasthttp.HeaderOrigin: "http://example.com",
					fasthttp.HeaderAccessControlRequestMethod: "GET",
					fasthttp.HeaderAccessControlRequestHeaders: "Authorization",
				},
				expectedHeaders: map[string]string{
					fasthttp.HeaderAccessControlAllowOrigin: "",
					fasthttp.HeaderAccessControlAllowMethods: "",
					fasthttp.HeaderAccessControlAllowHeaders: "",
					fasthttp.HeaderAccessControlExposeHeaders: "",
					fasthttp.HeaderAccessControlMaxAge: "",
					fasthttp.HeaderAccessControlAllowCredentials: "",
				},
				status: fasthttp.StatusForbidden,
			},
			{
				name: "empty Origin header",
				corsConfig: &data.CORSConfiguration{
					CORSRules: []data.CORSRule{
						{
							AllowedOrigins: []string{"*"},
							AllowedMethods: []string{"GET", "HEAD"},
						},
					},
				},
				expectedHeaders: map[string]string{
					fasthttp.HeaderAccessControlAllowOrigin: "",
					fasthttp.HeaderAccessControlAllowMethods: "",
					fasthttp.HeaderAccessControlAllowHeaders: "",
					fasthttp.HeaderAccessControlExposeHeaders: "",
					fasthttp.HeaderAccessControlMaxAge: "",
					fasthttp.HeaderAccessControlAllowCredentials: "",
				},
				status: fasthttp.StatusBadRequest,
			},
			{
				name: "empty Access-Control-Request-Method header",
				corsConfig: &data.CORSConfiguration{
					CORSRules: []data.CORSRule{
						{
							AllowedOrigins: []string{"*"},
							AllowedMethods: []string{"GET", "HEAD"},
						},
					},
				},
				requestHeaders: map[string]string{
					fasthttp.HeaderOrigin: "http://example.com",
				},
				expectedHeaders: map[string]string{
					fasthttp.HeaderAccessControlAllowOrigin: "",
					fasthttp.HeaderAccessControlAllowMethods: "",
					fasthttp.HeaderAccessControlAllowHeaders: "",
					fasthttp.HeaderAccessControlExposeHeaders: "",
					fasthttp.HeaderAccessControlMaxAge: "",
					fasthttp.HeaderAccessControlAllowCredentials: "",
				},
				status: fasthttp.StatusBadRequest,
			},
		} {
			t.Run(tc.name, func(t *testing.T) {
				if tc.corsConfig != nil {
					epoch++
					setCORSObject(t, hc, cnrID, tc.corsConfig, epoch)
				}

				r := prepareCORSRequest(t, bktName, tc.requestHeaders)
				hc.Handler().Preflight(r)

				require.Equal(t, tc.status, r.Response.StatusCode())
				for k, v := range tc.expectedHeaders {
					require.Equal(t, v, string(r.Response.Header.Peek(k)))
				}
			})
		}
	})

	t.Run("CORS config", func(t *testing.T) {
		hc.cfg.cors = &data.CORSRule{
			AllowedOrigins: []string{"*"},
			AllowedMethods: []string{"GET", "HEAD"},
			AllowedHeaders: []string{"Content-Type", "Content-Encoding"},
			ExposeHeaders: []string{"x-amz-*", "X-Amz-*"},
			MaxAgeSeconds: 900,
			AllowedCredentials: true,
		}

		r := prepareCORSRequest(t, bktName, map[string]string{
			fasthttp.HeaderOrigin: "http://example.com",
			fasthttp.HeaderAccessControlRequestMethod: "GET",
		})
		hc.Handler().Preflight(r)

		require.Equal(t, fasthttp.StatusOK, r.Response.StatusCode())
		require.Equal(t, "900", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlMaxAge)))
		require.Equal(t, "*", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowOrigin)))
		require.Equal(t, "GET, HEAD", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowMethods)))
		require.Equal(t, "Content-Type, Content-Encoding", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowHeaders)))
		require.Equal(t, "x-amz-*, X-Amz-*", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlExposeHeaders)))
		require.Equal(t, "true", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowCredentials)))
	})
}

func TestSetCORSHeaders(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName := "bucket-set-cors-headers"
	cnrID, cnr, err := hc.prepareContainer(bktName, acl.Private)
	require.NoError(t, err)
	hc.frostfs.SetContainer(cnrID, cnr)

	var epoch uint64

	t.Run("CORS object", func(t *testing.T) {
		for _, tc := range []struct {
			name            string
			corsConfig      *data.CORSConfiguration
			requestHeaders  map[string]string
			expectedHeaders map[string]string
		}{
			{
				name: "empty Origin header",
				expectedHeaders: map[string]string{
					fasthttp.HeaderAccessControlAllowOrigin: "",
					fasthttp.HeaderAccessControlAllowMethods: "",
					fasthttp.HeaderVary: "",
					fasthttp.HeaderAccessControlAllowCredentials: "",
				},
			},
			{
				name: "no CORS configuration",
				expectedHeaders: map[string]string{
					fasthttp.HeaderAccessControlAllowOrigin: "",
					fasthttp.HeaderAccessControlAllowMethods: "",
					fasthttp.HeaderVary: "",
					fasthttp.HeaderAccessControlAllowCredentials: "",
				},
				requestHeaders: map[string]string{
					fasthttp.HeaderOrigin: "http://example.com",
				},
			},
			{
				name: "specific allowed origin",
				corsConfig: &data.CORSConfiguration{
					CORSRules: []data.CORSRule{
						{
							AllowedOrigins: []string{"http://example.com"},
							AllowedMethods: []string{"GET", "HEAD"},
						},
					},
				},
				requestHeaders: map[string]string{
					fasthttp.HeaderOrigin: "http://example.com",
				},
				expectedHeaders: map[string]string{
					fasthttp.HeaderAccessControlAllowOrigin: "http://example.com",
					fasthttp.HeaderAccessControlAllowMethods: "GET, HEAD",
					fasthttp.HeaderVary: fasthttp.HeaderOrigin,
					fasthttp.HeaderAccessControlAllowCredentials: "true",
				},
			},
			{
				name: "wildcard allowed origin, with credentials",
				corsConfig: &data.CORSConfiguration{
					CORSRules: []data.CORSRule{
						{
							AllowedOrigins: []string{"*"},
							AllowedMethods: []string{"GET", "HEAD"},
						},
					},
				},
				requestHeaders: func() map[string]string {
					tkn := new(bearer.Token)
					err = tkn.Sign(hc.key.PrivateKey)
					require.NoError(t, err)

					t64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
					require.NotEmpty(t, t64)

					return map[string]string{
						fasthttp.HeaderOrigin: "http://example.com",
						fasthttp.HeaderAuthorization: "Bearer " + t64,
					}
				}(),
				expectedHeaders: map[string]string{
					fasthttp.HeaderAccessControlAllowOrigin: "http://example.com",
					fasthttp.HeaderAccessControlAllowMethods: "GET, HEAD",
					fasthttp.HeaderVary: fasthttp.HeaderOrigin,
					fasthttp.HeaderAccessControlAllowCredentials: "true",
				},
			},
			{
				name: "wildcard allowed origin, without credentials",
				corsConfig: &data.CORSConfiguration{
					CORSRules: []data.CORSRule{
						{
							AllowedOrigins: []string{"*"},
							AllowedMethods: []string{"GET", "HEAD"},
						},
					},
				},
				requestHeaders: map[string]string{
					fasthttp.HeaderOrigin: "http://example.com",
				},
				expectedHeaders: map[string]string{
					fasthttp.HeaderAccessControlAllowOrigin: "*",
					fasthttp.HeaderAccessControlAllowMethods: "GET, HEAD",
					fasthttp.HeaderVary: "",
					fasthttp.HeaderAccessControlAllowCredentials: "",
				},
			},
		} {
			t.Run(tc.name, func(t *testing.T) {
				epoch++
				setCORSObject(t, hc, cnrID, tc.corsConfig, epoch)
				r := prepareCORSRequest(t, bktName, tc.requestHeaders)
				hc.Handler().SetCORSHeaders(r)

				require.Equal(t, fasthttp.StatusOK, r.Response.StatusCode())
				for k, v := range tc.expectedHeaders {
					require.Equal(t, v, string(r.Response.Header.Peek(k)))
				}
			})
		}
	})

	t.Run("CORS config", func(t *testing.T) {
		hc.cfg.cors = &data.CORSRule{
			AllowedOrigins: []string{"*"},
			AllowedMethods: []string{"GET", "HEAD"},
			AllowedHeaders: []string{"Content-Type", "Content-Encoding"},
			ExposeHeaders: []string{"x-amz-*", "X-Amz-*"},
			MaxAgeSeconds: 900,
			AllowedCredentials: true,
		}

		r := prepareCORSRequest(t, bktName, map[string]string{fasthttp.HeaderOrigin: "http://example.com"})
		hc.Handler().SetCORSHeaders(r)

		require.Equal(t, "900", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlMaxAge)))
		require.Equal(t, "*", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowOrigin)))
		require.Equal(t, "GET, HEAD", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowMethods)))
		require.Equal(t, "Content-Type, Content-Encoding", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowHeaders)))
		require.Equal(t, "x-amz-*, X-Amz-*", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlExposeHeaders)))
		require.Equal(t, "true", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowCredentials)))
	})
}

func TestCheckSubslice(t *testing.T) {
	for _, tc := range []struct {
		name     string
		allowed  []string
		actual   []string
		expected bool
	}{
		{
			name: "empty allowed slice",
			allowed: []string{},
			actual: []string{"str1", "str2", "str3"},
			expected: false,
		},
		{
			name: "empty actual slice",
			allowed: []string{"str1", "str2", "str3"},
			actual: []string{},
			expected: true,
		},
		{
			name: "allowed wildcard",
			allowed: []string{"str", "*"},
			actual: []string{"str1", "str2", "str3"},
			expected: true,
		},
		{
			name: "similar allowed and actual",
			allowed: []string{"str1", "str2", "str3"},
			actual: []string{"str1", "str2", "str3"},
			expected: true,
		},
		{
			name: "allowed actual",
			allowed: []string{"str", "str1", "str2", "str4"},
			actual: []string{"str1", "str2"},
			expected: true,
		},
		{
			name: "not allowed actual",
			allowed: []string{"str", "str1", "str2", "str4"},
			actual: []string{"str1", "str5"},
			expected: false,
		},
		{
			name: "wildcard in allowed",
			allowed: []string{"str*"},
			actual: []string{"str", "str5"},
			expected: true,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			require.Equal(t, tc.expected, checkSubslice(tc.allowed, tc.actual))
		})
	}
}
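The table above pins down the matching semantics the gateway expects: an empty allowed list rejects any non-empty actual list, an empty actual list is always accepted, and an allowed entry may contain a single `*` wildcard. A minimal sketch consistent with these cases could look like the snippet below; `matchWildcard` and `allowedAll` are hypothetical names for illustration only, not the gateway's actual checkSubslice implementation, and the sketch ignores the case-insensitivity that real header matching also needs.

package cors

import "strings"

// matchWildcard reports whether s matches a pattern that may contain one '*' wildcard.
func matchWildcard(pattern, s string) bool {
	if pattern == "*" {
		return true
	}
	before, after, found := strings.Cut(pattern, "*")
	if !found {
		return pattern == s
	}
	// The wildcard may match any (possibly empty) run of characters between the
	// fixed prefix and suffix of the pattern.
	return len(s) >= len(before)+len(after) &&
		strings.HasPrefix(s, before) && strings.HasSuffix(s, after)
}

// allowedAll reports whether every element of actual matches at least one allowed pattern.
func allowedAll(allowed, actual []string) bool {
	for _, a := range actual {
		ok := false
		for _, p := range allowed {
			if matchWildcard(p, a) {
				ok = true
				break
			}
		}
		if !ok {
			return false
		}
	}
	return true
}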

func TestAllowedOriginWildcards(t *testing.T) {
	hc := prepareHandlerContext(t)
	bktName := "bucket-allowed-origin-wildcards"
	cnrID, cnr, err := hc.prepareContainer(bktName, acl.Private)
	require.NoError(t, err)
	hc.frostfs.SetContainer(cnrID, cnr)

	cfg := &data.CORSConfiguration{
		CORSRules: []data.CORSRule{
			{
				AllowedOrigins: []string{"*suffix.example"},
				AllowedMethods: []string{"GET"},
			},
			{
				AllowedOrigins: []string{"https://*example"},
				AllowedMethods: []string{"GET"},
			},
			{
				AllowedOrigins: []string{"prefix.example*"},
				AllowedMethods: []string{"GET"},
			},
		},
	}
	setCORSObject(t, hc, cnrID, cfg, 1)

	for _, tc := range []struct {
		name            string
		handler         func(*fasthttp.RequestCtx)
		requestHeaders  map[string]string
		expectedHeaders map[string]string
		expectedStatus  int
	}{
		{
			name: "set cors headers, empty request cors headers",
			handler: hc.Handler().SetCORSHeaders,
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "",
				fasthttp.HeaderAccessControlAllowMethods: "",
			},
		},
		{
			name: "set cors headers, invalid origin",
			handler: hc.Handler().SetCORSHeaders,
			requestHeaders: map[string]string{
				fasthttp.HeaderOrigin: "https://origin.com",
			},
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "",
				fasthttp.HeaderAccessControlAllowMethods: "",
			},
		},
		{
			name: "set cors headers, first rule, no symbols in place of wildcard",
			handler: hc.Handler().SetCORSHeaders,
			requestHeaders: map[string]string{
				fasthttp.HeaderOrigin: "suffix.example",
			},
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "suffix.example",
				fasthttp.HeaderAccessControlAllowMethods: "GET",
			},
		},
		{
			name: "set cors headers, first rule, valid origin",
			handler: hc.Handler().SetCORSHeaders,
			requestHeaders: map[string]string{
				fasthttp.HeaderOrigin: "http://suffix.example",
			},
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "http://suffix.example",
				fasthttp.HeaderAccessControlAllowMethods: "GET",
			},
		},
		{
			name: "set cors headers, first rule, invalid origin",
			handler: hc.Handler().SetCORSHeaders,
			requestHeaders: map[string]string{
				fasthttp.HeaderOrigin: "http://suffix-example",
			},
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "",
				fasthttp.HeaderAccessControlAllowMethods: "",
			},
		},
		{
			name: "set cors headers, second rule, no symbols in place of wildcard",
			handler: hc.Handler().SetCORSHeaders,
			requestHeaders: map[string]string{
				fasthttp.HeaderOrigin: "https://example",
			},
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "https://example",
				fasthttp.HeaderAccessControlAllowMethods: "GET",
			},
		},
		{
			name: "set cors headers, second rule, valid origin",
			handler: hc.Handler().SetCORSHeaders,
			requestHeaders: map[string]string{
				fasthttp.HeaderOrigin: "https://www.example",
			},
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "https://www.example",
				fasthttp.HeaderAccessControlAllowMethods: "GET",
			},
		},
		{
			name: "set cors headers, second rule, invalid origin",
			handler: hc.Handler().SetCORSHeaders,
			requestHeaders: map[string]string{
				fasthttp.HeaderOrigin: "https://www.example.com",
			},
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "",
				fasthttp.HeaderAccessControlAllowMethods: "",
			},
		},
		{
			name: "set cors headers, third rule, no symbols in place of wildcard",
			handler: hc.Handler().SetCORSHeaders,
			requestHeaders: map[string]string{
				fasthttp.HeaderOrigin: "prefix.example",
			},
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "prefix.example",
				fasthttp.HeaderAccessControlAllowMethods: "GET",
			},
		},
		{
			name: "set cors headers, third rule, valid origin",
			handler: hc.Handler().SetCORSHeaders,
			requestHeaders: map[string]string{
				fasthttp.HeaderOrigin: "prefix.example.com",
			},
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "prefix.example.com",
				fasthttp.HeaderAccessControlAllowMethods: "GET",
			},
		},
		{
			name: "set cors headers, third rule, invalid origin",
			handler: hc.Handler().SetCORSHeaders,
			requestHeaders: map[string]string{
				fasthttp.HeaderOrigin: "www.prefix.example",
			},
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "",
				fasthttp.HeaderAccessControlAllowMethods: "",
			},
		},
		{
			name: "set cors headers, third rule, invalid request method in header",
			handler: hc.Handler().SetCORSHeaders,
			requestHeaders: map[string]string{
				fasthttp.HeaderOrigin: "prefix.example.com",
				fasthttp.HeaderAccessControlRequestMethod: "PUT",
			},
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "",
				fasthttp.HeaderAccessControlAllowMethods: "",
			},
		},
		{
			name: "set cors headers, third rule, valid request method in header",
			handler: hc.Handler().SetCORSHeaders,
			requestHeaders: map[string]string{
				fasthttp.HeaderOrigin: "prefix.example.com",
				fasthttp.HeaderAccessControlRequestMethod: "GET",
			},
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "prefix.example.com",
				fasthttp.HeaderAccessControlAllowMethods: "GET",
			},
		},
		{
			name: "preflight, empty request cors headers",
			handler: hc.Handler().Preflight,
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "",
				fasthttp.HeaderAccessControlAllowMethods: "",
			},
			expectedStatus: http.StatusBadRequest,
		},
		{
			name: "preflight, invalid origin",
			handler: hc.Handler().Preflight,
			requestHeaders: map[string]string{
				fasthttp.HeaderOrigin: "https://origin.com",
				fasthttp.HeaderAccessControlRequestMethod: "GET",
			},
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "",
				fasthttp.HeaderAccessControlAllowMethods: "",
			},
			expectedStatus: http.StatusForbidden,
		},
		{
			name: "preflight, first rule, no symbols in place of wildcard",
			handler: hc.Handler().Preflight,
			requestHeaders: map[string]string{
				fasthttp.HeaderOrigin: "suffix.example",
				fasthttp.HeaderAccessControlRequestMethod: "GET",
			},
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "suffix.example",
				fasthttp.HeaderAccessControlAllowMethods: "GET",
			},
		},
		{
			name: "preflight, first rule, valid origin",
			handler: hc.Handler().Preflight,
			requestHeaders: map[string]string{
				fasthttp.HeaderOrigin: "http://suffix.example",
				fasthttp.HeaderAccessControlRequestMethod: "GET",
			},
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "http://suffix.example",
				fasthttp.HeaderAccessControlAllowMethods: "GET",
			},
		},
		{
			name: "preflight, first rule, invalid origin",
			handler: hc.Handler().Preflight,
			requestHeaders: map[string]string{
				fasthttp.HeaderOrigin: "http://suffix-example",
				fasthttp.HeaderAccessControlRequestMethod: "GET",
			},
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "",
				fasthttp.HeaderAccessControlAllowMethods: "",
			},
			expectedStatus: http.StatusForbidden,
		},
		{
			name: "preflight, second rule, no symbols in place of wildcard",
			handler: hc.Handler().Preflight,
			requestHeaders: map[string]string{
				fasthttp.HeaderOrigin: "https://example",
				fasthttp.HeaderAccessControlRequestMethod: "GET",
			},
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "https://example",
				fasthttp.HeaderAccessControlAllowMethods: "GET",
			},
		},
		{
			name: "preflight, second rule, valid origin",
			handler: hc.Handler().Preflight,
			requestHeaders: map[string]string{
				fasthttp.HeaderOrigin: "https://www.example",
				fasthttp.HeaderAccessControlRequestMethod: "GET",
			},
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "https://www.example",
				fasthttp.HeaderAccessControlAllowMethods: "GET",
			},
		},
		{
			name: "preflight, second rule, invalid origin",
			handler: hc.Handler().Preflight,
			requestHeaders: map[string]string{
				fasthttp.HeaderOrigin: "https://www.example.com",
				fasthttp.HeaderAccessControlRequestMethod: "GET",
			},
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "",
				fasthttp.HeaderAccessControlAllowMethods: "",
			},
			expectedStatus: http.StatusForbidden,
		},
		{
			name: "preflight, third rule, no symbols in place of wildcard",
			handler: hc.Handler().Preflight,
			requestHeaders: map[string]string{
				fasthttp.HeaderOrigin: "prefix.example",
				fasthttp.HeaderAccessControlRequestMethod: "GET",
			},
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "prefix.example",
				fasthttp.HeaderAccessControlAllowMethods: "GET",
			},
		},
		{
			name: "preflight, third rule, valid origin",
			handler: hc.Handler().Preflight,
			requestHeaders: map[string]string{
				fasthttp.HeaderOrigin: "prefix.example.com",
				fasthttp.HeaderAccessControlRequestMethod: "GET",
			},
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "prefix.example.com",
				fasthttp.HeaderAccessControlAllowMethods: "GET",
			},
		},
		{
			name: "preflight, third rule, invalid origin",
			handler: hc.Handler().Preflight,
			requestHeaders: map[string]string{
				fasthttp.HeaderOrigin: "www.prefix.example",
				fasthttp.HeaderAccessControlRequestMethod: "GET",
			},
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "",
				fasthttp.HeaderAccessControlAllowMethods: "",
			},
			expectedStatus: http.StatusForbidden,
		},
		{
			name: "preflight, third rule, invalid request method in header",
			handler: hc.Handler().Preflight,
			requestHeaders: map[string]string{
				fasthttp.HeaderOrigin: "prefix.example.com",
				fasthttp.HeaderAccessControlRequestMethod: "PUT",
			},
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "",
				fasthttp.HeaderAccessControlAllowMethods: "",
			},
			expectedStatus: http.StatusForbidden,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			r := prepareCORSRequest(t, bktName, tc.requestHeaders)
			tc.handler(r)

			expectedStatus := fasthttp.StatusOK
			if tc.expectedStatus != 0 {
				expectedStatus = tc.expectedStatus
			}
			require.Equal(t, expectedStatus, r.Response.StatusCode())
			for k, v := range tc.expectedHeaders {
				require.Equal(t, v, string(r.Response.Header.Peek(k)))
			}
		})
	}
}

func TestAllowedHeaderWildcards(t *testing.T) {
	hc := prepareHandlerContext(t)
	bktName := "bucket-allowed-header-wildcards"
	cnrID, cnr, err := hc.prepareContainer(bktName, acl.Private)
	require.NoError(t, err)
	hc.frostfs.SetContainer(cnrID, cnr)

	cfg := &data.CORSConfiguration{
		CORSRules: []data.CORSRule{
			{
				AllowedOrigins: []string{"https://www.example.com"},
				AllowedMethods: []string{"HEAD"},
				AllowedHeaders: []string{"*-suffix"},
			},
			{
				AllowedOrigins: []string{"https://www.example.com"},
				AllowedMethods: []string{"HEAD"},
				AllowedHeaders: []string{"start-*-end"},
			},
			{
				AllowedOrigins: []string{"https://www.example.com"},
				AllowedMethods: []string{"HEAD"},
				AllowedHeaders: []string{"X-Amz-*"},
			},
		},
	}
	setCORSObject(t, hc, cnrID, cfg, 1)

	for _, tc := range []struct {
		name            string
		requestHeaders  map[string]string
		expectedHeaders map[string]string
		expectedStatus  int
	}{
		{
			name: "first rule, valid headers",
			requestHeaders: map[string]string{
				fasthttp.HeaderOrigin: "https://www.example.com",
				fasthttp.HeaderAccessControlRequestMethod: "HEAD",
				fasthttp.HeaderAccessControlRequestHeaders: "header-suffix, -suffix",
			},
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "https://www.example.com",
				fasthttp.HeaderAccessControlAllowMethods: "HEAD",
				fasthttp.HeaderAccessControlAllowHeaders: "header-suffix, -suffix",
			},
		},
		{
			name: "first rule, invalid headers",
			requestHeaders: map[string]string{
				fasthttp.HeaderOrigin: "https://www.example.com",
				fasthttp.HeaderAccessControlRequestMethod: "HEAD",
				fasthttp.HeaderAccessControlRequestHeaders: "header-suffix-*",
			},
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "",
				fasthttp.HeaderAccessControlAllowMethods: "",
				fasthttp.HeaderAccessControlAllowHeaders: "",
			},
			expectedStatus: http.StatusForbidden,
		},
		{
			name: "second rule, valid headers",
			requestHeaders: map[string]string{
				fasthttp.HeaderOrigin: "https://www.example.com",
				fasthttp.HeaderAccessControlRequestMethod: "HEAD",
				fasthttp.HeaderAccessControlRequestHeaders: "start--end, start-header-end",
			},
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "https://www.example.com",
				fasthttp.HeaderAccessControlAllowMethods: "HEAD",
				fasthttp.HeaderAccessControlAllowHeaders: "start--end, start-header-end",
			},
		},
		{
			name: "second rule, invalid header ending",
			requestHeaders: map[string]string{
				fasthttp.HeaderOrigin: "https://www.example.com",
				fasthttp.HeaderAccessControlRequestMethod: "HEAD",
				fasthttp.HeaderAccessControlRequestHeaders: "start-header-end-*",
			},
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "",
				fasthttp.HeaderAccessControlAllowMethods: "",
				fasthttp.HeaderAccessControlAllowHeaders: "",
			},
			expectedStatus: http.StatusForbidden,
		},
		{
			name: "second rule, invalid header beginning",
			requestHeaders: map[string]string{
				fasthttp.HeaderOrigin: "https://www.example.com",
				fasthttp.HeaderAccessControlRequestMethod: "HEAD",
				fasthttp.HeaderAccessControlRequestHeaders: "*-start-header-end",
			},
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "",
				fasthttp.HeaderAccessControlAllowMethods: "",
				fasthttp.HeaderAccessControlAllowHeaders: "",
			},
			expectedStatus: http.StatusForbidden,
		},
		{
			name: "third rule, valid headers",
			requestHeaders: map[string]string{
				fasthttp.HeaderOrigin: "https://www.example.com",
				fasthttp.HeaderAccessControlRequestMethod: "HEAD",
				fasthttp.HeaderAccessControlRequestHeaders: "X-Amz-Date, X-Amz-Content-Sha256",
			},
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "https://www.example.com",
				fasthttp.HeaderAccessControlAllowMethods: "HEAD",
				fasthttp.HeaderAccessControlAllowHeaders: "X-Amz-Date, X-Amz-Content-Sha256",
			},
		},
		{
			name: "third rule, invalid headers",
			requestHeaders: map[string]string{
				fasthttp.HeaderOrigin: "https://www.example.com",
				fasthttp.HeaderAccessControlRequestMethod: "HEAD",
				fasthttp.HeaderAccessControlRequestHeaders: "Authorization",
			},
			expectedHeaders: map[string]string{
				fasthttp.HeaderAccessControlAllowOrigin: "",
				fasthttp.HeaderAccessControlAllowMethods: "",
				fasthttp.HeaderAccessControlAllowHeaders: "",
			},
			expectedStatus: http.StatusForbidden,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			r := prepareCORSRequest(t, bktName, tc.requestHeaders)
			hc.Handler().Preflight(r)

			expectedStatus := http.StatusOK
			if tc.expectedStatus != 0 {
				expectedStatus = tc.expectedStatus
			}
			require.Equal(t, expectedStatus, r.Response.StatusCode())
			for k, v := range tc.expectedHeaders {
				require.Equal(t, v, string(r.Response.Header.Peek(k)))
			}
		})
	}
}

func setCORSObject(t *testing.T, hc *handlerContext, cnrID cid.ID, corsConfig *data.CORSConfiguration, epoch uint64) {
	payload, err := xml.Marshal(corsConfig)
	require.NoError(t, err)

	a := object.NewAttribute()
	a.SetKey(object.AttributeFilePath)
	a.SetValue(fmt.Sprintf(corsFilePathTemplate, cnrID))

	objID := oidtest.ID()
	obj := object.New()
	obj.SetAttributes(*a)
	obj.SetOwnerID(hc.owner)
	obj.SetPayload(payload)
	obj.SetPayloadSize(uint64(len(payload)))
	obj.SetContainerID(hc.corsCnr)
	obj.SetID(objID)
	obj.SetCreationEpoch(epoch)

	var addr oid.Address
	addr.SetObject(objID)
	addr.SetContainer(hc.corsCnr)

	hc.frostfs.SetObject(addr, obj)
}

@ -16,7 +16,6 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@ -25,38 +24,38 @@ import (
)

// DownloadByAddressOrBucketName handles download requests using simple cid/oid or bucketname/key format.
func (h *Handler) DownloadByAddressOrBucketName(req *fasthttp.RequestCtx) {
	ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.DownloadByAddressOrBucketName")
	defer span.End()
func (h *Handler) DownloadByAddressOrBucketName(c *fasthttp.RequestCtx) {
	cidParam := c.UserValue("cid").(string)
	oidParam := c.UserValue("oid").(string)
	downloadParam := c.QueryArgs().GetBool("download")

	cidParam := req.UserValue("cid").(string)
	oidParam := req.UserValue("oid").(string)
	downloadParam := req.QueryArgs().GetBool("download")

	ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(
	ctx := utils.GetContextFromRequest(c)
	log := utils.GetReqLogOrDefault(ctx, h.log).With(
		zap.String("cid", cidParam),
		zap.String("oid", oidParam),
	))
	)

	bktInfo, err := h.getBucketInfo(ctx, cidParam)
	bktInfo, err := h.getBucketInfo(ctx, cidParam, log)
	if err != nil {
		h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err)
		logAndSendBucketError(c, log, err)
		return
	}

	checkS3Err := h.tree.CheckSettingsNodeExists(ctx, bktInfo)
	if checkS3Err != nil && !errors.Is(checkS3Err, layer.ErrNodeNotFound) {
		h.logAndSendError(ctx, req, logs.FailedToCheckIfSettingsNodeExist, checkS3Err)
		logAndSendBucketError(c, log, checkS3Err)
		return
	}

	req := newRequest(c, log)

	var objID oid.ID
	if checkS3Err == nil && shouldDownload(oidParam, downloadParam) {
		h.byS3Path(ctx, req, bktInfo.CID, oidParam, h.receiveFile)
	} else if err = objID.DecodeString(oidParam); err == nil {
		h.byNativeAddress(ctx, req, bktInfo.CID, objID, h.receiveFile)
	} else {
		h.browseIndex(ctx, req, cidParam, oidParam, checkS3Err != nil)
		h.browseIndex(c, checkS3Err != nil)
	}
}

@ -65,11 +64,8 @@ func shouldDownload(oidParam string, downloadParam bool) bool {
}

// DownloadByAttribute handles attribute-based download requests.
func (h *Handler) DownloadByAttribute(req *fasthttp.RequestCtx) {
	ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.DownloadByAttribute")
	defer span.End()

	h.byAttribute(ctx, req, h.receiveFile)
func (h *Handler) DownloadByAttribute(c *fasthttp.RequestCtx) {
	h.byAttribute(c, h.receiveFile)
}

func (h *Handler) search(ctx context.Context, cnrID cid.ID, key, val string, op object.SearchMatchType) (ResObjectSearch, error) {
@ -89,33 +85,28 @@ func (h *Handler) search(ctx context.Context, cnrID cid.ID, key, val string, op
}

// DownloadZip handles zip by prefix requests.
func (h *Handler) DownloadZip(req *fasthttp.RequestCtx) {
	ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.DownloadZip")
	defer span.End()
func (h *Handler) DownloadZip(c *fasthttp.RequestCtx) {
	scid, _ := c.UserValue("cid").(string)

	scid, _ := req.UserValue("cid").(string)
	prefix, _ := req.UserValue("prefix").(string)

	ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(zap.String("cid", scid), zap.String("prefix", prefix)))

	bktInfo, err := h.getBucketInfo(ctx, scid)
	ctx := utils.GetContextFromRequest(c)
	log := utils.GetReqLogOrDefault(ctx, h.log)
	bktInfo, err := h.getBucketInfo(ctx, scid, log)
	if err != nil {
		h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err)
		logAndSendBucketError(c, log, err)
		return
	}

	resSearch, err := h.searchObjectsByPrefix(ctx, bktInfo.CID, prefix)
	resSearch, err := h.searchObjectsByPrefix(c, log, bktInfo.CID)
	if err != nil {
		return
	}

	req.Response.Header.Set(fasthttp.HeaderContentType, "application/zip")
	req.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.zip\"")
	c.Response.Header.Set(fasthttp.HeaderContentType, "application/zip")
	c.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.zip\"")

	req.SetBodyStreamWriter(h.getZipResponseWriter(ctx, resSearch, bktInfo))
	c.SetBodyStreamWriter(h.getZipResponseWriter(ctx, log, resSearch, bktInfo))
}

func (h *Handler) getZipResponseWriter(ctx context.Context, resSearch ResObjectSearch, bktInfo *data.BucketInfo) func(w *bufio.Writer) {
func (h *Handler) getZipResponseWriter(ctx context.Context, log *zap.Logger, resSearch ResObjectSearch, bktInfo *data.BucketInfo) func(w *bufio.Writer) {
	return func(w *bufio.Writer) {
		defer resSearch.Close()

@ -123,20 +114,20 @@ func (h *Handler) getZipResponseWriter(ctx context.Context, resSearch ResObjectS
		zipWriter := zip.NewWriter(w)
		var objectsWritten int

		errIter := resSearch.Iterate(h.putObjectToArchive(ctx, bktInfo.CID, buf,
		errIter := resSearch.Iterate(h.putObjectToArchive(ctx, log, bktInfo.CID, buf,
			func(obj *object.Object) (io.Writer, error) {
				objectsWritten++
				return h.createZipFile(zipWriter, obj)
			}),
		)
		if errIter != nil {
			h.reqLogger(ctx).Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter), logs.TagField(logs.TagDatapath))
			log.Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter))
			return
		} else if objectsWritten == 0 {
			h.reqLogger(ctx).Warn(logs.ObjectsNotFound, logs.TagField(logs.TagDatapath))
			log.Warn(logs.ObjectsNotFound)
		}
		if err := zipWriter.Close(); err != nil {
			h.reqLogger(ctx).Error(logs.CloseZipWriter, zap.Error(err), logs.TagField(logs.TagDatapath))
			log.Error(logs.CloseZipWriter, zap.Error(err))
		}
	}
}
@ -160,33 +151,28 @@ func (h *Handler) createZipFile(zw *zip.Writer, obj *object.Object) (io.Writer,
}

// DownloadTar forms tar.gz from objects by prefix.
func (h *Handler) DownloadTar(req *fasthttp.RequestCtx) {
	ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.DownloadTar")
	defer span.End()
func (h *Handler) DownloadTar(c *fasthttp.RequestCtx) {
	scid, _ := c.UserValue("cid").(string)

	scid, _ := req.UserValue("cid").(string)
	prefix, _ := req.UserValue("prefix").(string)

	ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(zap.String("cid", scid), zap.String("prefix", prefix)))

	bktInfo, err := h.getBucketInfo(ctx, scid)
	ctx := utils.GetContextFromRequest(c)
	log := utils.GetReqLogOrDefault(ctx, h.log)
	bktInfo, err := h.getBucketInfo(ctx, scid, log)
	if err != nil {
		h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err)
		logAndSendBucketError(c, log, err)
		return
	}

	resSearch, err := h.searchObjectsByPrefix(ctx, bktInfo.CID, prefix)
	resSearch, err := h.searchObjectsByPrefix(c, log, bktInfo.CID)
	if err != nil {
		return
	}

	req.Response.Header.Set(fasthttp.HeaderContentType, "application/gzip")
	req.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.tar.gz\"")
	c.Response.Header.Set(fasthttp.HeaderContentType, "application/gzip")
	c.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.tar.gz\"")

	req.SetBodyStreamWriter(h.getTarResponseWriter(ctx, resSearch, bktInfo))
	c.SetBodyStreamWriter(h.getTarResponseWriter(ctx, log, resSearch, bktInfo))
}

func (h *Handler) getTarResponseWriter(ctx context.Context, resSearch ResObjectSearch, bktInfo *data.BucketInfo) func(w *bufio.Writer) {
func (h *Handler) getTarResponseWriter(ctx context.Context, log *zap.Logger, resSearch ResObjectSearch, bktInfo *data.BucketInfo) func(w *bufio.Writer) {
	return func(w *bufio.Writer) {
		defer resSearch.Close()

@ -201,26 +187,26 @@ func (h *Handler) getTarResponseWriter(ctx context.Context, resSearch ResObjectS

		defer func() {
			if err := tarWriter.Close(); err != nil {
				h.reqLogger(ctx).Error(logs.CloseTarWriter, zap.Error(err), logs.TagField(logs.TagDatapath))
				log.Error(logs.CloseTarWriter, zap.Error(err))
			}
			if err := gzipWriter.Close(); err != nil {
				h.reqLogger(ctx).Error(logs.CloseGzipWriter, zap.Error(err), logs.TagField(logs.TagDatapath))
				log.Error(logs.CloseGzipWriter, zap.Error(err))
			}
		}()

		var objectsWritten int
		buf := make([]byte, 3<<20) // the same as for upload

		errIter := resSearch.Iterate(h.putObjectToArchive(ctx, bktInfo.CID, buf,
		errIter := resSearch.Iterate(h.putObjectToArchive(ctx, log, bktInfo.CID, buf,
			func(obj *object.Object) (io.Writer, error) {
				objectsWritten++
				return h.createTarFile(tarWriter, obj)
			}),
		)
		if errIter != nil {
			h.reqLogger(ctx).Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter), logs.TagField(logs.TagDatapath))
			log.Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter))
		} else if objectsWritten == 0 {
			h.reqLogger(ctx).Warn(logs.ObjectsNotFound, logs.TagField(logs.TagDatapath))
			log.Warn(logs.ObjectsNotFound)
		}
	}
}
@ -238,9 +224,9 @@ func (h *Handler) createTarFile(tw *tar.Writer, obj *object.Object) (io.Writer,
	})
}

func (h *Handler) putObjectToArchive(ctx context.Context, cnrID cid.ID, buf []byte, createArchiveHeader func(obj *object.Object) (io.Writer, error)) func(id oid.ID) bool {
func (h *Handler) putObjectToArchive(ctx context.Context, log *zap.Logger, cnrID cid.ID, buf []byte, createArchiveHeader func(obj *object.Object) (io.Writer, error)) func(id oid.ID) bool {
	return func(id oid.ID) bool {
		logger := h.reqLogger(ctx).With(zap.String("oid", id.EncodeToString()))
		log = log.With(zap.String("oid", id.EncodeToString()))

		prm := PrmObjectGet{
			PrmAuth: PrmAuth{
@ -251,18 +237,18 @@ func (h *Handler) putObjectToArchive(ctx context.Context, cnrID cid.ID, buf []by

		resGet, err := h.frostfs.GetObject(ctx, prm)
		if err != nil {
			logger.Error(logs.FailedToGetObject, zap.Error(err), logs.TagField(logs.TagExternalStorage))
			log.Error(logs.FailedToGetObject, zap.Error(err))
			return false
		}

		fileWriter, err := createArchiveHeader(&resGet.Header)
		if err != nil {
			logger.Error(logs.FailedToAddObjectToArchive, zap.Error(err), logs.TagField(logs.TagDatapath))
			log.Error(logs.FailedToAddObjectToArchive, zap.Error(err))
			return false
		}

		if err = writeToArchive(resGet, fileWriter, buf); err != nil {
			logger.Error(logs.FailedToAddObjectToArchive, zap.Error(err), logs.TagField(logs.TagDatapath))
			log.Error(logs.FailedToAddObjectToArchive, zap.Error(err))
			return false
		}

@ -270,17 +256,27 @@ func (h *Handler) putObjectToArchive(ctx context.Context, cnrID cid.ID, buf []by
	}
}

func (h *Handler) searchObjectsByPrefix(ctx context.Context, cnrID cid.ID, prefix string) (ResObjectSearch, error) {
func (h *Handler) searchObjectsByPrefix(c *fasthttp.RequestCtx, log *zap.Logger, cnrID cid.ID) (ResObjectSearch, error) {
	scid, _ := c.UserValue("cid").(string)
	prefix, _ := c.UserValue("prefix").(string)

	ctx := utils.GetContextFromRequest(c)

	prefix, err := url.QueryUnescape(prefix)
	if err != nil {
		return nil, fmt.Errorf("unescape prefix: %w", err)
		log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("prefix", prefix), zap.Error(err))
		ResponseError(c, "could not unescape prefix: "+err.Error(), fasthttp.StatusBadRequest)
		return nil, err
	}

	log = log.With(zap.String("cid", scid), zap.String("prefix", prefix))

	resSearch, err := h.search(ctx, cnrID, object.AttributeFilePath, prefix, object.MatchCommonPrefix)
	if err != nil {
		return nil, fmt.Errorf("search objects by prefix: %w", err)
		log.Error(logs.CouldNotSearchForObjects, zap.Error(err))
		ResponseError(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
		return nil, err
	}

	return resSearch, nil
}

@ -50,8 +50,7 @@ func filterHeaders(l *zap.Logger, header *fasthttp.RequestHeader) (map[string]st

		l.Debug(logs.AddAttributeToResultObject,
			zap.String("key", k),
			zap.String("val", v),
			logs.TagField(logs.TagDatapath))
			zap.String("val", v))
	})

	return result, err

@ -52,10 +52,6 @@ func (t *TestFrostFS) SetContainer(cnrID cid.ID, cnr *container.Container) {
	t.containers[cnrID.EncodeToString()] = cnr
}

func (t *TestFrostFS) SetObject(addr oid.Address, obj *object.Object) {
	t.objects[addr.EncodeToString()] = obj
}

// AllowUserOperation grants access to object operations.
// Empty userID and objID means any user and object respectively.
func (t *TestFrostFS) AllowUserOperation(cnrID cid.ID, userID user.ID, op acl.Op, objID oid.ID) {

@ -14,8 +14,8 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@ -35,8 +35,6 @@ type Config interface {
	BufferMaxSizeForPut() uint64
	NamespaceHeader() string
	EnableFilepathFallback() bool
	FormContainerZone(string) string
	CORS() *data.CORSRule
}

// PrmContainer groups parameters of FrostFS.Container operation.
@ -143,10 +141,6 @@ var (
	ErrGatewayTimeout = errors.New("gateway timeout")
	// ErrQuotaLimitReached is returned from FrostFS in case of quota exceeded.
	ErrQuotaLimitReached = errors.New("quota limit reached")
	// ErrContainerNotFound is returned from FrostFS in case of container was not found.
	ErrContainerNotFound = errors.New("container not found")
	// ErrObjectNotFound is returned from FrostFS in case of object was not found.
	ErrObjectNotFound = errors.New("object not found")
)

// FrostFS represents virtual connection to FrostFS network.
@ -163,7 +157,7 @@ type FrostFS interface {
}

type ContainerResolver interface {
	Resolve(ctx context.Context, zone, name string) (*cid.ID, error)
	Resolve(ctx context.Context, name string) (*cid.ID, error)
}

type Handler struct {
@ -175,18 +169,14 @@ type Handler struct {
	tree layer.TreeService
	cache *cache.BucketCache
	workerPool *ants.Pool
	corsCnrID cid.ID
	corsCache *cache.CORSCache
}

type AppParams struct {
	Logger *zap.Logger
	FrostFS FrostFS
	Owner *user.ID
	Resolver ContainerResolver
	Cache *cache.BucketCache
	CORSCnrID cid.ID
	CORSCache *cache.CORSCache
	Logger *zap.Logger
	FrostFS FrostFS
	Owner *user.ID
	Resolver ContainerResolver
	Cache *cache.BucketCache
}

func New(params *AppParams, config Config, tree layer.TreeService, workerPool *ants.Pool) *Handler {
@ -199,76 +189,75 @@ func New(params *AppParams, config Config, tree layer.TreeService, workerPool *a
		tree: tree,
		cache: params.Cache,
		workerPool: workerPool,
		corsCnrID: params.CORSCnrID,
		corsCache: params.CORSCache,
	}
}

// byNativeAddress is a wrapper for function (e.g. request.headObject, request.receiveFile) that
// prepares request and object address to it.
func (h *Handler) byNativeAddress(ctx context.Context, req *fasthttp.RequestCtx, cnrID cid.ID, objID oid.ID, handler func(context.Context, *fasthttp.RequestCtx, oid.Address)) {
	ctx, span := tracing.StartSpanFromContext(ctx, "handler.byNativeAddress")
	defer span.End()

func (h *Handler) byNativeAddress(ctx context.Context, req request, cnrID cid.ID, objID oid.ID, handler func(context.Context, request, oid.Address)) {
	addr := newAddress(cnrID, objID)
	handler(ctx, req, addr)
}

// byS3Path is a wrapper for function (e.g. request.headObject, request.receiveFile) that
// resolves object address from S3-like path <bucket name>/<object key>.
func (h *Handler) byS3Path(ctx context.Context, req *fasthttp.RequestCtx, cnrID cid.ID, path string, handler func(context.Context, *fasthttp.RequestCtx, oid.Address)) {
	ctx, span := tracing.StartSpanFromContext(ctx, "handler.byS3Path")
	defer span.End()
func (h *Handler) byS3Path(ctx context.Context, req request, cnrID cid.ID, path string, handler func(context.Context, request, oid.Address)) {
	c, log := req.RequestCtx, req.log

	foundOID, err := h.tree.GetLatestVersion(ctx, &cnrID, path)
	if err != nil {
		h.logAndSendError(ctx, req, logs.FailedToGetLatestVersionOfObject, err, zap.String("path", path))
		logAndSendBucketError(c, log, err)
		return
	}
	if foundOID.IsDeleteMarker {
		h.logAndSendError(ctx, req, logs.ObjectWasDeleted, ErrObjectNotFound)
		log.Error(logs.ObjectWasDeleted)
		ResponseError(c, "object deleted", fasthttp.StatusNotFound)
		return
	}

	addr := newAddress(cnrID, foundOID.OID)
	handler(ctx, req, addr)
	handler(ctx, newRequest(c, log), addr)
}

// byAttribute is a wrapper similar to byNativeAddress.
func (h *Handler) byAttribute(ctx context.Context, req *fasthttp.RequestCtx, handler func(context.Context, *fasthttp.RequestCtx, oid.Address)) {
	cidParam, _ := req.UserValue("cid").(string)
	key, _ := req.UserValue("attr_key").(string)
	val, _ := req.UserValue("attr_val").(string)
func (h *Handler) byAttribute(c *fasthttp.RequestCtx, handler func(context.Context, request, oid.Address)) {
	cidParam, _ := c.UserValue("cid").(string)
	key, _ := c.UserValue("attr_key").(string)
	val, _ := c.UserValue("attr_val").(string)

	ctx := utils.GetContextFromRequest(c)
	log := utils.GetReqLogOrDefault(ctx, h.log)

	key, err := url.QueryUnescape(key)
	if err != nil {
		h.logAndSendError(ctx, req, logs.FailedToUnescapeQuery, err, zap.String("cid", cidParam), zap.String("attr_key", key))
		log.Error(logs.FailedToUnescapeQuery, zap.String("cid", cidParam), zap.String("attr_key", key), zap.Error(err))
		ResponseError(c, "could not unescape attr_key: "+err.Error(), fasthttp.StatusBadRequest)
		return
	}

	val, err = url.QueryUnescape(val)
	if err != nil {
		h.logAndSendError(ctx, req, logs.FailedToUnescapeQuery, err, zap.String("cid", cidParam), zap.String("attr_val", key))
		log.Error(logs.FailedToUnescapeQuery, zap.String("cid", cidParam), zap.String("attr_val", val), zap.Error(err))
		ResponseError(c, "could not unescape attr_val: "+err.Error(), fasthttp.StatusBadRequest)
		return
	}

	val = prepareAtribute(key, val)
	log = log.With(zap.String("cid", cidParam), zap.String("attr_key", key), zap.String("attr_val", val))

	ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(zap.String("cid", cidParam),
		zap.String("attr_key", key), zap.String("attr_val", val)))

	bktInfo, err := h.getBucketInfo(ctx, cidParam)
	bktInfo, err := h.getBucketInfo(ctx, cidParam, log)
	if err != nil {
		h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err)
		logAndSendBucketError(c, log, err)
		return
	}

	objID, err := h.findObjectByAttribute(ctx, bktInfo.CID, key, val)
	objID, err := h.findObjectByAttribute(ctx, log, bktInfo.CID, key, val)
	if err != nil {
		if errors.Is(err, io.EOF) {
			err = fmt.Errorf("%w: %s", ErrObjectNotFound, err.Error())
			ResponseError(c, err.Error(), fasthttp.StatusNotFound)
			return
		}
		h.logAndSendError(ctx, req, logs.FailedToFindObjectByAttribute, err)

		ResponseError(c, err.Error(), fasthttp.StatusBadRequest)
		return
	}

@ -276,13 +265,14 @@ func (h *Handler) byAttribute(ctx context.Context, req *fasthttp.RequestCtx, han
	addr.SetContainer(bktInfo.CID)
	addr.SetObject(objID)

	handler(ctx, req, addr)
	handler(ctx, newRequest(c, log), addr)
}

func (h *Handler) findObjectByAttribute(ctx context.Context, cnrID cid.ID, attrKey, attrVal string) (oid.ID, error) {
func (h *Handler) findObjectByAttribute(ctx context.Context, log *zap.Logger, cnrID cid.ID, attrKey, attrVal string) (oid.ID, error) {
	res, err := h.search(ctx, cnrID, attrKey, attrVal, object.MatchStringEqual)
	if err != nil {
		return oid.ID{}, fmt.Errorf("search objects: %w", err)
		log.Error(logs.CouldNotSearchForObjects, zap.Error(err))
		return oid.ID{}, fmt.Errorf("could not search for objects: %w", err)
	}
	defer res.Close()

@ -292,14 +282,13 @@ func (h *Handler) findObjectByAttribute(ctx context.Context, cnrID cid.ID, attrK
	if n == 0 {
		switch {
		case errors.Is(err, io.EOF) && h.needSearchByFileName(attrKey, attrVal):
			h.reqLogger(ctx).Debug(logs.ObjectNotFoundByFilePathTrySearchByFileName, logs.TagField(logs.TagExternalStorage))
			attrVal = prepareAtribute(attrFileName, attrVal)
			return h.findObjectByAttribute(ctx, cnrID, attrFileName, attrVal)
			log.Debug(logs.ObjectNotFoundByFilePathTrySearchByFileName)
			return h.findObjectByAttribute(ctx, log, cnrID, attrFileName, attrVal)
		case errors.Is(err, io.EOF):
			h.reqLogger(ctx).Error(logs.ObjectNotFound, zap.Error(err), logs.TagField(logs.TagExternalStorage))
			log.Error(logs.ObjectNotFound, zap.Error(err))
			return oid.ID{}, fmt.Errorf("object not found: %w", err)
		default:
			h.reqLogger(ctx).Error(logs.ReadObjectListFailed, zap.Error(err), logs.TagField(logs.TagExternalStorage))
			log.Error(logs.ReadObjectListFailed, zap.Error(err))
			return oid.ID{}, fmt.Errorf("read object list failed: %w", err)
		}
	}
@ -315,56 +304,21 @@ func (h *Handler) needSearchByFileName(key, val string) bool {
	return strings.HasPrefix(val, "/") && strings.Count(val, "/") == 1 || !strings.Contains(val, "/")
}

func prepareAtribute(attrKey, attrVal string) string {
	if attrKey == attrFileName {
		return prepareFileName(attrVal)
	}

	if attrKey == attrFilePath {
		return prepareFilePath(attrVal)
	}

	return attrVal
}

func prepareFileName(fileName string) string {
	if strings.HasPrefix(fileName, "/") {
		return fileName[1:]
	}

	return fileName
}

func prepareFilePath(filePath string) string {
	if !strings.HasPrefix(filePath, "/") {
		return "/" + filePath
	}

	return filePath
}

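For illustration, a hypothetical test (not part of this diff) would pin down what the helpers above do before the attribute search is issued: a leading slash is clipped for the FileName attribute and added for FilePath.

// TestPrepareAttributeSketch is an illustrative sketch only; it exercises the
// prepareFileName and prepareFilePath helpers shown in the diff above.
func TestPrepareAttributeSketch(t *testing.T) {
	require.Equal(t, "cat.jpg", prepareFileName("/cat.jpg"))
	require.Equal(t, "cat.jpg", prepareFileName("cat.jpg"))
	require.Equal(t, "/cat.jpg", prepareFilePath("cat.jpg"))
	require.Equal(t, "/cat.jpg", prepareFilePath("/cat.jpg"))
}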
// resolveContainer decodes the container ID; if it is not a valid container ID,
// it tries to resolve the name using the provided resolver.
func (h *Handler) resolveContainer(ctx context.Context, containerID string) (*cid.ID, error) {
	cnrID := new(cid.ID)
	err := cnrID.DecodeString(containerID)
	if err != nil {
		var namespace string
		namespace, err = middleware.GetNamespace(ctx)
		if err != nil {
			return nil, err
		}

		zone := h.config.FormContainerZone(namespace)
		cnrID, err = h.containerResolver.Resolve(ctx, zone, containerID)
		cnrID, err = h.containerResolver.Resolve(ctx, containerID)
		if err != nil && strings.Contains(err.Error(), "not found") {
			err = fmt.Errorf("%w: %s", ErrContainerNotFound, err.Error())
			err = fmt.Errorf("%w: %s", new(apistatus.ContainerNotFound), err.Error())
		}
	}
	return cnrID, err
}

func (h *Handler) getBucketInfo(ctx context.Context, containerName string) (*data.BucketInfo, error) {
func (h *Handler) getBucketInfo(ctx context.Context, containerName string, log *zap.Logger) (*data.BucketInfo, error) {
	ns, err := middleware.GetNamespace(ctx)
	if err != nil {
		return nil, err
@ -376,20 +330,19 @@ func (h *Handler) getBucketInfo(ctx context.Context, containerName string) (*dat

	cnrID, err := h.resolveContainer(ctx, containerName)
	if err != nil {
		return nil, fmt.Errorf("resolve container: %w", err)
		return nil, err
	}

	bktInfo, err := h.readContainer(ctx, *cnrID)
	if err != nil {
		return nil, fmt.Errorf("read container: %w", err)
		return nil, err
	}

	if err = h.cache.Put(bktInfo); err != nil {
		h.reqLogger(ctx).Warn(logs.CouldntPutBucketIntoCache,
		log.Warn(logs.CouldntPutBucketIntoCache,
			zap.String("bucket name", bktInfo.Name),
			zap.Stringer("bucket cid", bktInfo.CID),
			zap.Error(err),
			logs.TagField(logs.TagDatapath))
			zap.Error(err))
	}

	return bktInfo, nil
@ -418,24 +371,28 @@ func (h *Handler) readContainer(ctx context.Context, cnrID cid.ID) (*data.Bucket
	return bktInfo, err
}

func (h *Handler) browseIndex(ctx context.Context, req *fasthttp.RequestCtx, cidParam, oidParam string, isNativeList bool) {
	ctx, span := tracing.StartSpanFromContext(ctx, "handler.browseIndex")
	defer span.End()

func (h *Handler) browseIndex(c *fasthttp.RequestCtx, isNativeList bool) {
	if !h.config.IndexPageEnabled() {
		req.SetStatusCode(fasthttp.StatusNotFound)
		c.SetStatusCode(fasthttp.StatusNotFound)
		return
	}

	unescapedKey, err := url.QueryUnescape(oidParam)
	cidURLParam := c.UserValue("cid").(string)
	oidURLParam := c.UserValue("oid").(string)

	ctx := utils.GetContextFromRequest(c)
	reqLog := utils.GetReqLogOrDefault(ctx, h.log)
	log := reqLog.With(zap.String("cid", cidURLParam), zap.String("oid", oidURLParam))

	unescapedKey, err := url.QueryUnescape(oidURLParam)
	if err != nil {
		h.logAndSendError(ctx, req, logs.FailedToUnescapeOIDParam, err)
		logAndSendBucketError(c, log, err)
		return
	}

	bktInfo, err := h.getBucketInfo(ctx, cidParam)
	bktInfo, err := h.getBucketInfo(ctx, cidURLParam, log)
	if err != nil {
		h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err)
		logAndSendBucketError(c, log, err)
		return
	}

@ -445,7 +402,7 @@ func (h *Handler) browseIndex(ctx context.Context, req *fasthttp.RequestCtx, cid
		listFunc = h.getDirObjectsNative
	}

	h.browseObjects(ctx, req, browseParams{
	h.browseObjects(c, browseParams{
		bucketInfo: bktInfo,
		prefix: unescapedKey,
		listObjects: listFunc,

@ -21,7 +21,6 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	go_fuzz_utils "github.com/trailofbits/go-fuzz-utils"
	"github.com/valyala/fasthttp"
	"go.uber.org/zap"
)

const (
@ -126,7 +125,7 @@ func maybeFillRandom(tp *go_fuzz_utils.TypeProvider, initValue string) (string,
}

func upload(tp *go_fuzz_utils.TypeProvider) (context.Context, *handlerContext, cid.ID, *fasthttp.RequestCtx, string, string, string, error) {
	hc, err := prepareHandlerContextBase(zap.NewExample())
	hc, err := prepareHandlerContext()
	if err != nil {
		return nil, nil, cid.ID{}, nil, "", "", "", err
	}
@@ -16,9 +16,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
v2container "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -32,7 +30,6 @@ import (
"github.com/stretchr/testify/require"
"github.com/valyala/fasthttp"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)

type treeServiceMock struct {
@@ -63,7 +60,6 @@ func (t *treeServiceMock) GetLatestVersion(context.Context, *cid.ID, string) (*d

type configMock struct {
additionalSearch bool
cors *data.CORSRule
}

func (c *configMock) DefaultTimestamp() bool {
@@ -102,18 +98,9 @@ func (c *configMock) EnableFilepathFallback() bool {
return c.additionalSearch
}

func (c *configMock) FormContainerZone(string) string {
return v2container.SysAttributeZoneDefault
}

func (c *configMock) CORS() *data.CORSRule {
return c.cors
}

type handlerContext struct {
key *keys.PrivateKey
owner user.ID
corsCnr cid.ID
key *keys.PrivateKey
owner user.ID

h *Handler
frostfs *TestFrostFS
@@ -125,13 +112,12 @@ func (hc *handlerContext) Handler() *Handler {
return hc.h
}

func prepareHandlerContext(t *testing.T) *handlerContext {
hc, err := prepareHandlerContextBase(zaptest.NewLogger(t))
require.NoError(t, err)
return hc
}
func prepareHandlerContext() (*handlerContext, error) {
logger, err := zap.NewDevelopment()
if err != nil {
return nil, err
}

func prepareHandlerContextBase(logger *zap.Logger) (*handlerContext, error) {
key, err := keys.NewPrivateKey()
if err != nil {
return nil, err
@@ -143,12 +129,10 @@ func prepareHandlerContextBase(logger *zap.Logger) (*handlerContext, error) {
testFrostFS := NewTestFrostFS(key)

testResolver := &resolver.Resolver{Name: "test_resolver"}
testResolver.SetResolveFunc(func(_ context.Context, _, name string) (*cid.ID, error) {
testResolver.SetResolveFunc(func(_ context.Context, name string) (*cid.ID, error) {
return testFrostFS.ContainerID(name)
})

cnrID := createCORSContainer(owner, testFrostFS)

params := &AppParams{
Logger: logger,
FrostFS: testFrostFS,
@@ -159,12 +143,6 @@ func prepareHandlerContextBase(logger *zap.Logger) (*handlerContext, error) {
Lifetime: 1,
Logger: logger,
}, false),
CORSCnrID: cnrID,
CORSCache: cache.NewCORSCache(&cache.Config{
Size: 1,
Lifetime: 1,
Logger: logger,
}),
}

treeMock := newTreeService()
@@ -179,7 +157,6 @@ func prepareHandlerContextBase(logger *zap.Logger) (*handlerContext, error) {
return &handlerContext{
key: key,
owner: owner,
corsCnr: cnrID,
h: handler,
frostfs: testFrostFS,
tree: treeMock,
@@ -187,20 +164,6 @@ func prepareHandlerContextBase(logger *zap.Logger) (*handlerContext, error) {
}, nil
}

func createCORSContainer(owner user.ID, frostfs *TestFrostFS) cid.ID {
var cnr container.Container
cnr.Init()
cnr.SetOwner(owner)

cnrID := cidtest.ID()
frostfs.SetContainer(cnrID, &cnr)
frostfs.AllowUserOperation(cnrID, owner, acl.OpObjectSearch, oid.ID{})
frostfs.AllowUserOperation(cnrID, owner, acl.OpObjectHead, oid.ID{})
frostfs.AllowUserOperation(cnrID, owner, acl.OpObjectGet, oid.ID{})

return cnrID
}

func (hc *handlerContext) prepareContainer(name string, basicACL acl.Basic) (cid.ID, *container.Container, error) {
var pp netmap.PlacementPolicy
err := pp.DecodeString("REP 1")
@@ -233,7 +196,8 @@ func (hc *handlerContext) prepareContainer(name string, basicACL acl.Basic) (cid
}

func TestBasic(t *testing.T) {
hc := prepareHandlerContext(t)
hc, err := prepareHandlerContext()
require.NoError(t, err)

bktName := "bucket"
cnrID, cnr, err := hc.prepareContainer(bktName, acl.PublicRWExtended)
@@ -255,10 +219,8 @@ func TestBasic(t *testing.T) {
require.NoError(t, err)

obj := hc.frostfs.objects[putRes.ContainerID+"/"+putRes.ObjectID]
fileName := prepareObjectAttributes(object.AttributeFileName, objFileName)
filePath := prepareObjectAttributes(object.AttributeFilePath, objFilePath)
obj.SetAttributes(append(obj.Attributes(), fileName)...)
obj.SetAttributes(append(obj.Attributes(), filePath)...)
attr := prepareObjectAttributes(object.AttributeFilePath, objFileName)
obj.SetAttributes(append(obj.Attributes(), attr)...)

t.Run("get", func(t *testing.T) {
r = prepareGetRequest(ctx, cnrID.EncodeToString(), putRes.ObjectID)
@@ -277,14 +239,6 @@ func TestBasic(t *testing.T) {
r = prepareGetByAttributeRequest(ctx, bktName, keyAttr, valAttr)
hc.Handler().DownloadByAttribute(r)
require.Equal(t, content, string(r.Response.Body()))

r = prepareGetByAttributeRequest(ctx, bktName, attrFileName, objFilePath)
hc.Handler().DownloadByAttribute(r)
require.Equal(t, content, string(r.Response.Body()))

r = prepareGetByAttributeRequest(ctx, bktName, attrFilePath, objFileName)
hc.Handler().DownloadByAttribute(r)
require.Equal(t, content, string(r.Response.Body()))
})

t.Run("head by attribute", func(t *testing.T) {
@@ -292,16 +246,6 @@ func TestBasic(t *testing.T) {
hc.Handler().HeadByAttribute(r)
require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))

r = prepareGetByAttributeRequest(ctx, bktName, attrFileName, objFilePath)
hc.Handler().HeadByAttribute(r)
require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))

r = prepareGetByAttributeRequest(ctx, bktName, attrFilePath, objFileName)
hc.Handler().HeadByAttribute(r)
require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
})

t.Run("zip", func(t *testing.T) {
@@ -312,7 +256,7 @@ func TestBasic(t *testing.T) {
zipReader, err := zip.NewReader(readerAt, int64(len(r.Response.Body())))
require.NoError(t, err)
require.Len(t, zipReader.File, 1)
require.Equal(t, objFilePath, zipReader.File[0].Name)
require.Equal(t, objFileName, zipReader.File[0].Name)
f, err := zipReader.File[0].Open()
require.NoError(t, err)
defer func() {
@@ -326,7 +270,8 @@ func TestBasic(t *testing.T) {
}

func TestFindObjectByAttribute(t *testing.T) {
hc := prepareHandlerContext(t)
hc, err := prepareHandlerContext()
require.NoError(t, err)
hc.cfg.additionalSearch = true

bktName := "bucket"
@@ -348,8 +293,8 @@ func TestFindObjectByAttribute(t *testing.T) {
err = json.Unmarshal(r.Response.Body(), &putRes)
require.NoError(t, err)

testAttrVal1 := "/folder/cat.jpg"
testAttrVal2 := "cat.jpg"
testAttrVal1 := "test-attr-val1"
testAttrVal2 := "test-attr-val2"
testAttrVal3 := "test-attr-val3"

for _, tc := range []struct {
@@ -395,21 +340,13 @@ func TestFindObjectByAttribute(t *testing.T) {
err: "not found",
additionalSearch: true,
},
{
name: "success search by FilePath with leading slash (with additional search)",
firstAttr: prepareObjectAttributes(attrFilePath, testAttrVal1),
secondAttr: prepareObjectAttributes(attrFileName, testAttrVal2),
reqAttrKey: attrFilePath,
reqAttrValue: "/cat.jpg",
additionalSearch: true,
},
} {
t.Run(tc.name, func(t *testing.T) {
obj := hc.frostfs.objects[putRes.ContainerID+"/"+putRes.ObjectID]
obj.SetAttributes(tc.firstAttr, tc.secondAttr)
hc.cfg.additionalSearch = tc.additionalSearch

objID, err := hc.Handler().findObjectByAttribute(ctx, cnrID, tc.reqAttrKey, tc.reqAttrValue)
objID, err := hc.Handler().findObjectByAttribute(ctx, hc.Handler().log, cnrID, tc.reqAttrKey, tc.reqAttrValue)
if tc.err != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tc.err)
@@ -423,7 +360,8 @@ func TestFindObjectByAttribute(t *testing.T) {
}

func TestNeedSearchByFileName(t *testing.T) {
hc := prepareHandlerContext(t)
hc, err := prepareHandlerContext()
require.NoError(t, err)

for _, tc := range []struct {
name string
@@ -484,28 +422,6 @@ func TestNeedSearchByFileName(t *testing.T) {
}
}

func TestPrepareFileName(t *testing.T) {
fileName := "/cat.jpg"
expected := "cat.jpg"
actual := prepareFileName(fileName)
require.Equal(t, expected, actual)

fileName = "cat.jpg"
actual = prepareFileName(fileName)
require.Equal(t, expected, actual)
}

func TestPrepareFilePath(t *testing.T) {
filePath := "cat.jpg"
expected := "/cat.jpg"
actual := prepareFilePath(filePath)
require.Equal(t, expected, actual)

filePath = "/cat.jpg"
actual = prepareFilePath(filePath)
require.Equal(t, expected, actual)
}

func prepareUploadRequest(ctx context.Context, bucket, content string) (*fasthttp.RequestCtx, error) {
r := new(fasthttp.RequestCtx)
utils.SetContextToRequest(ctx, r)
@@ -521,25 +437,6 @@ func prepareGetRequest(ctx context.Context, bucket, objID string) *fasthttp.Requ
return r
}

func prepareCORSRequest(t *testing.T, bucket string, headers map[string]string) *fasthttp.RequestCtx {
ctx := context.Background()
ctx = middleware.SetNamespace(ctx, "")

r := new(fasthttp.RequestCtx)
r.SetUserValue("cid", bucket)

for k, v := range headers {
r.Request.Header.Set(k, v)
}

ctx, err := tokens.StoreBearerTokenAppCtx(ctx, r)
require.NoError(t, err)

utils.SetContextToRequest(ctx, r)

return r
}

func prepareGetByAttributeRequest(ctx context.Context, bucket, attrKey, attrVal string) *fasthttp.RequestCtx {
r := new(fasthttp.RequestCtx)
utils.SetContextToRequest(ctx, r)
@@ -568,7 +465,6 @@ const (
keyAttr = "User-Attribute"
valAttr = "user value"
objFileName = "newFile.txt"
objFilePath = "/newFile.txt"
)

func fillMultipartBody(r *fasthttp.RequestCtx, content string) error {
@@ -11,7 +11,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/valyala/fasthttp"
@@ -27,7 +26,7 @@ const (
hdrContainerID = "X-Container-Id"
)

func (h *Handler) headObject(ctx context.Context, req *fasthttp.RequestCtx, objectAddress oid.Address) {
func (h *Handler) headObject(ctx context.Context, req request, objectAddress oid.Address) {
var start = time.Now()

btoken := bearerToken(ctx)
@@ -41,7 +40,7 @@ func (h *Handler) headObject(ctx context.Context, req *fasthttp.RequestCtx, obje

obj, err := h.frostfs.HeadObject(ctx, prm)
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToHeadObject, err, zap.Stringer("elapsed", time.Since(start)))
req.handleFrostFSErr(err, start)
return
}

@@ -65,11 +64,10 @@ func (h *Handler) headObject(ctx context.Context, req *fasthttp.RequestCtx, obje
case object.AttributeTimestamp:
value, err := strconv.ParseInt(val, 10, 64)
if err != nil {
h.reqLogger(ctx).Info(logs.CouldntParseCreationDate,
req.log.Info(logs.CouldntParseCreationDate,
zap.String("key", key),
zap.String("val", val),
zap.Error(err),
logs.TagField(logs.TagDatapath))
zap.Error(err))
continue
}
req.Response.Header.Set(fasthttp.HeaderLastModified, time.Unix(value, 0).UTC().Format(http.TimeFormat))
@@ -100,7 +98,7 @@ func (h *Handler) headObject(ctx context.Context, req *fasthttp.RequestCtx, obje
return h.frostfs.RangeObject(ctx, prmRange)
}, filename)
if err != nil && err != io.EOF {
h.logAndSendError(ctx, req, logs.FailedToDetectContentTypeFromPayload, err, zap.Stringer("elapsed", time.Since(start)))
req.handleFrostFSErr(err, start)
return
}
}
@@ -116,44 +114,41 @@ func idsToResponse(resp *fasthttp.Response, obj *object.Object) {
}

// HeadByAddressOrBucketName handles head requests using simple cid/oid or bucketname/key format.
func (h *Handler) HeadByAddressOrBucketName(req *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.HeadByAddressOrBucketName")
defer span.End()
func (h *Handler) HeadByAddressOrBucketName(c *fasthttp.RequestCtx) {
cidParam, _ := c.UserValue("cid").(string)
oidParam, _ := c.UserValue("oid").(string)

cidParam, _ := req.UserValue("cid").(string)
oidParam, _ := req.UserValue("oid").(string)

ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(
ctx := utils.GetContextFromRequest(c)
log := utils.GetReqLogOrDefault(ctx, h.log).With(
zap.String("cid", cidParam),
zap.String("oid", oidParam),
))
)

bktInfo, err := h.getBucketInfo(ctx, cidParam)
bktInfo, err := h.getBucketInfo(ctx, cidParam, log)
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err)
logAndSendBucketError(c, log, err)
return
}

checkS3Err := h.tree.CheckSettingsNodeExists(ctx, bktInfo)
if checkS3Err != nil && !errors.Is(checkS3Err, layer.ErrNodeNotFound) {
h.logAndSendError(ctx, req, logs.FailedToCheckIfSettingsNodeExist, checkS3Err)
logAndSendBucketError(c, log, checkS3Err)
return
}

req := newRequest(c, log)

var objID oid.ID
if checkS3Err == nil {
h.byS3Path(ctx, req, bktInfo.CID, oidParam, h.headObject)
} else if err = objID.DecodeString(oidParam); err == nil {
h.byNativeAddress(ctx, req, bktInfo.CID, objID, h.headObject)
} else {
h.logAndSendError(ctx, req, logs.InvalidOIDParam, err)
logAndSendBucketError(c, log, checkS3Err)
return
}
}

// HeadByAttribute handles attribute-based head requests.
func (h *Handler) HeadByAttribute(req *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.HeadByAttribute")
defer span.End()

h.byAttribute(ctx, req, h.headObject)
func (h *Handler) HeadByAttribute(c *fasthttp.RequestCtx) {
h.byAttribute(c, h.headObject)
}
@@ -1,7 +1,6 @@
package handler

import (
"context"
"errors"
"io"
"strconv"
@@ -34,7 +33,7 @@ func fetchMultipartFile(l *zap.Logger, r io.Reader, boundary string) (MultipartF

name := part.FormName()
if name == "" {
l.Debug(logs.IgnorePartEmptyFormName, logs.TagField(logs.TagDatapath))
l.Debug(logs.IgnorePartEmptyFormName)
continue
}

@@ -42,9 +41,9 @@ func fetchMultipartFile(l *zap.Logger, r io.Reader, boundary string) (MultipartF

// ignore multipart/form-data values
if filename == "" {
l.Debug(logs.IgnorePartEmptyFilename, zap.String("form", name), logs.TagField(logs.TagDatapath))
l.Debug(logs.IgnorePartEmptyFilename, zap.String("form", name))
if err = part.Close(); err != nil {
l.Warn(logs.FailedToCloseReader, zap.Error(err), logs.TagField(logs.TagDatapath))
l.Warn(logs.FailedToCloseReader, zap.Error(err))
}
continue
}
@@ -54,7 +53,7 @@ func fetchMultipartFile(l *zap.Logger, r io.Reader, boundary string) (MultipartF
}

// getPayload returns initial payload if object is not multipart else composes new reader with parts data.
func (h *Handler) getPayload(ctx context.Context, p getMultiobjectBodyParams) (io.ReadCloser, uint64, error) {
func (h *Handler) getPayload(p getMultiobjectBodyParams) (io.ReadCloser, uint64, error) {
cid, ok := p.obj.Header.ContainerID()
if !ok {
return nil, 0, errors.New("no container id set")
@@ -67,6 +66,7 @@ func (h *Handler) getPayload(ctx context.Context, p getMultiobjectBodyParams) (i
if err != nil {
return nil, 0, err
}
ctx := p.req.RequestCtx
params := PrmInitMultiObjectReader{
Addr: newAddress(cid, oid),
Bearer: bearerToken(ctx),
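Note (editor's illustration, not part of the diff): the multipart handling above skips parts with an empty form name or empty filename. A minimal standalone Go sketch of that same stdlib pattern follows; the boundary, field names, and file name are hypothetical and chosen only for the example.

package main

import (
	"fmt"
	"io"
	"mime/multipart"
	"strings"
)

func main() {
	// Hypothetical multipart body: one plain value field and one file field.
	body := "--b\r\nContent-Disposition: form-data; name=\"note\"\r\n\r\nhello\r\n" +
		"--b\r\nContent-Disposition: form-data; name=\"file\"; filename=\"a.txt\"\r\n\r\npayload\r\n--b--\r\n"
	rd := multipart.NewReader(strings.NewReader(body), "b")
	for {
		part, err := rd.NextPart()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		// Skip value parts (no filename), keeping only real file parts,
		// mirroring the empty-filename check in fetchMultipartFile.
		if part.FileName() == "" {
			part.Close()
			continue
		}
		data, _ := io.ReadAll(part)
		fmt.Printf("form=%s file=%s payload=%q\n", part.FormName(), part.FileName(), data)
	}
}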
@@ -60,7 +60,12 @@ func BenchmarkAll(b *testing.B) {
func defaultMultipart(filename string) error {
r, bound := multipartFile(filename)

file, err := fetchMultipartFileDefault(zap.NewNop(), r, bound)
logger, err := zap.NewProduction()
if err != nil {
return err
}

file, err := fetchMultipartFileDefault(logger, r, bound)
if err != nil {
return err
}
@@ -82,7 +87,12 @@ func TestName(t *testing.T) {
func customMultipart(filename string) error {
r, bound := multipartFile(filename)

file, err := fetchMultipartFile(zap.NewNop(), r, bound)
logger, err := zap.NewProduction()
if err != nil {
return err
}

file, err := fetchMultipartFile(logger, r, bound)
if err != nil {
return err
}
@@ -102,7 +112,7 @@ func fetchMultipartFileDefault(l *zap.Logger, r io.Reader, boundary string) (Mul

name := part.FormName()
if name == "" {
l.Debug(logs.IgnorePartEmptyFormName, logs.TagField(logs.TagDatapath))
l.Debug(logs.IgnorePartEmptyFormName)
continue
}

@@ -110,7 +120,8 @@ func fetchMultipartFileDefault(l *zap.Logger, r io.Reader, boundary string) (Mul

// ignore multipart/form-data values
if filename == "" {
l.Debug(logs.IgnorePartEmptyFilename, zap.String("form", name), logs.TagField(logs.TagDatapath))
l.Debug(logs.IgnorePartEmptyFilename, zap.String("form", name))

continue
}
@@ -63,10 +63,11 @@ func readContentType(maxSize uint64, rInit func(uint64) (io.Reader, error), file

type getMultiobjectBodyParams struct {
obj *Object
req request
strSize string
}

func (h *Handler) receiveFile(ctx context.Context, req *fasthttp.RequestCtx, objAddress oid.Address) {
func (h *Handler) receiveFile(ctx context.Context, req request, objAddress oid.Address) {
var (
shouldDownload = req.QueryArgs().GetBool("download")
start = time.Now()
@@ -84,12 +85,12 @@ func (h *Handler) receiveFile(ctx context.Context, req *fasthttp.RequestCtx, obj

rObj, err := h.frostfs.GetObject(ctx, prm)
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToGetObject, err, zap.Stringer("elapsed", time.Since(start)))
req.handleFrostFSErr(err, start)
return
}

// we can't close reader in this function, so how to do it?
setIDs(req, rObj.Header)
req.setIDs(rObj.Header)
payload := rObj.Payload
payloadSize := rObj.Header.PayloadSize()
for _, attr := range rObj.Header.Attributes() {
@@ -106,23 +107,23 @@ func (h *Handler) receiveFile(ctx context.Context, req *fasthttp.RequestCtx, obj
case object.AttributeFileName:
filename = val
case object.AttributeTimestamp:
if err = setTimestamp(req, val); err != nil {
h.reqLogger(ctx).Error(logs.CouldntParseCreationDate,
if err = req.setTimestamp(val); err != nil {
req.log.Error(logs.CouldntParseCreationDate,
zap.String("val", val),
zap.Error(err),
logs.TagField(logs.TagDatapath))
zap.Error(err))
}
case object.AttributeContentType:
contentType = val
case object.AttributeFilePath:
filepath = val
case attributeMultipartObjectSize:
payload, payloadSize, err = h.getPayload(ctx, getMultiobjectBodyParams{
payload, payloadSize, err = h.getPayload(getMultiobjectBodyParams{
obj: rObj,
req: req,
strSize: val,
})
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToGetObjectPayload, err, zap.Stringer("elapsed", time.Since(start)))
req.handleFrostFSErr(err, start)
return
}
}
@@ -131,7 +132,7 @@ func (h *Handler) receiveFile(ctx context.Context, req *fasthttp.RequestCtx, obj
filename = filepath
}

setDisposition(req, shouldDownload, filename)
req.setDisposition(shouldDownload, filename)

req.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(payloadSize, 10))

@@ -143,7 +144,8 @@ func (h *Handler) receiveFile(ctx context.Context, req *fasthttp.RequestCtx, obj
return payload, nil
}, filename)
if err != nil && err != io.EOF {
h.logAndSendError(ctx, req, logs.FailedToDetectContentTypeFromPayload, err, zap.Stringer("elapsed", time.Since(start)))
req.log.Error(logs.CouldNotDetectContentTypeFromPayload, zap.Error(err))
ResponseError(req.RequestCtx, "could not detect Content-Type from payload: "+err.Error(), fasthttp.StatusBadRequest)
return
}

@@ -162,7 +164,7 @@ func (h *Handler) receiveFile(ctx context.Context, req *fasthttp.RequestCtx, obj
req.Response.SetBodyStream(payload, int(payloadSize))
}

func setIDs(r *fasthttp.RequestCtx, obj object.Object) {
func (r *request) setIDs(obj object.Object) {
objID, _ := obj.ID()
cnrID, _ := obj.ContainerID()
r.Response.Header.Set(hdrObjectID, objID.String())
@@ -170,7 +172,7 @@ func setIDs(r *fasthttp.RequestCtx, obj object.Object) {
r.Response.Header.Set(hdrContainerID, cnrID.String())
}

func setDisposition(r *fasthttp.RequestCtx, shouldDownload bool, filename string) {
func (r *request) setDisposition(shouldDownload bool, filename string) {
const (
inlineDisposition = "inline"
attachmentDisposition = "attachment"
@@ -184,7 +186,7 @@ func setDisposition(r *fasthttp.RequestCtx, shouldDownload bool, filename string
r.Response.Header.Set(fasthttp.HeaderContentDisposition, dis+"; filename="+path.Base(filename))
}

func setTimestamp(r *fasthttp.RequestCtx, timestamp string) error {
func (r *request) setTimestamp(timestamp string) error {
value, err := strconv.ParseInt(timestamp, 10, 64)
if err != nil {
return err
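Note (editor's illustration, not part of the diff): both versions of the disposition code above build the Content-Disposition value from a download flag and the object's file name. A minimal standalone sketch of that choice, with hypothetical inputs, is:

package main

import (
	"fmt"
	"path"
)

// contentDisposition mirrors the inline/attachment choice made by setDisposition:
// explicit download requests get "attachment", everything else is served "inline",
// and only the base name of the file path is exposed to the client.
func contentDisposition(shouldDownload bool, filename string) string {
	dis := "inline"
	if shouldDownload {
		dis = "attachment"
	}
	return dis + "; filename=" + path.Base(filename)
}

func main() {
	fmt.Println(contentDisposition(false, "/folder/cat.jpg")) // inline; filename=cat.jpg
	fmt.Println(contentDisposition(true, "/folder/cat.jpg"))  // attachment; filename=cat.jpg
}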
@@ -17,7 +17,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -50,41 +49,41 @@ func (pr *putResponse) encode(w io.Writer) error {
}

// Upload handles multipart upload request.
func (h *Handler) Upload(req *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.Upload")
defer span.End()

func (h *Handler) Upload(c *fasthttp.RequestCtx) {
var file MultipartFile

scid, _ := req.UserValue("cid").(string)
bodyStream := req.RequestBodyStream()
scid, _ := c.UserValue("cid").(string)
bodyStream := c.RequestBodyStream()
drainBuf := make([]byte, drainBufSize)

log := h.reqLogger(ctx)
ctx = utils.SetReqLog(ctx, log.With(zap.String("cid", scid)))
ctx := utils.GetContextFromRequest(c)
reqLog := utils.GetReqLogOrDefault(ctx, h.log)
log := reqLog.With(zap.String("cid", scid))

bktInfo, err := h.getBucketInfo(ctx, scid)
bktInfo, err := h.getBucketInfo(ctx, scid, log)
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err)
logAndSendBucketError(c, log, err)
return
}

boundary := string(req.Request.Header.MultipartFormBoundary())
boundary := string(c.Request.Header.MultipartFormBoundary())
if file, err = fetchMultipartFile(log, bodyStream, boundary); err != nil {
h.logAndSendError(ctx, req, logs.CouldNotReceiveMultipartForm, err)
log.Error(logs.CouldNotReceiveMultipartForm, zap.Error(err))
ResponseError(c, "could not receive multipart/form: "+err.Error(), fasthttp.StatusBadRequest)
return
}

filtered, err := filterHeaders(log, &req.Request.Header)
filtered, err := filterHeaders(log, &c.Request.Header)
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToFilterHeaders, err)
log.Error(logs.FailedToFilterHeaders, zap.Error(err))
ResponseError(c, err.Error(), fasthttp.StatusBadRequest)
return
}

if req.Request.Header.Peek(explodeArchiveHeader) != nil {
h.explodeArchive(ctx, req, bktInfo, file, filtered)
if c.Request.Header.Peek(explodeArchiveHeader) != nil {
h.explodeArchive(request{c, log}, bktInfo, file, filtered)
} else {
h.uploadSingleObject(ctx, req, bktInfo, file, filtered)
h.uploadSingleObject(request{c, log}, bktInfo, file, filtered)
}

// Multipart is multipart and thus can contain more than one part which
@@ -101,39 +100,40 @@ func (h *Handler) Upload(req *fasthttp.RequestCtx) {
}
}

func (h *Handler) uploadSingleObject(ctx context.Context, req *fasthttp.RequestCtx, bkt *data.BucketInfo, file MultipartFile, filtered map[string]string) {
ctx, span := tracing.StartSpanFromContext(ctx, "handler.uploadSingleObject")
defer span.End()

func (h *Handler) uploadSingleObject(req request, bkt *data.BucketInfo, file MultipartFile, filtered map[string]string) {
c, log := req.RequestCtx, req.log
setIfNotExist(filtered, object.AttributeFileName, file.FileName())

attributes, err := h.extractAttributes(ctx, req, filtered)
attributes, err := h.extractAttributes(c, log, filtered)
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToGetAttributes, err)
log.Error(logs.FailedToGetAttributes, zap.Error(err))
ResponseError(c, "could not extract attributes: "+err.Error(), fasthttp.StatusBadRequest)
return
}

idObj, err := h.uploadObject(ctx, bkt, attributes, file)
idObj, err := h.uploadObject(c, bkt, attributes, file)
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToUploadObject, err)
h.handlePutFrostFSErr(c, err, log)
return
}
h.reqLogger(ctx).Debug(logs.ObjectUploaded,
log.Debug(logs.ObjectUploaded,
zap.String("oid", idObj.EncodeToString()),
zap.String("FileName", file.FileName()),
logs.TagField(logs.TagExternalStorage),
)

addr := newAddress(bkt.CID, idObj)
req.Response.Header.SetContentType(jsonHeader)
c.Response.Header.SetContentType(jsonHeader)
// Try to return the response, otherwise, if something went wrong, throw an error.
if err = newPutResponse(addr).encode(req); err != nil {
h.logAndSendError(ctx, req, logs.CouldNotEncodeResponse, err)
if err = newPutResponse(addr).encode(c); err != nil {
log.Error(logs.CouldNotEncodeResponse, zap.Error(err))
ResponseError(c, "could not encode response", fasthttp.StatusBadRequest)
return
}
}

func (h *Handler) uploadObject(ctx context.Context, bkt *data.BucketInfo, attrs []object.Attribute, file io.Reader) (oid.ID, error) {
func (h *Handler) uploadObject(c *fasthttp.RequestCtx, bkt *data.BucketInfo, attrs []object.Attribute, file io.Reader) (oid.ID, error) {
ctx := utils.GetContextFromRequest(c)

obj := object.New()
obj.SetContainerID(bkt.CID)
obj.SetOwnerID(*h.ownerID)
@@ -158,18 +158,17 @@ func (h *Handler) uploadObject(ctx context.Context, bkt *data.BucketInfo, attrs
return idObj, nil
}

func (h *Handler) extractAttributes(ctx context.Context, req *fasthttp.RequestCtx, filtered map[string]string) ([]object.Attribute, error) {
func (h *Handler) extractAttributes(c *fasthttp.RequestCtx, log *zap.Logger, filtered map[string]string) ([]object.Attribute, error) {
now := time.Now()
if rawHeader := req.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil {
if rawHeader := c.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil {
if parsed, err := time.Parse(http.TimeFormat, string(rawHeader)); err != nil {
h.reqLogger(ctx).Warn(logs.CouldNotParseClientTime, zap.String("Date header", string(rawHeader)), zap.Error(err),
logs.TagField(logs.TagDatapath))
log.Warn(logs.CouldNotParseClientTime, zap.String("Date header", string(rawHeader)), zap.Error(err))
} else {
now = parsed
}
}
if err := utils.PrepareExpirationHeader(ctx, h.frostfs, filtered, now); err != nil {
h.reqLogger(ctx).Error(logs.CouldNotPrepareExpirationHeader, zap.Error(err), logs.TagField(logs.TagDatapath))
if err := utils.PrepareExpirationHeader(c, h.frostfs, filtered, now); err != nil {
log.Error(logs.CouldNotPrepareExpirationHeader, zap.Error(err))
return nil, err
}
attributes := make([]object.Attribute, 0, len(filtered))
@@ -196,33 +195,34 @@ func newAttribute(key string, val string) object.Attribute {

// explodeArchive read files from archive and creates objects for each of them.
// Sets FilePath attribute with name from tar.Header.
func (h *Handler) explodeArchive(ctx context.Context, req *fasthttp.RequestCtx, bkt *data.BucketInfo, file io.ReadCloser, filtered map[string]string) {
ctx, span := tracing.StartSpanFromContext(ctx, "handler.explodeArchive")
defer span.End()
func (h *Handler) explodeArchive(req request, bkt *data.BucketInfo, file io.ReadCloser, filtered map[string]string) {
c, log := req.RequestCtx, req.log

// remove user attributes which vary for each file in archive
// to guarantee that they won't appear twice
delete(filtered, object.AttributeFileName)
delete(filtered, object.AttributeFilePath)

commonAttributes, err := h.extractAttributes(ctx, req, filtered)
commonAttributes, err := h.extractAttributes(c, log, filtered)
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToGetAttributes, err)
log.Error(logs.FailedToGetAttributes, zap.Error(err))
ResponseError(c, "could not extract attributes: "+err.Error(), fasthttp.StatusBadRequest)
return
}
attributes := commonAttributes

reader := file
if bytes.EqualFold(req.Request.Header.Peek(fasthttp.HeaderContentEncoding), []byte("gzip")) {
h.reqLogger(ctx).Debug(logs.GzipReaderSelected, logs.TagField(logs.TagDatapath))
if bytes.EqualFold(c.Request.Header.Peek(fasthttp.HeaderContentEncoding), []byte("gzip")) {
log.Debug(logs.GzipReaderSelected)
gzipReader, err := gzip.NewReader(file)
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToCreateGzipReader, err)
log.Error(logs.FailedToCreateGzipReader, zap.Error(err))
ResponseError(c, "could read gzip file: "+err.Error(), fasthttp.StatusBadRequest)
return
}
defer func() {
if err := gzipReader.Close(); err != nil {
h.reqLogger(ctx).Warn(logs.FailedToCloseReader, zap.Error(err), logs.TagField(logs.TagDatapath))
log.Warn(logs.FailedToCloseReader, zap.Error(err))
}
}()
reader = gzipReader
@@ -234,7 +234,8 @@ func (h *Handler) explodeArchive(ctx context.Context, req *fasthttp.RequestCtx,
if errors.Is(err, io.EOF) {
break
} else if err != nil {
h.logAndSendError(ctx, req, logs.FailedToReadFileFromTar, err)
log.Error(logs.FailedToReadFileFromTar, zap.Error(err))
ResponseError(c, "could not get next entry: "+err.Error(), fasthttp.StatusBadRequest)
return
}

@@ -248,20 +249,27 @@ func (h *Handler) explodeArchive(ctx context.Context, req *fasthttp.RequestCtx,
attributes = append(attributes, newAttribute(object.AttributeFilePath, obj.Name))
attributes = append(attributes, newAttribute(object.AttributeFileName, fileName))

idObj, err := h.uploadObject(ctx, bkt, attributes, tarReader)
idObj, err := h.uploadObject(c, bkt, attributes, tarReader)
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToUploadObject, err)
h.handlePutFrostFSErr(c, err, log)
return
}

h.reqLogger(ctx).Debug(logs.ObjectUploaded,
log.Debug(logs.ObjectUploaded,
zap.String("oid", idObj.EncodeToString()),
zap.String("FileName", fileName),
logs.TagField(logs.TagExternalStorage),
)
}
}

func (h *Handler) handlePutFrostFSErr(r *fasthttp.RequestCtx, err error, log *zap.Logger) {
statusCode, msg, additionalFields := formErrorResponse("could not store file in frostfs", err)
logFields := append([]zap.Field{zap.Error(err)}, additionalFields...)

log.Error(logs.CouldNotStoreFileInFrostfs, logFields...)
ResponseError(r, msg, statusCode)
}

func (h *Handler) fetchBearerToken(ctx context.Context) *bearer.Token {
if tkn, err := tokens.LoadBearerToken(ctx); err == nil && tkn != nil {
return tkn
@@ -5,12 +5,13 @@ import (
"errors"
"fmt"
"strings"
"time"

"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
sdkstatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -18,6 +19,30 @@ import (
"go.uber.org/zap"
)

type request struct {
*fasthttp.RequestCtx
log *zap.Logger
}

func newRequest(ctx *fasthttp.RequestCtx, log *zap.Logger) request {
return request{
RequestCtx: ctx,
log: log,
}
}

func (r *request) handleFrostFSErr(err error, start time.Time) {
logFields := []zap.Field{
zap.Stringer("elapsed", time.Since(start)),
zap.Error(err),
}
statusCode, msg, additionalFields := formErrorResponse("could not receive object", err)
logFields = append(logFields, additionalFields...)

r.log.Error(logs.CouldNotReceiveObject, logFields...)
ResponseError(r.RequestCtx, msg, statusCode)
}

func bearerToken(ctx context.Context) *bearer.Token {
if tkn, err := tokens.LoadBearerToken(ctx); err == nil {
return tkn
@@ -59,16 +84,14 @@ func isValidValue(s string) bool {
return true
}

func (h *Handler) reqLogger(ctx context.Context) *zap.Logger {
return utils.GetReqLogOrDefault(ctx, h.log)
}
func logAndSendBucketError(c *fasthttp.RequestCtx, log *zap.Logger, err error) {
log.Error(logs.CouldntGetBucket, zap.Error(err))

func (h *Handler) logAndSendError(ctx context.Context, c *fasthttp.RequestCtx, msg string, err error, additional ...zap.Field) {
utils.GetReqLogOrDefault(ctx, h.log).Error(msg,
append([]zap.Field{zap.Error(err), logs.TagField(logs.TagDatapath)}, additional...)...)

msg, code := formErrorResponse(err)
ResponseError(c, msg, code)
if client.IsErrContainerNotFound(err) {
ResponseError(c, "Not Found", fasthttp.StatusNotFound)
return
}
ResponseError(c, "could not get bucket: "+err.Error(), fasthttp.StatusBadRequest)
}

func newAddress(cnr cid.ID, obj oid.ID) oid.Address {
@@ -89,23 +112,31 @@ func ResponseError(r *fasthttp.RequestCtx, msg string, code int) {
r.Error(msg+"\n", code)
}

func formErrorResponse(err error) (string, int) {
func formErrorResponse(message string, err error) (int, string, []zap.Field) {
var (
msg string
statusCode int
logFields []zap.Field
)

st := new(sdkstatus.ObjectAccessDenied)

switch {
case errors.Is(err, ErrAccessDenied):
return fmt.Sprintf("Storage Access Denied:\n%v", err), fasthttp.StatusForbidden
case errors.Is(err, layer.ErrNodeAccessDenied):
return fmt.Sprintf("Tree Access Denied:\n%v", err), fasthttp.StatusForbidden
case errors.As(err, &st):
statusCode = fasthttp.StatusForbidden
reason := st.Reason()
msg = fmt.Sprintf("%s: %v: %s", message, err, reason)
logFields = append(logFields, zap.String("error_detail", reason))
case errors.Is(err, ErrQuotaLimitReached):
return fmt.Sprintf("Quota Reached:\n%v", err), fasthttp.StatusConflict
case errors.Is(err, ErrContainerNotFound):
return fmt.Sprintf("Container Not Found:\n%v", err), fasthttp.StatusNotFound
case errors.Is(err, ErrObjectNotFound):
return fmt.Sprintf("Object Not Found:\n%v", err), fasthttp.StatusNotFound
case errors.Is(err, layer.ErrNodeNotFound):
return fmt.Sprintf("Tree Node Not Found:\n%v", err), fasthttp.StatusNotFound
case errors.Is(err, ErrGatewayTimeout):
return fmt.Sprintf("Gateway Timeout:\n%v", err), fasthttp.StatusGatewayTimeout
statusCode = fasthttp.StatusConflict
msg = fmt.Sprintf("%s: %v", message, err)
case client.IsErrObjectNotFound(err) || client.IsErrContainerNotFound(err):
statusCode = fasthttp.StatusNotFound
msg = "Not Found"
default:
return fmt.Sprintf("Bad Request:\n%v", err), fasthttp.StatusBadRequest
statusCode = fasthttp.StatusBadRequest
msg = fmt.Sprintf("%s: %v", message, err)
}

return statusCode, msg, logFields
}
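Note (editor's illustration, not part of the diff): both variants of formErrorResponse above translate sentinel errors into HTTP status codes. A simplified standalone Go sketch of that mapping follows; the sentinel errors here are declared locally for the example and only stand in for the handler package's ErrAccessDenied, ErrContainerNotFound, and ErrGatewayTimeout values.

package main

import (
	"errors"
	"fmt"
	"net/http"
)

// Local stand-ins for the handler package's sentinel errors.
var (
	errAccessDenied   = errors.New("access denied")
	errContainerGone  = errors.New("container not found")
	errGatewayTimeout = errors.New("gateway timeout")
)

// statusFor maps a sentinel error to a response message and HTTP status code,
// in the spirit of formErrorResponse: forbidden, not found, gateway timeout,
// and bad request as the fallback.
func statusFor(err error) (string, int) {
	switch {
	case errors.Is(err, errAccessDenied):
		return fmt.Sprintf("Storage Access Denied:\n%v", err), http.StatusForbidden
	case errors.Is(err, errContainerGone):
		return fmt.Sprintf("Container Not Found:\n%v", err), http.StatusNotFound
	case errors.Is(err, errGatewayTimeout):
		return fmt.Sprintf("Gateway Timeout:\n%v", err), http.StatusGatewayTimeout
	default:
		return fmt.Sprintf("Bad Request:\n%v", err), http.StatusBadRequest
	}
}

func main() {
	msg, code := statusFor(fmt.Errorf("get container: %w", errContainerGone))
	fmt.Println(code, msg) // 404 Container Not Found: ...
}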
@@ -1,43 +1,63 @@
package logs

import "go.uber.org/zap"

const (
TagFieldName = "tag"

TagApp = "app"
TagDatapath = "datapath"
TagExternalStorage = "external_storage"
TagExternalStorageTree = "external_storage_tree"
)

func TagField(tag string) zap.Field {
return zap.String(TagFieldName, tag)
}

// Log messages with the "app" tag.
const (
CouldntParseCreationDate = "couldn't parse creation date"
CouldNotDetectContentTypeFromPayload = "could not detect Content-Type from payload"
CouldNotReceiveObject = "could not receive object"
ObjectWasDeleted = "object was deleted"
CouldNotSearchForObjects = "could not search for objects"
ObjectNotFound = "object not found"
ReadObjectListFailed = "read object list failed"
FailedToAddObjectToArchive = "failed to add object to archive"
FailedToGetObject = "failed to get object"
IteratingOverSelectedObjectsFailed = "iterating over selected objects failed"
ObjectsNotFound = "objects not found"
CloseZipWriter = "close zip writer"
ServiceIsRunning = "service is running"
ServiceCouldntStartOnConfiguredPort = "service couldn't start on configured port"
ServiceHasntStartedSinceItsDisabled = "service hasn't started since it's disabled"
ShuttingDownService = "shutting down service"
CantShutDownService = "can't shut down service"
CantGracefullyShutDownService = "can't gracefully shut down service, force stop"
IgnorePartEmptyFormName = "ignore part, empty form name"
IgnorePartEmptyFilename = "ignore part, empty filename"
CouldNotReceiveMultipartForm = "could not receive multipart/form"
CouldNotParseClientTime = "could not parse client time"
CouldNotPrepareExpirationHeader = "could not prepare expiration header"
CouldNotEncodeResponse = "could not encode response"
CouldNotStoreFileInFrostfs = "could not store file in frostfs"
AddAttributeToResultObject = "add attribute to result object"
FailedToCreateResolver = "failed to create resolver"
FailedToCreateWorkerPool = "failed to create worker pool"
FailedToReadIndexPageTemplate = "failed to read index page template"
SetCustomIndexPageTemplate = "set custom index page template"
ContainerResolverWillBeDisabledBecauseOfResolversResolverOrderIsEmpty = "container resolver will be disabled because of resolvers 'resolver_order' is empty"
MetricsAreDisabled = "metrics are disabled"
NoWalletPathSpecifiedCreatingEphemeralKeyAutomaticallyForThisRun = "no wallet path specified, creating ephemeral key automatically for this run"
StartingApplication = "starting application"
StartingServer = "starting server"
ListenAndServe = "listen and serve"
ShuttingDownWebServer = "shutting down web server"
FailedToShutdownTracing = "failed to shutdown tracing"
SIGHUPConfigReloadStarted = "SIGHUP config reload started"
FailedToReloadConfigBecauseItsMissed = "failed to reload config because it's missed"
FailedToReloadConfig = "failed to reload config"
LogLevelWontBeUpdated = "log level won't be updated"
FailedToUpdateResolvers = "failed to update resolvers"
FailedToReloadServerParameters = "failed to reload server parameters"
SIGHUPConfigReloadCompleted = "SIGHUP config reload completed"
AddedPathUploadCid = "added path /upload/{cid}"
AddedPathGetCidOid = "added path /get/{cid}/{oid}"
AddedPathGetByAttributeCidAttrKeyAttrVal = "added path /get_by_attribute/{cid}/{attr_key}/{attr_val:*}"
AddedPathZipCidPrefix = "added path /zip/{cid}/{prefix}"
Request = "request"
CouldNotFetchAndStoreBearerToken = "could not fetch and store bearer token"
FailedToAddServer = "failed to add server"
AddServer = "add server"
NoHealthyServers = "no healthy servers"
FailedToInitializeTracing = "failed to initialize tracing"
TracingConfigUpdated = "tracing config updated"
ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided = "resolver nns won't be used since rpc_endpoint isn't provided"
RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped"
RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated"
CouldNotLoadFrostFSPrivateKey = "could not load FrostFS private key"
@@ -46,96 +66,33 @@ const (
FailedToDialConnectionPool = "failed to dial connection pool"
FailedToCreateTreePool = "failed to create tree pool"
FailedToDialTreePool = "failed to dial tree pool"
AddedStoragePeer = "added storage peer"
CouldntGetBucket = "could not get bucket"
CouldntPutBucketIntoCache = "couldn't put bucket info into cache"
FailedToSumbitTaskToPool = "failed to submit task to pool"
FailedToHeadObject = "failed to head object"
FailedToIterateOverResponse = "failed to iterate over search response"
InvalidCacheEntryType = "invalid cache entry type"
InvalidLifetimeUsingDefaultValue = "invalid lifetime, using default value (in seconds)"
InvalidCacheSizeUsingDefaultValue = "invalid cache size, using default value"
FailedToUnescapeQuery = "failed to unescape query"
ServerReconnecting = "reconnecting server..."
ServerReconnectedSuccessfully = "server reconnected successfully"
ServerReconnectFailed = "failed to reconnect server"
WarnDuplicateAddress = "duplicate address"
MultinetDialSuccess = "multinet dial successful"
MultinetDialFail = "multinet dial failed"
ContainerResolverWillBeDisabledBecauseOfResolversResolverOrderIsEmpty = "container resolver will be disabled because of resolvers 'resolver_order' is empty"
MetricsAreDisabled = "metrics are disabled"
NoWalletPathSpecifiedCreatingEphemeralKeyAutomaticallyForThisRun = "no wallet path specified, creating ephemeral key automatically for this run"
SIGHUPConfigReloadStarted = "SIGHUP config reload started"
FailedToReloadConfigBecauseItsMissed = "failed to reload config because it's missed"
FailedToReloadConfig = "failed to reload config"
FailedToUpdateResolvers = "failed to update resolvers"
FailedToReloadServerParameters = "failed to reload server parameters"
SIGHUPConfigReloadCompleted = "SIGHUP config reload completed"
TracingConfigUpdated = "tracing config updated"
ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided = "resolver nns won't be used since rpc_endpoint isn't provided"
AddedStoragePeer = "added storage peer"
InvalidLifetimeUsingDefaultValue = "invalid lifetime, using default value (in seconds)"
InvalidCacheSizeUsingDefaultValue = "invalid cache size, using default value"
WarnDuplicateAddress = "duplicate address"
FailedToLoadMultinetConfig = "failed to load multinet config"
MultinetConfigWontBeUpdated = "multinet config won't be updated"
LogLevelWontBeUpdated = "log level won't be updated"
TagsLogConfigWontBeUpdated = "tags log config won't be updated"
FailedToReadIndexPageTemplate = "failed to read index page template"
SetCustomIndexPageTemplate = "set custom index page template"
CouldNotFetchCORSContainerInfo = "couldn't fetch CORS container info"
)

// Log messages with the "datapath" tag.
const (
CouldntParseCreationDate = "couldn't parse creation date"
FailedToDetectContentTypeFromPayload = "failed to detect Content-Type from payload"
FailedToAddObjectToArchive = "failed to add object to archive"
CloseZipWriter = "close zip writer"
IgnorePartEmptyFormName = "ignore part, empty form name"
IgnorePartEmptyFilename = "ignore part, empty filename"
CouldNotParseClientTime = "could not parse client time"
CouldNotPrepareExpirationHeader = "could not prepare expiration header"
CouldNotEncodeResponse = "could not encode response"
AddAttributeToResultObject = "add attribute to result object"
Request = "request"
CouldNotFetchAndStoreBearerToken = "could not fetch and store bearer token"
CouldntPutBucketIntoCache = "couldn't put bucket info into cache"
FailedToIterateOverResponse = "failed to iterate over search response"
InvalidCacheEntryType = "invalid cache entry type"
FailedToUnescapeQuery = "failed to unescape query"
CouldntCacheNetmap = "couldn't cache netmap"
FailedToCloseReader = "failed to close reader"
FailedToFilterHeaders = "failed to filter headers"
FailedToReadFileFromTar = "failed to read file from tar"
FailedToGetAttributes = "failed to get attributes"
CloseGzipWriter = "close gzip writer"
CloseTarWriter = "close tar writer"
FailedToCreateGzipReader = "failed to create gzip reader"
GzipReaderSelected = "gzip reader selected"
CouldNotReceiveMultipartForm = "could not receive multipart/form"
ObjectsNotFound = "objects not found"
IteratingOverSelectedObjectsFailed = "iterating over selected objects failed"
FailedToGetBucketInfo = "could not get bucket info"
FailedToSubmitTaskToPool = "failed to submit task to pool"
ObjectWasDeleted = "object was deleted"
FailedToGetLatestVersionOfObject = "failed to get latest version of object"
FailedToCheckIfSettingsNodeExist = "failed to check if settings node exists"
FailedToListObjects = "failed to list objects"
FailedToParseTemplate = "failed to parse template"
FailedToExecuteTemplate = "failed to execute template"
FailedToUploadObject = "failed to upload object"
FailedToHeadObject = "failed to head object"
FailedToGetObject = "failed to get object"
FailedToGetObjectPayload = "failed to get object payload"
FailedToFindObjectByAttribute = "failed to get find object by attribute"
FailedToUnescapeOIDParam = "failed to unescape oid param"
InvalidOIDParam = "invalid oid param"
CouldNotGetCORSConfiguration = "could not get cors configuration"
EmptyOriginRequestHeader = "empty Origin request header"
EmptyAccessControlRequestMethodHeader = "empty Access-Control-Request-Method request header"
CORSRuleWasNotMatched = "cors rule was not matched"
CouldntCacheCors = "couldn't cache cors"
)

// Log messages with the "external_storage" tag.
const (
ObjectNotFound = "object not found"
ReadObjectListFailed = "read object list failed"
ObjectNotFoundByFilePathTrySearchByFileName = "object not found by filePath attribute, try search by fileName"
ObjectUploaded = "object uploaded"
)

// Log messages with the "external_storage_tree" tag.
const (
FoundSeveralSystemTreeNodes = "found several system tree nodes"
ObjectNotFoundByFilePathTrySearchByFileName = "object not found by filePath attribute, try search by fileName"
CouldntCacheNetmap = "couldn't cache netmap"
FailedToFilterHeaders = "failed to filter headers"
FailedToReadFileFromTar = "failed to read file from tar"
FailedToGetAttributes = "failed to get attributes"
ObjectUploaded = "object uploaded"
CloseGzipWriter = "close gzip writer"
CloseTarWriter = "close tar writer"
FailedToCloseReader = "failed to close reader"
FailedToCreateGzipReader = "failed to create gzip reader"
GzipReaderSelected = "gzip reader selected"
)
@@ -17,11 +17,9 @@ func (l LogEventHandler) DialPerformed(sourceIP net.Addr, _, address string, err
sourceIPString = sourceIP.Network() + "://" + sourceIP.String()
}
if err == nil {
l.logger.Debug(logs.MultinetDialSuccess, zap.String("source", sourceIPString),
zap.String("destination", address), logs.TagField(logs.TagApp))
l.logger.Debug(logs.MultinetDialSuccess, zap.String("source", sourceIPString), zap.String("destination", address))
} else {
l.logger.Debug(logs.MultinetDialFail, zap.String("source", sourceIPString),
zap.String("destination", address), logs.TagField(logs.TagApp))
l.logger.Debug(logs.MultinetDialFail, zap.String("source", sourceIPString), zap.String("destination", address), zap.Error(err))
}
}
@@ -9,8 +9,6 @@ import (

"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@@ -37,16 +35,13 @@ func NewFrostFS(p *pool.Pool) *FrostFS {

// Container implements frostfs.FrostFS interface method.
func (x *FrostFS) Container(ctx context.Context, containerPrm handler.PrmContainer) (*container.Container, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.Container")
defer span.End()

prm := pool.PrmContainerGet{
ContainerID: containerPrm.ContainerID,
}

res, err := x.pool.GetContainer(ctx, prm)
if err != nil {
return nil, handleStorageError("read container via connection pool", err)
return nil, handleObjectError("read container via connection pool", err)
}

return &res, nil
@@ -54,9 +49,6 @@ func (x *FrostFS) Container(ctx context.Context, containerPrm handler.PrmContain

// CreateObject implements frostfs.FrostFS interface method.
func (x *FrostFS) CreateObject(ctx context.Context, prm handler.PrmObjectCreate) (oid.ID, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.CreateObject")
defer span.End()

var prmPut pool.PrmObjectPut
prmPut.SetHeader(*prm.Object)
prmPut.SetPayload(prm.Payload)
@@ -70,7 +62,7 @@ func (x *FrostFS) CreateObject(ctx context.Context, prm handler.PrmObjectCreate)

idObj, err := x.pool.PutObject(ctx, prmPut)
if err != nil {
return oid.ID{}, handleStorageError("save object via connection pool", err)
return oid.ID{}, handleObjectError("save object via connection pool", err)
}
return idObj.ObjectID, nil
}
@@ -86,14 +78,11 @@ func (x payloadReader) Read(p []byte) (int, error) {
if err != nil && errors.Is(err, io.EOF) {
return n, err
}
return n, handleStorageError("read payload", err)
return n, handleObjectError("read payload", err)
}

// HeadObject implements frostfs.FrostFS interface method.
func (x *FrostFS) HeadObject(ctx context.Context, prm handler.PrmObjectHead) (*object.Object, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.HeadObject")
defer span.End()

var prmHead pool.PrmObjectHead
prmHead.SetAddress(prm.Address)

@@ -103,7 +92,7 @@ func (x *FrostFS) HeadObject(ctx context.Context, prm handler.PrmObjectHead) (*o

res, err := x.pool.HeadObject(ctx, prmHead)
if err != nil {
return nil, handleStorageError("read object header via connection pool", err)
return nil, handleObjectError("read object header via connection pool", err)
}

return &res, nil
@@ -111,9 +100,6 @@ func (x *FrostFS) HeadObject(ctx context.Context, prm handler.PrmObjectHead) (*o

// GetObject implements frostfs.FrostFS interface method.
func (x *FrostFS) GetObject(ctx context.Context, prm handler.PrmObjectGet) (*handler.Object, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.GetObject")
defer span.End()

var prmGet pool.PrmObjectGet
prmGet.SetAddress(prm.Address)

@@ -123,7 +109,7 @@ func (x *FrostFS) GetObject(ctx context.Context, prm handler.PrmObjectGet) (*han

res, err := x.pool.GetObject(ctx, prmGet)
if err != nil {
return nil, handleStorageError("init full object reading via connection pool", err)
return nil, handleObjectError("init full object reading via connection pool", err)
}

return &handler.Object{
@@ -134,9 +120,6 @@ func (x *FrostFS) GetObject(ctx context.Context, prm handler.PrmObjectGet) (*han

// RangeObject implements frostfs.FrostFS interface method.
func (x *FrostFS) RangeObject(ctx context.Context, prm handler.PrmObjectRange) (io.ReadCloser, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.RangeObject")
defer span.End()

var prmRange pool.PrmObjectRange
prmRange.SetAddress(prm.Address)
prmRange.SetOffset(prm.PayloadRange[0])
@@ -148,7 +131,7 @@ func (x *FrostFS) RangeObject(ctx context.Context, prm handler.PrmObjectRange) (

res, err := x.pool.ObjectRange(ctx, prmRange)
if err != nil {
return nil, handleStorageError("init payload range reading via connection pool", err)
return nil, handleObjectError("init payload range reading via connection pool", err)
}

return payloadReader{&res}, nil
@@ -156,9 +139,6 @@ func (x *FrostFS) RangeObject(ctx context.Context, prm handler.PrmObjectRange) (

// SearchObjects implements frostfs.FrostFS interface method.
func (x *FrostFS) SearchObjects(ctx context.Context, prm handler.PrmObjectSearch) (handler.ResObjectSearch, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.SearchObjects")
defer span.End()

var prmSearch pool.PrmObjectSearch
prmSearch.SetContainerID(prm.Container)
prmSearch.SetFilters(prm.Filters)
@@ -169,7 +149,7 @@ func (x *FrostFS) SearchObjects(ctx context.Context, prm handler.PrmObjectSearch

res, err := x.pool.SearchObjects(ctx, prmSearch)
if err != nil {
return nil, handleStorageError("init object search via connection pool", err)
return nil, handleObjectError("init object search via connection pool", err)
}

return &res, nil
@@ -177,9 +157,6 @@ func (x *FrostFS) SearchObjects(ctx context.Context, prm handler.PrmObjectSearch

// GetEpochDurations implements frostfs.FrostFS interface method.
func (x *FrostFS) GetEpochDurations(ctx context.Context) (*utils.EpochDurations, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.GetEpochDurations")
defer span.End()

networkInfo, err := x.pool.NetworkInfo(ctx)
if err != nil {
return nil, err
@@ -198,12 +175,9 @@ func (x *FrostFS) GetEpochDurations(ctx context.Context) (*utils.EpochDurations,
}

func (x *FrostFS) NetmapSnapshot(ctx context.Context) (netmap.NetMap, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.NetmapSnapshot")
defer span.End()

netmapSnapshot, err := x.pool.NetMapSnapshot(ctx)
if err != nil {
return netmapSnapshot, handleStorageError("get netmap via connection pool", err)
return netmapSnapshot, handleObjectError("get netmap via connection pool", err)
}

return netmapSnapshot, nil
@@ -222,12 +196,9 @@ func NewResolverFrostFS(p *pool.Pool) *ResolverFrostFS {
|
|||
|
||||
// SystemDNS implements resolver.FrostFS interface method.
|
||||
func (x *ResolverFrostFS) SystemDNS(ctx context.Context) (string, error) {
|
||||
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.SystemDNS")
|
||||
defer span.End()
|
||||
|
||||
networkInfo, err := x.pool.NetworkInfo(ctx)
|
||||
if err != nil {
|
||||
return "", handleStorageError("read network info via client", err)
|
||||
return "", handleObjectError("read network info via client", err)
|
||||
}
|
||||
|
||||
domain := networkInfo.RawNetworkParameter("SystemDNS")
|
||||
|
@ -238,7 +209,7 @@ func (x *ResolverFrostFS) SystemDNS(ctx context.Context) (string, error) {
|
|||
return string(domain), nil
|
||||
}
|
||||
|
||||
func handleStorageError(msg string, err error) error {
|
||||
func handleObjectError(msg string, err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
@ -251,14 +222,6 @@ func handleStorageError(msg string, err error) error {
|
|||
return fmt.Errorf("%s: %w: %s", msg, handler.ErrAccessDenied, reason)
|
||||
}
|
||||
|
||||
if client.IsErrContainerNotFound(err) {
|
||||
return fmt.Errorf("%s: %w: %s", msg, handler.ErrContainerNotFound, err.Error())
|
||||
}
|
||||
|
||||
if client.IsErrObjectNotFound(err) {
|
||||
return fmt.Errorf("%s: %w: %s", msg, handler.ErrObjectNotFound, err.Error())
|
||||
}
|
||||
|
||||
if IsTimeoutError(err) {
|
||||
return fmt.Errorf("%s: %w: %s", msg, handler.ErrGatewayTimeout, err.Error())
|
||||
}
|
||||
|
|
|
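handleObjectError (handleStorageError in the newer branch) maps SDK and transport failures onto the gateway's sentinel errors, keeping the original message so callers can branch with errors.Is and still log the cause. A minimal sketch of that translation pattern, standard library only; wrapStorageError and errGatewayTimeout are hypothetical stand-ins for the real function and handler.ErrGatewayTimeout:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// errGatewayTimeout is a hypothetical stand-in for handler.ErrGatewayTimeout.
var errGatewayTimeout = errors.New("gateway timeout")

// wrapStorageError mirrors the shape of handleObjectError: classify the cause,
// wrap a matchable sentinel with %w, and keep the original error text.
func wrapStorageError(msg string, err error) error {
	if err == nil {
		return nil
	}
	if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
		return fmt.Errorf("%s: %w: %s", msg, errGatewayTimeout, err.Error())
	}
	// Unknown causes are wrapped as-is so errors.Is(err, original) still works.
	return fmt.Errorf("%s: %w", msg, err)
}

func main() {
	err := wrapStorageError("read object header via connection pool", context.DeadlineExceeded)
	fmt.Println(errors.Is(err, errGatewayTimeout)) // true
}
```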
@@ -18,7 +18,7 @@ func TestHandleObjectError(t *testing.T) {
msg := "some msg"

t.Run("nil error", func(t *testing.T) {
err := handleStorageError(msg, nil)
err := handleObjectError(msg, nil)
require.Nil(t, err)
})

@@ -27,7 +27,7 @@ func TestHandleObjectError(t *testing.T) {
inputErr := new(apistatus.ObjectAccessDenied)
inputErr.WriteReason(reason)

err := handleStorageError(msg, inputErr)
err := handleObjectError(msg, inputErr)
require.ErrorIs(t, err, handler.ErrAccessDenied)
require.Contains(t, err.Error(), reason)
require.Contains(t, err.Error(), msg)

@@ -38,7 +38,7 @@ func TestHandleObjectError(t *testing.T) {
inputErr := new(apistatus.ObjectAccessDenied)
inputErr.WriteReason(reason)

err := handleStorageError(msg, inputErr)
err := handleObjectError(msg, inputErr)
require.ErrorIs(t, err, handler.ErrQuotaLimitReached)
require.Contains(t, err.Error(), reason)
require.Contains(t, err.Error(), msg)

@@ -47,7 +47,7 @@ func TestHandleObjectError(t *testing.T) {
t.Run("simple timeout", func(t *testing.T) {
inputErr := errors.New("timeout")

err := handleStorageError(msg, inputErr)
err := handleObjectError(msg, inputErr)
require.ErrorIs(t, err, handler.ErrGatewayTimeout)
require.Contains(t, err.Error(), inputErr.Error())
require.Contains(t, err.Error(), msg)

@@ -58,7 +58,7 @@ func TestHandleObjectError(t *testing.T) {
defer cancel()
<-ctx.Done()

err := handleStorageError(msg, ctx.Err())
err := handleObjectError(msg, ctx.Err())
require.ErrorIs(t, err, handler.ErrGatewayTimeout)
require.Contains(t, err.Error(), ctx.Err().Error())
require.Contains(t, err.Error(), msg)

@@ -67,7 +67,7 @@ func TestHandleObjectError(t *testing.T) {
t.Run("grpc deadline exceeded", func(t *testing.T) {
inputErr := fmt.Errorf("wrap grpc error: %w", status.Error(codes.DeadlineExceeded, "error"))

err := handleStorageError(msg, inputErr)
err := handleObjectError(msg, inputErr)
require.ErrorIs(t, err, handler.ErrGatewayTimeout)
require.Contains(t, err.Error(), inputErr.Error())
require.Contains(t, err.Error(), msg)

@@ -76,7 +76,7 @@ func TestHandleObjectError(t *testing.T) {
t.Run("unknown error", func(t *testing.T) {
inputErr := errors.New("unknown error")

err := handleStorageError(msg, inputErr)
err := handleObjectError(msg, inputErr)
require.ErrorIs(t, err, inputErr)
require.Contains(t, err.Error(), msg)
})
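The test hunks check two properties of the wrapper: the sentinel matches via require.ErrorIs, and both the caller's message and the underlying reason survive in err.Error(). The same assertions, stripped down to the standard testing package and reusing the hypothetical wrapStorageError and errGatewayTimeout from the sketch above:

```go
package main

import (
	"context"
	"errors"
	"strings"
	"testing"
)

func TestWrapStorageError(t *testing.T) {
	const msg = "some msg"

	t.Run("nil error", func(t *testing.T) {
		if err := wrapStorageError(msg, nil); err != nil {
			t.Fatalf("expected nil, got %v", err)
		}
	})

	t.Run("deadline maps to gateway timeout", func(t *testing.T) {
		err := wrapStorageError(msg, context.DeadlineExceeded)
		if !errors.Is(err, errGatewayTimeout) {
			t.Fatalf("expected gateway timeout sentinel, got %v", err)
		}
		if !strings.Contains(err.Error(), msg) {
			t.Fatalf("message %q lost from %v", msg, err)
		}
	})

	t.Run("unknown error is passed through wrapped", func(t *testing.T) {
		inputErr := errors.New("unknown error")
		err := wrapStorageError(msg, inputErr)
		if !errors.Is(err, inputErr) {
			t.Fatalf("original error lost from %v", err)
		}
	})
}
```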
@@ -9,7 +9,6 @@ import (
"time"

"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

@@ -75,9 +74,6 @@ var (
)

func (x *FrostFS) InitMultiObjectReader(ctx context.Context, p handler.PrmInitMultiObjectReader) (io.Reader, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.InitMultiObjectReader")
defer span.End()
combinedObj, err := x.GetObject(ctx, handler.PrmObjectGet{
PrmAuth: handler.PrmAuth{BearerToken: p.Bearer},
Address: p.Addr,

@@ -219,9 +215,6 @@ func (x *MultiObjectReader) Read(p []byte) (n int, err error) {
// InitFrostFSObjectPayloadReader initializes payload reader of the FrostFS object.
// Zero range corresponds to full payload (panics if only offset is set).
func (x *FrostFS) InitFrostFSObjectPayloadReader(ctx context.Context, p GetFrostFSParams) (io.ReadCloser, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.InitFrostFSObjectPayloadReader")
defer span.End()
var prmAuth handler.PrmAuth

if p.Off+p.Ln != 0 {
@@ -40,7 +40,7 @@ func (s *Source) NetMapSnapshot(ctx context.Context) (netmap.NetMap, error) {
}

if err = s.netmapCache.Put(netmapSnapshot); err != nil {
s.log.Warn(logs.CouldntCacheNetmap, zap.Error(err), logs.TagField(logs.TagDatapath))
s.log.Warn(logs.CouldntCacheNetmap, zap.Error(err))
}

return netmapSnapshot, nil
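NetMapSnapshot treats the cache write as best-effort: a failed Put is only logged and the freshly fetched snapshot is returned anyway. That shape in miniature, with a hypothetical fetch function and an in-memory cache standing in for the pool call and the netmap cache:

```go
package main

import (
	"fmt"
	"log"
)

// snapshot and fetchSnapshot are hypothetical stand-ins for netmap.NetMap and
// the pool call; the point is the best-effort cache write.
type snapshot struct{ epoch uint64 }

func fetchSnapshot() (snapshot, error) { return snapshot{epoch: 42}, nil }

type cache struct{ data map[uint64]snapshot }

func (c *cache) put(s snapshot) error {
	if c.data == nil {
		return fmt.Errorf("cache is not initialized")
	}
	c.data[s.epoch] = s
	return nil
}

func getSnapshot(c *cache) (snapshot, error) {
	s, err := fetchSnapshot()
	if err != nil {
		return snapshot{}, err
	}
	// Failing to cache is not fatal: warn and return the result anyway.
	if err := c.put(s); err != nil {
		log.Println("couldn't cache netmap:", err)
	}
	return s, nil
}

func main() {
	s, err := getSnapshot(&cache{}) // nil map: put fails, fetch still succeeds
	fmt.Println(s, err)
}
```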
@@ -9,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
apitree "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/tree"
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
)

@@ -47,9 +46,6 @@ func NewPoolWrapper(p *treepool.Pool) *PoolWrapper {
}

func (w *PoolWrapper) GetNodes(ctx context.Context, prm *tree.GetNodesParams) ([]tree.NodeResponse, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.GetNodes")
defer span.End()
poolPrm := treepool.GetNodesParams{
CID: prm.CnrID,
TreeID: prm.TreeID,

@@ -63,7 +59,7 @@ func (w *PoolWrapper) GetNodes(ctx context.Context, prm *tree.GetNodesParams) ([
nodes, err := w.p.GetNodes(ctx, poolPrm)
if err != nil {
return nil, handleTreeError(err)
return nil, handleError(err)
}

res := make([]tree.NodeResponse, len(nodes))

@@ -82,7 +78,7 @@ func getBearer(ctx context.Context) []byte {
return token.Marshal()
}

func handleTreeError(err error) error {
func handleError(err error) error {
if err == nil {
return nil
}

@@ -97,9 +93,6 @@ func handleTreeError(err error) error {
}

func (w *PoolWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, sort bool) ([]tree.NodeResponse, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.GetSubTree")
defer span.End()
order := treepool.NoneOrder
if sort {
order = treepool.AscendingOrder

@@ -122,7 +115,7 @@ func (w *PoolWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo,
subTreeReader, err := w.p.GetSubTree(ctx, poolPrm)
if err != nil {
return nil, handleTreeError(err)
return nil, handleError(err)
}

var subtree []tree.NodeResponse

@@ -133,7 +126,7 @@ func (w *PoolWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo,
node, err = subTreeReader.Next()
}
if err != io.EOF {
return nil, handleTreeError(err)
return nil, handleError(err)
}

return subtree, nil
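GetSubTree drains a streaming reader until io.EOF and routes any other error through handleError. The draining idiom on its own, with a hypothetical nodeReader interface in place of the tree pool's subtree reader:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

// nodeReader is a hypothetical stand-in for the tree pool's subtree reader.
type nodeReader interface {
	Next() (string, error) // returns io.EOF once the stream is exhausted
}

type sliceReader struct{ nodes []string }

func (s *sliceReader) Next() (string, error) {
	if len(s.nodes) == 0 {
		return "", io.EOF
	}
	n := s.nodes[0]
	s.nodes = s.nodes[1:]
	return n, nil
}

// drain mirrors the loop in GetSubTree: io.EOF is the normal terminator,
// anything else is a real failure to be wrapped.
func drain(r nodeReader) ([]string, error) {
	var nodes []string
	node, err := r.Next()
	for err == nil {
		nodes = append(nodes, node)
		node, err = r.Next()
	}
	if !errors.Is(err, io.EOF) {
		return nil, fmt.Errorf("read subtree: %w", err)
	}
	return nodes, nil
}

func main() {
	nodes, err := drain(&sliceReader{nodes: []string{"a", "b", "c"}})
	fmt.Println(nodes, err) // [a b c] <nil>
}
```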
@@ -25,24 +25,24 @@ type Config struct {
// Start runs http service with the exposed endpoint on the configured port.
func (ms *Service) Start() {
if ms.enabled {
ms.log.Info(logs.ServiceIsRunning, zap.String("endpoint", ms.Addr), logs.TagField(logs.TagApp))
ms.log.Info(logs.ServiceIsRunning, zap.String("endpoint", ms.Addr))
err := ms.ListenAndServe()
if err != nil && err != http.ErrServerClosed {
ms.log.Warn(logs.ServiceCouldntStartOnConfiguredPort, logs.TagField(logs.TagApp))
ms.log.Warn(logs.ServiceCouldntStartOnConfiguredPort)
}
} else {
ms.log.Info(logs.ServiceHasntStartedSinceItsDisabled, logs.TagField(logs.TagApp))
ms.log.Info(logs.ServiceHasntStartedSinceItsDisabled)
}
}

// ShutDown stops the service.
func (ms *Service) ShutDown(ctx context.Context) {
ms.log.Info(logs.ShuttingDownService, zap.String("endpoint", ms.Addr), logs.TagField(logs.TagApp))
ms.log.Info(logs.ShuttingDownService, zap.String("endpoint", ms.Addr))
err := ms.Shutdown(ctx)
if err != nil {
ms.log.Error(logs.CantGracefullyShutDownService, zap.Error(err), logs.TagField(logs.TagApp))
ms.log.Error(logs.CantGracefullyShutDownService, zap.Error(err))
if err = ms.Close(); err != nil {
ms.log.Panic(logs.CantShutDownService, zap.Error(err), logs.TagField(logs.TagApp))
ms.log.Panic(logs.CantShutDownService, zap.Error(err))
}
}
}
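Start and ShutDown wrap an embedded http.Server: http.ErrServerClosed counts as a clean stop, and a failed graceful Shutdown falls back to Close. A bare-bones version of that lifecycle using only net/http and the standard log package; the service type and endpoint here are illustrative, not the gateway's:

```go
package main

import (
	"context"
	"errors"
	"log"
	"net/http"
	"time"
)

// service is an illustrative wrapper; the real gateway embeds *http.Server
// inside its metrics/pprof Service type.
type service struct {
	*http.Server
	enabled bool
}

func (s *service) start() {
	if !s.enabled {
		log.Println("service hasn't started since it's disabled")
		return
	}
	log.Println("service is running on", s.Addr)
	if err := s.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
		log.Println("service couldn't start on configured port:", err)
	}
}

func (s *service) shutDown(ctx context.Context) {
	log.Println("shutting down service on", s.Addr)
	if err := s.Shutdown(ctx); err != nil {
		log.Println("can't gracefully shut down service:", err)
		if err = s.Close(); err != nil {
			log.Fatalln("can't shut down service:", err)
		}
	}
}

func main() {
	svc := &service{Server: &http.Server{Addr: "localhost:8086"}, enabled: true}
	go svc.start()

	time.Sleep(100 * time.Millisecond)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	svc.shutDown(ctx)
}
```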
@@ -6,7 +6,7 @@ import (
"fmt"
"sync"

v2container "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns"

@@ -29,9 +29,14 @@ type FrostFS interface {
SystemDNS(context.Context) (string, error)
}

type Settings interface {
FormContainerZone(ns string) (zone string, isDefault bool)
}

type Config struct {
FrostFS FrostFS
RPCAddress string
Settings Settings
}

type ContainerResolver struct {

@@ -41,15 +46,15 @@ type ContainerResolver struct {
type Resolver struct {
Name string
resolve func(context.Context, string, string) (*cid.ID, error)
resolve func(context.Context, string) (*cid.ID, error)
}

func (r *Resolver) SetResolveFunc(fn func(context.Context, string, string) (*cid.ID, error)) {
func (r *Resolver) SetResolveFunc(fn func(context.Context, string) (*cid.ID, error)) {
r.resolve = fn
}

func (r *Resolver) Resolve(ctx context.Context, zone, name string) (*cid.ID, error) {
return r.resolve(ctx, zone, name)
func (r *Resolver) Resolve(ctx context.Context, name string) (*cid.ID, error) {
return r.resolve(ctx, name)
}

func NewContainerResolver(resolverNames []string, cfg *Config) (*ContainerResolver, error) {

@@ -76,13 +81,13 @@ func createResolvers(resolverNames []string, cfg *Config) ([]*Resolver, error) {
return resolvers, nil
}

func (r *ContainerResolver) Resolve(ctx context.Context, cnrZone, cnrName string) (*cid.ID, error) {
func (r *ContainerResolver) Resolve(ctx context.Context, cnrName string) (*cid.ID, error) {
r.mu.RLock()
defer r.mu.RUnlock()

var err error
for _, resolver := range r.resolvers {
cnrID, resolverErr := resolver.Resolve(ctx, cnrZone, cnrName)
cnrID, resolverErr := resolver.Resolve(ctx, cnrName)
if resolverErr != nil {
resolverErr = fmt.Errorf("%s: %w", resolver.Name, resolverErr)
if err == nil {

@@ -136,25 +141,34 @@ func (r *ContainerResolver) equals(resolverNames []string) bool {
func newResolver(name string, cfg *Config) (*Resolver, error) {
switch name {
case DNSResolver:
return NewDNSResolver(cfg.FrostFS)
return NewDNSResolver(cfg.FrostFS, cfg.Settings)
case NNSResolver:
return NewNNSResolver(cfg.RPCAddress)
return NewNNSResolver(cfg.RPCAddress, cfg.Settings)
default:
return nil, fmt.Errorf("unknown resolver: %s", name)
}
}

func NewDNSResolver(frostFS FrostFS) (*Resolver, error) {
func NewDNSResolver(frostFS FrostFS, settings Settings) (*Resolver, error) {
if frostFS == nil {
return nil, fmt.Errorf("pool must not be nil for DNS resolver")
}
if settings == nil {
return nil, fmt.Errorf("resolver settings must not be nil for DNS resolver")
}

var dns ns.DNS

resolveFunc := func(ctx context.Context, zone, name string) (*cid.ID, error) {
resolveFunc := func(ctx context.Context, name string) (*cid.ID, error) {
var err error

if zone == v2container.SysAttributeZoneDefault {
namespace, err := middleware.GetNamespace(ctx)
if err != nil {
return nil, err
}

zone, isDefault := settings.FormContainerZone(namespace)
if isDefault {
zone, err = frostFS.SystemDNS(ctx)
if err != nil {
return nil, fmt.Errorf("read system DNS parameter of the FrostFS: %w", err)

@@ -176,10 +190,13 @@ func NewDNSResolver(frostFS FrostFS) (*Resolver, error) {
}, nil
}

func NewNNSResolver(rpcAddress string) (*Resolver, error) {
func NewNNSResolver(rpcAddress string, settings Settings) (*Resolver, error) {
if rpcAddress == "" {
return nil, fmt.Errorf("rpc address must not be empty for NNS resolver")
}
if settings == nil {
return nil, fmt.Errorf("resolver settings must not be nil for NNS resolver")
}

var nns ns.NNS

@@ -187,9 +204,16 @@ func NewNNSResolver(rpcAddress string) (*Resolver, error) {
return nil, fmt.Errorf("could not dial nns: %w", err)
}

resolveFunc := func(_ context.Context, zone, name string) (*cid.ID, error) {
resolveFunc := func(ctx context.Context, name string) (*cid.ID, error) {
var d container.Domain
d.SetName(name)

namespace, err := middleware.GetNamespace(ctx)
if err != nil {
return nil, err
}

zone, _ := settings.FormContainerZone(namespace)
d.SetZone(zone)

cnrID, err := nns.ResolveContainerDomain(d)
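These hunks drop the container zone from the resolve signature; each resolver now derives the zone itself from the request namespace via Settings.FormContainerZone. The surrounding pattern, a named resolver with a swappable resolve function tried in order until one succeeds, sketched with a hypothetical containerID type in place of cid.ID:

```go
package main

import (
	"context"
	"fmt"
)

// containerID is a hypothetical stand-in for cid.ID.
type containerID string

// resolver keeps the lookup strategy behind a swappable function, much like
// the gateway's Resolver type after the hunks above.
type resolver struct {
	name    string
	resolve func(ctx context.Context, name string) (containerID, error)
}

// chain tries each resolver in order, returning the first success or the
// first error seen if they all fail.
func chain(ctx context.Context, name string, resolvers []*resolver) (containerID, error) {
	var firstErr error
	for _, r := range resolvers {
		id, err := r.resolve(ctx, name)
		if err == nil {
			return id, nil
		}
		if firstErr == nil {
			firstErr = fmt.Errorf("%s: %w", r.name, err)
		}
	}
	return "", firstErr
}

func main() {
	dns := &resolver{name: "dns", resolve: func(context.Context, string) (containerID, error) {
		return "", fmt.Errorf("not found in DNS")
	}}
	nns := &resolver{name: "nns", resolve: func(_ context.Context, name string) (containerID, error) {
		return containerID("cid-for-" + name), nil
	}}

	id, err := chain(context.Background(), "my-bucket", []*resolver{dns, nns})
	fmt.Println(id, err) // cid-for-my-bucket <nil>
}
```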
30 tree/tree.go
@@ -8,18 +8,13 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
)

type (
Tree struct {
service ServiceClient
log *zap.Logger
}

// ServiceClient is a client to interact with tree service.

@@ -77,8 +72,8 @@ const (
)

// NewTree creates instance of Tree using provided address and create grpc connection.
func NewTree(service ServiceClient, log *zap.Logger) *Tree {
return &Tree{service: service, log: log}
func NewTree(service ServiceClient) *Tree {
return &Tree{service: service}
}

type Meta interface {

@@ -195,9 +190,6 @@ func (m *multiSystemNode) Old() []*treeNode {
}

func (c *Tree) GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName string) (*data.NodeVersion, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "tree.GetLatestVersion")
defer span.End()
nodes, err := c.GetVersions(ctx, cnrID, objectName)
if err != nil {
return nil, err

@@ -212,9 +204,6 @@ func (c *Tree) GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName s
}

func (c *Tree) GetVersions(ctx context.Context, cnrID *cid.ID, objectName string) ([]NodeResponse, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "tree.GetVersions")
defer span.End()
meta := []string{oidKV, isDeleteMarkerKV, sizeKV}
path := pathFromName(objectName)

@@ -231,9 +220,6 @@ func (c *Tree) GetVersions(ctx context.Context, cnrID *cid.ID, objectName string
}

func (c *Tree) CheckSettingsNodeExists(ctx context.Context, bktInfo *data.BucketInfo) error {
ctx, span := tracing.StartSpanFromContext(ctx, "tree.CheckSettingsNodeExists")
defer span.End()
_, err := c.getSystemNode(ctx, bktInfo, settingsFileName)
if err != nil {
return err

@@ -261,9 +247,6 @@ func (c *Tree) getSystemNode(ctx context.Context, bktInfo *data.BucketInfo, name
if len(nodes) == 0 {
return nil, layer.ErrNodeNotFound
}
if len(nodes) != 1 {
c.reqLogger(ctx).Warn(logs.FoundSeveralSystemTreeNodes, zap.String("name", name), logs.TagField(logs.TagExternalStorageTree))
}

return newMultiNode(nodes)
}

@@ -303,7 +286,7 @@ func getLatestVersionNode(nodes []NodeResponse) (NodeResponse, error) {
}

if targetIndexNode == -1 {
return nil, fmt.Errorf("latest version: %w", layer.ErrNodeNotFound)
return nil, layer.ErrNodeNotFound
}

return nodes[targetIndexNode], nil

@@ -325,9 +308,6 @@ func pathFromName(objectName string) []string {
}

func (c *Tree) GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) ([]data.NodeInfo, string, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "tree.GetSubTreeByPrefix")
defer span.End()
rootID, tailPrefix, err := c.determinePrefixNode(ctx, bktInfo, versionTree, prefix)
if err != nil {
return nil, "", err

@@ -430,10 +410,6 @@ func (c *Tree) getPrefixNodeID(ctx context.Context, bktInfo *data.BucketInfo, tr
return intermediateNodes, nil
}

func (c *Tree) reqLogger(ctx context.Context) *zap.Logger {
return utils.GetReqLogOrDefault(ctx, c.log)
}

func GetFilename(node NodeResponse) string {
for _, kv := range node.GetMeta() {
if kv.GetKey() == FileNameKey {
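The removed reqLogger helper pulled a request-scoped logger from the context via utils.GetReqLogOrDefault, falling back to the Tree's own logger. The context-with-fallback idiom, sketched with the standard log package instead of zap; the key type and helper names are illustrative:

```go
package main

import (
	"context"
	"log"
	"os"
)

type logKey struct{}

// withLogger attaches a request-scoped logger to the context.
func withLogger(ctx context.Context, l *log.Logger) context.Context {
	return context.WithValue(ctx, logKey{}, l)
}

// logOrDefault mirrors the fallback behaviour: prefer the logger stored in
// the context, otherwise use the component's own logger.
func logOrDefault(ctx context.Context, def *log.Logger) *log.Logger {
	if l, ok := ctx.Value(logKey{}).(*log.Logger); ok {
		return l
	}
	return def
}

func main() {
	def := log.New(os.Stdout, "tree: ", log.LstdFlags)
	req := log.New(os.Stdout, "req-42: ", log.LstdFlags)

	ctx := withLogger(context.Background(), req)
	logOrDefault(ctx, def).Println("found several system tree nodes")
	logOrDefault(context.Background(), def).Println("no request logger attached")
}
```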
@@ -11,8 +11,6 @@ import (
"time"
"unicode"
"unicode/utf8"

"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)

type EpochDurations struct {

@@ -258,12 +256,3 @@ func (t systemTransformer) updateExpirationHeader(headers map[string]string, dur
headers[t.expirationEpochAttr()] = strconv.FormatUint(expirationEpoch, 10)
}

func GetAttributeValue(attrs []object.Attribute, key string) string {
for _, attr := range attrs {
if attr.Key() == key {
return attr.Value()
}
}
return ""
}
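updateExpirationHeader writes the computed expiration epoch as a decimal attribute value. As a rough illustration of converting a requested lifetime into an epoch number (the rounding rule and attribute key below are assumptions for the example, not taken from the gateway code):

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// expirationEpoch converts a requested lifetime into an absolute epoch number.
// The "round up to whole epochs" rule is an assumption made for this example.
func expirationEpoch(currentEpoch uint64, epochDuration, lifetime time.Duration) uint64 {
	epochs := uint64(lifetime / epochDuration)
	if lifetime%epochDuration != 0 {
		epochs++
	}
	return currentEpoch + epochs
}

func main() {
	// Illustrative attribute key; the gateway derives the real one from its transformer.
	const expirationEpochAttr = "ExpirationEpoch"

	headers := map[string]string{}
	exp := expirationEpoch(100, time.Hour, 90*time.Minute) // hypothetical epoch length and lifetime
	headers[expirationEpochAttr] = strconv.FormatUint(exp, 10)
	fmt.Println(headers) // map[ExpirationEpoch:102]
}
```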