Compare commits

..

15 commits

Author SHA1 Message Date
b7b08d9d82 [#230] Refactor logger tag configuration
Signed-off-by: Pavel Pogodaev <p.pogodaev@yadro.com>
2025-04-17 14:37:02 +00:00
b9f1f455f8 [#229] Add ngfuzz installation to makefile
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2025-04-17 13:57:36 +00:00
304dbdd4c8 [#228] Update Go to 1.23
Signed-off-by: Nikita Zinkevich <n.zinkevich@yadro.com>
2025-04-16 16:50:42 +03:00
273459e090 [#225] Support wildcard in allowed origins and headers
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2025-04-07 16:57:07 +03:00
cb72d11515 [#224] Refactor logger tag configuration
Signed-off-by: Pavel Pogodaev <p.pogodaev@yadro.com>
2025-04-01 11:43:51 +03:00
f0b86c8ba7 [#191] Update integration tests
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2025-03-25 06:27:56 +00:00
458bf933fc [#191] Refactor error handling and logging
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2025-03-25 06:27:56 +00:00
0f73da258b [#223] Bump frostfs-sdk-go
Contains:
* more detailed pool errors
* disabled service config query in gRPC client

Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2025-03-20 18:40:28 +03:00
d670983df4 [#208] govulncheck: Fix minor toolchain updates for good
Signed-off-by: Vitaliy Potyarkin <v.potyarkin@yadro.com>
2025-03-20 13:49:55 +00:00
9ef6b06e91 [#212] Support CORS container for CORS settings
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2025-03-10 18:12:36 +03:00
9cf2a4f0e0 [#197] Add a leading slash to the FilePath attribute
According to the FrostFS API specification,
the FilePath attribute must start with a
leading slash. More info:
https://git.frostfs.info/TrueCloudLab/frostfs-api

Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2025-02-25 14:14:20 +00:00
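
A minimal sketch of the rule this commit applies (the helper below is illustrative, not the gateway's actual code): a FilePath attribute that arrives without a leading slash gets one prepended before the object is stored.

```go
package main

import (
	"fmt"
	"strings"
)

// normalizeFilePath is a hypothetical helper showing the convention from the
// FrostFS API spec referenced above: the FilePath attribute must start with "/".
func normalizeFilePath(filePath string) string {
	if filePath != "" && !strings.HasPrefix(filePath, "/") {
		return "/" + filePath
	}
	return filePath
}

func main() {
	fmt.Println(normalizeFilePath("dir/file.txt")) // prints "/dir/file.txt"
}
```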
cc6055bd27 [#211] Add IO tags
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2025-02-25 08:36:38 +00:00
a651b5823f [#219] Use zaptest.Logger
Use zaptest to get logs which are printed only if a test fails
or if you run go test -v.

Don't use zaptest.Logger for fuzzing, otherwise ngfuzz/libfuzz crashes

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2025-02-21 16:11:49 +03:00
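
A minimal sketch of the zaptest pattern described above (the test is illustrative and not part of the gateway's suite): the logger writes through *testing.T, so output shows up only for failing tests or under go test -v.

```go
package handler_test

import (
	"testing"

	"go.uber.org/zap/zaptest"
)

// TestWithZaptestLogger demonstrates zaptest.NewLogger: log lines are buffered
// by the testing framework and printed only if the test fails or -v is set.
func TestWithZaptestLogger(t *testing.T) {
	log := zaptest.NewLogger(t)
	log.Info("visible only on failure or with go test -v")
}
```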
f9c5dc5260 [#216] Rework http2 test to be tls test
Signed-off-by: Nikita Zinkevich <n.zinkevich@yadro.com>
2025-02-18 14:55:19 +03:00
8bfaa84124 [#216] Remove http2 forcing
fasthttp doesn't support HTTP/2,
which causes errors when we enable it

Signed-off-by: Nikita Zinkevich <n.zinkevich@yadro.com>
2025-02-18 14:55:19 +03:00
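
A small sketch of the setup these two commits leave in place (certificate paths, address and handler are placeholders): fasthttp serves plain HTTP/1.x over a TLS listener, with no `h2` entry in NextProtos.

```go
package main

import (
	"crypto/tls"
	"log"
	"net"

	"github.com/valyala/fasthttp"
)

func main() {
	// Placeholders: substitute real certificate paths and listen address.
	cert, err := tls.LoadX509KeyPair("server.crt", "server.key")
	if err != nil {
		log.Fatal(err)
	}
	ln, err := net.Listen("tcp", ":8443")
	if err != nil {
		log.Fatal(err)
	}
	// No NextProtos: []string{"h2"} here - fasthttp speaks HTTP/1.x only.
	tlsLn := tls.NewListener(ln, &tls.Config{Certificates: []tls.Certificate{cert}})

	handler := func(ctx *fasthttp.RequestCtx) { ctx.SetStatusCode(fasthttp.StatusOK) }
	log.Fatal(fasthttp.Serve(tlsLn, handler))
}
```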
42 changed files with 2267 additions and 686 deletions

View file

@@ -1,4 +1,4 @@
-FROM golang:1.22-alpine AS basebuilder
+FROM golang:1.24-alpine AS basebuilder
 RUN apk add --update make bash ca-certificates
 FROM basebuilder AS builder

View file

@@ -10,7 +10,7 @@ jobs:
 runs-on: ubuntu-latest
 strategy:
 matrix:
-go_versions: [ '1.22', '1.23' ]
+go_versions: [ '1.23', '1.24' ]
 fail-fast: false
 steps:
 - uses: actions/checkout@v3

View file

@@ -14,7 +14,7 @@ jobs:
 - name: Set up Go
 uses: actions/setup-go@v3
 with:
-go-version: '1.23'
+go-version: '1.24'
 cache: true
 - name: Install linters
@@ -28,7 +28,7 @@ jobs:
 runs-on: ubuntu-latest
 strategy:
 matrix:
-go_versions: [ '1.22', '1.23' ]
+go_versions: [ '1.23', '1.24' ]
 fail-fast: false
 steps:
 - uses: actions/checkout@v3
@@ -53,7 +53,7 @@ jobs:
 - name: Set up Go
 uses: actions/setup-go@v3
 with:
-go-version: '1.23'
+go-version: '1.24'
 - name: Run integration tests
 run: |-

View file

@@ -16,7 +16,8 @@ jobs:
 - name: Setup Go
 uses: actions/setup-go@v3
 with:
-go-version: '1.22.12'
+go-version: '1.23'
+check-latest: true
 - name: Install govulncheck
 run: go install golang.org/x/vuln/cmd/govulncheck@latest

View file

@@ -22,9 +22,6 @@ linters-settings:
 # 'default' case is present, even if all enum members aren't listed in the
 # switch
 default-signifies-exhaustive: true
-govet:
-# report about shadowed variables
-check-shadowing: false
 custom:
 truecloudlab-linters:
 path: bin/external_linters.so

View file

@@ -4,9 +4,12 @@ This document outlines major changes between releases.
 ## [Unreleased]
+- Update Go to 1.23 (#228)
 ### Added
 - Add handling quota limit reached error (#187)
 - Add slash clipping for FileName attribute (#174)
+- Add new format of tag names config
 ## [0.32.3] - 2025-02-05

View file

@@ -2,9 +2,9 @@
 REPO ?= $(shell go list -m)
 VERSION ?= $(shell git describe --tags --match "v*" --dirty --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
-GO_VERSION ?= 1.22
+GO_VERSION ?= 1.23
-LINT_VERSION ?= 1.60.3
+LINT_VERSION ?= 1.64.8
-TRUECLOUDLAB_LINT_VERSION ?= 0.0.6
+TRUECLOUDLAB_LINT_VERSION ?= 0.0.10
 BUILD ?= $(shell date -u --iso=seconds)
 HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs-http-gw
@@ -30,9 +30,10 @@ PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \
 sed "s/-/~/")-${OS_RELEASE}
 .PHONY: debpackage debclean
-FUZZ_NGFUZZ_DIR ?= ""
+FUZZING_DIR = $(shell pwd)/tests/fuzzing/files
+NGFUZZ_REPO = https://gitflic.ru/project/yadro/ngfuzz.git
 FUZZ_TIMEOUT ?= 30
-FUZZ_FUNCTIONS ?= "all"
+FUZZ_FUNCTIONS ?= ""
 FUZZ_AUX ?= ""
 # Make all binaries
@@ -99,18 +100,22 @@ check-ngfuzz:
 exit 1; \
 fi
-.PHONY: install-fuzzing-deps
+.PHONY: install-ngfuzz
-install-fuzzing-deps: check-clang check-ngfuzz
+install-ngfuzz:
+ifeq (,$(wildcard $(FUZZING_DIR)/ngfuzz))
+@rm -rf $(FUZZING_DIR)/ngfuzz
+@git clone $(NGFUZZ_REPO) $(FUZZING_DIR)/ngfuzz
+@cd $(FUZZING_DIR)/ngfuzz && make
+endif
 .PHONY: fuzz
-fuzz: install-fuzzing-deps
+fuzz: check-clang install-ngfuzz
 @START_PATH=$$(pwd); \
-ROOT_PATH=$$(realpath --relative-to=$(FUZZ_NGFUZZ_DIR) $$START_PATH) ; \
+ROOT_PATH=$$(realpath --relative-to=$(FUZZING_DIR)/ngfuzz $$START_PATH) ; \
-cd $(FUZZ_NGFUZZ_DIR) && \
+cd $(FUZZING_DIR)/ngfuzz && \
-./ngfuzz -clean && \
+./bin/ngfuzz clean && \
-./ngfuzz -fuzz $(FUZZ_FUNCTIONS) -rootdir $$ROOT_PATH -timeout $(FUZZ_TIMEOUT) $(FUZZ_AUX) && \
+env CGO_ENABLED=1 ./bin/ngfuzz fuzz --funcs $(FUZZ_FUNCTIONS) --rootdir $$ROOT_PATH --timeout $(FUZZ_TIMEOUT) $(FUZZ_AUX) && \
-./ngfuzz -report
+./bin/ngfuzz coverage --rootdir $$ROOT_PATH
 # Reformat code
 fmt:
@@ -150,7 +155,7 @@ dirty-image:
 @@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
 @rm -rf $(TMP_DIR)/linters
 @rmdir $(TMP_DIR) 2>/dev/null || true
-@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
+@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
 # Run linters
 lint:

View file

@@ -17,6 +17,7 @@ import (
 "time"
 "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
+"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
 "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
 "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
 "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
@@ -30,6 +31,7 @@ import (
 "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
 "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
 v2container "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
+cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
 treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
@@ -65,6 +67,8 @@ type (
 settings *appSettings
 loggerSettings *loggerSettings
 bucketCache *cache.BucketCache
+handle *handler.Handler
+corsCnrID cid.ID
 servers []Server
 unbindServers []ServerInfo
@@ -105,32 +109,19 @@ type (
 bufferMaxSizeForPut uint64
 namespaceHeader string
 defaultNamespaces []string
-corsAllowOrigin string
+cors *data.CORSRule
-corsAllowMethods []string
-corsAllowHeaders []string
-corsExposeHeaders []string
-corsAllowCredentials bool
-corsMaxAge int
 enableFilepathFallback bool
 }
 tagsConfig struct {
 tagLogs sync.Map
+defaultLvl zap.AtomicLevel
 }
 logLevelConfig struct {
 logLevel zap.AtomicLevel
 tagsConfig *tagsConfig
 }
-CORS struct {
-AllowOrigin string
-AllowMethods []string
-AllowHeaders []string
-ExposeHeaders []string
-AllowCredentials bool
-MaxAge int
-}
 )
 func newLogLevel(v *viper.Viper) zap.AtomicLevel {
@@ -144,19 +135,34 @@ func newLogLevel(v *viper.Viper) zap.AtomicLevel {
 }
 func newTagsConfig(v *viper.Viper, ll zapcore.Level) *tagsConfig {
-var t tagsConfig
+t := tagsConfig{defaultLvl: zap.NewAtomicLevelAt(ll)}
 if err := t.update(v, ll); err != nil {
 // panic here is analogue of the similar panic during common log level initialization.
 panic(err.Error())
 }
 return &t
 }
 func newLogLevelConfig(lvl zap.AtomicLevel, tagsConfig *tagsConfig) *logLevelConfig {
-return &logLevelConfig{
+cfg := &logLevelConfig{
 logLevel: lvl,
 tagsConfig: tagsConfig,
 }
+cfg.setMinLogLevel()
+return cfg
+}
+func (l *logLevelConfig) setMinLogLevel() {
+l.tagsConfig.tagLogs.Range(func(_, value any) bool {
+v := value.(zapcore.Level)
+if v < l.logLevel.Level() {
+l.logLevel.SetLevel(v)
+}
+return true
+})
 }
 func (l *logLevelConfig) update(cfg *viper.Viper, log *zap.Logger) {
@@ -169,34 +175,34 @@ func (l *logLevelConfig) update(cfg *viper.Viper, log *zap.Logger) {
 if err := l.tagsConfig.update(cfg, l.logLevel.Level()); err != nil {
 log.Warn(logs.TagsLogConfigWontBeUpdated, zap.Error(err), logs.TagField(logs.TagApp))
 }
+l.setMinLogLevel()
 }
 func (t *tagsConfig) LevelEnabled(tag string, tgtLevel zapcore.Level) bool {
 lvl, ok := t.tagLogs.Load(tag)
 if !ok {
-return false
+return t.defaultLvl.Enabled(tgtLevel)
 }
 return lvl.(zapcore.Level).Enabled(tgtLevel)
 }
+func (t *tagsConfig) DefaultEnabled(lvl zapcore.Level) bool {
+return t.defaultLvl.Enabled(lvl)
+}
 func (t *tagsConfig) update(cfg *viper.Viper, ll zapcore.Level) error {
 tags, err := fetchLogTagsConfig(cfg, ll)
 if err != nil {
 return err
 }
-t.tagLogs.Range(func(key, value any) bool {
+t.tagLogs.Range(func(key, _ any) bool {
 k := key.(string)
-v := value.(zapcore.Level)
-if lvl, ok := tags[k]; ok {
+if _, ok := tags[k]; !ok {
-if lvl != v {
-t.tagLogs.Store(key, lvl)
-}
-} else {
 t.tagLogs.Delete(key)
+delete(tags, k)
 }
 return true
 })
@@ -204,6 +210,7 @@ func (t *tagsConfig) update(cfg *viper.Viper, ll zapcore.Level) error {
 for k, v := range tags {
 t.tagLogs.Store(k, v)
 }
+t.defaultLvl.SetLevel(ll)
 return nil
 }
@@ -251,6 +258,7 @@ func newApp(ctx context.Context, cfg *appCfg) App {
 a.initResolver()
 a.initMetrics()
 a.initTracing(ctx)
+a.initContainers(ctx)
 return a
 }
@@ -259,6 +267,14 @@ func (a *app) config() *viper.Viper {
 return a.cfg.config()
 }
+func (a *app) initContainers(ctx context.Context) {
+corsCnrID, err := a.fetchContainerID(ctx, cfgContainersCORS)
+if err != nil {
+a.log.Fatal(logs.CouldNotFetchCORSContainerInfo, zap.Error(err), logs.TagField(logs.TagApp))
+}
+a.corsCnrID = *corsCnrID
+}
 func (a *app) initAppSettings(lc *logLevelConfig) {
 a.settings = &appSettings{
 reconnectInterval: fetchReconnectInterval(a.config()),
@@ -278,12 +294,7 @@ func (s *appSettings) update(v *viper.Viper, l *zap.Logger) {
 namespaceHeader := v.GetString(cfgResolveNamespaceHeader)
 defaultNamespaces := fetchDefaultNamespaces(v)
 indexPage, indexEnabled := fetchIndexPageTemplate(v, l)
-corsAllowOrigin := v.GetString(cfgCORSAllowOrigin)
+cors := fetchCORSConfig(v)
-corsAllowMethods := v.GetStringSlice(cfgCORSAllowMethods)
-corsAllowHeaders := v.GetStringSlice(cfgCORSAllowHeaders)
-corsExposeHeaders := v.GetStringSlice(cfgCORSExposeHeaders)
-corsAllowCredentials := v.GetBool(cfgCORSAllowCredentials)
-corsMaxAge := fetchCORSMaxAge(v)
 enableFilepathFallback := v.GetBool(cfgFeaturesEnableFilepathFallback)
 s.mu.Lock()
@@ -298,12 +309,7 @@ func (s *appSettings) update(v *viper.Viper, l *zap.Logger) {
 s.defaultNamespaces = defaultNamespaces
 s.returnIndexPage = indexEnabled
 s.indexPageTemplate = indexPage
-s.corsAllowOrigin = corsAllowOrigin
+s.cors = cors
-s.corsAllowMethods = corsAllowMethods
-s.corsAllowHeaders = corsAllowHeaders
-s.corsExposeHeaders = corsExposeHeaders
-s.corsAllowCredentials = corsAllowCredentials
-s.corsMaxAge = corsMaxAge
 s.enableFilepathFallback = enableFilepathFallback
 }
@@ -350,26 +356,33 @@ func (s *appSettings) IndexPageTemplate() string {
 return s.indexPageTemplate
 }
-func (s *appSettings) CORS() CORS {
+func (s *appSettings) CORS() *data.CORSRule {
 s.mu.RLock()
 defer s.mu.RUnlock()
-allowMethods := make([]string, len(s.corsAllowMethods))
+if s.cors == nil {
-copy(allowMethods, s.corsAllowMethods)
+return nil
+}
-allowHeaders := make([]string, len(s.corsAllowHeaders))
+allowMethods := make([]string, len(s.cors.AllowedMethods))
-copy(allowHeaders, s.corsAllowHeaders)
+copy(allowMethods, s.cors.AllowedMethods)
-exposeHeaders := make([]string, len(s.corsExposeHeaders))
+allowHeaders := make([]string, len(s.cors.AllowedHeaders))
-copy(exposeHeaders, s.corsExposeHeaders)
+copy(allowHeaders, s.cors.AllowedHeaders)
-return CORS{
+exposeHeaders := make([]string, len(s.cors.ExposeHeaders))
-AllowOrigin: s.corsAllowOrigin,
+copy(exposeHeaders, s.cors.ExposeHeaders)
-AllowMethods: allowMethods,
-AllowHeaders: allowHeaders,
+allowOrigins := make([]string, len(s.cors.AllowedOrigins))
+copy(allowOrigins, s.cors.AllowedOrigins)
+return &data.CORSRule{
+AllowedOrigins: allowOrigins,
+AllowedMethods: allowMethods,
+AllowedHeaders: allowHeaders,
 ExposeHeaders: exposeHeaders,
-AllowCredentials: s.corsAllowCredentials,
+AllowedCredentials: s.cors.AllowedCredentials,
-MaxAge: s.corsMaxAge,
+MaxAgeSeconds: s.cors.MaxAgeSeconds,
 }
 }
@@ -391,15 +404,15 @@ func (s *appSettings) NamespaceHeader() string {
 return s.namespaceHeader
 }
-func (s *appSettings) FormContainerZone(ns string) (zone string, isDefault bool) {
+func (s *appSettings) FormContainerZone(ns string) string {
 s.mu.RLock()
 namespaces := s.defaultNamespaces
 s.mu.RUnlock()
 if slices.Contains(namespaces, ns) {
-return v2container.SysAttributeZoneDefault, true
+return v2container.SysAttributeZoneDefault
 }
-return ns + ".ns", false
+return ns + ".ns"
 }
 func (s *appSettings) EnableFilepathFallback() bool {
@@ -420,7 +433,6 @@ func (a *app) getResolverConfig() ([]string, *resolver.Config) {
 resolveCfg := &resolver.Config{
 FrostFS: frostfs.NewResolverFrostFS(a.pool),
 RPCAddress: a.config().GetString(cfgRPCEndpoint),
-Settings: a.settings,
 }
 order := a.config().GetStringSlice(cfgResolveOrder)
@@ -606,10 +618,8 @@ func (a *app) Serve() {
 close(a.webDone)
 }()
-handle := handler.New(a.AppParams(), a.settings, tree.NewTree(frostfs.NewPoolWrapper(a.treePool)), workerPool)
 // Configure router.
-a.configureRouter(handle)
+a.configureRouter(workerPool)
 a.startServices()
 a.initServers(a.ctx)
@@ -679,7 +689,7 @@ func (a *app) configReload(ctx context.Context) {
 return
 }
-a.settings.logLevelConfig.update(a.cfg.settings, a.log)
+a.settings.logLevelConfig.update(a.cfg.config(), a.log)
 if err := a.settings.dialerSource.Update(fetchMultinetConfig(a.config(), a.log)); err != nil {
 a.log.Warn(logs.MultinetConfigWontBeUpdated, zap.Error(err), logs.TagField(logs.TagApp))
@@ -730,31 +740,33 @@ func (a *app) stopServices() {
 }
 }
-func (a *app) configureRouter(h *handler.Handler) {
+func (a *app) configureRouter(workerPool *ants.Pool) {
+a.handle = handler.New(a.AppParams(), a.settings, tree.NewTree(frostfs.NewPoolWrapper(a.treePool), a.log), workerPool)
 r := router.New()
 r.RedirectTrailingSlash = true
 r.NotFound = func(r *fasthttp.RequestCtx) {
-handler.ResponseError(r, "Not found", fasthttp.StatusNotFound)
+handler.ResponseError(r, "Route Not found", fasthttp.StatusNotFound)
 }
 r.MethodNotAllowed = func(r *fasthttp.RequestCtx) {
 handler.ResponseError(r, "Method Not Allowed", fasthttp.StatusMethodNotAllowed)
 }
-r.POST("/upload/{cid}", a.addMiddlewares(h.Upload))
+r.POST("/upload/{cid}", a.addMiddlewares(a.handle.Upload))
-r.OPTIONS("/upload/{cid}", a.addPreflight())
+r.OPTIONS("/upload/{cid}", a.addPreflight(a.handle.Preflight))
 a.log.Info(logs.AddedPathUploadCid, logs.TagField(logs.TagApp))
-r.GET("/get/{cid}/{oid:*}", a.addMiddlewares(h.DownloadByAddressOrBucketName))
+r.GET("/get/{cid}/{oid:*}", a.addMiddlewares(a.handle.DownloadByAddressOrBucketName))
-r.HEAD("/get/{cid}/{oid:*}", a.addMiddlewares(h.HeadByAddressOrBucketName))
+r.HEAD("/get/{cid}/{oid:*}", a.addMiddlewares(a.handle.HeadByAddressOrBucketName))
-r.OPTIONS("/get/{cid}/{oid:*}", a.addPreflight())
+r.OPTIONS("/get/{cid}/{oid:*}", a.addPreflight(a.handle.Preflight))
 a.log.Info(logs.AddedPathGetCidOid, logs.TagField(logs.TagApp))
-r.GET("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(h.DownloadByAttribute))
+r.GET("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(a.handle.DownloadByAttribute))
-r.HEAD("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(h.HeadByAttribute))
+r.HEAD("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(a.handle.HeadByAttribute))
-r.OPTIONS("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addPreflight())
+r.OPTIONS("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addPreflight(a.handle.Preflight))
 a.log.Info(logs.AddedPathGetByAttributeCidAttrKeyAttrVal, logs.TagField(logs.TagApp))
-r.GET("/zip/{cid}/{prefix:*}", a.addMiddlewares(h.DownloadZip))
+r.GET("/zip/{cid}/{prefix:*}", a.addMiddlewares(a.handle.DownloadZip))
-r.OPTIONS("/zip/{cid}/{prefix:*}", a.addPreflight())
+r.OPTIONS("/zip/{cid}/{prefix:*}", a.addPreflight(a.handle.Preflight))
-r.GET("/tar/{cid}/{prefix:*}", a.addMiddlewares(h.DownloadTar))
+r.GET("/tar/{cid}/{prefix:*}", a.addMiddlewares(a.handle.DownloadTar))
-r.OPTIONS("/tar/{cid}/{prefix:*}", a.addPreflight())
+r.OPTIONS("/tar/{cid}/{prefix:*}", a.addPreflight(a.handle.Preflight))
 a.log.Info(logs.AddedPathZipCidPrefix, logs.TagField(logs.TagApp))
 a.webServer.Handler = r.Handler
@@ -777,14 +789,14 @@ func (a *app) addMiddlewares(h fasthttp.RequestHandler) fasthttp.RequestHandler
 return h
 }
-func (a *app) addPreflight() fasthttp.RequestHandler {
+func (a *app) addPreflight(h fasthttp.RequestHandler) fasthttp.RequestHandler {
 list := []func(fasthttp.RequestHandler) fasthttp.RequestHandler{
 a.tracer,
 a.logger,
+a.canonicalizer,
 a.reqNamespace,
 }
-h := a.preflightHandler
 for i := len(list) - 1; i >= 0; i-- {
 h = list[i](h)
 }
@@ -792,46 +804,16 @@ func (a *app) addPreflight() fasthttp.RequestHandler {
 return h
 }
-func (a *app) preflightHandler(c *fasthttp.RequestCtx) {
-cors := a.settings.CORS()
-setCORSHeaders(c, cors)
-}
 func (a *app) cors(h fasthttp.RequestHandler) fasthttp.RequestHandler {
 return func(c *fasthttp.RequestCtx) {
 h(c)
 code := c.Response.StatusCode()
 if code >= fasthttp.StatusOK && code < fasthttp.StatusMultipleChoices {
-cors := a.settings.CORS()
+a.handle.SetCORSHeaders(c)
-setCORSHeaders(c, cors)
 }
 }
 }
-func setCORSHeaders(c *fasthttp.RequestCtx, cors CORS) {
-c.Response.Header.Set(fasthttp.HeaderAccessControlMaxAge, strconv.Itoa(cors.MaxAge))
-if len(cors.AllowOrigin) != 0 {
-c.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, cors.AllowOrigin)
-}
-if len(cors.AllowMethods) != 0 {
-c.Response.Header.Set(fasthttp.HeaderAccessControlAllowMethods, strings.Join(cors.AllowMethods, ","))
-}
-if len(cors.AllowHeaders) != 0 {
-c.Response.Header.Set(fasthttp.HeaderAccessControlAllowHeaders, strings.Join(cors.AllowHeaders, ","))
-}
-if len(cors.ExposeHeaders) != 0 {
-c.Response.Header.Set(fasthttp.HeaderAccessControlExposeHeaders, strings.Join(cors.ExposeHeaders, ","))
-}
-if cors.AllowCredentials {
-c.Response.Header.Set(fasthttp.HeaderAccessControlAllowCredentials, "true")
-}
-}
 func (a *app) logger(h fasthttp.RequestHandler) fasthttp.RequestHandler {
 return func(req *fasthttp.RequestCtx) {
 requiredFields := []zap.Field{zap.Uint64("id", req.ID())}
@@ -935,6 +917,8 @@ func (a *app) AppParams() *handler.AppParams {
 Owner: a.owner,
 Resolver: a.resolver,
 Cache: a.bucketCache,
+CORSCnrID: a.corsCnrID,
+CORSCache: cache.NewCORSCache(getCORSCacheOptions(a.config(), a.log)),
 }
 }
@@ -1135,3 +1119,44 @@ func (a *app) tryReconnect(ctx context.Context, sr *fasthttp.Server) bool {
 return len(a.unbindServers) == 0
 }
+func (a *app) fetchContainerID(ctx context.Context, cfgKey string) (id *cid.ID, err error) {
+cnrID, err := a.resolveContainerID(ctx, cfgKey)
+if err != nil {
+return nil, err
+}
+err = checkContainerExists(ctx, *cnrID, a.pool)
+if err != nil {
+return nil, err
+}
+return cnrID, nil
+}
+func (a *app) resolveContainerID(ctx context.Context, cfgKey string) (*cid.ID, error) {
+containerString := a.config().GetString(cfgKey)
+id := new(cid.ID)
+if err := id.DecodeString(containerString); err != nil {
+i := strings.Index(containerString, ".")
+if i < 0 {
+return nil, fmt.Errorf("invalid container address: %s", containerString)
+}
+if id, err = a.resolver.Resolve(ctx, containerString[i+1:], containerString[:i]); err != nil {
+return nil, fmt.Errorf("resolve container address %s: %w", containerString, err)
+}
+}
+return id, nil
+}
+func checkContainerExists(ctx context.Context, id cid.ID, frostFSPool *pool.Pool) error {
+prm := pool.PrmContainerGet{
+ContainerID: id,
+}
+_, err := frostFSPool.GetContainer(ctx, prm)
+return err
+}

View file

@@ -20,9 +20,11 @@ import (
 containerv2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
 cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -46,6 +48,7 @@ const (
 testContainerName = "friendly"
 testListenAddress = "localhost:8082"
 testHost = "http://" + testListenAddress
+testCORSContainerName = "cors"
 )
 func TestIntegration(t *testing.T) {
@@ -76,10 +79,14 @@ func TestIntegration(t *testing.T) {
 registerUser(t, ctx, aioContainer, file.Name())
 }
+// Creating CORS container
+clientPool := getPool(ctx, t, key)
+_, err = createContainer(ctx, t, clientPool, ownerID, testCORSContainerName)
+require.NoError(t, err, version)
 // See the logs from the command execution.
 server, cancel := runServer(file.Name())
-clientPool := getPool(ctx, t, key)
+CID, err := createContainer(ctx, t, clientPool, ownerID, testContainerName)
-CID, err := createContainer(ctx, t, clientPool, ownerID)
 require.NoError(t, err, version)
 jsonToken, binaryToken := makeBearerTokens(t, key, ownerID, version)
@@ -94,6 +101,7 @@ func TestIntegration(t *testing.T) {
 t.Run("get by attribute "+version, func(t *testing.T) { getByAttr(ctx, t, clientPool, ownerID, CID) })
 t.Run("get zip "+version, func(t *testing.T) { getZip(ctx, t, clientPool, ownerID, CID) })
 t.Run("test namespaces "+version, func(t *testing.T) { checkNamespaces(ctx, t, clientPool, ownerID, CID) })
+t.Run("test status codes "+version, func(t *testing.T) { checkStatusCodes(ctx, t, clientPool, ownerID, version) })
 cancel()
 server.Wait()
@@ -110,6 +118,8 @@ func runServer(pathToWallet string) (App, context.CancelFunc) {
 v.config().Set(cfgWalletPath, pathToWallet)
 v.config().Set(cfgWalletPassphrase, "")
+v.config().Set(cfgContainersCORS, testCORSContainerName+"."+containerv2.SysAttributeZoneDefault)
 application := newApp(cancelCtx, v)
 go application.Serve()
@@ -260,7 +270,7 @@ func putWithDuplicateKeys(t *testing.T, CID cid.ID) {
 body, err := io.ReadAll(resp.Body)
 require.NoError(t, err)
-require.Equal(t, "key duplication error: "+attr+"\n", string(body))
+require.Contains(t, string(body), "key duplication error: "+attr+"\n")
 require.Equal(t, http.StatusBadRequest, resp.StatusCode)
 }
@@ -429,7 +439,80 @@ func checkNamespaces(ctx context.Context, t *testing.T, clientPool *pool.Pool, o
 resp, err = http.DefaultClient.Do(req)
 require.NoError(t, err)
 require.Equal(t, http.StatusNotFound, resp.StatusCode)
+}
+func checkStatusCodes(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, version string) {
+cli := http.Client{Timeout: 30 * time.Second}
+t.Run("container not found by name", func(t *testing.T) {
+resp, err := cli.Get(testHost + "/get/unknown/object")
+require.NoError(t, err)
+require.Equal(t, http.StatusNotFound, resp.StatusCode)
+requireBodyContains(t, resp, "container not found")
+})
+t.Run("container not found by cid", func(t *testing.T) {
+cnrIDTest := cidtest.ID()
+resp, err := cli.Get(testHost + "/get/" + cnrIDTest.EncodeToString() + "/object")
+require.NoError(t, err)
+requireBodyContains(t, resp, "container not found")
+require.Equal(t, http.StatusNotFound, resp.StatusCode)
+})
+t.Run("object not found in storage", func(t *testing.T) {
+resp, err := cli.Get(testHost + "/get_by_attribute/" + testContainerName + "/FilePath/object2")
+require.NoError(t, err)
+requireBodyContains(t, resp, "object not found")
+require.Equal(t, http.StatusNotFound, resp.StatusCode)
+})
+t.Run("access denied", func(t *testing.T) {
+basicACL := acl.Private
+var recs []*eacl.Record
+if version == "1.2.7" {
+basicACL = acl.PublicRWExtended
+rec := eacl.NewRecord()
+rec.SetAction(eacl.ActionDeny)
+rec.SetOperation(eacl.OperationGet)
+recs = append(recs, rec)
+}
+cnrID, err := createContainerBase(ctx, t, clientPool, ownerID, basicACL, "")
+require.NoError(t, err)
+key, err := keys.NewPrivateKey()
+require.NoError(t, err)
+jsonToken, _ := makeBearerTokens(t, key, ownerID, version, recs...)
+t.Run("get", func(t *testing.T) {
+request, err := http.NewRequest(http.MethodGet, testHost+"/get/"+cnrID.EncodeToString()+"/object", nil)
+require.NoError(t, err)
+request.Header.Set("Authorization", "Bearer "+jsonToken)
+resp, err := cli.Do(request)
+require.NoError(t, err)
+requireBodyContains(t, resp, "access denied")
+require.Equal(t, http.StatusForbidden, resp.StatusCode)
+})
+t.Run("upload", func(t *testing.T) {
+request, _, _ := makePutRequest(t, testHost+"/upload/"+cnrID.EncodeToString())
+request.Header.Set("Authorization", "Bearer "+jsonToken)
+resp, err := cli.Do(request)
+require.NoError(t, err)
+requireBodyContains(t, resp, "access denied")
+require.Equal(t, http.StatusForbidden, resp.StatusCode)
+})
+})
+}
+func requireBodyContains(t *testing.T, resp *http.Response, msg string) {
+data, err := io.ReadAll(resp.Body)
+require.NoError(t, err)
+defer resp.Body.Close()
+require.Contains(t, strings.ToLower(string(data)), strings.ToLower(msg))
 }
 func createDockerContainer(ctx context.Context, t *testing.T, image string) testcontainers.Container {
@@ -477,7 +560,11 @@ func getPool(ctx context.Context, t *testing.T, key *keys.PrivateKey) *pool.Pool
 return clientPool
 }
-func createContainer(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID) (cid.ID, error) {
+func createContainer(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, name string) (cid.ID, error) {
+return createContainerBase(ctx, t, clientPool, ownerID, acl.PublicRWExtended, name)
+}
+func createContainerBase(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, basicACL acl.Basic, name string) (cid.ID, error) {
 var policy netmap.PlacementPolicy
 err := policy.DecodeString("REP 1")
 require.NoError(t, err)
@@ -485,24 +572,28 @@ func createContainer(ctx context.Context, t *testing.T, clientPool *pool.Pool, o
 var cnr container.Container
 cnr.Init()
 cnr.SetPlacementPolicy(policy)
-cnr.SetBasicACL(acl.PublicRWExtended)
+cnr.SetBasicACL(basicACL)
 cnr.SetOwner(ownerID)
 container.SetCreationTime(&cnr, time.Now())
+if name != "" {
 var domain container.Domain
-domain.SetName(testContainerName)
+domain.SetName(name)
 cnr.SetAttribute(containerv2.SysAttributeName, domain.Name())
 cnr.SetAttribute(containerv2.SysAttributeZone, domain.Zone())
+}
-var waitPrm pool.WaitParams
+prm := pool.PrmContainerPut{
-waitPrm.SetTimeout(15 * time.Second)
+ClientParams: client.PrmContainerPut{
-waitPrm.SetPollInterval(3 * time.Second)
+Container: &cnr,
+},
-var prm pool.PrmContainerPut
+WaitParams: &pool.WaitParams{
-prm.SetContainer(cnr)
+Timeout: 15 * time.Second,
-prm.SetWaitParams(waitPrm)
+PollInterval: 3 * time.Second,
+},
+}
 CID, err := clientPool.PutContainer(ctx, prm)
 if err != nil {
@@ -549,13 +640,18 @@ func registerUser(t *testing.T, ctx context.Context, aioContainer testcontainers
 require.NoError(t, err)
 }
-func makeBearerTokens(t *testing.T, key *keys.PrivateKey, ownerID user.ID, version string) (jsonTokenBase64, binaryTokenBase64 string) {
+func makeBearerTokens(t *testing.T, key *keys.PrivateKey, ownerID user.ID, version string, records ...*eacl.Record) (jsonTokenBase64, binaryTokenBase64 string) {
 tkn := new(bearer.Token)
 tkn.ForUser(ownerID)
 tkn.SetExp(10000)
 if version == "1.2.7" {
-tkn.SetEACLTable(*eacl.NewTable())
+table := eacl.NewTable()
+for i := range records {
+table.AddRecord(records[i])
+}
+tkn.SetEACLTable(*table)
 } else {
 tkn.SetImpersonate(true)
 }

View file

@@ -41,6 +41,7 @@ type zapCoreTagFilterWrapper struct {
 type TagFilterSettings interface {
 LevelEnabled(tag string, lvl zapcore.Level) bool
+DefaultEnabled(lvl zapcore.Level) bool
 }
 func (c *zapCoreTagFilterWrapper) Enabled(level zapcore.Level) bool {
@@ -63,24 +64,26 @@ func (c *zapCoreTagFilterWrapper) Check(entry zapcore.Entry, checked *zapcore.Ch
 }
 func (c *zapCoreTagFilterWrapper) Write(entry zapcore.Entry, fields []zapcore.Field) error {
-if c.shouldSkip(entry, fields) || c.shouldSkip(entry, c.extra) {
+if c.shouldSkip(entry, fields, c.extra) {
 return nil
 }
 return c.core.Write(entry, fields)
 }
-func (c *zapCoreTagFilterWrapper) shouldSkip(entry zapcore.Entry, fields []zap.Field) bool {
+func (c *zapCoreTagFilterWrapper) shouldSkip(entry zapcore.Entry, fields []zap.Field, extra []zap.Field) bool {
 for _, field := range fields {
 if field.Key == logs.TagFieldName && field.Type == zapcore.StringType {
-if !c.settings.LevelEnabled(field.String, entry.Level) {
-return true
-}
-break
+return !c.settings.LevelEnabled(field.String, entry.Level)
 }
 }
+for _, field := range extra {
+if field.Key == logs.TagFieldName && field.Type == zapcore.StringType {
+return !c.settings.LevelEnabled(field.String, entry.Level)
+}
+}
-return false
+return !c.settings.DefaultEnabled(entry.Level)
 }
 func (c *zapCoreTagFilterWrapper) Sync() error {
@@ -127,14 +130,13 @@ func newLogEncoder() zapcore.Encoder {
 //
 // See also zapcore.Level, zap.NewProductionConfig, zap.AddStacktrace.
 func newStdoutLogger(v *viper.Viper, lvl zap.AtomicLevel, loggerSettings LoggerAppSettings, tagSetting TagFilterSettings) *Logger {
-stdout := zapcore.AddSync(os.Stderr)
+stdout := zapcore.AddSync(os.Stdout)
 consoleOutCore := zapcore.NewCore(newLogEncoder(), stdout, lvl)
 consoleOutCore = applyZapCoreMiddlewares(consoleOutCore, v, loggerSettings, tagSetting)
 return &Logger{
 logger: zap.New(consoleOutCore, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))),
-lvl: lvl,
 }
 }
@@ -152,7 +154,6 @@ func newJournaldLogger(v *viper.Viper, lvl zap.AtomicLevel, loggerSettings Logge
 return &Logger{
 logger: zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))),
-lvl: lvl,
 }
 }

View file

@@ -74,7 +74,6 @@ func newServer(ctx context.Context, serverInfo ServerInfo) (*server, error) {
 ln = tls.NewListener(ln, &tls.Config{
 GetCertificate: tlsProvider.GetCertificate,
-NextProtos: []string{"h2"}, // required to enable HTTP/2 requests in `http.Serve`
 })
 }

View file

@@ -18,7 +18,7 @@ import (
 "time"
 "github.com/stretchr/testify/require"
-"golang.org/x/net/http2"
+"github.com/valyala/fasthttp"
 )
 const (
@@ -26,14 +26,10 @@ const (
 expHeaderValue = "Bar"
 )
-func TestHTTP2TLS(t *testing.T) {
+func TestHTTP_TLS(t *testing.T) {
 ctx := context.Background()
 certPath, keyPath := prepareTestCerts(t)
-srv := &http.Server{
-Handler: http.HandlerFunc(testHandler),
-}
 tlsListener, err := newServer(ctx, ServerInfo{
 Address: ":0",
 TLS: ServerTLSInfo{
@@ -47,37 +43,34 @@ func TestHTTP2TLS(t *testing.T) {
 addr := fmt.Sprintf("https://localhost:%d", port)
 go func() {
-_ = srv.Serve(tlsListener.Listener())
+_ = fasthttp.Serve(tlsListener.Listener(), testHandler)
 }()
-// Server is running, now send HTTP/2 request
 tlsClientConfig := &tls.Config{
 InsecureSkipVerify: true,
 }
-cliHTTP1 := http.Client{Transport: &http.Transport{TLSClientConfig: tlsClientConfig}}
+cliHTTP := http.Client{Transport: &http.Transport{}}
-cliHTTP2 := http.Client{Transport: &http2.Transport{TLSClientConfig: tlsClientConfig}}
+cliHTTPS := http.Client{Transport: &http.Transport{TLSClientConfig: tlsClientConfig}}
 req, err := http.NewRequest("GET", addr, nil)
 require.NoError(t, err)
 req.Header[expHeaderKey] = []string{expHeaderValue}
-resp, err := cliHTTP1.Do(req)
+resp, err := cliHTTPS.Do(req)
 require.NoError(t, err)
 require.Equal(t, http.StatusOK, resp.StatusCode)
-resp, err = cliHTTP2.Do(req)
+_, err = cliHTTP.Do(req)
-require.NoError(t, err)
+require.ErrorContains(t, err, "failed to verify certificate")
-require.Equal(t, http.StatusOK, resp.StatusCode)
 }
-func testHandler(resp http.ResponseWriter, req *http.Request) {
+func testHandler(ctx *fasthttp.RequestCtx) {
-hdr, ok := req.Header[expHeaderKey]
+hdr := ctx.Request.Header.Peek(expHeaderKey)
-if !ok || len(hdr) != 1 || hdr[0] != expHeaderValue {
+if len(hdr) == 0 || string(hdr) != expHeaderValue {
-resp.WriteHeader(http.StatusBadRequest)
+ctx.Response.SetStatusCode(http.StatusBadRequest)
 } else {
-resp.WriteHeader(http.StatusOK)
+ctx.Response.SetStatusCode(http.StatusOK)
 }
 }

View file

@@ -16,11 +16,13 @@ import (
 "time"
 "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
+"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
 "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
 internalnet "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/net"
 "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/service/frostfs"
 "git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
 grpctracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
+qostagging "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
 treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
 "github.com/spf13/pflag"
@@ -111,7 +113,7 @@ const (
 cfgLoggerTags = "logger.tags"
 cfgLoggerTagsPrefixTmpl = cfgLoggerTags + ".%d."
-cfgLoggerTagsNameTmpl = cfgLoggerTagsPrefixTmpl + "name"
+cfgLoggerTagsNameTmpl = cfgLoggerTagsPrefixTmpl + "names"
 cfgLoggerTagsLevelTmpl = cfgLoggerTagsPrefixTmpl + "level"
 // Wallet.
@@ -154,18 +156,21 @@ const (
 cfgBucketsCacheLifetime = "cache.buckets.lifetime"
 cfgBucketsCacheSize = "cache.buckets.size"
 cfgNetmapCacheLifetime = "cache.netmap.lifetime"
+cfgCORSCacheLifetime = "cache.cors.lifetime"
+cfgCORSCacheSize = "cache.cors.size"
 // Bucket resolving options.
 cfgResolveNamespaceHeader = "resolve_bucket.namespace_header"
 cfgResolveDefaultNamespaces = "resolve_bucket.default_namespaces"
 // CORS.
-cfgCORSAllowOrigin = "cors.allow_origin"
+cfgCORS = "cors"
-cfgCORSAllowMethods = "cors.allow_methods"
+cfgCORSAllowOrigin = cfgCORS + ".allow_origin"
-cfgCORSAllowHeaders = "cors.allow_headers"
+cfgCORSAllowMethods = cfgCORS + ".allow_methods"
-cfgCORSExposeHeaders = "cors.expose_headers"
+cfgCORSAllowHeaders = cfgCORS + ".allow_headers"
-cfgCORSAllowCredentials = "cors.allow_credentials"
+cfgCORSExposeHeaders = cfgCORS + ".expose_headers"
-cfgCORSMaxAge = "cors.max_age"
+cfgCORSAllowCredentials = cfgCORS + ".allow_credentials"
+cfgCORSMaxAge = cfgCORS + ".max_age"
 // Multinet.
 cfgMultinetEnabled = "multinet.enabled"
@@ -178,6 +183,9 @@ const (
 cfgFeaturesEnableFilepathFallback = "features.enable_filepath_fallback"
 cfgFeaturesTreePoolNetmapSupport = "features.tree_pool_netmap_support"
+// Containers.
+cfgContainersCORS = "containers.cors"
 // Command line args.
 cmdHelp = "help"
 cmdVersion = "version"
@@ -200,7 +208,6 @@ var defaultTags = []string{logs.TagApp, logs.TagDatapath, logs.TagExternalStorag
 type Logger struct {
 logger *zap.Logger
-lvl zap.AtomicLevel
 }
 type appCfg struct {
@@ -508,8 +515,8 @@ func fetchLogTagsConfig(v *viper.Viper, defaultLvl zapcore.Level) (map[string]za
 res := make(map[string]zapcore.Level)
 for i := 0; ; i++ {
-name := v.GetString(fmt.Sprintf(cfgLoggerTagsNameTmpl, i))
+tagNames := v.GetString(fmt.Sprintf(cfgLoggerTagsNameTmpl, i))
-if name == "" {
+if tagNames == "" {
 break
 }
@@ -521,7 +528,12 @@ func fetchLogTagsConfig(v *viper.Viper, defaultLvl zapcore.Level) (map[string]za
 }
 }
-res[name] = lvl
+for _, tagName := range strings.Split(tagNames, ",") {
+tagName = strings.TrimSpace(tagName)
+if len(tagName) != 0 {
+res[tagName] = lvl
+}
+}
 }
 if len(res) == 0 && !v.IsSet(cfgLoggerTags) {
@@ -670,6 +682,8 @@ func (a *app) initPools(ctx context.Context) {
 grpc.WithUnaryInterceptor(grpctracing.NewUnaryClientInteceptor()),
 grpc.WithStreamInterceptor(grpctracing.NewStreamClientInterceptor()),
 grpc.WithContextDialer(a.settings.dialerSource.GrpcContextDialer()),
+grpc.WithChainUnaryInterceptor(qostagging.NewUnaryClientInteceptor()),
+grpc.WithChainStreamInterceptor(qostagging.NewStreamClientInterceptor()),
 }
 prm.SetGRPCDialOptions(interceptors...)
 prmTree.SetGRPCDialOptions(interceptors...)
@@ -756,6 +770,15 @@ func getNetmapCacheOptions(v *viper.Viper, l *zap.Logger) *cache.NetmapCacheConf
 return cacheCfg
 }
+func getCORSCacheOptions(v *viper.Viper, l *zap.Logger) *cache.Config {
+cacheCfg := cache.DefaultCORSConfig(l)
+cacheCfg.Lifetime = fetchCacheLifetime(v, l, cfgCORSCacheLifetime, cacheCfg.Lifetime)
+cacheCfg.Size = fetchCacheSize(v, l, cfgCORSCacheSize, cacheCfg.Size)
+return cacheCfg
+}
 func fetchCacheLifetime(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue time.Duration) time.Duration {
 if v.IsSet(cfgEntry) {
 lifetime := v.GetDuration(cfgEntry)
@@ -851,3 +874,18 @@ func fetchArchiveCompression(v *viper.Viper) bool {
 }
 return v.GetBool(cfgArchiveCompression)
 }
+func fetchCORSConfig(v *viper.Viper) *data.CORSRule {
+if !v.IsSet(cfgCORS) {
+return nil
+}
+return &data.CORSRule{
+AllowedOrigins: []string{v.GetString(cfgCORSAllowOrigin)},
+AllowedMethods: v.GetStringSlice(cfgCORSAllowMethods),
+AllowedHeaders: v.GetStringSlice(cfgCORSAllowHeaders),
+ExposeHeaders: v.GetStringSlice(cfgCORSExposeHeaders),
+AllowedCredentials: v.GetBool(cfgCORSAllowCredentials),
+MaxAgeSeconds: fetchCORSMaxAge(v),
+}
+}

View file

@@ -20,8 +20,9 @@ HTTP_GW_LOGGER_SAMPLING_ENABLED=false
 HTTP_GW_LOGGER_SAMPLING_INITIAL=100
 HTTP_GW_LOGGER_SAMPLING_THEREAFTER=100
 HTTP_GW_LOGGER_SAMPLING_INTERVAL=1s
-HTTP_GW_LOGGER_TAGS_0_NAME=app
+HTTP_GW_LOGGER_TAGS_0_NAMES=app,datapath
-HTTP_GW_LOGGER_TAGS_1_NAME=datapath
+HTTP_GW_LOGGER_TAGS_0_LEVEL=level
+HTTP_GW_LOGGER_TAGS_1_NAME=external_storage_tree
 HTTP_GW_SERVER_0_ADDRESS=0.0.0.0:443
 HTTP_GW_SERVER_0_TLS_ENABLED=false
@@ -129,6 +130,9 @@ HTTP_GW_CACHE_BUCKETS_LIFETIME=1m
 HTTP_GW_CACHE_BUCKETS_SIZE=1000
 # Cache which stores netmap
 HTTP_GW_CACHE_NETMAP_LIFETIME=1m
+# Cache which stores container CORS configurations
+HTTP_GW_CACHE_CORS_LIFETIME=5m
+HTTP_GW_CACHE_CORS_SIZE=1000
 # Header to determine zone to resolve bucket name
 HTTP_GW_RESOLVE_BUCKET_NAMESPACE_HEADER=X-Frostfs-Namespace
@@ -172,3 +176,6 @@ HTTP_GW_INDEX_PAGE_TEMPLATE_PATH=internal/handler/templates/index.gotmpl
 HTTP_GW_FEATURES_ENABLE_FILEPATH_FALLBACK=false
 # Enable using new version of tree pool, which uses netmap to select nodes, for requests to tree service
 HTTP_GW_FEATURES_TREE_POOL_NETMAP_SUPPORT=true
+# Containers properties
+HTTP_GW_CONTAINERS_CORS=AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj

View file

@@ -30,8 +30,7 @@ logger:
 thereafter: 100
 interval: 1s
 tags:
-- name: app
+- names: app,datapath
-- name: datapath
 level: debug
 server:
@@ -156,6 +155,10 @@ cache:
 # Cache which stores netmap
 netmap:
 lifetime: 1m
+# Cache which stores container CORS configurations
+cors:
+lifetime: 5m
+size: 1000
 resolve_bucket:
 namespace_header: X-Frostfs-Namespace
@@ -191,3 +194,6 @@ features:
 enable_filepath_fallback: false
 # Enable using new version of tree pool, which uses netmap to select nodes, for requests to tree service
 tree_pool_netmap_support: true
+containers:
+cors: AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj

View file

@@ -94,6 +94,8 @@ The `filename` field from the multipart form will be set as `FileName` attribute
 |--------|----------------------------------------------|
 | 200 | Object created successfully. |
 | 400 | Some error occurred during object uploading. |
+| 403 | Access denied. |
+| 409 | Can not upload object due to quota reached. |
 ## Get object
@@ -141,6 +143,7 @@ Get an object (payload and attributes) by an address.
 |--------|------------------------------------------------|
 | 200 | Object got successfully. |
 | 400 | Some error occurred during object downloading. |
+| 403 | Access denied. |
 | 404 | Container or object not found. |
 ###### Body
@@ -183,6 +186,7 @@ Get an object attributes by an address.
 |--------|---------------------------------------------------|
 | 200 | Object head successfully. |
 | 400 | Some error occurred during object HEAD operation. |
+| 403 | Access denied. |
 | 404 | Container or object not found. |
 ## Search object
@@ -233,6 +237,7 @@ If more than one object is found, an arbitrary one will be returned.
 |--------|------------------------------------------------|
 | 200 | Object got successfully. |
 | 400 | Some error occurred during object downloading. |
+| 403 | Access denied. |
 | 404 | Container or object not found. |
 #### HEAD
@@ -269,6 +274,7 @@ If more than one object is found, an arbitrary one will be used to get attribute
 |--------|---------------------------------------|
 | 200 | Object head successfully. |
 | 400 | Some error occurred during operation. |
+| 403 | Access denied. |
 | 404 | Container or object not found. |
 ## Download archive
@@ -305,15 +311,15 @@ Archive can be compressed (see http-gw [configuration](gate-configuration.md#arc
 ###### Headers
 | Header | Description |
-|-----------------------|-------------------------------------------------------------------------------------------------------------------|
+|-----------------------|---------------------------------------------------------------------------------------------|
 | `Content-Disposition` | Indicate how to browsers should treat file (`attachment`). Set `filename` as `archive.zip`. |
 | `Content-Type` | Indicate content type of object. Set to `application/zip` |
 ###### Status codes
 | Status | Description |
-|--------|-----------------------------------------------------|
+|--------|------------------------------------------------|
 | 200 | Object got successfully. |
 | 400 | Some error occurred during object downloading. |
+| 403 | Access denied. |
 | 404 | Container or objects not found. |
-| 500 | Some inner error (e.g. error on streaming objects). |

View file

@ -60,6 +60,7 @@ $ cat http.log
| `index_page` | [Index page configuration](#index_page-section) | | `index_page` | [Index page configuration](#index_page-section) |
| `multinet` | [Multinet configuration](#multinet-section) | | `multinet` | [Multinet configuration](#multinet-section) |
| `features` | [Features configuration](#features-section) | | `features` | [Features configuration](#features-section) |
| `containers` | [Containers configuration](#containers-section) |
# General section # General section
@ -175,10 +176,9 @@ logger:
thereafter: 100 thereafter: 100
interval: 1s interval: 1s
tags: tags:
- name: "app" - names: "app,datapath"
level: info level: info
- name: "datapath" - names: "external_storage_tree"
- name: "external_storage_tree"
``` ```
| Parameter | Type | SIGHUP reload | Default value | Description | | Parameter | Type | SIGHUP reload | Default value | Description |
@ -198,13 +198,13 @@ parameter. Available tags:
```yaml ```yaml
tags: tags:
- name: "app" - names: "app,datapath"
level: info level: info
``` ```
| Parameter | Type | SIGHUP reload | Default value | Description | | Parameter | Type | SIGHUP reload | Default value | Description |
|-----------------------|------------|---------------|---------------------------|-------------------------------------------------------------------------------------------------------| |-----------|------------|---------------|---------------------------|-------------------------------------------------------------------------------------------------------|
| `name` | `string` | yes | | Tag name. See possible values below in the `Tag values` section. | | `names` | `[]string` | yes | | Tag names separated by `,`. See possible values below in the `Tag values` section. |
| `level` | `string` | yes | Value from `logger.level` | Logging level for specific tag. Possible values: `debug`, `info`, `warn`, `dpanic`, `panic`, `fatal`. | | `level` | `string` | yes | Value from `logger.level` | Logging level for specific tag. Possible values: `debug`, `info`, `warn`, `dpanic`, `panic`, `fatal`. |
### Tag values ### Tag values
@ -382,12 +382,16 @@ cache:
size: 1000 size: 1000
netmap: netmap:
lifetime: 1m lifetime: 1m
cors:
lifetime: 5m
size: 1000
``` ```
| Parameter | Type | Default value | Description | | Parameter | Type | Default value | Description |
|-----------|-----------------------------------|---------------------------------|---------------------------------------------------------------------------| |-----------|-----------------------------------|---------------------------------|---------------------------------------------------------------------------|
| `buckets` | [Cache config](#cache-subsection) | `lifetime: 60s`<br>`size: 1000` | Cache which contains mapping of bucket name to bucket info. | | `buckets` | [Cache config](#cache-subsection) | `lifetime: 60s`<br>`size: 1000` | Cache which contains mapping of bucket name to bucket info. |
| `netmap` | [Cache config](#cache-subsection) | `lifetime: 1m` | Cache which stores netmap. `netmap.size` isn't applicable for this cache. | | `netmap` | [Cache config](#cache-subsection) | `lifetime: 1m` | Cache which stores netmap. `netmap.size` isn't applicable for this cache. |
| `cors` | [Cache config](#cache-subsection) | `lifetime: 5m`<br>`size: 1000` | Cache which stores container CORS configurations. |
#### `cache` subsection #### `cache` subsection
@ -441,7 +445,7 @@ index_page:
# `cors` section # `cors` section
Parameters for CORS (used in OPTIONS requests and responses in all handlers). Parameters for CORS (used in OPTIONS requests and responses in all handlers).
If values are not set, headers will not be included in the response. If values are not set, settings from the CORS container will be used.
```yaml ```yaml
cors: cors:
@ -515,3 +519,16 @@ features:
|-------------------------------------|--------|---------------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| |-------------------------------------|--------|---------------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `features.enable_filepath_fallback` | `bool` | yes | `false` | Enable using a fallback path to search for an object by attribute. If the value of the `FilePath` attribute in the request contains no `/` symbols or a single leading `/` symbol and the object was not found, then an attempt is made to search for the object by the attribute `FileName`. | | `features.enable_filepath_fallback` | `bool` | yes | `false` | Enable using a fallback path to search for an object by attribute. If the value of the `FilePath` attribute in the request contains no `/` symbols or a single leading `/` symbol and the object was not found, then an attempt is made to search for the object by the attribute `FileName`. |
| `features.tree_pool_netmap_support` | `bool` | no | `false` | Enable using the new version of the tree pool, which uses the netmap to select nodes, for requests to the tree service. | | `features.tree_pool_netmap_support` | `bool` | no | `false` | Enable using the new version of the tree pool, which uses the netmap to select nodes, for requests to the tree service. |
# `containers` section
Section for well-known containers to store data and settings.
```yaml
containers:
cors: AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|-------------|----------|---------------|---------------|-----------------------------------------|
| `cors` | `string` | no | | Container name for CORS configurations. |

go.mod
@ -1,10 +1,11 @@
module git.frostfs.info/TrueCloudLab/frostfs-http-gw module git.frostfs.info/TrueCloudLab/frostfs-http-gw
go 1.22 go 1.23
require ( require (
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241125133852-37bd75821121 git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241125133852-37bd75821121
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250130095343-593dd77d841a git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250317082814-87bb55f992dc
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02 git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
github.com/bluele/gcache v0.0.2 github.com/bluele/gcache v0.0.2
@ -26,7 +27,6 @@ require (
go.opentelemetry.io/otel/trace v1.31.0 go.opentelemetry.io/otel/trace v1.31.0
go.uber.org/zap v1.27.0 go.uber.org/zap v1.27.0
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
golang.org/x/net v0.30.0
golang.org/x/sys v0.28.0 golang.org/x/sys v0.28.0
google.golang.org/grpc v1.69.2 google.golang.org/grpc v1.69.2
) )
@ -125,6 +125,7 @@ require (
go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.uber.org/multierr v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.31.0 // indirect golang.org/x/crypto v0.31.0 // indirect
golang.org/x/net v0.30.0 // indirect
golang.org/x/sync v0.10.0 // indirect golang.org/x/sync v0.10.0 // indirect
golang.org/x/term v0.27.0 // indirect golang.org/x/term v0.27.0 // indirect
golang.org/x/text v0.21.0 // indirect golang.org/x/text v0.21.0 // indirect

go.sum
@ -42,10 +42,12 @@ git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc= git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250130095343-593dd77d841a h1:Ud+3zz4WP9HPxEQxDPJZPpiPdm30nDNSKucsWP9L54M=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250130095343-593dd77d841a/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241125133852-37bd75821121 h1:/Z8DfbLZXp7exUQWUKoG/9tbFdI9d5lV1qSReaYoG8I= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241125133852-37bd75821121 h1:/Z8DfbLZXp7exUQWUKoG/9tbFdI9d5lV1qSReaYoG8I=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241125133852-37bd75821121/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241125133852-37bd75821121/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe h1:81gDNdWNLP24oMQukRiCE9R1wGSh0l0dRq3F1W+Oesc=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250317082814-87bb55f992dc h1:fS6Yp4GvI+C22UrWz9oqJXwvQw5Q6SmADIY4H9eIQsc=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250317082814-87bb55f992dc/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM= git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8= git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8=

internal/cache/cors.go vendored Normal file
@ -0,0 +1,62 @@
package cache
import (
"fmt"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/bluele/gcache"
"go.uber.org/zap"
)
// CORSCache contains cache with CORS objects.
type CORSCache struct {
cache gcache.Cache
logger *zap.Logger
}
const (
// DefaultCORSCacheSize is a default maximum number of entries in cache.
DefaultCORSCacheSize = 1e3
// DefaultCORSCacheLifetime is a default lifetime of entries in cache.
DefaultCORSCacheLifetime = 5 * time.Minute
)
// DefaultCORSConfig returns new default cache expiration values.
func DefaultCORSConfig(logger *zap.Logger) *Config {
return &Config{
Size: DefaultCORSCacheSize,
Lifetime: DefaultCORSCacheLifetime,
Logger: logger,
}
}
// NewCORSCache creates an object of CORSCache.
func NewCORSCache(config *Config) *CORSCache {
gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
return &CORSCache{cache: gc, logger: config.Logger}
}
// Get returns a cached object.
func (o *CORSCache) Get(cnrID cid.ID) *data.CORSConfiguration {
entry, err := o.cache.Get(cnrID)
if err != nil {
return nil
}
result, ok := entry.(*data.CORSConfiguration)
if !ok {
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
return nil
}
return result
}
// Put puts an object to cache.
func (o *CORSCache) Put(cnrID cid.ID, cors *data.CORSConfiguration) error {
return o.cache.Set(cnrID, cors)
}
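
The cache can be exercised on its own, outside the request path. Below is a minimal sketch (not part of the change set) that assumes `zap.NewNop()` is an acceptable logger and uses a zero-value container ID purely for illustration:

```go
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"go.uber.org/zap"
)

func main() {
	// Build the cache with the defaults introduced above (1000 entries, 5m lifetime).
	corsCache := cache.NewCORSCache(cache.DefaultCORSConfig(zap.NewNop()))

	// In the gateway the key comes from resolving the request's container;
	// a zero-value ID is enough to demonstrate Put/Get.
	var cnrID cid.ID
	cfg := &data.CORSConfiguration{
		CORSRules: []data.CORSRule{{
			AllowedOrigins: []string{"*"},
			AllowedMethods: []string{"GET", "HEAD"},
		}},
	}

	if err := corsCache.Put(cnrID, cfg); err != nil {
		fmt.Println("put failed:", err)
		return
	}
	if cached := corsCache.Get(cnrID); cached != nil {
		fmt.Println("cached rules:", len(cached.CORSRules))
	}
}
```

A nil result from `Get` means either a miss or an expired entry, so callers fall back to fetching the CORS object from storage.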

internal/data/cors.go Normal file
@ -0,0 +1,18 @@
package data
type (
// CORSConfiguration stores CORS configuration of a request.
CORSConfiguration struct {
CORSRules []CORSRule `xml:"CORSRule" json:"CORSRules"`
}
// CORSRule stores rules for CORS configuration.
CORSRule struct {
AllowedHeaders []string `xml:"AllowedHeader" json:"AllowedHeaders"`
AllowedMethods []string `xml:"AllowedMethod" json:"AllowedMethods"`
AllowedOrigins []string `xml:"AllowedOrigin" json:"AllowedOrigins"`
ExposeHeaders []string `xml:"ExposeHeader" json:"ExposeHeaders"`
MaxAgeSeconds int `xml:"MaxAgeSeconds,omitempty" json:"MaxAgeSeconds,omitempty"`
AllowedCredentials bool `xml:"AllowedCredentials,omitempty" json:"AllowedCredentials,omitempty"`
}
)
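
For reference, the XML shape implied by these struct tags looks roughly like the sketch below; the element values are made up, and the payload is decoded the same way the handler does with `encoding/xml`:

```go
package main

import (
	"encoding/xml"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
)

func main() {
	// Hypothetical payload of a CORS configuration object.
	doc := []byte(`<CORSConfiguration>
  <CORSRule>
    <AllowedOrigin>https://*.example.com</AllowedOrigin>
    <AllowedMethod>GET</AllowedMethod>
    <AllowedHeader>X-Amz-*</AllowedHeader>
    <ExposeHeader>X-Amz-Request-Id</ExposeHeader>
    <MaxAgeSeconds>600</MaxAgeSeconds>
  </CORSRule>
</CORSConfiguration>`)

	var cfg data.CORSConfiguration
	if err := xml.Unmarshal(doc, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("rules: %d, origins: %v\n", len(cfg.CORSRules), cfg.CORSRules[0].AllowedOrigins)
}
```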

@ -223,7 +223,7 @@ func (h *Handler) getDirObjectsNative(ctx context.Context, bucketInfo *data.Buck
return nil, err return nil, err
} }
log := utils.GetReqLogOrDefault(ctx, h.log) log := h.reqLogger(ctx)
dirs := make(map[string]struct{}) dirs := make(map[string]struct{})
result := &GetObjectsResponse{ result := &GetObjectsResponse{
objects: make([]ResponseObject, 0, 100), objects: make([]ResponseObject, 0, 100),
@ -258,7 +258,7 @@ func (h *Handler) headDirObjects(ctx context.Context, cnrID cid.ID, objectIDs Re
go func() { go func() {
defer close(res) defer close(res)
log := utils.GetReqLogOrDefault(ctx, h.log).With( log := h.reqLogger(ctx).With(
zap.String("cid", cnrID.EncodeToString()), zap.String("cid", cnrID.EncodeToString()),
zap.String("path", basePath), zap.String("path", basePath),
) )
@ -273,7 +273,7 @@ func (h *Handler) headDirObjects(ctx context.Context, cnrID cid.ID, objectIDs Re
}) })
if err != nil { if err != nil {
wg.Done() wg.Done()
log.Warn(logs.FailedToSumbitTaskToPool, zap.Error(err), logs.TagField(logs.TagDatapath)) log.Warn(logs.FailedToSubmitTaskToPool, zap.Error(err), logs.TagField(logs.TagDatapath))
} }
select { select {
case <-ctx.Done(): case <-ctx.Done():
@ -328,20 +328,18 @@ type browseParams struct {
listObjects func(ctx context.Context, bucketName *data.BucketInfo, prefix string) (*GetObjectsResponse, error) listObjects func(ctx context.Context, bucketName *data.BucketInfo, prefix string) (*GetObjectsResponse, error)
} }
func (h *Handler) browseObjects(c *fasthttp.RequestCtx, p browseParams) { func (h *Handler) browseObjects(ctx context.Context, req *fasthttp.RequestCtx, p browseParams) {
const S3Protocol = "s3" const S3Protocol = "s3"
const FrostfsProtocol = "frostfs" const FrostfsProtocol = "frostfs"
ctx := utils.GetContextFromRequest(c) ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(
reqLog := utils.GetReqLogOrDefault(ctx, h.log)
log := reqLog.With(
zap.String("bucket", p.bucketInfo.Name), zap.String("bucket", p.bucketInfo.Name),
zap.String("container", p.bucketInfo.CID.EncodeToString()), zap.String("container", p.bucketInfo.CID.EncodeToString()),
zap.String("prefix", p.prefix), zap.String("prefix", p.prefix),
) ))
resp, err := p.listObjects(ctx, p.bucketInfo, p.prefix) resp, err := p.listObjects(ctx, p.bucketInfo, p.prefix)
if err != nil { if err != nil {
logAndSendBucketError(c, log, err) h.logAndSendError(ctx, req, logs.FailedToListObjects, err)
return return
} }
@ -360,7 +358,7 @@ func (h *Handler) browseObjects(c *fasthttp.RequestCtx, p browseParams) {
"parentDir": parentDir, "parentDir": parentDir,
}).Parse(h.config.IndexPageTemplate()) }).Parse(h.config.IndexPageTemplate())
if err != nil { if err != nil {
logAndSendBucketError(c, log, err) h.logAndSendError(ctx, req, logs.FailedToParseTemplate, err)
return return
} }
bucketName := p.bucketInfo.Name bucketName := p.bucketInfo.Name
@ -369,14 +367,14 @@ func (h *Handler) browseObjects(c *fasthttp.RequestCtx, p browseParams) {
bucketName = p.bucketInfo.CID.EncodeToString() bucketName = p.bucketInfo.CID.EncodeToString()
protocol = FrostfsProtocol protocol = FrostfsProtocol
} }
if err = tmpl.Execute(c, &BrowsePageData{ if err = tmpl.Execute(req, &BrowsePageData{
Container: bucketName, Container: bucketName,
Prefix: p.prefix, Prefix: p.prefix,
Objects: objects, Objects: objects,
Protocol: protocol, Protocol: protocol,
HasErrors: resp.hasErrors, HasErrors: resp.hasErrors,
}); err != nil { }); err != nil {
logAndSendBucketError(c, log, err) h.logAndSendError(ctx, req, logs.FailedToExecuteTemplate, err)
return return
} }
} }

internal/handler/cors.go Normal file
@ -0,0 +1,353 @@
package handler
import (
"context"
"encoding/xml"
"errors"
"fmt"
"regexp"
"slices"
"sort"
"strconv"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
qostagging "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/valyala/fasthttp"
"go.uber.org/zap"
)
const (
internalIOTag = "internal"
corsFilePathTemplate = "/%s.cors"
wildcard = "*"
)
var errNoCORS = errors.New("no CORS objects found")
func (h *Handler) Preflight(req *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.Preflight")
defer span.End()
ctx = qostagging.ContextWithIOTag(ctx, internalIOTag)
cidParam, _ := req.UserValue("cid").(string)
reqLog := h.reqLogger(ctx)
log := reqLog.With(zap.String("cid", cidParam))
origin := req.Request.Header.Peek(fasthttp.HeaderOrigin)
if len(origin) == 0 {
log.Error(logs.EmptyOriginRequestHeader, logs.TagField(logs.TagDatapath))
ResponseError(req, "Origin request header needed", fasthttp.StatusBadRequest)
return
}
method := req.Request.Header.Peek(fasthttp.HeaderAccessControlRequestMethod)
if len(method) == 0 {
log.Error(logs.EmptyAccessControlRequestMethodHeader, logs.TagField(logs.TagDatapath))
ResponseError(req, "Access-Control-Request-Method request header needed", fasthttp.StatusBadRequest)
return
}
corsRule := h.config.CORS()
if corsRule != nil {
setCORSHeadersFromRule(req, corsRule)
return
}
corsConfig, err := h.getCORSConfig(ctx, log, cidParam)
if err != nil {
log.Error(logs.CouldNotGetCORSConfiguration, zap.Error(err), logs.TagField(logs.TagDatapath))
status := fasthttp.StatusInternalServerError
if errors.Is(err, errNoCORS) {
status = fasthttp.StatusNotFound
}
ResponseError(req, "could not get CORS configuration: "+err.Error(), status)
return
}
var headers []string
requestHeaders := req.Request.Header.Peek(fasthttp.HeaderAccessControlRequestHeaders)
if len(requestHeaders) > 0 {
headers = strings.Split(string(requestHeaders), ", ")
}
for _, rule := range corsConfig.CORSRules {
for _, o := range rule.AllowedOrigins {
if o == string(origin) || o == wildcard || (strings.Contains(o, "*") && match(o, string(origin))) {
for _, m := range rule.AllowedMethods {
if m == string(method) {
if !checkSubslice(rule.AllowedHeaders, headers) {
continue
}
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, string(origin))
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowMethods, strings.Join(rule.AllowedMethods, ", "))
if headers != nil {
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowHeaders, string(requestHeaders))
}
if rule.ExposeHeaders != nil {
req.Response.Header.Set(fasthttp.HeaderAccessControlExposeHeaders, strings.Join(rule.ExposeHeaders, ", "))
}
if rule.MaxAgeSeconds > 0 || rule.MaxAgeSeconds == -1 {
req.Response.Header.Set(fasthttp.HeaderAccessControlMaxAge, strconv.Itoa(rule.MaxAgeSeconds))
}
if o != wildcard {
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowCredentials, "true")
}
return
}
}
}
}
}
log.Error(logs.CORSRuleWasNotMatched, logs.TagField(logs.TagDatapath))
ResponseError(req, "Forbidden", fasthttp.StatusForbidden)
}
func (h *Handler) SetCORSHeaders(req *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.SetCORSHeaders")
defer span.End()
origin := req.Request.Header.Peek(fasthttp.HeaderOrigin)
if len(origin) == 0 {
return
}
method := req.Request.Header.Peek(fasthttp.HeaderAccessControlRequestMethod)
if len(method) == 0 {
method = req.Method()
}
ctx = qostagging.ContextWithIOTag(ctx, internalIOTag)
cidParam, _ := req.UserValue("cid").(string)
reqLog := h.reqLogger(ctx)
log := reqLog.With(zap.String("cid", cidParam))
corsRule := h.config.CORS()
if corsRule != nil {
setCORSHeadersFromRule(req, corsRule)
return
}
corsConfig, err := h.getCORSConfig(ctx, log, cidParam)
if err != nil {
log.Error(logs.CouldNotGetCORSConfiguration, zap.Error(err), logs.TagField(logs.TagDatapath))
return
}
var withCredentials bool
if tkn, err := tokens.LoadBearerToken(ctx); err == nil && tkn != nil {
withCredentials = true
}
for _, rule := range corsConfig.CORSRules {
for _, o := range rule.AllowedOrigins {
if o == string(origin) || (strings.Contains(o, "*") && len(o) > 1 && match(o, string(origin))) {
for _, m := range rule.AllowedMethods {
if m == string(method) {
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, string(origin))
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowMethods, strings.Join(rule.AllowedMethods, ", "))
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowCredentials, "true")
req.Response.Header.Set(fasthttp.HeaderVary, fasthttp.HeaderOrigin)
return
}
}
}
if o == wildcard {
for _, m := range rule.AllowedMethods {
if m == string(method) {
if withCredentials {
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, string(origin))
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowCredentials, "true")
req.Response.Header.Set(fasthttp.HeaderVary, fasthttp.HeaderOrigin)
} else {
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, o)
}
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowMethods, strings.Join(rule.AllowedMethods, ", "))
return
}
}
}
}
}
}
func (h *Handler) getCORSConfig(ctx context.Context, log *zap.Logger, cidStr string) (*data.CORSConfiguration, error) {
cnrID, err := h.resolveContainer(ctx, cidStr)
if err != nil {
return nil, fmt.Errorf("resolve container '%s': %w", cidStr, err)
}
if cors := h.corsCache.Get(*cnrID); cors != nil {
return cors, nil
}
objID, err := h.getLastCORSObject(ctx, *cnrID)
if err != nil {
return nil, fmt.Errorf("get last cors object: %w", err)
}
var addr oid.Address
addr.SetContainer(h.corsCnrID)
addr.SetObject(objID)
corsObj, err := h.frostfs.GetObject(ctx, PrmObjectGet{
PrmAuth: PrmAuth{
BearerToken: bearerToken(ctx),
},
Address: addr,
})
if err != nil {
return nil, fmt.Errorf("get cors object '%s': %w", addr.EncodeToString(), err)
}
corsConfig := &data.CORSConfiguration{}
if err = xml.NewDecoder(corsObj.Payload).Decode(corsConfig); err != nil {
return nil, fmt.Errorf("decode cors object: %w", err)
}
if err = h.corsCache.Put(*cnrID, corsConfig); err != nil {
log.Warn(logs.CouldntCacheCors, zap.Error(err), logs.TagField(logs.TagDatapath))
}
return corsConfig, nil
}
func (h *Handler) getLastCORSObject(ctx context.Context, cnrID cid.ID) (oid.ID, error) {
filters := object.NewSearchFilters()
filters.AddRootFilter()
filters.AddFilter(object.AttributeFilePath, fmt.Sprintf(corsFilePathTemplate, cnrID), object.MatchStringEqual)
prmAuth := PrmAuth{
BearerToken: bearerToken(ctx),
}
res, err := h.frostfs.SearchObjects(ctx, PrmObjectSearch{
PrmAuth: prmAuth,
Container: h.corsCnrID,
Filters: filters,
})
if err != nil {
return oid.ID{}, fmt.Errorf("search cors versions: %w", err)
}
defer res.Close()
var (
addr oid.Address
obj *object.Object
headErr error
objs = make([]*object.Object, 0)
)
addr.SetContainer(h.corsCnrID)
err = res.Iterate(func(id oid.ID) bool {
addr.SetObject(id)
obj, headErr = h.frostfs.HeadObject(ctx, PrmObjectHead{
PrmAuth: prmAuth,
Address: addr,
})
if headErr != nil {
headErr = fmt.Errorf("head cors object '%s': %w", addr.EncodeToString(), headErr)
return true
}
objs = append(objs, obj)
return false
})
if err != nil {
return oid.ID{}, fmt.Errorf("iterate cors objects: %w", err)
}
if headErr != nil {
return oid.ID{}, headErr
}
if len(objs) == 0 {
return oid.ID{}, errNoCORS
}
sort.Slice(objs, func(i, j int) bool {
versionID1, _ := objs[i].ID()
versionID2, _ := objs[j].ID()
timestamp1 := utils.GetAttributeValue(objs[i].Attributes(), object.AttributeTimestamp)
timestamp2 := utils.GetAttributeValue(objs[j].Attributes(), object.AttributeTimestamp)
if objs[i].CreationEpoch() != objs[j].CreationEpoch() {
return objs[i].CreationEpoch() < objs[j].CreationEpoch()
}
if len(timestamp1) > 0 && len(timestamp2) > 0 && timestamp1 != timestamp2 {
unixTime1, err := strconv.ParseInt(timestamp1, 10, 64)
if err != nil {
return versionID1.EncodeToString() < versionID2.EncodeToString()
}
unixTime2, err := strconv.ParseInt(timestamp2, 10, 64)
if err != nil {
return versionID1.EncodeToString() < versionID2.EncodeToString()
}
return unixTime1 < unixTime2
}
return versionID1.EncodeToString() < versionID2.EncodeToString()
})
objID, _ := objs[len(objs)-1].ID()
return objID, nil
}
func setCORSHeadersFromRule(c *fasthttp.RequestCtx, cors *data.CORSRule) {
c.Response.Header.Set(fasthttp.HeaderAccessControlMaxAge, strconv.Itoa(cors.MaxAgeSeconds))
if len(cors.AllowedOrigins) != 0 {
c.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, cors.AllowedOrigins[0])
}
if len(cors.AllowedMethods) != 0 {
c.Response.Header.Set(fasthttp.HeaderAccessControlAllowMethods, strings.Join(cors.AllowedMethods, ", "))
}
if len(cors.AllowedHeaders) != 0 {
c.Response.Header.Set(fasthttp.HeaderAccessControlAllowHeaders, strings.Join(cors.AllowedHeaders, ", "))
}
if len(cors.ExposeHeaders) != 0 {
c.Response.Header.Set(fasthttp.HeaderAccessControlExposeHeaders, strings.Join(cors.ExposeHeaders, ", "))
}
if cors.AllowedCredentials {
c.Response.Header.Set(fasthttp.HeaderAccessControlAllowCredentials, "true")
}
}
func checkSubslice(slice []string, subSlice []string) bool {
if slices.Contains(slice, wildcard) {
return true
}
for _, r := range subSlice {
if !sliceContains(slice, r) {
return false
}
}
return true
}
func sliceContains(slice []string, str string) bool {
for _, s := range slice {
if s == str || (strings.Contains(s, "*") && match(s, str)) {
return true
}
}
return false
}
func match(tmpl, str string) bool {
regexpStr := "^" + regexp.QuoteMeta(tmpl) + "$"
regexpStr = regexpStr[:strings.Index(regexpStr, "*")-1] + "." + regexpStr[strings.Index(regexpStr, "*"):]
reg := regexp.MustCompile(regexpStr)
return reg.Match([]byte(str))
}
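
Note that `match` is only reached for templates that contain `*` (the callers check `strings.Contains(o, "*")` first) and it rewrites only the first wildcard. The following self-contained sketch reproduces the same transformation and can be used to eyeball which origins a rule admits:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// matchWildcard mirrors the transformation above: QuoteMeta escapes '*' to `\*`,
// and the escaped wildcard is then replaced with `.*` before compiling the pattern.
func matchWildcard(tmpl, str string) bool {
	quoted := "^" + regexp.QuoteMeta(tmpl) + "$"
	i := strings.Index(quoted, "*")
	quoted = quoted[:i-1] + "." + quoted[i:]
	return regexp.MustCompile(quoted).MatchString(str)
}

func main() {
	fmt.Println(matchWildcard("https://*.example.com", "https://www.example.com")) // true
	fmt.Println(matchWildcard("https://*.example.com", "https://example.org"))     // false
	fmt.Println(matchWildcard("prefix.example*", "prefix.example.com"))            // true
}
```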

@ -0,0 +1,930 @@
package handler
import (
"encoding/base64"
"encoding/xml"
"fmt"
"net/http"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
"github.com/valyala/fasthttp"
)
func TestPreflight(t *testing.T) {
hc := prepareHandlerContext(t)
bktName := "bucket-preflight"
cnrID, cnr, err := hc.prepareContainer(bktName, acl.Private)
require.NoError(t, err)
hc.frostfs.SetContainer(cnrID, cnr)
var epoch uint64
t.Run("CORS object", func(t *testing.T) {
for _, tc := range []struct {
name string
corsConfig *data.CORSConfiguration
requestHeaders map[string]string
expectedHeaders map[string]string
status int
}{
{
name: "no CORS configuration",
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
fasthttp.HeaderAccessControlAllowHeaders: "",
fasthttp.HeaderAccessControlExposeHeaders: "",
fasthttp.HeaderAccessControlMaxAge: "",
fasthttp.HeaderAccessControlAllowCredentials: "",
},
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://example.com",
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
},
status: fasthttp.StatusNotFound,
},
{
name: "specific allowed origin",
corsConfig: &data.CORSConfiguration{
CORSRules: []data.CORSRule{
{
AllowedOrigins: []string{"http://example.com"},
AllowedMethods: []string{"GET", "HEAD"},
AllowedHeaders: []string{"Content-Type"},
ExposeHeaders: []string{"x-amz-*", "X-Amz-*"},
MaxAgeSeconds: 900,
},
},
},
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://example.com",
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
fasthttp.HeaderAccessControlRequestHeaders: "Content-Type",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "http://example.com",
fasthttp.HeaderAccessControlAllowMethods: "GET, HEAD",
fasthttp.HeaderAccessControlAllowHeaders: "Content-Type",
fasthttp.HeaderAccessControlExposeHeaders: "x-amz-*, X-Amz-*",
fasthttp.HeaderAccessControlMaxAge: "900",
fasthttp.HeaderAccessControlAllowCredentials: "true",
},
status: fasthttp.StatusOK,
},
{
name: "wildcard allowed origin",
corsConfig: &data.CORSConfiguration{
CORSRules: []data.CORSRule{
{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "HEAD"},
AllowedHeaders: []string{"Content-Type"},
ExposeHeaders: []string{"x-amz-*", "X-Amz-*"},
MaxAgeSeconds: 900,
},
},
},
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://example.com",
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "http://example.com",
fasthttp.HeaderAccessControlAllowMethods: "GET, HEAD",
fasthttp.HeaderAccessControlAllowHeaders: "",
fasthttp.HeaderAccessControlExposeHeaders: "x-amz-*, X-Amz-*",
fasthttp.HeaderAccessControlMaxAge: "900",
fasthttp.HeaderAccessControlAllowCredentials: "",
},
status: fasthttp.StatusOK,
},
{
name: "not allowed header",
corsConfig: &data.CORSConfiguration{
CORSRules: []data.CORSRule{
{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "HEAD"},
AllowedHeaders: []string{"Content-Type"},
},
},
},
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://example.com",
fasthttp.HeaderAccessControlRequestMethod: "GET",
fasthttp.HeaderAccessControlRequestHeaders: "Authorization",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
fasthttp.HeaderAccessControlAllowHeaders: "",
fasthttp.HeaderAccessControlExposeHeaders: "",
fasthttp.HeaderAccessControlMaxAge: "",
fasthttp.HeaderAccessControlAllowCredentials: "",
},
status: fasthttp.StatusForbidden,
},
{
name: "empty Origin header",
corsConfig: &data.CORSConfiguration{
CORSRules: []data.CORSRule{
{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "HEAD"},
},
},
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
fasthttp.HeaderAccessControlAllowHeaders: "",
fasthttp.HeaderAccessControlExposeHeaders: "",
fasthttp.HeaderAccessControlMaxAge: "",
fasthttp.HeaderAccessControlAllowCredentials: "",
},
status: fasthttp.StatusBadRequest,
},
{
name: "empty Access-Control-Request-Method header",
corsConfig: &data.CORSConfiguration{
CORSRules: []data.CORSRule{
{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "HEAD"},
},
},
},
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://example.com",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
fasthttp.HeaderAccessControlAllowHeaders: "",
fasthttp.HeaderAccessControlExposeHeaders: "",
fasthttp.HeaderAccessControlMaxAge: "",
fasthttp.HeaderAccessControlAllowCredentials: "",
},
status: fasthttp.StatusBadRequest,
},
} {
t.Run(tc.name, func(t *testing.T) {
if tc.corsConfig != nil {
epoch++
setCORSObject(t, hc, cnrID, tc.corsConfig, epoch)
}
r := prepareCORSRequest(t, bktName, tc.requestHeaders)
hc.Handler().Preflight(r)
require.Equal(t, tc.status, r.Response.StatusCode())
for k, v := range tc.expectedHeaders {
require.Equal(t, v, string(r.Response.Header.Peek(k)))
}
})
}
})
t.Run("CORS config", func(t *testing.T) {
hc.cfg.cors = &data.CORSRule{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "HEAD"},
AllowedHeaders: []string{"Content-Type", "Content-Encoding"},
ExposeHeaders: []string{"x-amz-*", "X-Amz-*"},
MaxAgeSeconds: 900,
AllowedCredentials: true,
}
r := prepareCORSRequest(t, bktName, map[string]string{
fasthttp.HeaderOrigin: "http://example.com",
fasthttp.HeaderAccessControlRequestMethod: "GET",
})
hc.Handler().Preflight(r)
require.Equal(t, fasthttp.StatusOK, r.Response.StatusCode())
require.Equal(t, "900", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlMaxAge)))
require.Equal(t, "*", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowOrigin)))
require.Equal(t, "GET, HEAD", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowMethods)))
require.Equal(t, "Content-Type, Content-Encoding", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowHeaders)))
require.Equal(t, "x-amz-*, X-Amz-*", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlExposeHeaders)))
require.Equal(t, "true", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowCredentials)))
})
}
func TestSetCORSHeaders(t *testing.T) {
hc := prepareHandlerContext(t)
bktName := "bucket-set-cors-headers"
cnrID, cnr, err := hc.prepareContainer(bktName, acl.Private)
require.NoError(t, err)
hc.frostfs.SetContainer(cnrID, cnr)
var epoch uint64
t.Run("CORS object", func(t *testing.T) {
for _, tc := range []struct {
name string
corsConfig *data.CORSConfiguration
requestHeaders map[string]string
expectedHeaders map[string]string
}{
{
name: "empty Origin header",
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
fasthttp.HeaderVary: "",
fasthttp.HeaderAccessControlAllowCredentials: "",
},
},
{
name: "no CORS configuration",
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
fasthttp.HeaderVary: "",
fasthttp.HeaderAccessControlAllowCredentials: "",
},
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://example.com",
},
},
{
name: "specific allowed origin",
corsConfig: &data.CORSConfiguration{
CORSRules: []data.CORSRule{
{
AllowedOrigins: []string{"http://example.com"},
AllowedMethods: []string{"GET", "HEAD"},
},
},
},
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://example.com",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "http://example.com",
fasthttp.HeaderAccessControlAllowMethods: "GET, HEAD",
fasthttp.HeaderVary: fasthttp.HeaderOrigin,
fasthttp.HeaderAccessControlAllowCredentials: "true",
},
},
{
name: "wildcard allowed origin, with credentials",
corsConfig: &data.CORSConfiguration{
CORSRules: []data.CORSRule{
{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "HEAD"},
},
},
},
requestHeaders: func() map[string]string {
tkn := new(bearer.Token)
err = tkn.Sign(hc.key.PrivateKey)
require.NoError(t, err)
t64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
require.NotEmpty(t, t64)
return map[string]string{
fasthttp.HeaderOrigin: "http://example.com",
fasthttp.HeaderAuthorization: "Bearer " + t64,
}
}(),
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "http://example.com",
fasthttp.HeaderAccessControlAllowMethods: "GET, HEAD",
fasthttp.HeaderVary: fasthttp.HeaderOrigin,
fasthttp.HeaderAccessControlAllowCredentials: "true",
},
},
{
name: "wildcard allowed origin, without credentials",
corsConfig: &data.CORSConfiguration{
CORSRules: []data.CORSRule{
{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "HEAD"},
},
},
},
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://example.com",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "*",
fasthttp.HeaderAccessControlAllowMethods: "GET, HEAD",
fasthttp.HeaderVary: "",
fasthttp.HeaderAccessControlAllowCredentials: "",
},
},
} {
t.Run(tc.name, func(t *testing.T) {
epoch++
setCORSObject(t, hc, cnrID, tc.corsConfig, epoch)
r := prepareCORSRequest(t, bktName, tc.requestHeaders)
hc.Handler().SetCORSHeaders(r)
require.Equal(t, fasthttp.StatusOK, r.Response.StatusCode())
for k, v := range tc.expectedHeaders {
require.Equal(t, v, string(r.Response.Header.Peek(k)))
}
})
}
})
t.Run("CORS config", func(t *testing.T) {
hc.cfg.cors = &data.CORSRule{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "HEAD"},
AllowedHeaders: []string{"Content-Type", "Content-Encoding"},
ExposeHeaders: []string{"x-amz-*", "X-Amz-*"},
MaxAgeSeconds: 900,
AllowedCredentials: true,
}
r := prepareCORSRequest(t, bktName, map[string]string{fasthttp.HeaderOrigin: "http://example.com"})
hc.Handler().SetCORSHeaders(r)
require.Equal(t, "900", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlMaxAge)))
require.Equal(t, "*", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowOrigin)))
require.Equal(t, "GET, HEAD", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowMethods)))
require.Equal(t, "Content-Type, Content-Encoding", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowHeaders)))
require.Equal(t, "x-amz-*, X-Amz-*", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlExposeHeaders)))
require.Equal(t, "true", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowCredentials)))
})
}
func TestCheckSubslice(t *testing.T) {
for _, tc := range []struct {
name string
allowed []string
actual []string
expected bool
}{
{
name: "empty allowed slice",
allowed: []string{},
actual: []string{"str1", "str2", "str3"},
expected: false,
},
{
name: "empty actual slice",
allowed: []string{"str1", "str2", "str3"},
actual: []string{},
expected: true,
},
{
name: "allowed wildcard",
allowed: []string{"str", "*"},
actual: []string{"str1", "str2", "str3"},
expected: true,
},
{
name: "similar allowed and actual",
allowed: []string{"str1", "str2", "str3"},
actual: []string{"str1", "str2", "str3"},
expected: true,
},
{
name: "allowed actual",
allowed: []string{"str", "str1", "str2", "str4"},
actual: []string{"str1", "str2"},
expected: true,
},
{
name: "not allowed actual",
allowed: []string{"str", "str1", "str2", "str4"},
actual: []string{"str1", "str5"},
expected: false,
},
{
name: "wildcard in allowed",
allowed: []string{"str*"},
actual: []string{"str", "str5"},
expected: true,
},
} {
t.Run(tc.name, func(t *testing.T) {
require.Equal(t, tc.expected, checkSubslice(tc.allowed, tc.actual))
})
}
}
func TestAllowedOriginWildcards(t *testing.T) {
hc := prepareHandlerContext(t)
bktName := "bucket-allowed-origin-wildcards"
cnrID, cnr, err := hc.prepareContainer(bktName, acl.Private)
require.NoError(t, err)
hc.frostfs.SetContainer(cnrID, cnr)
cfg := &data.CORSConfiguration{
CORSRules: []data.CORSRule{
{
AllowedOrigins: []string{"*suffix.example"},
AllowedMethods: []string{"GET"},
},
{
AllowedOrigins: []string{"https://*example"},
AllowedMethods: []string{"GET"},
},
{
AllowedOrigins: []string{"prefix.example*"},
AllowedMethods: []string{"GET"},
},
},
}
setCORSObject(t, hc, cnrID, cfg, 1)
for _, tc := range []struct {
name string
handler func(*fasthttp.RequestCtx)
requestHeaders map[string]string
expectedHeaders map[string]string
expectedStatus int
}{
{
name: "set cors headers, empty request cors headers",
handler: hc.Handler().SetCORSHeaders,
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
},
{
name: "set cors headers, invalid origin",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://origin.com",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
},
{
name: "set cors headers, first rule, no symbols in place of wildcard",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "suffix.example",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "suffix.example",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "set cors headers, first rule, valid origin",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://suffix.example",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "http://suffix.example",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "set cors headers, first rule, invalid origin",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://suffix-example",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
},
{
name: "set cors headers, second rule, no symbols in place of wildcard",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://example",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "https://example",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "set cors headers, second rule, valid origin",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "https://www.example",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "set cors headers, second rule, invalid origin",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example.com",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
},
{
name: "set cors headers, third rule, no symbols in place of wildcard",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "prefix.example",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "prefix.example",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "set cors headers, third rule, valid origin",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "prefix.example.com",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "prefix.example.com",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "set cors headers, third rule, invalid origin",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "www.prefix.example",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
},
{
name: "set cors headers, third rule, invalid request method in header",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "prefix.example.com",
fasthttp.HeaderAccessControlRequestMethod: "PUT",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
},
{
name: "set cors headers, third rule, valid request method in header",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "prefix.example.com",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "prefix.example.com",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "preflight, empty request cors headers",
handler: hc.Handler().Preflight,
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
expectedStatus: http.StatusBadRequest,
},
{
name: "preflight, invalid origin",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://origin.com",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
expectedStatus: http.StatusForbidden,
},
{
name: "preflight, first rule, no symbols in place of wildcard",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "suffix.example",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "suffix.example",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name:            "preflight, first rule, valid origin",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://suffix.example",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "http://suffix.example",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "preflight, first rule, invalid origin",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://suffix-example",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
expectedStatus: http.StatusForbidden,
},
{
name: "preflight, second rule, no symbols in place of wildcard",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://example",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "https://example",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "preflight, second rule, valid origin",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "https://www.example",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "preflight, second rule, invalid origin",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
expectedStatus: http.StatusForbidden,
},
{
name: "preflight, third rule, no symbols in place of wildcard",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "prefix.example",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "prefix.example",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "preflight, third rule, valid origin",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "prefix.example.com",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "prefix.example.com",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "preflight, third rule, invalid origin",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "www.prefix.example",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
expectedStatus: http.StatusForbidden,
},
{
name: "preflight, third rule, invalid request method in header",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "prefix.example.com",
fasthttp.HeaderAccessControlRequestMethod: "PUT",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
expectedStatus: http.StatusForbidden,
},
} {
t.Run(tc.name, func(t *testing.T) {
r := prepareCORSRequest(t, bktName, tc.requestHeaders)
tc.handler(r)
expectedStatus := fasthttp.StatusOK
if tc.expectedStatus != 0 {
expectedStatus = tc.expectedStatus
}
require.Equal(t, expectedStatus, r.Response.StatusCode())
for k, v := range tc.expectedHeaders {
require.Equal(t, v, string(r.Response.Header.Peek(k)))
}
})
}
}
func TestAllowedHeaderWildcards(t *testing.T) {
hc := prepareHandlerContext(t)
bktName := "bucket-allowed-header-wildcards"
cnrID, cnr, err := hc.prepareContainer(bktName, acl.Private)
require.NoError(t, err)
hc.frostfs.SetContainer(cnrID, cnr)
cfg := &data.CORSConfiguration{
CORSRules: []data.CORSRule{
{
AllowedOrigins: []string{"https://www.example.com"},
AllowedMethods: []string{"HEAD"},
AllowedHeaders: []string{"*-suffix"},
},
{
AllowedOrigins: []string{"https://www.example.com"},
AllowedMethods: []string{"HEAD"},
AllowedHeaders: []string{"start-*-end"},
},
{
AllowedOrigins: []string{"https://www.example.com"},
AllowedMethods: []string{"HEAD"},
AllowedHeaders: []string{"X-Amz-*"},
},
},
}
setCORSObject(t, hc, cnrID, cfg, 1)
for _, tc := range []struct {
name string
requestHeaders map[string]string
expectedHeaders map[string]string
expectedStatus int
}{
{
name: "first rule, valid headers",
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
fasthttp.HeaderAccessControlRequestHeaders: "header-suffix, -suffix",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlAllowMethods: "HEAD",
fasthttp.HeaderAccessControlAllowHeaders: "header-suffix, -suffix",
},
},
{
name: "first rule, invalid headers",
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
fasthttp.HeaderAccessControlRequestHeaders: "header-suffix-*",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
fasthttp.HeaderAccessControlAllowHeaders: "",
},
expectedStatus: http.StatusForbidden,
},
{
name: "second rule, valid headers",
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
fasthttp.HeaderAccessControlRequestHeaders: "start--end, start-header-end",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlAllowMethods: "HEAD",
fasthttp.HeaderAccessControlAllowHeaders: "start--end, start-header-end",
},
},
{
name: "second rule, invalid header ending",
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
fasthttp.HeaderAccessControlRequestHeaders: "start-header-end-*",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
fasthttp.HeaderAccessControlAllowHeaders: "",
},
expectedStatus: http.StatusForbidden,
},
{
name: "second rule, invalid header beginning",
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
fasthttp.HeaderAccessControlRequestHeaders: "*-start-header-end",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
fasthttp.HeaderAccessControlAllowHeaders: "",
},
expectedStatus: http.StatusForbidden,
},
{
name: "third rule, valid headers",
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
fasthttp.HeaderAccessControlRequestHeaders: "X-Amz-Date, X-Amz-Content-Sha256",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlAllowMethods: "HEAD",
fasthttp.HeaderAccessControlAllowHeaders: "X-Amz-Date, X-Amz-Content-Sha256",
},
},
{
name: "third rule, invalid headers",
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
fasthttp.HeaderAccessControlRequestHeaders: "Authorization",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
fasthttp.HeaderAccessControlAllowHeaders: "",
},
expectedStatus: http.StatusForbidden,
},
} {
t.Run(tc.name, func(t *testing.T) {
r := prepareCORSRequest(t, bktName, tc.requestHeaders)
hc.Handler().Preflight(r)
expectedStatus := http.StatusOK
if tc.expectedStatus != 0 {
expectedStatus = tc.expectedStatus
}
require.Equal(t, expectedStatus, r.Response.StatusCode())
for k, v := range tc.expectedHeaders {
require.Equal(t, v, string(r.Response.Header.Peek(k)))
}
})
}
}
func setCORSObject(t *testing.T, hc *handlerContext, cnrID cid.ID, corsConfig *data.CORSConfiguration, epoch uint64) {
payload, err := xml.Marshal(corsConfig)
require.NoError(t, err)
a := object.NewAttribute()
a.SetKey(object.AttributeFilePath)
a.SetValue(fmt.Sprintf(corsFilePathTemplate, cnrID))
objID := oidtest.ID()
obj := object.New()
obj.SetAttributes(*a)
obj.SetOwnerID(hc.owner)
obj.SetPayload(payload)
obj.SetPayloadSize(uint64(len(payload)))
obj.SetContainerID(hc.corsCnr)
obj.SetID(objID)
obj.SetCreationEpoch(epoch)
var addr oid.Address
addr.SetObject(objID)
addr.SetContainer(hc.corsCnr)
hc.frostfs.SetObject(addr, obj)
}

@ -25,43 +25,38 @@ import (
) )
// DownloadByAddressOrBucketName handles download requests using simple cid/oid or bucketname/key format. // DownloadByAddressOrBucketName handles download requests using simple cid/oid or bucketname/key format.
func (h *Handler) DownloadByAddressOrBucketName(c *fasthttp.RequestCtx) { func (h *Handler) DownloadByAddressOrBucketName(req *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.DownloadByAddressOrBucketName") ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.DownloadByAddressOrBucketName")
defer span.End() defer span.End()
utils.SetContextToRequest(ctx, c)
cidParam := c.UserValue("cid").(string) cidParam := req.UserValue("cid").(string)
oidParam := c.UserValue("oid").(string) oidParam := req.UserValue("oid").(string)
downloadParam := c.QueryArgs().GetBool("download") downloadParam := req.QueryArgs().GetBool("download")
log := utils.GetReqLogOrDefault(ctx, h.log).With( ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(
zap.String("cid", cidParam), zap.String("cid", cidParam),
zap.String("oid", oidParam), zap.String("oid", oidParam),
) ))
bktInfo, err := h.getBucketInfo(ctx, cidParam, log) bktInfo, err := h.getBucketInfo(ctx, cidParam)
if err != nil { if err != nil {
logAndSendBucketError(c, log, err) h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err)
return return
} }
checkS3Err := h.tree.CheckSettingsNodeExists(ctx, bktInfo) checkS3Err := h.tree.CheckSettingsNodeExists(ctx, bktInfo)
if checkS3Err != nil && !errors.Is(checkS3Err, layer.ErrNodeNotFound) { if checkS3Err != nil && !errors.Is(checkS3Err, layer.ErrNodeNotFound) {
log.Error(logs.FailedToCheckIfSettingsNodeExist, zap.String("cid", bktInfo.CID.String()), h.logAndSendError(ctx, req, logs.FailedToCheckIfSettingsNodeExist, checkS3Err)
zap.Error(checkS3Err), logs.TagField(logs.TagExternalStorageTree))
logAndSendBucketError(c, log, checkS3Err)
return return
} }
req := newRequest(c, log)
var objID oid.ID var objID oid.ID
if checkS3Err == nil && shouldDownload(oidParam, downloadParam) { if checkS3Err == nil && shouldDownload(oidParam, downloadParam) {
h.byS3Path(ctx, req, bktInfo.CID, oidParam, h.receiveFile) h.byS3Path(ctx, req, bktInfo.CID, oidParam, h.receiveFile)
} else if err = objID.DecodeString(oidParam); err == nil { } else if err = objID.DecodeString(oidParam); err == nil {
h.byNativeAddress(ctx, req, bktInfo.CID, objID, h.receiveFile) h.byNativeAddress(ctx, req, bktInfo.CID, objID, h.receiveFile)
} else { } else {
h.browseIndex(c, checkS3Err != nil) h.browseIndex(ctx, req, cidParam, oidParam, checkS3Err != nil)
} }
} }
@ -70,12 +65,11 @@ func shouldDownload(oidParam string, downloadParam bool) bool {
} }
// DownloadByAttribute handles attribute-based download requests. // DownloadByAttribute handles attribute-based download requests.
func (h *Handler) DownloadByAttribute(c *fasthttp.RequestCtx) { func (h *Handler) DownloadByAttribute(req *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.DownloadByAttribute") ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.DownloadByAttribute")
defer span.End() defer span.End()
utils.SetContextToRequest(ctx, c)
h.byAttribute(c, h.receiveFile) h.byAttribute(ctx, req, h.receiveFile)
} }
func (h *Handler) search(ctx context.Context, cnrID cid.ID, key, val string, op object.SearchMatchType) (ResObjectSearch, error) { func (h *Handler) search(ctx context.Context, cnrID cid.ID, key, val string, op object.SearchMatchType) (ResObjectSearch, error) {
@ -95,31 +89,33 @@ func (h *Handler) search(ctx context.Context, cnrID cid.ID, key, val string, op
} }
// DownloadZip handles zip by prefix requests. // DownloadZip handles zip by prefix requests.
func (h *Handler) DownloadZip(c *fasthttp.RequestCtx) { func (h *Handler) DownloadZip(req *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.DownloadZip") ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.DownloadZip")
defer span.End() defer span.End()
utils.SetContextToRequest(ctx, c)
scid, _ := c.UserValue("cid").(string) scid, _ := req.UserValue("cid").(string)
prefix, _ := req.UserValue("prefix").(string)
log := utils.GetReqLogOrDefault(ctx, h.log) ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(zap.String("cid", scid), zap.String("prefix", prefix)))
bktInfo, err := h.getBucketInfo(ctx, scid, log)
bktInfo, err := h.getBucketInfo(ctx, scid)
if err != nil { if err != nil {
logAndSendBucketError(c, log, err) h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err)
return return
} }
resSearch, err := h.searchObjectsByPrefix(c, log, bktInfo.CID)
resSearch, err := h.searchObjectsByPrefix(ctx, bktInfo.CID, prefix)
if err != nil { if err != nil {
return return
} }
c.Response.Header.Set(fasthttp.HeaderContentType, "application/zip") req.Response.Header.Set(fasthttp.HeaderContentType, "application/zip")
c.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.zip\"") req.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.zip\"")
c.SetBodyStreamWriter(h.getZipResponseWriter(ctx, log, resSearch, bktInfo)) req.SetBodyStreamWriter(h.getZipResponseWriter(ctx, resSearch, bktInfo))
} }
func (h *Handler) getZipResponseWriter(ctx context.Context, log *zap.Logger, resSearch ResObjectSearch, bktInfo *data.BucketInfo) func(w *bufio.Writer) { func (h *Handler) getZipResponseWriter(ctx context.Context, resSearch ResObjectSearch, bktInfo *data.BucketInfo) func(w *bufio.Writer) {
return func(w *bufio.Writer) { return func(w *bufio.Writer) {
defer resSearch.Close() defer resSearch.Close()
@ -127,20 +123,20 @@ func (h *Handler) getZipResponseWriter(ctx context.Context, log *zap.Logger, res
zipWriter := zip.NewWriter(w) zipWriter := zip.NewWriter(w)
var objectsWritten int var objectsWritten int
errIter := resSearch.Iterate(h.putObjectToArchive(ctx, log, bktInfo.CID, buf, errIter := resSearch.Iterate(h.putObjectToArchive(ctx, bktInfo.CID, buf,
func(obj *object.Object) (io.Writer, error) { func(obj *object.Object) (io.Writer, error) {
objectsWritten++ objectsWritten++
return h.createZipFile(zipWriter, obj) return h.createZipFile(zipWriter, obj)
}), }),
) )
if errIter != nil { if errIter != nil {
log.Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter), logs.TagField(logs.TagDatapath)) h.reqLogger(ctx).Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter), logs.TagField(logs.TagDatapath))
return return
} else if objectsWritten == 0 { } else if objectsWritten == 0 {
log.Warn(logs.ObjectsNotFound, logs.TagField(logs.TagDatapath)) h.reqLogger(ctx).Warn(logs.ObjectsNotFound, logs.TagField(logs.TagDatapath))
} }
if err := zipWriter.Close(); err != nil { if err := zipWriter.Close(); err != nil {
log.Error(logs.CloseZipWriter, zap.Error(err), logs.TagField(logs.TagDatapath)) h.reqLogger(ctx).Error(logs.CloseZipWriter, zap.Error(err), logs.TagField(logs.TagDatapath))
} }
} }
} }
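getZipResponseWriter streams the archive straight into the HTTP response body, so nothing is buffered in memory. A minimal standard-library sketch of the same shape, independent of FrostFS (file names and contents are illustrative):

```go
package main

import (
	"archive/zip"
	"bufio"
	"io"
	"os"
	"strings"
)

// streamZip writes the given name->content pairs as a zip archive directly to w,
// entry by entry, the way the body stream writer above does for found objects.
func streamZip(w io.Writer, files map[string]string) error {
	bw := bufio.NewWriter(w)
	zw := zip.NewWriter(bw)
	for name, content := range files {
		fw, err := zw.Create(name)
		if err != nil {
			return err
		}
		if _, err := io.Copy(fw, strings.NewReader(content)); err != nil {
			return err
		}
	}
	if err := zw.Close(); err != nil {
		return err
	}
	return bw.Flush()
}

func main() {
	_ = streamZip(os.Stdout, map[string]string{"a.txt": "first", "b.txt": "second"})
}
```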
@ -164,31 +160,33 @@ func (h *Handler) createZipFile(zw *zip.Writer, obj *object.Object) (io.Writer,
} }
// DownloadTar forms tar.gz from objects by prefix. // DownloadTar forms tar.gz from objects by prefix.
func (h *Handler) DownloadTar(c *fasthttp.RequestCtx) { func (h *Handler) DownloadTar(req *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.DownloadTar") ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.DownloadTar")
defer span.End() defer span.End()
utils.SetContextToRequest(ctx, c)
scid, _ := c.UserValue("cid").(string) scid, _ := req.UserValue("cid").(string)
prefix, _ := req.UserValue("prefix").(string)
log := utils.GetReqLogOrDefault(ctx, h.log) ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(zap.String("cid", scid), zap.String("prefix", prefix)))
bktInfo, err := h.getBucketInfo(ctx, scid, log)
bktInfo, err := h.getBucketInfo(ctx, scid)
if err != nil { if err != nil {
logAndSendBucketError(c, log, err) h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err)
return return
} }
resSearch, err := h.searchObjectsByPrefix(c, log, bktInfo.CID)
resSearch, err := h.searchObjectsByPrefix(ctx, bktInfo.CID, prefix)
if err != nil { if err != nil {
return return
} }
c.Response.Header.Set(fasthttp.HeaderContentType, "application/gzip") req.Response.Header.Set(fasthttp.HeaderContentType, "application/gzip")
c.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.tar.gz\"") req.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.tar.gz\"")
c.SetBodyStreamWriter(h.getTarResponseWriter(ctx, log, resSearch, bktInfo)) req.SetBodyStreamWriter(h.getTarResponseWriter(ctx, resSearch, bktInfo))
} }
func (h *Handler) getTarResponseWriter(ctx context.Context, log *zap.Logger, resSearch ResObjectSearch, bktInfo *data.BucketInfo) func(w *bufio.Writer) { func (h *Handler) getTarResponseWriter(ctx context.Context, resSearch ResObjectSearch, bktInfo *data.BucketInfo) func(w *bufio.Writer) {
return func(w *bufio.Writer) { return func(w *bufio.Writer) {
defer resSearch.Close() defer resSearch.Close()
@ -203,26 +201,26 @@ func (h *Handler) getTarResponseWriter(ctx context.Context, log *zap.Logger, res
defer func() { defer func() {
if err := tarWriter.Close(); err != nil { if err := tarWriter.Close(); err != nil {
log.Error(logs.CloseTarWriter, zap.Error(err), logs.TagField(logs.TagDatapath)) h.reqLogger(ctx).Error(logs.CloseTarWriter, zap.Error(err), logs.TagField(logs.TagDatapath))
} }
if err := gzipWriter.Close(); err != nil { if err := gzipWriter.Close(); err != nil {
log.Error(logs.CloseGzipWriter, zap.Error(err), logs.TagField(logs.TagDatapath)) h.reqLogger(ctx).Error(logs.CloseGzipWriter, zap.Error(err), logs.TagField(logs.TagDatapath))
} }
}() }()
var objectsWritten int var objectsWritten int
buf := make([]byte, 3<<20) // the same as for upload buf := make([]byte, 3<<20) // the same as for upload
errIter := resSearch.Iterate(h.putObjectToArchive(ctx, log, bktInfo.CID, buf, errIter := resSearch.Iterate(h.putObjectToArchive(ctx, bktInfo.CID, buf,
func(obj *object.Object) (io.Writer, error) { func(obj *object.Object) (io.Writer, error) {
objectsWritten++ objectsWritten++
return h.createTarFile(tarWriter, obj) return h.createTarFile(tarWriter, obj)
}), }),
) )
if errIter != nil { if errIter != nil {
log.Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter), logs.TagField(logs.TagDatapath)) h.reqLogger(ctx).Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter), logs.TagField(logs.TagDatapath))
} else if objectsWritten == 0 { } else if objectsWritten == 0 {
log.Warn(logs.ObjectsNotFound, logs.TagField(logs.TagDatapath)) h.reqLogger(ctx).Warn(logs.ObjectsNotFound, logs.TagField(logs.TagDatapath))
} }
} }
} }
@ -240,9 +238,9 @@ func (h *Handler) createTarFile(tw *tar.Writer, obj *object.Object) (io.Writer,
}) })
} }
func (h *Handler) putObjectToArchive(ctx context.Context, log *zap.Logger, cnrID cid.ID, buf []byte, createArchiveHeader func(obj *object.Object) (io.Writer, error)) func(id oid.ID) bool { func (h *Handler) putObjectToArchive(ctx context.Context, cnrID cid.ID, buf []byte, createArchiveHeader func(obj *object.Object) (io.Writer, error)) func(id oid.ID) bool {
return func(id oid.ID) bool { return func(id oid.ID) bool {
log = log.With(zap.String("oid", id.EncodeToString())) logger := h.reqLogger(ctx).With(zap.String("oid", id.EncodeToString()))
prm := PrmObjectGet{ prm := PrmObjectGet{
PrmAuth: PrmAuth{ PrmAuth: PrmAuth{
@ -253,18 +251,18 @@ func (h *Handler) putObjectToArchive(ctx context.Context, log *zap.Logger, cnrID
resGet, err := h.frostfs.GetObject(ctx, prm) resGet, err := h.frostfs.GetObject(ctx, prm)
if err != nil { if err != nil {
log.Error(logs.FailedToGetObject, zap.Error(err), logs.TagField(logs.TagExternalStorage)) logger.Error(logs.FailedToGetObject, zap.Error(err), logs.TagField(logs.TagExternalStorage))
return false return false
} }
fileWriter, err := createArchiveHeader(&resGet.Header) fileWriter, err := createArchiveHeader(&resGet.Header)
if err != nil { if err != nil {
log.Error(logs.FailedToAddObjectToArchive, zap.Error(err), logs.TagField(logs.TagDatapath)) logger.Error(logs.FailedToAddObjectToArchive, zap.Error(err), logs.TagField(logs.TagDatapath))
return false return false
} }
if err = writeToArchive(resGet, fileWriter, buf); err != nil { if err = writeToArchive(resGet, fileWriter, buf); err != nil {
log.Error(logs.FailedToAddObjectToArchive, zap.Error(err), logs.TagField(logs.TagDatapath)) logger.Error(logs.FailedToAddObjectToArchive, zap.Error(err), logs.TagField(logs.TagDatapath))
return false return false
} }
@ -272,28 +270,17 @@ func (h *Handler) putObjectToArchive(ctx context.Context, log *zap.Logger, cnrID
} }
} }
func (h *Handler) searchObjectsByPrefix(c *fasthttp.RequestCtx, log *zap.Logger, cnrID cid.ID) (ResObjectSearch, error) { func (h *Handler) searchObjectsByPrefix(ctx context.Context, cnrID cid.ID, prefix string) (ResObjectSearch, error) {
scid, _ := c.UserValue("cid").(string)
prefix, _ := c.UserValue("prefix").(string)
ctx := utils.GetContextFromRequest(c)
prefix, err := url.QueryUnescape(prefix) prefix, err := url.QueryUnescape(prefix)
if err != nil { if err != nil {
log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("prefix", prefix), return nil, fmt.Errorf("unescape prefix: %w", err)
zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, "could not unescape prefix: "+err.Error(), fasthttp.StatusBadRequest)
return nil, err
} }
log = log.With(zap.String("cid", scid), zap.String("prefix", prefix))
resSearch, err := h.search(ctx, cnrID, object.AttributeFilePath, prefix, object.MatchCommonPrefix) resSearch, err := h.search(ctx, cnrID, object.AttributeFilePath, prefix, object.MatchCommonPrefix)
if err != nil { if err != nil {
log.Error(logs.CouldNotSearchForObjects, zap.Error(err), logs.TagField(logs.TagExternalStorage)) return nil, fmt.Errorf("search objects by prefix: %w", err)
ResponseError(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
return nil, err
} }
return resSearch, nil return resSearch, nil
} }
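After this change searchObjectsByPrefix no longer writes the HTTP response itself: it unescapes the prefix, wraps any error, and leaves status selection to the caller. A tiny self-contained sketch of that first step (the function name is illustrative):

```go
package main

import (
	"fmt"
	"net/url"
)

// normalizePrefix mirrors the unescape-and-wrap step above: the prefix arrives
// URL-escaped in the request path; errors are returned, not written to the response.
func normalizePrefix(raw string) (string, error) {
	prefix, err := url.QueryUnescape(raw)
	if err != nil {
		return "", fmt.Errorf("unescape prefix: %w", err)
	}
	return prefix, nil
}

func main() {
	p, err := normalizePrefix("docs%2F2024%2F")
	fmt.Println(p, err) // docs/2024/ <nil>
}
```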

View file

@ -52,6 +52,10 @@ func (t *TestFrostFS) SetContainer(cnrID cid.ID, cnr *container.Container) {
t.containers[cnrID.EncodeToString()] = cnr t.containers[cnrID.EncodeToString()] = cnr
} }
func (t *TestFrostFS) SetObject(addr oid.Address, obj *object.Object) {
t.objects[addr.EncodeToString()] = obj
}
// AllowUserOperation grants access to object operations. // AllowUserOperation grants access to object operations.
// Empty userID and objID means any user and object respectively. // Empty userID and objID means any user and object respectively.
func (t *TestFrostFS) AllowUserOperation(cnrID cid.ID, userID user.ID, op acl.Op, objID oid.ID) { func (t *TestFrostFS) AllowUserOperation(cnrID cid.ID, userID user.ID, op acl.Op, objID oid.ID) {

View file

@ -16,7 +16,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@ -36,6 +35,8 @@ type Config interface {
BufferMaxSizeForPut() uint64 BufferMaxSizeForPut() uint64
NamespaceHeader() string NamespaceHeader() string
EnableFilepathFallback() bool EnableFilepathFallback() bool
FormContainerZone(string) string
CORS() *data.CORSRule
} }
// PrmContainer groups parameters of FrostFS.Container operation. // PrmContainer groups parameters of FrostFS.Container operation.
@ -142,6 +143,10 @@ var (
ErrGatewayTimeout = errors.New("gateway timeout") ErrGatewayTimeout = errors.New("gateway timeout")
// ErrQuotaLimitReached is returned from FrostFS in case of quota exceeded. // ErrQuotaLimitReached is returned from FrostFS in case of quota exceeded.
ErrQuotaLimitReached = errors.New("quota limit reached") ErrQuotaLimitReached = errors.New("quota limit reached")
// ErrContainerNotFound is returned from FrostFS in case of container was not found.
ErrContainerNotFound = errors.New("container not found")
// ErrObjectNotFound is returned from FrostFS in case of object was not found.
ErrObjectNotFound = errors.New("object not found")
) )
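The new sentinel errors let callers branch with errors.Is instead of matching SDK status types. A hedged sketch of how such sentinels could be mapped to response codes; the concrete mapping below is an assumption for illustration, not the gateway's actual table:

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

var (
	errContainerNotFound = errors.New("container not found")
	errObjectNotFound    = errors.New("object not found")
)

// statusFromError inspects the whole error chain, so wrapped errors such as
// fmt.Errorf("resolve container: %w", errContainerNotFound) still match.
func statusFromError(err error) int {
	if errors.Is(err, errContainerNotFound) || errors.Is(err, errObjectNotFound) {
		return http.StatusNotFound
	}
	return http.StatusBadRequest
}

func main() {
	err := fmt.Errorf("resolve container: %w", errContainerNotFound)
	fmt.Println(statusFromError(err)) // 404
}
```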
// FrostFS represents virtual connection to FrostFS network. // FrostFS represents virtual connection to FrostFS network.
@ -158,7 +163,7 @@ type FrostFS interface {
} }
type ContainerResolver interface { type ContainerResolver interface {
Resolve(ctx context.Context, name string) (*cid.ID, error) Resolve(ctx context.Context, zone, name string) (*cid.ID, error)
} }
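Resolve is now scoped by zone as well as name. A minimal stub that satisfies the updated interface, in the spirit of the test resolver further down in this diff; the "<name>.<zone>" key format is only an illustrative convention:

```go
package example

import (
	"context"
	"fmt"

	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)

// mapResolver resolves container names from a static map keyed by "<name>.<zone>".
type mapResolver struct {
	ids map[string]cid.ID
}

func (r *mapResolver) Resolve(_ context.Context, zone, name string) (*cid.ID, error) {
	id, ok := r.ids[name+"."+zone]
	if !ok {
		return nil, fmt.Errorf("container %q not found in zone %q", name, zone)
	}
	return &id, nil
}
```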
type Handler struct { type Handler struct {
@ -170,6 +175,8 @@ type Handler struct {
tree layer.TreeService tree layer.TreeService
cache *cache.BucketCache cache *cache.BucketCache
workerPool *ants.Pool workerPool *ants.Pool
corsCnrID cid.ID
corsCache *cache.CORSCache
} }
type AppParams struct { type AppParams struct {
@ -178,6 +185,8 @@ type AppParams struct {
Owner *user.ID Owner *user.ID
Resolver ContainerResolver Resolver ContainerResolver
Cache *cache.BucketCache Cache *cache.BucketCache
CORSCnrID cid.ID
CORSCache *cache.CORSCache
} }
func New(params *AppParams, config Config, tree layer.TreeService, workerPool *ants.Pool) *Handler { func New(params *AppParams, config Config, tree layer.TreeService, workerPool *ants.Pool) *Handler {
@ -190,12 +199,14 @@ func New(params *AppParams, config Config, tree layer.TreeService, workerPool *a
tree: tree, tree: tree,
cache: params.Cache, cache: params.Cache,
workerPool: workerPool, workerPool: workerPool,
corsCnrID: params.CORSCnrID,
corsCache: params.CORSCache,
} }
} }
// byNativeAddress is a wrapper for function (e.g. request.headObject, request.receiveFile) that // byNativeAddress is a wrapper for function (e.g. request.headObject, request.receiveFile) that
// prepares request and object address to it. // prepares request and object address to it.
func (h *Handler) byNativeAddress(ctx context.Context, req request, cnrID cid.ID, objID oid.ID, handler func(context.Context, request, oid.Address)) { func (h *Handler) byNativeAddress(ctx context.Context, req *fasthttp.RequestCtx, cnrID cid.ID, objID oid.ID, handler func(context.Context, *fasthttp.RequestCtx, oid.Address)) {
ctx, span := tracing.StartSpanFromContext(ctx, "handler.byNativeAddress") ctx, span := tracing.StartSpanFromContext(ctx, "handler.byNativeAddress")
defer span.End() defer span.End()
@ -205,74 +216,59 @@ func (h *Handler) byNativeAddress(ctx context.Context, req request, cnrID cid.ID
// byS3Path is a wrapper for function (e.g. request.headObject, request.receiveFile) that // byS3Path is a wrapper for function (e.g. request.headObject, request.receiveFile) that
// resolves object address from S3-like path <bucket name>/<object key>. // resolves object address from S3-like path <bucket name>/<object key>.
func (h *Handler) byS3Path(ctx context.Context, req request, cnrID cid.ID, path string, handler func(context.Context, request, oid.Address)) { func (h *Handler) byS3Path(ctx context.Context, req *fasthttp.RequestCtx, cnrID cid.ID, path string, handler func(context.Context, *fasthttp.RequestCtx, oid.Address)) {
ctx, span := tracing.StartSpanFromContext(ctx, "handler.byS3Path") ctx, span := tracing.StartSpanFromContext(ctx, "handler.byS3Path")
defer span.End() defer span.End()
c, log := req.RequestCtx, req.log
foundOID, err := h.tree.GetLatestVersion(ctx, &cnrID, path) foundOID, err := h.tree.GetLatestVersion(ctx, &cnrID, path)
if err != nil { if err != nil {
log.Error(logs.FailedToGetLatestVersionOfObject, zap.Error(err), zap.String("cid", cnrID.String()), h.logAndSendError(ctx, req, logs.FailedToGetLatestVersionOfObject, err, zap.String("path", path))
zap.String("path", path), logs.TagField(logs.TagExternalStorageTree))
logAndSendBucketError(c, log, err)
return return
} }
if foundOID.IsDeleteMarker { if foundOID.IsDeleteMarker {
log.Error(logs.ObjectWasDeleted, logs.TagField(logs.TagExternalStorageTree)) h.logAndSendError(ctx, req, logs.ObjectWasDeleted, ErrObjectNotFound)
ResponseError(c, "object deleted", fasthttp.StatusNotFound)
return return
} }
addr := newAddress(cnrID, foundOID.OID) addr := newAddress(cnrID, foundOID.OID)
handler(ctx, newRequest(c, log), addr) handler(ctx, req, addr)
} }
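byNativeAddress and byS3Path share one callback shape after this refactoring: the wrapper resolves an object address, the callback (receiveFile or headObject) performs the I/O. An isolated sketch of that pattern; withResolvedAddress and its parameters are illustrative names, not gateway API:

```go
package example

import (
	"context"

	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"github.com/valyala/fasthttp"
)

// objectHandler is the callback shape shared by receiveFile and headObject.
type objectHandler func(ctx context.Context, req *fasthttp.RequestCtx, addr oid.Address)

// withResolvedAddress resolves an address first (tree lookup, direct decoding, ...)
// and only then delegates to the object handler.
func withResolvedAddress(
	ctx context.Context,
	req *fasthttp.RequestCtx,
	resolve func(context.Context) (oid.Address, error),
	next objectHandler,
	onErr func(error),
) {
	addr, err := resolve(ctx)
	if err != nil {
		onErr(err)
		return
	}
	next(ctx, req, addr)
}
```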
// byAttribute is a wrapper similar to byNativeAddress. // byAttribute is a wrapper similar to byNativeAddress.
func (h *Handler) byAttribute(c *fasthttp.RequestCtx, handler func(context.Context, request, oid.Address)) { func (h *Handler) byAttribute(ctx context.Context, req *fasthttp.RequestCtx, handler func(context.Context, *fasthttp.RequestCtx, oid.Address)) {
cidParam, _ := c.UserValue("cid").(string) cidParam, _ := req.UserValue("cid").(string)
key, _ := c.UserValue("attr_key").(string) key, _ := req.UserValue("attr_key").(string)
val, _ := c.UserValue("attr_val").(string) val, _ := req.UserValue("attr_val").(string)
ctx := utils.GetContextFromRequest(c)
log := utils.GetReqLogOrDefault(ctx, h.log)
key, err := url.QueryUnescape(key) key, err := url.QueryUnescape(key)
if err != nil { if err != nil {
log.Error(logs.FailedToUnescapeQuery, zap.String("cid", cidParam), zap.String("attr_key", key), h.logAndSendError(ctx, req, logs.FailedToUnescapeQuery, err, zap.String("cid", cidParam), zap.String("attr_key", key))
zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, "could not unescape attr_key: "+err.Error(), fasthttp.StatusBadRequest)
return return
} }
val, err = url.QueryUnescape(val) val, err = url.QueryUnescape(val)
if err != nil { if err != nil {
log.Error(logs.FailedToUnescapeQuery, zap.String("cid", cidParam), zap.String("attr_val", val), h.logAndSendError(ctx, req, logs.FailedToUnescapeQuery, err, zap.String("cid", cidParam), zap.String("attr_val", key))
zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, "could not unescape attr_val: "+err.Error(), fasthttp.StatusBadRequest)
return return
} }
if key == attrFileName { val = prepareAtribute(key, val)
val = prepareFileName(val)
}
log = log.With(zap.String("cid", cidParam), zap.String("attr_key", key), zap.String("attr_val", val)) ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(zap.String("cid", cidParam),
zap.String("attr_key", key), zap.String("attr_val", val)))
bktInfo, err := h.getBucketInfo(ctx, cidParam, log) bktInfo, err := h.getBucketInfo(ctx, cidParam)
if err != nil { if err != nil {
logAndSendBucketError(c, log, err) h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err)
return return
} }
objID, err := h.findObjectByAttribute(ctx, log, bktInfo.CID, key, val) objID, err := h.findObjectByAttribute(ctx, bktInfo.CID, key, val)
if err != nil { if err != nil {
if errors.Is(err, io.EOF) { if errors.Is(err, io.EOF) {
ResponseError(c, err.Error(), fasthttp.StatusNotFound) err = fmt.Errorf("%w: %s", ErrObjectNotFound, err.Error())
return
} }
h.logAndSendError(ctx, req, logs.FailedToFindObjectByAttribute, err)
ResponseError(c, err.Error(), fasthttp.StatusBadRequest)
return return
} }
@ -280,14 +276,13 @@ func (h *Handler) byAttribute(c *fasthttp.RequestCtx, handler func(context.Conte
addr.SetContainer(bktInfo.CID) addr.SetContainer(bktInfo.CID)
addr.SetObject(objID) addr.SetObject(objID)
handler(ctx, newRequest(c, log), addr) handler(ctx, req, addr)
} }
func (h *Handler) findObjectByAttribute(ctx context.Context, log *zap.Logger, cnrID cid.ID, attrKey, attrVal string) (oid.ID, error) { func (h *Handler) findObjectByAttribute(ctx context.Context, cnrID cid.ID, attrKey, attrVal string) (oid.ID, error) {
res, err := h.search(ctx, cnrID, attrKey, attrVal, object.MatchStringEqual) res, err := h.search(ctx, cnrID, attrKey, attrVal, object.MatchStringEqual)
if err != nil { if err != nil {
log.Error(logs.CouldNotSearchForObjects, zap.Error(err), logs.TagField(logs.TagExternalStorage)) return oid.ID{}, fmt.Errorf("search objects: %w", err)
return oid.ID{}, fmt.Errorf("could not search for objects: %w", err)
} }
defer res.Close() defer res.Close()
@ -297,13 +292,14 @@ func (h *Handler) findObjectByAttribute(ctx context.Context, log *zap.Logger, cn
if n == 0 { if n == 0 {
switch { switch {
case errors.Is(err, io.EOF) && h.needSearchByFileName(attrKey, attrVal): case errors.Is(err, io.EOF) && h.needSearchByFileName(attrKey, attrVal):
log.Debug(logs.ObjectNotFoundByFilePathTrySearchByFileName, logs.TagField(logs.TagExternalStorage)) h.reqLogger(ctx).Debug(logs.ObjectNotFoundByFilePathTrySearchByFileName, logs.TagField(logs.TagExternalStorage))
return h.findObjectByAttribute(ctx, log, cnrID, attrFileName, prepareFileName(attrVal)) attrVal = prepareAtribute(attrFileName, attrVal)
return h.findObjectByAttribute(ctx, cnrID, attrFileName, attrVal)
case errors.Is(err, io.EOF): case errors.Is(err, io.EOF):
log.Error(logs.ObjectNotFound, zap.Error(err), logs.TagField(logs.TagExternalStorage)) h.reqLogger(ctx).Error(logs.ObjectNotFound, zap.Error(err), logs.TagField(logs.TagExternalStorage))
return oid.ID{}, fmt.Errorf("object not found: %w", err) return oid.ID{}, fmt.Errorf("object not found: %w", err)
default: default:
log.Error(logs.ReadObjectListFailed, zap.Error(err), logs.TagField(logs.TagExternalStorage)) h.reqLogger(ctx).Error(logs.ReadObjectListFailed, zap.Error(err), logs.TagField(logs.TagExternalStorage))
return oid.ID{}, fmt.Errorf("read object list failed: %w", err) return oid.ID{}, fmt.Errorf("read object list failed: %w", err)
} }
} }
@ -319,6 +315,18 @@ func (h *Handler) needSearchByFileName(key, val string) bool {
return strings.HasPrefix(val, "/") && strings.Count(val, "/") == 1 || !strings.Contains(val, "/") return strings.HasPrefix(val, "/") && strings.Count(val, "/") == 1 || !strings.Contains(val, "/")
} }
func prepareAtribute(attrKey, attrVal string) string {
if attrKey == attrFileName {
return prepareFileName(attrVal)
}
if attrKey == attrFilePath {
return prepareFilePath(attrVal)
}
return attrVal
}
func prepareFileName(fileName string) string { func prepareFileName(fileName string) string {
if strings.HasPrefix(fileName, "/") { if strings.HasPrefix(fileName, "/") {
return fileName[1:] return fileName[1:]
@ -327,21 +335,36 @@ func prepareFileName(fileName string) string {
return fileName return fileName
} }
func prepareFilePath(filePath string) string {
if !strings.HasPrefix(filePath, "/") {
return "/" + filePath
}
return filePath
}
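The two normalizers are symmetric with respect to the leading slash: FileName values are matched without it, FilePath values with it. A self-contained restatement with example values (behaviour is equivalent to the functions above):

```go
package main

import (
	"fmt"
	"strings"
)

func prepareFileName(v string) string { return strings.TrimPrefix(v, "/") }

func prepareFilePath(v string) string {
	if !strings.HasPrefix(v, "/") {
		return "/" + v
	}
	return v
}

func main() {
	fmt.Println(prepareFileName("/cat.jpg")) // cat.jpg
	fmt.Println(prepareFilePath("cat.jpg"))  // /cat.jpg
}
```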
// resolveContainer decode container id, if it's not a valid container id // resolveContainer decode container id, if it's not a valid container id
// then trey to resolve name using provided resolver. // then trey to resolve name using provided resolver.
func (h *Handler) resolveContainer(ctx context.Context, containerID string) (*cid.ID, error) { func (h *Handler) resolveContainer(ctx context.Context, containerID string) (*cid.ID, error) {
cnrID := new(cid.ID) cnrID := new(cid.ID)
err := cnrID.DecodeString(containerID) err := cnrID.DecodeString(containerID)
if err != nil { if err != nil {
cnrID, err = h.containerResolver.Resolve(ctx, containerID) var namespace string
namespace, err = middleware.GetNamespace(ctx)
if err != nil {
return nil, err
}
zone := h.config.FormContainerZone(namespace)
cnrID, err = h.containerResolver.Resolve(ctx, zone, containerID)
if err != nil && strings.Contains(err.Error(), "not found") { if err != nil && strings.Contains(err.Error(), "not found") {
err = fmt.Errorf("%w: %s", new(apistatus.ContainerNotFound), err.Error()) err = fmt.Errorf("%w: %s", ErrContainerNotFound, err.Error())
} }
} }
return cnrID, err return cnrID, err
} }
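resolveContainer now derives the zone from the request namespace via the FormContainerZone hook before asking the resolver. One plausible policy is sketched below; the exact rule is configuration-dependent, so treat this mapping as an assumption rather than the gateway's fixed behaviour:

```go
package example

// formContainerZone derives an NNS zone from a namespace: the root/empty
// namespace falls back to the default "container" zone, everything else
// becomes "<namespace>.ns". Illustrative only.
func formContainerZone(namespace string) string {
	if namespace == "" || namespace == "root" {
		return "container"
	}
	return namespace + ".ns"
}
```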
func (h *Handler) getBucketInfo(ctx context.Context, containerName string, log *zap.Logger) (*data.BucketInfo, error) { func (h *Handler) getBucketInfo(ctx context.Context, containerName string) (*data.BucketInfo, error) {
ns, err := middleware.GetNamespace(ctx) ns, err := middleware.GetNamespace(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
@ -353,21 +376,16 @@ func (h *Handler) getBucketInfo(ctx context.Context, containerName string, log *
cnrID, err := h.resolveContainer(ctx, containerName) cnrID, err := h.resolveContainer(ctx, containerName)
if err != nil { if err != nil {
log.Error(logs.CouldNotResolveContainerID, zap.Error(err), zap.String("cnrName", containerName), return nil, fmt.Errorf("resolve container: %w", err)
logs.TagField(logs.TagDatapath))
return nil, err
} }
bktInfo, err := h.readContainer(ctx, *cnrID) bktInfo, err := h.readContainer(ctx, *cnrID)
if err != nil { if err != nil {
log.Error(logs.CouldNotGetContainerInfo, zap.Error(err), zap.String("cnrName", containerName), return nil, fmt.Errorf("read container: %w", err)
zap.String("cnrName", cnrID.String()),
logs.TagField(logs.TagExternalStorage))
return nil, err
} }
if err = h.cache.Put(bktInfo); err != nil { if err = h.cache.Put(bktInfo); err != nil {
log.Warn(logs.CouldntPutBucketIntoCache, h.reqLogger(ctx).Warn(logs.CouldntPutBucketIntoCache,
zap.String("bucket name", bktInfo.Name), zap.String("bucket name", bktInfo.Name),
zap.Stringer("bucket cid", bktInfo.CID), zap.Stringer("bucket cid", bktInfo.CID),
zap.Error(err), zap.Error(err),
@ -400,31 +418,24 @@ func (h *Handler) readContainer(ctx context.Context, cnrID cid.ID) (*data.Bucket
return bktInfo, err return bktInfo, err
} }
func (h *Handler) browseIndex(c *fasthttp.RequestCtx, isNativeList bool) { func (h *Handler) browseIndex(ctx context.Context, req *fasthttp.RequestCtx, cidParam, oidParam string, isNativeList bool) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.browseIndex") ctx, span := tracing.StartSpanFromContext(ctx, "handler.browseIndex")
defer span.End() defer span.End()
utils.SetContextToRequest(ctx, c)
if !h.config.IndexPageEnabled() { if !h.config.IndexPageEnabled() {
c.SetStatusCode(fasthttp.StatusNotFound) req.SetStatusCode(fasthttp.StatusNotFound)
return return
} }
cidURLParam := c.UserValue("cid").(string) unescapedKey, err := url.QueryUnescape(oidParam)
oidURLParam := c.UserValue("oid").(string)
reqLog := utils.GetReqLogOrDefault(ctx, h.log)
log := reqLog.With(zap.String("cid", cidURLParam), zap.String("oid", oidURLParam))
unescapedKey, err := url.QueryUnescape(oidURLParam)
if err != nil { if err != nil {
logAndSendBucketError(c, log, err) h.logAndSendError(ctx, req, logs.FailedToUnescapeOIDParam, err)
return return
} }
bktInfo, err := h.getBucketInfo(ctx, cidURLParam, log) bktInfo, err := h.getBucketInfo(ctx, cidParam)
if err != nil { if err != nil {
logAndSendBucketError(c, log, err) h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err)
return return
} }
@ -434,7 +445,7 @@ func (h *Handler) browseIndex(c *fasthttp.RequestCtx, isNativeList bool) {
listFunc = h.getDirObjectsNative listFunc = h.getDirObjectsNative
} }
h.browseObjects(c, browseParams{ h.browseObjects(ctx, req, browseParams{
bucketInfo: bktInfo, bucketInfo: bktInfo,
prefix: unescapedKey, prefix: unescapedKey,
listObjects: listFunc, listObjects: listFunc,

View file

@ -21,6 +21,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
go_fuzz_utils "github.com/trailofbits/go-fuzz-utils" go_fuzz_utils "github.com/trailofbits/go-fuzz-utils"
"github.com/valyala/fasthttp" "github.com/valyala/fasthttp"
"go.uber.org/zap"
) )
const ( const (
@ -125,7 +126,7 @@ func maybeFillRandom(tp *go_fuzz_utils.TypeProvider, initValue string) (string,
} }
func upload(tp *go_fuzz_utils.TypeProvider) (context.Context, *handlerContext, cid.ID, *fasthttp.RequestCtx, string, string, string, error) { func upload(tp *go_fuzz_utils.TypeProvider) (context.Context, *handlerContext, cid.ID, *fasthttp.RequestCtx, string, string, string, error) {
hc, err := prepareHandlerContext() hc, err := prepareHandlerContextBase(zap.NewExample())
if err != nil { if err != nil {
return nil, nil, cid.ID{}, nil, "", "", "", err return nil, nil, cid.ID{}, nil, "", "", "", err
} }

View file

@ -16,7 +16,9 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
v2container "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@ -30,6 +32,7 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/valyala/fasthttp" "github.com/valyala/fasthttp"
"go.uber.org/zap" "go.uber.org/zap"
"go.uber.org/zap/zaptest"
) )
type treeServiceMock struct { type treeServiceMock struct {
@ -60,6 +63,7 @@ func (t *treeServiceMock) GetLatestVersion(context.Context, *cid.ID, string) (*d
type configMock struct { type configMock struct {
additionalSearch bool additionalSearch bool
cors *data.CORSRule
} }
func (c *configMock) DefaultTimestamp() bool { func (c *configMock) DefaultTimestamp() bool {
@ -98,9 +102,18 @@ func (c *configMock) EnableFilepathFallback() bool {
return c.additionalSearch return c.additionalSearch
} }
func (c *configMock) FormContainerZone(string) string {
return v2container.SysAttributeZoneDefault
}
func (c *configMock) CORS() *data.CORSRule {
return c.cors
}
type handlerContext struct { type handlerContext struct {
key *keys.PrivateKey key *keys.PrivateKey
owner user.ID owner user.ID
corsCnr cid.ID
h *Handler h *Handler
frostfs *TestFrostFS frostfs *TestFrostFS
@ -112,12 +125,13 @@ func (hc *handlerContext) Handler() *Handler {
return hc.h return hc.h
} }
func prepareHandlerContext() (*handlerContext, error) { func prepareHandlerContext(t *testing.T) *handlerContext {
logger, err := zap.NewDevelopment() hc, err := prepareHandlerContextBase(zaptest.NewLogger(t))
if err != nil { require.NoError(t, err)
return nil, err return hc
} }
func prepareHandlerContextBase(logger *zap.Logger) (*handlerContext, error) {
key, err := keys.NewPrivateKey() key, err := keys.NewPrivateKey()
if err != nil { if err != nil {
return nil, err return nil, err
@ -129,10 +143,12 @@ func prepareHandlerContext() (*handlerContext, error) {
testFrostFS := NewTestFrostFS(key) testFrostFS := NewTestFrostFS(key)
testResolver := &resolver.Resolver{Name: "test_resolver"} testResolver := &resolver.Resolver{Name: "test_resolver"}
testResolver.SetResolveFunc(func(_ context.Context, name string) (*cid.ID, error) { testResolver.SetResolveFunc(func(_ context.Context, _, name string) (*cid.ID, error) {
return testFrostFS.ContainerID(name) return testFrostFS.ContainerID(name)
}) })
cnrID := createCORSContainer(owner, testFrostFS)
params := &AppParams{ params := &AppParams{
Logger: logger, Logger: logger,
FrostFS: testFrostFS, FrostFS: testFrostFS,
@ -143,6 +159,12 @@ func prepareHandlerContext() (*handlerContext, error) {
Lifetime: 1, Lifetime: 1,
Logger: logger, Logger: logger,
}, false), }, false),
CORSCnrID: cnrID,
CORSCache: cache.NewCORSCache(&cache.Config{
Size: 1,
Lifetime: 1,
Logger: logger,
}),
} }
treeMock := newTreeService() treeMock := newTreeService()
@ -157,6 +179,7 @@ func prepareHandlerContext() (*handlerContext, error) {
return &handlerContext{ return &handlerContext{
key: key, key: key,
owner: owner, owner: owner,
corsCnr: cnrID,
h: handler, h: handler,
frostfs: testFrostFS, frostfs: testFrostFS,
tree: treeMock, tree: treeMock,
@ -164,6 +187,20 @@ func prepareHandlerContext() (*handlerContext, error) {
}, nil }, nil
} }
func createCORSContainer(owner user.ID, frostfs *TestFrostFS) cid.ID {
var cnr container.Container
cnr.Init()
cnr.SetOwner(owner)
cnrID := cidtest.ID()
frostfs.SetContainer(cnrID, &cnr)
frostfs.AllowUserOperation(cnrID, owner, acl.OpObjectSearch, oid.ID{})
frostfs.AllowUserOperation(cnrID, owner, acl.OpObjectHead, oid.ID{})
frostfs.AllowUserOperation(cnrID, owner, acl.OpObjectGet, oid.ID{})
return cnrID
}
func (hc *handlerContext) prepareContainer(name string, basicACL acl.Basic) (cid.ID, *container.Container, error) { func (hc *handlerContext) prepareContainer(name string, basicACL acl.Basic) (cid.ID, *container.Container, error) {
var pp netmap.PlacementPolicy var pp netmap.PlacementPolicy
err := pp.DecodeString("REP 1") err := pp.DecodeString("REP 1")
@ -196,8 +233,7 @@ func (hc *handlerContext) prepareContainer(name string, basicACL acl.Basic) (cid
} }
func TestBasic(t *testing.T) { func TestBasic(t *testing.T) {
hc, err := prepareHandlerContext() hc := prepareHandlerContext(t)
require.NoError(t, err)
bktName := "bucket" bktName := "bucket"
cnrID, cnr, err := hc.prepareContainer(bktName, acl.PublicRWExtended) cnrID, cnr, err := hc.prepareContainer(bktName, acl.PublicRWExtended)
@ -219,8 +255,10 @@ func TestBasic(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
obj := hc.frostfs.objects[putRes.ContainerID+"/"+putRes.ObjectID] obj := hc.frostfs.objects[putRes.ContainerID+"/"+putRes.ObjectID]
attr := prepareObjectAttributes(object.AttributeFilePath, objFileName) fileName := prepareObjectAttributes(object.AttributeFileName, objFileName)
obj.SetAttributes(append(obj.Attributes(), attr)...) filePath := prepareObjectAttributes(object.AttributeFilePath, objFilePath)
obj.SetAttributes(append(obj.Attributes(), fileName)...)
obj.SetAttributes(append(obj.Attributes(), filePath)...)
t.Run("get", func(t *testing.T) { t.Run("get", func(t *testing.T) {
r = prepareGetRequest(ctx, cnrID.EncodeToString(), putRes.ObjectID) r = prepareGetRequest(ctx, cnrID.EncodeToString(), putRes.ObjectID)
@ -240,7 +278,11 @@ func TestBasic(t *testing.T) {
hc.Handler().DownloadByAttribute(r) hc.Handler().DownloadByAttribute(r)
require.Equal(t, content, string(r.Response.Body())) require.Equal(t, content, string(r.Response.Body()))
r = prepareGetByAttributeRequest(ctx, bktName, attrFileName, "/"+objFileName) r = prepareGetByAttributeRequest(ctx, bktName, attrFileName, objFilePath)
hc.Handler().DownloadByAttribute(r)
require.Equal(t, content, string(r.Response.Body()))
r = prepareGetByAttributeRequest(ctx, bktName, attrFilePath, objFileName)
hc.Handler().DownloadByAttribute(r) hc.Handler().DownloadByAttribute(r)
require.Equal(t, content, string(r.Response.Body())) require.Equal(t, content, string(r.Response.Body()))
}) })
@ -251,7 +293,12 @@ func TestBasic(t *testing.T) {
require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID))) require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID))) require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
r = prepareGetByAttributeRequest(ctx, bktName, attrFileName, "/"+objFileName) r = prepareGetByAttributeRequest(ctx, bktName, attrFileName, objFilePath)
hc.Handler().HeadByAttribute(r)
require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
r = prepareGetByAttributeRequest(ctx, bktName, attrFilePath, objFileName)
hc.Handler().HeadByAttribute(r) hc.Handler().HeadByAttribute(r)
require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID))) require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID))) require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
@ -265,7 +312,7 @@ func TestBasic(t *testing.T) {
zipReader, err := zip.NewReader(readerAt, int64(len(r.Response.Body()))) zipReader, err := zip.NewReader(readerAt, int64(len(r.Response.Body())))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, zipReader.File, 1) require.Len(t, zipReader.File, 1)
require.Equal(t, objFileName, zipReader.File[0].Name) require.Equal(t, objFilePath, zipReader.File[0].Name)
f, err := zipReader.File[0].Open() f, err := zipReader.File[0].Open()
require.NoError(t, err) require.NoError(t, err)
defer func() { defer func() {
@ -279,8 +326,7 @@ func TestBasic(t *testing.T) {
} }
func TestFindObjectByAttribute(t *testing.T) { func TestFindObjectByAttribute(t *testing.T) {
hc, err := prepareHandlerContext() hc := prepareHandlerContext(t)
require.NoError(t, err)
hc.cfg.additionalSearch = true hc.cfg.additionalSearch = true
bktName := "bucket" bktName := "bucket"
@ -363,7 +409,7 @@ func TestFindObjectByAttribute(t *testing.T) {
obj.SetAttributes(tc.firstAttr, tc.secondAttr) obj.SetAttributes(tc.firstAttr, tc.secondAttr)
hc.cfg.additionalSearch = tc.additionalSearch hc.cfg.additionalSearch = tc.additionalSearch
objID, err := hc.Handler().findObjectByAttribute(ctx, hc.Handler().log, cnrID, tc.reqAttrKey, tc.reqAttrValue) objID, err := hc.Handler().findObjectByAttribute(ctx, cnrID, tc.reqAttrKey, tc.reqAttrValue)
if tc.err != "" { if tc.err != "" {
require.Error(t, err) require.Error(t, err)
require.Contains(t, err.Error(), tc.err) require.Contains(t, err.Error(), tc.err)
@ -377,8 +423,7 @@ func TestFindObjectByAttribute(t *testing.T) {
} }
func TestNeedSearchByFileName(t *testing.T) { func TestNeedSearchByFileName(t *testing.T) {
hc, err := prepareHandlerContext() hc := prepareHandlerContext(t)
require.NoError(t, err)
for _, tc := range []struct { for _, tc := range []struct {
name string name string
@ -450,6 +495,17 @@ func TestPrepareFileName(t *testing.T) {
require.Equal(t, expected, actual) require.Equal(t, expected, actual)
} }
func TestPrepareFilePath(t *testing.T) {
filePath := "cat.jpg"
expected := "/cat.jpg"
actual := prepareFilePath(filePath)
require.Equal(t, expected, actual)
filePath = "/cat.jpg"
actual = prepareFilePath(filePath)
require.Equal(t, expected, actual)
}
func prepareUploadRequest(ctx context.Context, bucket, content string) (*fasthttp.RequestCtx, error) { func prepareUploadRequest(ctx context.Context, bucket, content string) (*fasthttp.RequestCtx, error) {
r := new(fasthttp.RequestCtx) r := new(fasthttp.RequestCtx)
utils.SetContextToRequest(ctx, r) utils.SetContextToRequest(ctx, r)
@ -465,6 +521,25 @@ func prepareGetRequest(ctx context.Context, bucket, objID string) *fasthttp.Requ
return r return r
} }
func prepareCORSRequest(t *testing.T, bucket string, headers map[string]string) *fasthttp.RequestCtx {
ctx := context.Background()
ctx = middleware.SetNamespace(ctx, "")
r := new(fasthttp.RequestCtx)
r.SetUserValue("cid", bucket)
for k, v := range headers {
r.Request.Header.Set(k, v)
}
ctx, err := tokens.StoreBearerTokenAppCtx(ctx, r)
require.NoError(t, err)
utils.SetContextToRequest(ctx, r)
return r
}
func prepareGetByAttributeRequest(ctx context.Context, bucket, attrKey, attrVal string) *fasthttp.RequestCtx { func prepareGetByAttributeRequest(ctx context.Context, bucket, attrKey, attrVal string) *fasthttp.RequestCtx {
r := new(fasthttp.RequestCtx) r := new(fasthttp.RequestCtx)
utils.SetContextToRequest(ctx, r) utils.SetContextToRequest(ctx, r)
@ -493,6 +568,7 @@ const (
keyAttr = "User-Attribute" keyAttr = "User-Attribute"
valAttr = "user value" valAttr = "user value"
objFileName = "newFile.txt" objFileName = "newFile.txt"
objFilePath = "/newFile.txt"
) )
func fillMultipartBody(r *fasthttp.RequestCtx, content string) error { func fillMultipartBody(r *fasthttp.RequestCtx, content string) error {

View file

@ -27,7 +27,7 @@ const (
hdrContainerID = "X-Container-Id" hdrContainerID = "X-Container-Id"
) )
func (h *Handler) headObject(ctx context.Context, req request, objectAddress oid.Address) { func (h *Handler) headObject(ctx context.Context, req *fasthttp.RequestCtx, objectAddress oid.Address) {
var start = time.Now() var start = time.Now()
btoken := bearerToken(ctx) btoken := bearerToken(ctx)
@ -41,7 +41,7 @@ func (h *Handler) headObject(ctx context.Context, req request, objectAddress oid
obj, err := h.frostfs.HeadObject(ctx, prm) obj, err := h.frostfs.HeadObject(ctx, prm)
if err != nil { if err != nil {
req.handleFrostFSErr(err, start) h.logAndSendError(ctx, req, logs.FailedToHeadObject, err, zap.Stringer("elapsed", time.Since(start)))
return return
} }
@ -65,7 +65,7 @@ func (h *Handler) headObject(ctx context.Context, req request, objectAddress oid
case object.AttributeTimestamp: case object.AttributeTimestamp:
value, err := strconv.ParseInt(val, 10, 64) value, err := strconv.ParseInt(val, 10, 64)
if err != nil { if err != nil {
req.log.Info(logs.CouldntParseCreationDate, h.reqLogger(ctx).Info(logs.CouldntParseCreationDate,
zap.String("key", key), zap.String("key", key),
zap.String("val", val), zap.String("val", val),
zap.Error(err), zap.Error(err),
@ -100,7 +100,7 @@ func (h *Handler) headObject(ctx context.Context, req request, objectAddress oid
return h.frostfs.RangeObject(ctx, prmRange) return h.frostfs.RangeObject(ctx, prmRange)
}, filename) }, filename)
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
req.handleFrostFSErr(err, start) h.logAndSendError(ctx, req, logs.FailedToDetectContentTypeFromPayload, err, zap.Stringer("elapsed", time.Since(start)))
return return
} }
} }
@ -116,48 +116,44 @@ func idsToResponse(resp *fasthttp.Response, obj *object.Object) {
} }
// HeadByAddressOrBucketName handles head requests using simple cid/oid or bucketname/key format. // HeadByAddressOrBucketName handles head requests using simple cid/oid or bucketname/key format.
func (h *Handler) HeadByAddressOrBucketName(c *fasthttp.RequestCtx) { func (h *Handler) HeadByAddressOrBucketName(req *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.HeadByAddressOrBucketName") ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.HeadByAddressOrBucketName")
defer span.End() defer span.End()
cidParam, _ := c.UserValue("cid").(string) cidParam, _ := req.UserValue("cid").(string)
oidParam, _ := c.UserValue("oid").(string) oidParam, _ := req.UserValue("oid").(string)
log := utils.GetReqLogOrDefault(ctx, h.log).With( ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(
zap.String("cid", cidParam), zap.String("cid", cidParam),
zap.String("oid", oidParam), zap.String("oid", oidParam),
) ))
bktInfo, err := h.getBucketInfo(ctx, cidParam, log) bktInfo, err := h.getBucketInfo(ctx, cidParam)
if err != nil { if err != nil {
logAndSendBucketError(c, log, err) h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err)
return return
} }
checkS3Err := h.tree.CheckSettingsNodeExists(ctx, bktInfo) checkS3Err := h.tree.CheckSettingsNodeExists(ctx, bktInfo)
if checkS3Err != nil && !errors.Is(checkS3Err, layer.ErrNodeNotFound) { if checkS3Err != nil && !errors.Is(checkS3Err, layer.ErrNodeNotFound) {
log.Error(logs.FailedToCheckIfSettingsNodeExist, zap.String("cid", bktInfo.CID.String()), h.logAndSendError(ctx, req, logs.FailedToCheckIfSettingsNodeExist, checkS3Err)
zap.Error(checkS3Err), logs.TagField(logs.TagExternalStorageTree))
logAndSendBucketError(c, log, checkS3Err)
return return
} }
req := newRequest(c, log)
var objID oid.ID var objID oid.ID
if checkS3Err == nil { if checkS3Err == nil {
h.byS3Path(ctx, req, bktInfo.CID, oidParam, h.headObject) h.byS3Path(ctx, req, bktInfo.CID, oidParam, h.headObject)
} else if err = objID.DecodeString(oidParam); err == nil { } else if err = objID.DecodeString(oidParam); err == nil {
h.byNativeAddress(ctx, req, bktInfo.CID, objID, h.headObject) h.byNativeAddress(ctx, req, bktInfo.CID, objID, h.headObject)
} else { } else {
logAndSendBucketError(c, log, checkS3Err) h.logAndSendError(ctx, req, logs.InvalidOIDParam, err)
} }
} }
// HeadByAttribute handles attribute-based head requests. // HeadByAttribute handles attribute-based head requests.
func (h *Handler) HeadByAttribute(c *fasthttp.RequestCtx) { func (h *Handler) HeadByAttribute(req *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.HeadByAttribute") ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.HeadByAttribute")
defer span.End() defer span.End()
utils.SetContextToRequest(ctx, c)
h.byAttribute(c, h.headObject) h.byAttribute(ctx, req, h.headObject)
} }

View file

@ -1,6 +1,7 @@
package handler package handler
import ( import (
"context"
"errors" "errors"
"io" "io"
"strconv" "strconv"
@ -53,7 +54,7 @@ func fetchMultipartFile(l *zap.Logger, r io.Reader, boundary string) (MultipartF
} }
// getPayload returns initial payload if object is not multipart else composes new reader with parts data. // getPayload returns initial payload if object is not multipart else composes new reader with parts data.
func (h *Handler) getPayload(p getMultiobjectBodyParams) (io.ReadCloser, uint64, error) { func (h *Handler) getPayload(ctx context.Context, p getMultiobjectBodyParams) (io.ReadCloser, uint64, error) {
cid, ok := p.obj.Header.ContainerID() cid, ok := p.obj.Header.ContainerID()
if !ok { if !ok {
return nil, 0, errors.New("no container id set") return nil, 0, errors.New("no container id set")
@ -66,7 +67,6 @@ func (h *Handler) getPayload(p getMultiobjectBodyParams) (io.ReadCloser, uint64,
if err != nil { if err != nil {
return nil, 0, err return nil, 0, err
} }
ctx := p.req.RequestCtx
params := PrmInitMultiObjectReader{ params := PrmInitMultiObjectReader{
Addr: newAddress(cid, oid), Addr: newAddress(cid, oid),
Bearer: bearerToken(ctx), Bearer: bearerToken(ctx),

View file

@ -60,12 +60,7 @@ func BenchmarkAll(b *testing.B) {
func defaultMultipart(filename string) error { func defaultMultipart(filename string) error {
r, bound := multipartFile(filename) r, bound := multipartFile(filename)
logger, err := zap.NewProduction() file, err := fetchMultipartFileDefault(zap.NewNop(), r, bound)
if err != nil {
return err
}
file, err := fetchMultipartFileDefault(logger, r, bound)
if err != nil { if err != nil {
return err return err
} }
@ -87,12 +82,7 @@ func TestName(t *testing.T) {
func customMultipart(filename string) error { func customMultipart(filename string) error {
r, bound := multipartFile(filename) r, bound := multipartFile(filename)
logger, err := zap.NewProduction() file, err := fetchMultipartFile(zap.NewNop(), r, bound)
if err != nil {
return err
}
file, err := fetchMultipartFile(logger, r, bound)
if err != nil { if err != nil {
return err return err
} }
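The multipart benchmarks above drop the production logger in favour of zap.NewNop(), while the handler tests elsewhere in this diff build their context with zaptest.NewLogger(t), whose output surfaces only for failed or verbose runs. A small helper showing both choices side by side (the helper itself is illustrative):

```go
package example

import (
	"testing"

	"go.uber.org/zap"
	"go.uber.org/zap/zaptest"
)

// testLogger returns a per-test logger when a *testing.T is available and a
// silent no-op logger otherwise (e.g. in benchmarks that only need a value).
func testLogger(t *testing.T) *zap.Logger {
	if t == nil {
		return zap.NewNop()
	}
	return zaptest.NewLogger(t)
}
```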

View file

@ -63,11 +63,10 @@ func readContentType(maxSize uint64, rInit func(uint64) (io.Reader, error), file
type getMultiobjectBodyParams struct { type getMultiobjectBodyParams struct {
obj *Object obj *Object
req request
strSize string strSize string
} }
func (h *Handler) receiveFile(ctx context.Context, req request, objAddress oid.Address) { func (h *Handler) receiveFile(ctx context.Context, req *fasthttp.RequestCtx, objAddress oid.Address) {
var ( var (
shouldDownload = req.QueryArgs().GetBool("download") shouldDownload = req.QueryArgs().GetBool("download")
start = time.Now() start = time.Now()
@ -85,12 +84,12 @@ func (h *Handler) receiveFile(ctx context.Context, req request, objAddress oid.A
rObj, err := h.frostfs.GetObject(ctx, prm) rObj, err := h.frostfs.GetObject(ctx, prm)
if err != nil { if err != nil {
req.handleFrostFSErr(err, start) h.logAndSendError(ctx, req, logs.FailedToGetObject, err, zap.Stringer("elapsed", time.Since(start)))
return return
} }
// we can't close reader in this function, so how to do it? // we can't close reader in this function, so how to do it?
req.setIDs(rObj.Header) setIDs(req, rObj.Header)
payload := rObj.Payload payload := rObj.Payload
payloadSize := rObj.Header.PayloadSize() payloadSize := rObj.Header.PayloadSize()
for _, attr := range rObj.Header.Attributes() { for _, attr := range rObj.Header.Attributes() {
@ -107,8 +106,8 @@ func (h *Handler) receiveFile(ctx context.Context, req request, objAddress oid.A
case object.AttributeFileName: case object.AttributeFileName:
filename = val filename = val
case object.AttributeTimestamp: case object.AttributeTimestamp:
if err = req.setTimestamp(val); err != nil { if err = setTimestamp(req, val); err != nil {
req.log.Error(logs.CouldntParseCreationDate, h.reqLogger(ctx).Error(logs.CouldntParseCreationDate,
zap.String("val", val), zap.String("val", val),
zap.Error(err), zap.Error(err),
logs.TagField(logs.TagDatapath)) logs.TagField(logs.TagDatapath))
@ -118,13 +117,12 @@ func (h *Handler) receiveFile(ctx context.Context, req request, objAddress oid.A
case object.AttributeFilePath: case object.AttributeFilePath:
filepath = val filepath = val
case attributeMultipartObjectSize: case attributeMultipartObjectSize:
payload, payloadSize, err = h.getPayload(getMultiobjectBodyParams{ payload, payloadSize, err = h.getPayload(ctx, getMultiobjectBodyParams{
obj: rObj, obj: rObj,
req: req,
strSize: val, strSize: val,
}) })
if err != nil { if err != nil {
req.handleFrostFSErr(err, start) h.logAndSendError(ctx, req, logs.FailedToGetObjectPayload, err, zap.Stringer("elapsed", time.Since(start)))
return return
} }
} }
@ -133,7 +131,7 @@ func (h *Handler) receiveFile(ctx context.Context, req request, objAddress oid.A
filename = filepath filename = filepath
} }
req.setDisposition(shouldDownload, filename) setDisposition(req, shouldDownload, filename)
req.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(payloadSize, 10)) req.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(payloadSize, 10))
@ -145,8 +143,7 @@ func (h *Handler) receiveFile(ctx context.Context, req request, objAddress oid.A
return payload, nil return payload, nil
}, filename) }, filename)
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
req.log.Error(logs.CouldNotDetectContentTypeFromPayload, zap.Error(err), logs.TagField(logs.TagDatapath)) h.logAndSendError(ctx, req, logs.FailedToDetectContentTypeFromPayload, err, zap.Stringer("elapsed", time.Since(start)))
ResponseError(req.RequestCtx, "could not detect Content-Type from payload: "+err.Error(), fasthttp.StatusBadRequest)
return return
} }
@ -165,7 +162,7 @@ func (h *Handler) receiveFile(ctx context.Context, req request, objAddress oid.A
req.Response.SetBodyStream(payload, int(payloadSize)) req.Response.SetBodyStream(payload, int(payloadSize))
} }
func (r *request) setIDs(obj object.Object) { func setIDs(r *fasthttp.RequestCtx, obj object.Object) {
objID, _ := obj.ID() objID, _ := obj.ID()
cnrID, _ := obj.ContainerID() cnrID, _ := obj.ContainerID()
r.Response.Header.Set(hdrObjectID, objID.String()) r.Response.Header.Set(hdrObjectID, objID.String())
@ -173,7 +170,7 @@ func (r *request) setIDs(obj object.Object) {
r.Response.Header.Set(hdrContainerID, cnrID.String()) r.Response.Header.Set(hdrContainerID, cnrID.String())
} }
func (r *request) setDisposition(shouldDownload bool, filename string) { func setDisposition(r *fasthttp.RequestCtx, shouldDownload bool, filename string) {
const ( const (
inlineDisposition = "inline" inlineDisposition = "inline"
attachmentDisposition = "attachment" attachmentDisposition = "attachment"
@ -187,7 +184,7 @@ func (r *request) setDisposition(shouldDownload bool, filename string) {
r.Response.Header.Set(fasthttp.HeaderContentDisposition, dis+"; filename="+path.Base(filename)) r.Response.Header.Set(fasthttp.HeaderContentDisposition, dis+"; filename="+path.Base(filename))
} }
func (r *request) setTimestamp(timestamp string) error { func setTimestamp(r *fasthttp.RequestCtx, timestamp string) error {
value, err := strconv.ParseInt(timestamp, 10, 64) value, err := strconv.ParseInt(timestamp, 10, 64)
if err != nil { if err != nil {
return err return err

View file

@ -50,44 +50,41 @@ func (pr *putResponse) encode(w io.Writer) error {
} }
// Upload handles multipart upload request. // Upload handles multipart upload request.
func (h *Handler) Upload(c *fasthttp.RequestCtx) { func (h *Handler) Upload(req *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.Upload") ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.Upload")
defer span.End() defer span.End()
utils.SetContextToRequest(ctx, c)
var file MultipartFile var file MultipartFile
scid, _ := c.UserValue("cid").(string) scid, _ := req.UserValue("cid").(string)
bodyStream := c.RequestBodyStream() bodyStream := req.RequestBodyStream()
drainBuf := make([]byte, drainBufSize) drainBuf := make([]byte, drainBufSize)
reqLog := utils.GetReqLogOrDefault(ctx, h.log) log := h.reqLogger(ctx)
log := reqLog.With(zap.String("cid", scid)) ctx = utils.SetReqLog(ctx, log.With(zap.String("cid", scid)))
bktInfo, err := h.getBucketInfo(ctx, scid, log) bktInfo, err := h.getBucketInfo(ctx, scid)
if err != nil { if err != nil {
logAndSendBucketError(c, log, err) h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err)
return return
} }
boundary := string(c.Request.Header.MultipartFormBoundary()) boundary := string(req.Request.Header.MultipartFormBoundary())
if file, err = fetchMultipartFile(log, bodyStream, boundary); err != nil { if file, err = fetchMultipartFile(log, bodyStream, boundary); err != nil {
log.Error(logs.CouldNotReceiveMultipartForm, zap.Error(err), logs.TagField(logs.TagDatapath)) h.logAndSendError(ctx, req, logs.CouldNotReceiveMultipartForm, err)
ResponseError(c, "could not receive multipart/form: "+err.Error(), fasthttp.StatusBadRequest)
return return
} }
filtered, err := filterHeaders(log, &c.Request.Header) filtered, err := filterHeaders(log, &req.Request.Header)
if err != nil { if err != nil {
log.Error(logs.FailedToFilterHeaders, zap.Error(err), logs.TagField(logs.TagDatapath)) h.logAndSendError(ctx, req, logs.FailedToFilterHeaders, err)
ResponseError(c, err.Error(), fasthttp.StatusBadRequest)
return return
} }
if c.Request.Header.Peek(explodeArchiveHeader) != nil { if req.Request.Header.Peek(explodeArchiveHeader) != nil {
h.explodeArchive(request{c, log}, bktInfo, file, filtered) h.explodeArchive(ctx, req, bktInfo, file, filtered)
} else { } else {
h.uploadSingleObject(request{c, log}, bktInfo, file, filtered) h.uploadSingleObject(ctx, req, bktInfo, file, filtered)
} }
// Multipart is multipart and thus can contain more than one part which // Multipart is multipart and thus can contain more than one part which
@ -104,46 +101,39 @@ func (h *Handler) Upload(c *fasthttp.RequestCtx) {
} }
} }
func (h *Handler) uploadSingleObject(req request, bkt *data.BucketInfo, file MultipartFile, filtered map[string]string) { func (h *Handler) uploadSingleObject(ctx context.Context, req *fasthttp.RequestCtx, bkt *data.BucketInfo, file MultipartFile, filtered map[string]string) {
c, log := req.RequestCtx, req.log ctx, span := tracing.StartSpanFromContext(ctx, "handler.uploadSingleObject")
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.uploadSingleObject")
defer span.End() defer span.End()
utils.SetContextToRequest(ctx, c)
setIfNotExist(filtered, object.AttributeFileName, file.FileName()) setIfNotExist(filtered, object.AttributeFileName, file.FileName())
attributes, err := h.extractAttributes(c, log, filtered) attributes, err := h.extractAttributes(ctx, req, filtered)
if err != nil { if err != nil {
log.Error(logs.FailedToGetAttributes, zap.Error(err), logs.TagField(logs.TagDatapath)) h.logAndSendError(ctx, req, logs.FailedToGetAttributes, err)
ResponseError(c, "could not extract attributes: "+err.Error(), fasthttp.StatusBadRequest)
return return
} }
idObj, err := h.uploadObject(c, bkt, attributes, file) idObj, err := h.uploadObject(ctx, bkt, attributes, file)
if err != nil { if err != nil {
h.handlePutFrostFSErr(c, err, log) h.logAndSendError(ctx, req, logs.FailedToUploadObject, err)
return return
} }
log.Debug(logs.ObjectUploaded, h.reqLogger(ctx).Debug(logs.ObjectUploaded,
zap.String("oid", idObj.EncodeToString()), zap.String("oid", idObj.EncodeToString()),
zap.String("FileName", file.FileName()), zap.String("FileName", file.FileName()),
logs.TagField(logs.TagExternalStorage), logs.TagField(logs.TagExternalStorage),
) )
addr := newAddress(bkt.CID, idObj) addr := newAddress(bkt.CID, idObj)
c.Response.Header.SetContentType(jsonHeader) req.Response.Header.SetContentType(jsonHeader)
// Try to return the response, otherwise, if something went wrong, throw an error. // Try to return the response, otherwise, if something went wrong, throw an error.
if err = newPutResponse(addr).encode(c); err != nil { if err = newPutResponse(addr).encode(req); err != nil {
log.Error(logs.CouldNotEncodeResponse, zap.Error(err), logs.TagField(logs.TagDatapath)) h.logAndSendError(ctx, req, logs.CouldNotEncodeResponse, err)
ResponseError(c, "could not encode response", fasthttp.StatusBadRequest)
return return
} }
} }
func (h *Handler) uploadObject(c *fasthttp.RequestCtx, bkt *data.BucketInfo, attrs []object.Attribute, file io.Reader) (oid.ID, error) { func (h *Handler) uploadObject(ctx context.Context, bkt *data.BucketInfo, attrs []object.Attribute, file io.Reader) (oid.ID, error) {
ctx := utils.GetContextFromRequest(c)
obj := object.New() obj := object.New()
obj.SetContainerID(bkt.CID) obj.SetContainerID(bkt.CID)
obj.SetOwnerID(*h.ownerID) obj.SetOwnerID(*h.ownerID)
@ -168,19 +158,18 @@ func (h *Handler) uploadObject(c *fasthttp.RequestCtx, bkt *data.BucketInfo, att
return idObj, nil return idObj, nil
} }
func (h *Handler) extractAttributes(c *fasthttp.RequestCtx, log *zap.Logger, filtered map[string]string) ([]object.Attribute, error) { func (h *Handler) extractAttributes(ctx context.Context, req *fasthttp.RequestCtx, filtered map[string]string) ([]object.Attribute, error) {
ctx := utils.GetContextFromRequest(c)
now := time.Now() now := time.Now()
if rawHeader := c.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil { if rawHeader := req.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil {
if parsed, err := time.Parse(http.TimeFormat, string(rawHeader)); err != nil { if parsed, err := time.Parse(http.TimeFormat, string(rawHeader)); err != nil {
log.Warn(logs.CouldNotParseClientTime, zap.String("Date header", string(rawHeader)), zap.Error(err), h.reqLogger(ctx).Warn(logs.CouldNotParseClientTime, zap.String("Date header", string(rawHeader)), zap.Error(err),
logs.TagField(logs.TagDatapath)) logs.TagField(logs.TagDatapath))
} else { } else {
now = parsed now = parsed
} }
} }
if err := utils.PrepareExpirationHeader(ctx, h.frostfs, filtered, now); err != nil { if err := utils.PrepareExpirationHeader(ctx, h.frostfs, filtered, now); err != nil {
log.Error(logs.CouldNotPrepareExpirationHeader, zap.Error(err), logs.TagField(logs.TagDatapath)) h.reqLogger(ctx).Error(logs.CouldNotPrepareExpirationHeader, zap.Error(err), logs.TagField(logs.TagDatapath))
return nil, err return nil, err
} }
attributes := make([]object.Attribute, 0, len(filtered)) attributes := make([]object.Attribute, 0, len(filtered))
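The client-time handling itself is unchanged by this refactor: a Date header in RFC 1123 form is parsed with http.TimeFormat and used as the base time for expiration calculation, otherwise the current time is used. A tiny standalone illustration of that parse step:

package main

import (
    "fmt"
    "net/http"
    "time"
)

func main() {
    raw := "Mon, 02 Jan 2006 15:04:05 GMT" // what a client puts into the Date header
    parsed, err := time.Parse(http.TimeFormat, raw)
    if err != nil {
        parsed = time.Now() // the handler falls back to the current time and logs a warning
    }
    fmt.Println(parsed.UTC())
}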
@ -207,38 +196,33 @@ func newAttribute(key string, val string) object.Attribute {
// explodeArchive read files from archive and creates objects for each of them. // explodeArchive read files from archive and creates objects for each of them.
// Sets FilePath attribute with name from tar.Header. // Sets FilePath attribute with name from tar.Header.
func (h *Handler) explodeArchive(req request, bkt *data.BucketInfo, file io.ReadCloser, filtered map[string]string) { func (h *Handler) explodeArchive(ctx context.Context, req *fasthttp.RequestCtx, bkt *data.BucketInfo, file io.ReadCloser, filtered map[string]string) {
c, log := req.RequestCtx, req.log ctx, span := tracing.StartSpanFromContext(ctx, "handler.explodeArchive")
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.explodeArchive")
defer span.End() defer span.End()
utils.SetContextToRequest(ctx, c)
// remove user attributes which vary for each file in archive // remove user attributes which vary for each file in archive
// to guarantee that they won't appear twice // to guarantee that they won't appear twice
delete(filtered, object.AttributeFileName) delete(filtered, object.AttributeFileName)
delete(filtered, object.AttributeFilePath) delete(filtered, object.AttributeFilePath)
commonAttributes, err := h.extractAttributes(c, log, filtered) commonAttributes, err := h.extractAttributes(ctx, req, filtered)
if err != nil { if err != nil {
log.Error(logs.FailedToGetAttributes, zap.Error(err), logs.TagField(logs.TagDatapath)) h.logAndSendError(ctx, req, logs.FailedToGetAttributes, err)
ResponseError(c, "could not extract attributes: "+err.Error(), fasthttp.StatusBadRequest)
return return
} }
attributes := commonAttributes attributes := commonAttributes
reader := file reader := file
if bytes.EqualFold(c.Request.Header.Peek(fasthttp.HeaderContentEncoding), []byte("gzip")) { if bytes.EqualFold(req.Request.Header.Peek(fasthttp.HeaderContentEncoding), []byte("gzip")) {
log.Debug(logs.GzipReaderSelected, logs.TagField(logs.TagDatapath)) h.reqLogger(ctx).Debug(logs.GzipReaderSelected, logs.TagField(logs.TagDatapath))
gzipReader, err := gzip.NewReader(file) gzipReader, err := gzip.NewReader(file)
if err != nil { if err != nil {
log.Error(logs.FailedToCreateGzipReader, zap.Error(err), logs.TagField(logs.TagDatapath)) h.logAndSendError(ctx, req, logs.FailedToCreateGzipReader, err)
ResponseError(c, "could read gzip file: "+err.Error(), fasthttp.StatusBadRequest)
return return
} }
defer func() { defer func() {
if err := gzipReader.Close(); err != nil { if err := gzipReader.Close(); err != nil {
log.Warn(logs.FailedToCloseReader, zap.Error(err), logs.TagField(logs.TagDatapath)) h.reqLogger(ctx).Warn(logs.FailedToCloseReader, zap.Error(err), logs.TagField(logs.TagDatapath))
} }
}() }()
reader = gzipReader reader = gzipReader
@ -250,8 +234,7 @@ func (h *Handler) explodeArchive(req request, bkt *data.BucketInfo, file io.Read
if errors.Is(err, io.EOF) { if errors.Is(err, io.EOF) {
break break
} else if err != nil { } else if err != nil {
log.Error(logs.FailedToReadFileFromTar, zap.Error(err), logs.TagField(logs.TagDatapath)) h.logAndSendError(ctx, req, logs.FailedToReadFileFromTar, err)
ResponseError(c, "could not get next entry: "+err.Error(), fasthttp.StatusBadRequest)
return return
} }
@ -265,13 +248,13 @@ func (h *Handler) explodeArchive(req request, bkt *data.BucketInfo, file io.Read
attributes = append(attributes, newAttribute(object.AttributeFilePath, obj.Name)) attributes = append(attributes, newAttribute(object.AttributeFilePath, obj.Name))
attributes = append(attributes, newAttribute(object.AttributeFileName, fileName)) attributes = append(attributes, newAttribute(object.AttributeFileName, fileName))
idObj, err := h.uploadObject(c, bkt, attributes, tarReader) idObj, err := h.uploadObject(ctx, bkt, attributes, tarReader)
if err != nil { if err != nil {
h.handlePutFrostFSErr(c, err, log) h.logAndSendError(ctx, req, logs.FailedToUploadObject, err)
return return
} }
log.Debug(logs.ObjectUploaded, h.reqLogger(ctx).Debug(logs.ObjectUploaded,
zap.String("oid", idObj.EncodeToString()), zap.String("oid", idObj.EncodeToString()),
zap.String("FileName", fileName), zap.String("FileName", fileName),
logs.TagField(logs.TagExternalStorage), logs.TagField(logs.TagExternalStorage),
@ -279,14 +262,6 @@ func (h *Handler) explodeArchive(req request, bkt *data.BucketInfo, file io.Read
} }
} }
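The explode path reduces to standard library primitives: optionally unwrap gzip when Content-Encoding says so, then walk the tar stream and create one object per file entry. A simplified, self-contained sketch of that loop (the actual upload call is replaced by a stub):

package main

import (
    "archive/tar"
    "compress/gzip"
    "errors"
    "fmt"
    "io"
    "os"
)

func main() {
    f, err := os.Open("archive.tar.gz") // illustrative input file
    if err != nil {
        panic(err)
    }
    defer f.Close()

    // When Content-Encoding is gzip the body is unwrapped first; a plain tar skips this step.
    gz, err := gzip.NewReader(f)
    if err != nil {
        panic(err)
    }
    defer gz.Close()

    tr := tar.NewReader(gz)
    for {
        hdr, err := tr.Next()
        if errors.Is(err, io.EOF) {
            break // archive fully read
        }
        if err != nil {
            panic(err)
        }
        if hdr.Typeflag != tar.TypeReg {
            continue // skip directories and other non-regular entries
        }
        // In the gateway this is where uploadObject runs, with FilePath/FileName
        // attributes derived from hdr.Name.
        n, _ := io.Copy(io.Discard, tr)
        fmt.Printf("would upload %s (%d bytes)\n", hdr.Name, n)
    }
}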
func (h *Handler) handlePutFrostFSErr(r *fasthttp.RequestCtx, err error, log *zap.Logger) {
statusCode, msg, additionalFields := formErrorResponse("could not store file in frostfs", err)
logFields := append([]zap.Field{zap.Error(err)}, additionalFields...)
log.Error(logs.CouldNotStoreFileInFrostfs, append(logFields, logs.TagField(logs.TagExternalStorage))...)
ResponseError(r, msg, statusCode)
}
func (h *Handler) fetchBearerToken(ctx context.Context) *bearer.Token { func (h *Handler) fetchBearerToken(ctx context.Context) *bearer.Token {
if tkn, err := tokens.LoadBearerToken(ctx); err == nil && tkn != nil { if tkn, err := tokens.LoadBearerToken(ctx); err == nil && tkn != nil {
return tkn return tkn


@ -5,13 +5,12 @@ import (
"errors" "errors"
"fmt" "fmt"
"strings" "strings"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
sdkstatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@ -19,30 +18,6 @@ import (
"go.uber.org/zap" "go.uber.org/zap"
) )
type request struct {
*fasthttp.RequestCtx
log *zap.Logger
}
func newRequest(ctx *fasthttp.RequestCtx, log *zap.Logger) request {
return request{
RequestCtx: ctx,
log: log,
}
}
func (r *request) handleFrostFSErr(err error, start time.Time) {
logFields := []zap.Field{
zap.Stringer("elapsed", time.Since(start)),
zap.Error(err),
}
statusCode, msg, additionalFields := formErrorResponse("could not receive object", err)
logFields = append(logFields, additionalFields...)
r.log.Error(logs.CouldNotReceiveObject, append(logFields, logs.TagField(logs.TagExternalStorage))...)
ResponseError(r.RequestCtx, msg, statusCode)
}
func bearerToken(ctx context.Context) *bearer.Token { func bearerToken(ctx context.Context) *bearer.Token {
if tkn, err := tokens.LoadBearerToken(ctx); err == nil { if tkn, err := tokens.LoadBearerToken(ctx); err == nil {
return tkn return tkn
@ -84,14 +59,16 @@ func isValidValue(s string) bool {
return true return true
} }
func logAndSendBucketError(c *fasthttp.RequestCtx, log *zap.Logger, err error) { func (h *Handler) reqLogger(ctx context.Context) *zap.Logger {
log.Error(logs.CouldNotGetBucket, zap.Error(err), logs.TagField(logs.TagDatapath)) return utils.GetReqLogOrDefault(ctx, h.log)
if client.IsErrContainerNotFound(err) {
ResponseError(c, "Not Found", fasthttp.StatusNotFound)
return
} }
ResponseError(c, "could not get bucket: "+err.Error(), fasthttp.StatusBadRequest)
func (h *Handler) logAndSendError(ctx context.Context, c *fasthttp.RequestCtx, msg string, err error, additional ...zap.Field) {
utils.GetReqLogOrDefault(ctx, h.log).Error(msg,
append([]zap.Field{zap.Error(err), logs.TagField(logs.TagDatapath)}, additional...)...)
msg, code := formErrorResponse(err)
ResponseError(c, msg, code)
} }
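These two helpers replace the old request wrapper struct: the per-request logger now travels in the context and every failure path funnels through logAndSendError. A minimal sketch of the context-logger idea, simplified and not the gateway's actual utils implementation:

package main

import (
    "context"

    "go.uber.org/zap"
)

type logKey struct{}

// setReqLog stores a request-scoped logger in the context.
func setReqLog(ctx context.Context, log *zap.Logger) context.Context {
    return context.WithValue(ctx, logKey{}, log)
}

// reqLogOrDefault returns the stored logger, falling back to the given default.
func reqLogOrDefault(ctx context.Context, def *zap.Logger) *zap.Logger {
    if log, ok := ctx.Value(logKey{}).(*zap.Logger); ok {
        return log
    }
    return def
}

func main() {
    base := zap.NewExample()
    ctx := setReqLog(context.Background(), base.With(zap.String("cid", "ExampleContainerID")))
    reqLogOrDefault(ctx, base).Info("object uploaded")
}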
func newAddress(cnr cid.ID, obj oid.ID) oid.Address { func newAddress(cnr cid.ID, obj oid.ID) oid.Address {
@ -112,31 +89,23 @@ func ResponseError(r *fasthttp.RequestCtx, msg string, code int) {
r.Error(msg+"\n", code) r.Error(msg+"\n", code)
} }
func formErrorResponse(message string, err error) (int, string, []zap.Field) { func formErrorResponse(err error) (string, int) {
var (
msg string
statusCode int
logFields []zap.Field
)
st := new(sdkstatus.ObjectAccessDenied)
switch { switch {
case errors.As(err, &st): case errors.Is(err, ErrAccessDenied):
statusCode = fasthttp.StatusForbidden return fmt.Sprintf("Storage Access Denied:\n%v", err), fasthttp.StatusForbidden
reason := st.Reason() case errors.Is(err, layer.ErrNodeAccessDenied):
msg = fmt.Sprintf("%s: %v: %s", message, err, reason) return fmt.Sprintf("Tree Access Denied:\n%v", err), fasthttp.StatusForbidden
logFields = append(logFields, zap.String("error_detail", reason))
case errors.Is(err, ErrQuotaLimitReached): case errors.Is(err, ErrQuotaLimitReached):
statusCode = fasthttp.StatusConflict return fmt.Sprintf("Quota Reached:\n%v", err), fasthttp.StatusConflict
msg = fmt.Sprintf("%s: %v", message, err) case errors.Is(err, ErrContainerNotFound):
case client.IsErrObjectNotFound(err) || client.IsErrContainerNotFound(err): return fmt.Sprintf("Container Not Found:\n%v", err), fasthttp.StatusNotFound
statusCode = fasthttp.StatusNotFound case errors.Is(err, ErrObjectNotFound):
msg = "Not Found" return fmt.Sprintf("Object Not Found:\n%v", err), fasthttp.StatusNotFound
case errors.Is(err, layer.ErrNodeNotFound):
return fmt.Sprintf("Tree Node Not Found:\n%v", err), fasthttp.StatusNotFound
case errors.Is(err, ErrGatewayTimeout):
return fmt.Sprintf("Gateway Timeout:\n%v", err), fasthttp.StatusGatewayTimeout
default: default:
statusCode = fasthttp.StatusBadRequest return fmt.Sprintf("Bad Request:\n%v", err), fasthttp.StatusBadRequest
msg = fmt.Sprintf("%s: %v", message, err)
} }
return statusCode, msg, logFields
} }


@ -72,12 +72,13 @@ const (
TagsLogConfigWontBeUpdated = "tags log config won't be updated" TagsLogConfigWontBeUpdated = "tags log config won't be updated"
FailedToReadIndexPageTemplate = "failed to read index page template" FailedToReadIndexPageTemplate = "failed to read index page template"
SetCustomIndexPageTemplate = "set custom index page template" SetCustomIndexPageTemplate = "set custom index page template"
CouldNotFetchCORSContainerInfo = "couldn't fetch CORS container info"
) )
// Log messages with the "datapath" tag. // Log messages with the "datapath" tag.
const ( const (
CouldntParseCreationDate = "couldn't parse creation date" CouldntParseCreationDate = "couldn't parse creation date"
CouldNotDetectContentTypeFromPayload = "could not detect Content-Type from payload" FailedToDetectContentTypeFromPayload = "failed to detect Content-Type from payload"
FailedToAddObjectToArchive = "failed to add object to archive" FailedToAddObjectToArchive = "failed to add object to archive"
CloseZipWriter = "close zip writer" CloseZipWriter = "close zip writer"
IgnorePartEmptyFormName = "ignore part, empty form name" IgnorePartEmptyFormName = "ignore part, empty form name"
@ -104,28 +105,37 @@ const (
CouldNotReceiveMultipartForm = "could not receive multipart/form" CouldNotReceiveMultipartForm = "could not receive multipart/form"
ObjectsNotFound = "objects not found" ObjectsNotFound = "objects not found"
IteratingOverSelectedObjectsFailed = "iterating over selected objects failed" IteratingOverSelectedObjectsFailed = "iterating over selected objects failed"
CouldNotGetBucket = "could not get bucket" FailedToGetBucketInfo = "could not get bucket info"
CouldNotResolveContainerID = "could not resolve container id" FailedToSubmitTaskToPool = "failed to submit task to pool"
FailedToSumbitTaskToPool = "failed to submit task to pool" ObjectWasDeleted = "object was deleted"
FailedToGetLatestVersionOfObject = "failed to get latest version of object"
FailedToCheckIfSettingsNodeExist = "failed to check if settings node exists"
FailedToListObjects = "failed to list objects"
FailedToParseTemplate = "failed to parse template"
FailedToExecuteTemplate = "failed to execute template"
FailedToUploadObject = "failed to upload object"
FailedToHeadObject = "failed to head object"
FailedToGetObject = "failed to get object"
FailedToGetObjectPayload = "failed to get object payload"
FailedToFindObjectByAttribute = "failed to get find object by attribute"
FailedToUnescapeOIDParam = "failed to unescape oid param"
InvalidOIDParam = "invalid oid param"
CouldNotGetCORSConfiguration = "could not get cors configuration"
EmptyOriginRequestHeader = "empty Origin request header"
EmptyAccessControlRequestMethodHeader = "empty Access-Control-Request-Method request header"
CORSRuleWasNotMatched = "cors rule was not matched"
CouldntCacheCors = "couldn't cache cors"
) )
// Log messages with the "external_storage" tag. // Log messages with the "external_storage" tag.
const ( const (
CouldNotReceiveObject = "could not receive object"
CouldNotSearchForObjects = "could not search for objects"
ObjectNotFound = "object not found" ObjectNotFound = "object not found"
ReadObjectListFailed = "read object list failed" ReadObjectListFailed = "read object list failed"
CouldNotStoreFileInFrostfs = "could not store file in frostfs"
FailedToHeadObject = "failed to head object"
ObjectNotFoundByFilePathTrySearchByFileName = "object not found by filePath attribute, try search by fileName" ObjectNotFoundByFilePathTrySearchByFileName = "object not found by filePath attribute, try search by fileName"
FailedToGetObject = "failed to get object"
ObjectUploaded = "object uploaded" ObjectUploaded = "object uploaded"
CouldNotGetContainerInfo = "could not get container info"
) )
// Log messages with the "external_storage_tree" tag. // Log messages with the "external_storage_tree" tag.
const ( const (
ObjectWasDeleted = "object was deleted" FoundSeveralSystemTreeNodes = "found several system tree nodes"
FailedToGetLatestVersionOfObject = "failed to get latest version of object"
FailedToCheckIfSettingsNodeExist = "Failed to check if settings node exists"
) )


@ -10,6 +10,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@ -45,7 +46,7 @@ func (x *FrostFS) Container(ctx context.Context, containerPrm handler.PrmContain
res, err := x.pool.GetContainer(ctx, prm) res, err := x.pool.GetContainer(ctx, prm)
if err != nil { if err != nil {
return nil, handleObjectError("read container via connection pool", err) return nil, handleStorageError("read container via connection pool", err)
} }
return &res, nil return &res, nil
@ -69,7 +70,7 @@ func (x *FrostFS) CreateObject(ctx context.Context, prm handler.PrmObjectCreate)
idObj, err := x.pool.PutObject(ctx, prmPut) idObj, err := x.pool.PutObject(ctx, prmPut)
if err != nil { if err != nil {
return oid.ID{}, handleObjectError("save object via connection pool", err) return oid.ID{}, handleStorageError("save object via connection pool", err)
} }
return idObj.ObjectID, nil return idObj.ObjectID, nil
} }
@ -85,7 +86,7 @@ func (x payloadReader) Read(p []byte) (int, error) {
if err != nil && errors.Is(err, io.EOF) { if err != nil && errors.Is(err, io.EOF) {
return n, err return n, err
} }
return n, handleObjectError("read payload", err) return n, handleStorageError("read payload", err)
} }
// HeadObject implements frostfs.FrostFS interface method. // HeadObject implements frostfs.FrostFS interface method.
@ -102,7 +103,7 @@ func (x *FrostFS) HeadObject(ctx context.Context, prm handler.PrmObjectHead) (*o
res, err := x.pool.HeadObject(ctx, prmHead) res, err := x.pool.HeadObject(ctx, prmHead)
if err != nil { if err != nil {
return nil, handleObjectError("read object header via connection pool", err) return nil, handleStorageError("read object header via connection pool", err)
} }
return &res, nil return &res, nil
@ -122,7 +123,7 @@ func (x *FrostFS) GetObject(ctx context.Context, prm handler.PrmObjectGet) (*han
res, err := x.pool.GetObject(ctx, prmGet) res, err := x.pool.GetObject(ctx, prmGet)
if err != nil { if err != nil {
return nil, handleObjectError("init full object reading via connection pool", err) return nil, handleStorageError("init full object reading via connection pool", err)
} }
return &handler.Object{ return &handler.Object{
@ -147,7 +148,7 @@ func (x *FrostFS) RangeObject(ctx context.Context, prm handler.PrmObjectRange) (
res, err := x.pool.ObjectRange(ctx, prmRange) res, err := x.pool.ObjectRange(ctx, prmRange)
if err != nil { if err != nil {
return nil, handleObjectError("init payload range reading via connection pool", err) return nil, handleStorageError("init payload range reading via connection pool", err)
} }
return payloadReader{&res}, nil return payloadReader{&res}, nil
@ -168,7 +169,7 @@ func (x *FrostFS) SearchObjects(ctx context.Context, prm handler.PrmObjectSearch
res, err := x.pool.SearchObjects(ctx, prmSearch) res, err := x.pool.SearchObjects(ctx, prmSearch)
if err != nil { if err != nil {
return nil, handleObjectError("init object search via connection pool", err) return nil, handleStorageError("init object search via connection pool", err)
} }
return &res, nil return &res, nil
@ -202,7 +203,7 @@ func (x *FrostFS) NetmapSnapshot(ctx context.Context) (netmap.NetMap, error) {
netmapSnapshot, err := x.pool.NetMapSnapshot(ctx) netmapSnapshot, err := x.pool.NetMapSnapshot(ctx)
if err != nil { if err != nil {
return netmapSnapshot, handleObjectError("get netmap via connection pool", err) return netmapSnapshot, handleStorageError("get netmap via connection pool", err)
} }
return netmapSnapshot, nil return netmapSnapshot, nil
@ -226,7 +227,7 @@ func (x *ResolverFrostFS) SystemDNS(ctx context.Context) (string, error) {
networkInfo, err := x.pool.NetworkInfo(ctx) networkInfo, err := x.pool.NetworkInfo(ctx)
if err != nil { if err != nil {
return "", handleObjectError("read network info via client", err) return "", handleStorageError("read network info via client", err)
} }
domain := networkInfo.RawNetworkParameter("SystemDNS") domain := networkInfo.RawNetworkParameter("SystemDNS")
@ -237,7 +238,7 @@ func (x *ResolverFrostFS) SystemDNS(ctx context.Context) (string, error) {
return string(domain), nil return string(domain), nil
} }
func handleObjectError(msg string, err error) error { func handleStorageError(msg string, err error) error {
if err == nil { if err == nil {
return nil return nil
} }
@ -250,6 +251,14 @@ func handleObjectError(msg string, err error) error {
return fmt.Errorf("%s: %w: %s", msg, handler.ErrAccessDenied, reason) return fmt.Errorf("%s: %w: %s", msg, handler.ErrAccessDenied, reason)
} }
if client.IsErrContainerNotFound(err) {
return fmt.Errorf("%s: %w: %s", msg, handler.ErrContainerNotFound, err.Error())
}
if client.IsErrObjectNotFound(err) {
return fmt.Errorf("%s: %w: %s", msg, handler.ErrObjectNotFound, err.Error())
}
if IsTimeoutError(err) { if IsTimeoutError(err) {
return fmt.Errorf("%s: %w: %s", msg, handler.ErrGatewayTimeout, err.Error()) return fmt.Errorf("%s: %w: %s", msg, handler.ErrGatewayTimeout, err.Error())
} }
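handleStorageError now folds the SDK's container and object "not found" statuses into the handler's sentinels as well, using the same "%s: %w: %s" wrap that keeps the sentinel matchable while preserving the original error text for logs. A compact stand-alone sketch of that wrapping pattern (the sentinel names are stand-ins):

package main

import (
    "errors"
    "fmt"
)

// Stand-ins for the handler package's sentinel errors.
var (
    errContainerNotFound = errors.New("container not found")
    errObjectNotFound    = errors.New("object not found")
)

// wrap mirrors handleStorageError's "%s: %w: %s" pattern: the operation message,
// the matchable sentinel, and the original SDK error text for logs.
func wrap(msg string, sentinel, cause error) error {
    return fmt.Errorf("%s: %w: %s", msg, sentinel, cause.Error())
}

func main() {
    cause := errors.New("status: container not found")
    err := wrap("read container via connection pool", errContainerNotFound, cause)

    fmt.Println(errors.Is(err, errContainerNotFound)) // true
    fmt.Println(errors.Is(err, errObjectNotFound))    // false
    fmt.Println(err)
}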


@ -18,7 +18,7 @@ func TestHandleObjectError(t *testing.T) {
msg := "some msg" msg := "some msg"
t.Run("nil error", func(t *testing.T) { t.Run("nil error", func(t *testing.T) {
err := handleObjectError(msg, nil) err := handleStorageError(msg, nil)
require.Nil(t, err) require.Nil(t, err)
}) })
@ -27,7 +27,7 @@ func TestHandleObjectError(t *testing.T) {
inputErr := new(apistatus.ObjectAccessDenied) inputErr := new(apistatus.ObjectAccessDenied)
inputErr.WriteReason(reason) inputErr.WriteReason(reason)
err := handleObjectError(msg, inputErr) err := handleStorageError(msg, inputErr)
require.ErrorIs(t, err, handler.ErrAccessDenied) require.ErrorIs(t, err, handler.ErrAccessDenied)
require.Contains(t, err.Error(), reason) require.Contains(t, err.Error(), reason)
require.Contains(t, err.Error(), msg) require.Contains(t, err.Error(), msg)
@ -38,7 +38,7 @@ func TestHandleObjectError(t *testing.T) {
inputErr := new(apistatus.ObjectAccessDenied) inputErr := new(apistatus.ObjectAccessDenied)
inputErr.WriteReason(reason) inputErr.WriteReason(reason)
err := handleObjectError(msg, inputErr) err := handleStorageError(msg, inputErr)
require.ErrorIs(t, err, handler.ErrQuotaLimitReached) require.ErrorIs(t, err, handler.ErrQuotaLimitReached)
require.Contains(t, err.Error(), reason) require.Contains(t, err.Error(), reason)
require.Contains(t, err.Error(), msg) require.Contains(t, err.Error(), msg)
@ -47,7 +47,7 @@ func TestHandleObjectError(t *testing.T) {
t.Run("simple timeout", func(t *testing.T) { t.Run("simple timeout", func(t *testing.T) {
inputErr := errors.New("timeout") inputErr := errors.New("timeout")
err := handleObjectError(msg, inputErr) err := handleStorageError(msg, inputErr)
require.ErrorIs(t, err, handler.ErrGatewayTimeout) require.ErrorIs(t, err, handler.ErrGatewayTimeout)
require.Contains(t, err.Error(), inputErr.Error()) require.Contains(t, err.Error(), inputErr.Error())
require.Contains(t, err.Error(), msg) require.Contains(t, err.Error(), msg)
@ -58,7 +58,7 @@ func TestHandleObjectError(t *testing.T) {
defer cancel() defer cancel()
<-ctx.Done() <-ctx.Done()
err := handleObjectError(msg, ctx.Err()) err := handleStorageError(msg, ctx.Err())
require.ErrorIs(t, err, handler.ErrGatewayTimeout) require.ErrorIs(t, err, handler.ErrGatewayTimeout)
require.Contains(t, err.Error(), ctx.Err().Error()) require.Contains(t, err.Error(), ctx.Err().Error())
require.Contains(t, err.Error(), msg) require.Contains(t, err.Error(), msg)
@ -67,7 +67,7 @@ func TestHandleObjectError(t *testing.T) {
t.Run("grpc deadline exceeded", func(t *testing.T) { t.Run("grpc deadline exceeded", func(t *testing.T) {
inputErr := fmt.Errorf("wrap grpc error: %w", status.Error(codes.DeadlineExceeded, "error")) inputErr := fmt.Errorf("wrap grpc error: %w", status.Error(codes.DeadlineExceeded, "error"))
err := handleObjectError(msg, inputErr) err := handleStorageError(msg, inputErr)
require.ErrorIs(t, err, handler.ErrGatewayTimeout) require.ErrorIs(t, err, handler.ErrGatewayTimeout)
require.Contains(t, err.Error(), inputErr.Error()) require.Contains(t, err.Error(), inputErr.Error())
require.Contains(t, err.Error(), msg) require.Contains(t, err.Error(), msg)
@ -76,7 +76,7 @@ func TestHandleObjectError(t *testing.T) {
t.Run("unknown error", func(t *testing.T) { t.Run("unknown error", func(t *testing.T) {
inputErr := errors.New("unknown error") inputErr := errors.New("unknown error")
err := handleObjectError(msg, inputErr) err := handleStorageError(msg, inputErr)
require.ErrorIs(t, err, inputErr) require.ErrorIs(t, err, inputErr)
require.Contains(t, err.Error(), msg) require.Contains(t, err.Error(), msg)
}) })


@ -63,7 +63,7 @@ func (w *PoolWrapper) GetNodes(ctx context.Context, prm *tree.GetNodesParams) ([
nodes, err := w.p.GetNodes(ctx, poolPrm) nodes, err := w.p.GetNodes(ctx, poolPrm)
if err != nil { if err != nil {
return nil, handleError(err) return nil, handleTreeError(err)
} }
res := make([]tree.NodeResponse, len(nodes)) res := make([]tree.NodeResponse, len(nodes))
@ -82,7 +82,7 @@ func getBearer(ctx context.Context) []byte {
return token.Marshal() return token.Marshal()
} }
func handleError(err error) error { func handleTreeError(err error) error {
if err == nil { if err == nil {
return nil return nil
} }
@ -122,7 +122,7 @@ func (w *PoolWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo,
subTreeReader, err := w.p.GetSubTree(ctx, poolPrm) subTreeReader, err := w.p.GetSubTree(ctx, poolPrm)
if err != nil { if err != nil {
return nil, handleError(err) return nil, handleTreeError(err)
} }
var subtree []tree.NodeResponse var subtree []tree.NodeResponse
@ -133,7 +133,7 @@ func (w *PoolWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo,
node, err = subTreeReader.Next() node, err = subTreeReader.Next()
} }
if err != io.EOF { if err != io.EOF {
return nil, handleError(err) return nil, handleTreeError(err)
} }
return subtree, nil return subtree, nil


@ -6,7 +6,7 @@ import (
"fmt" "fmt"
"sync" "sync"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware" v2container "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns"
@ -29,14 +29,9 @@ type FrostFS interface {
SystemDNS(context.Context) (string, error) SystemDNS(context.Context) (string, error)
} }
type Settings interface {
FormContainerZone(ns string) (zone string, isDefault bool)
}
type Config struct { type Config struct {
FrostFS FrostFS FrostFS FrostFS
RPCAddress string RPCAddress string
Settings Settings
} }
type ContainerResolver struct { type ContainerResolver struct {
@ -46,15 +41,15 @@ type ContainerResolver struct {
type Resolver struct { type Resolver struct {
Name string Name string
resolve func(context.Context, string) (*cid.ID, error) resolve func(context.Context, string, string) (*cid.ID, error)
} }
func (r *Resolver) SetResolveFunc(fn func(context.Context, string) (*cid.ID, error)) { func (r *Resolver) SetResolveFunc(fn func(context.Context, string, string) (*cid.ID, error)) {
r.resolve = fn r.resolve = fn
} }
func (r *Resolver) Resolve(ctx context.Context, name string) (*cid.ID, error) { func (r *Resolver) Resolve(ctx context.Context, zone, name string) (*cid.ID, error) {
return r.resolve(ctx, name) return r.resolve(ctx, zone, name)
} }
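After this change the resolvers take the container zone explicitly instead of deriving it from a namespace stored in the context. A minimal sketch of the new call shape; the resolve function here is a dummy, not the DNS or NNS implementation, and the zone value is illustrative:

package main

import (
    "context"
    "fmt"

    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)

type Resolver struct {
    Name    string
    resolve func(context.Context, string, string) (*cid.ID, error)
}

func (r *Resolver) Resolve(ctx context.Context, zone, name string) (*cid.ID, error) {
    return r.resolve(ctx, zone, name)
}

func main() {
    r := &Resolver{
        Name: "dummy",
        resolve: func(_ context.Context, zone, name string) (*cid.ID, error) {
            fmt.Printf("resolving %q in zone %q\n", name, zone)
            return new(cid.ID), nil // a zero ID, just to show the call shape
        },
    }
    if _, err := r.Resolve(context.Background(), "container", "my-bucket"); err != nil {
        fmt.Println(err)
    }
}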
func NewContainerResolver(resolverNames []string, cfg *Config) (*ContainerResolver, error) { func NewContainerResolver(resolverNames []string, cfg *Config) (*ContainerResolver, error) {
@ -81,13 +76,13 @@ func createResolvers(resolverNames []string, cfg *Config) ([]*Resolver, error) {
return resolvers, nil return resolvers, nil
} }
func (r *ContainerResolver) Resolve(ctx context.Context, cnrName string) (*cid.ID, error) { func (r *ContainerResolver) Resolve(ctx context.Context, cnrZone, cnrName string) (*cid.ID, error) {
r.mu.RLock() r.mu.RLock()
defer r.mu.RUnlock() defer r.mu.RUnlock()
var err error var err error
for _, resolver := range r.resolvers { for _, resolver := range r.resolvers {
cnrID, resolverErr := resolver.Resolve(ctx, cnrName) cnrID, resolverErr := resolver.Resolve(ctx, cnrZone, cnrName)
if resolverErr != nil { if resolverErr != nil {
resolverErr = fmt.Errorf("%s: %w", resolver.Name, resolverErr) resolverErr = fmt.Errorf("%s: %w", resolver.Name, resolverErr)
if err == nil { if err == nil {
@ -141,34 +136,25 @@ func (r *ContainerResolver) equals(resolverNames []string) bool {
func newResolver(name string, cfg *Config) (*Resolver, error) { func newResolver(name string, cfg *Config) (*Resolver, error) {
switch name { switch name {
case DNSResolver: case DNSResolver:
return NewDNSResolver(cfg.FrostFS, cfg.Settings) return NewDNSResolver(cfg.FrostFS)
case NNSResolver: case NNSResolver:
return NewNNSResolver(cfg.RPCAddress, cfg.Settings) return NewNNSResolver(cfg.RPCAddress)
default: default:
return nil, fmt.Errorf("unknown resolver: %s", name) return nil, fmt.Errorf("unknown resolver: %s", name)
} }
} }
func NewDNSResolver(frostFS FrostFS, settings Settings) (*Resolver, error) { func NewDNSResolver(frostFS FrostFS) (*Resolver, error) {
if frostFS == nil { if frostFS == nil {
return nil, fmt.Errorf("pool must not be nil for DNS resolver") return nil, fmt.Errorf("pool must not be nil for DNS resolver")
} }
if settings == nil {
return nil, fmt.Errorf("resolver settings must not be nil for DNS resolver")
}
var dns ns.DNS var dns ns.DNS
resolveFunc := func(ctx context.Context, name string) (*cid.ID, error) { resolveFunc := func(ctx context.Context, zone, name string) (*cid.ID, error) {
var err error var err error
namespace, err := middleware.GetNamespace(ctx) if zone == v2container.SysAttributeZoneDefault {
if err != nil {
return nil, err
}
zone, isDefault := settings.FormContainerZone(namespace)
if isDefault {
zone, err = frostFS.SystemDNS(ctx) zone, err = frostFS.SystemDNS(ctx)
if err != nil { if err != nil {
return nil, fmt.Errorf("read system DNS parameter of the FrostFS: %w", err) return nil, fmt.Errorf("read system DNS parameter of the FrostFS: %w", err)
@ -190,13 +176,10 @@ func NewDNSResolver(frostFS FrostFS, settings Settings) (*Resolver, error) {
}, nil }, nil
} }
func NewNNSResolver(rpcAddress string, settings Settings) (*Resolver, error) { func NewNNSResolver(rpcAddress string) (*Resolver, error) {
if rpcAddress == "" { if rpcAddress == "" {
return nil, fmt.Errorf("rpc address must not be empty for NNS resolver") return nil, fmt.Errorf("rpc address must not be empty for NNS resolver")
} }
if settings == nil {
return nil, fmt.Errorf("resolver settings must not be nil for NNS resolver")
}
var nns ns.NNS var nns ns.NNS
@ -204,16 +187,9 @@ func NewNNSResolver(rpcAddress string, settings Settings) (*Resolver, error) {
return nil, fmt.Errorf("could not dial nns: %w", err) return nil, fmt.Errorf("could not dial nns: %w", err)
} }
resolveFunc := func(ctx context.Context, name string) (*cid.ID, error) { resolveFunc := func(_ context.Context, zone, name string) (*cid.ID, error) {
var d container.Domain var d container.Domain
d.SetName(name) d.SetName(name)
namespace, err := middleware.GetNamespace(ctx)
if err != nil {
return nil, err
}
zone, _ := settings.FormContainerZone(namespace)
d.SetZone(zone) d.SetZone(zone)
cnrID, err := nns.ResolveContainerDomain(d) cnrID, err := nns.ResolveContainerDomain(d)


@ -8,14 +8,18 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
) )
type ( type (
Tree struct { Tree struct {
service ServiceClient service ServiceClient
log *zap.Logger
} }
// ServiceClient is a client to interact with tree service. // ServiceClient is a client to interact with tree service.
@ -73,8 +77,8 @@ const (
) )
// NewTree creates instance of Tree using provided address and create grpc connection. // NewTree creates instance of Tree using provided address and create grpc connection.
func NewTree(service ServiceClient) *Tree { func NewTree(service ServiceClient, log *zap.Logger) *Tree {
return &Tree{service: service} return &Tree{service: service, log: log}
} }
type Meta interface { type Meta interface {
@ -257,6 +261,9 @@ func (c *Tree) getSystemNode(ctx context.Context, bktInfo *data.BucketInfo, name
if len(nodes) == 0 { if len(nodes) == 0 {
return nil, layer.ErrNodeNotFound return nil, layer.ErrNodeNotFound
} }
if len(nodes) != 1 {
c.reqLogger(ctx).Warn(logs.FoundSeveralSystemTreeNodes, zap.String("name", name), logs.TagField(logs.TagExternalStorageTree))
}
return newMultiNode(nodes) return newMultiNode(nodes)
} }
@ -296,7 +303,7 @@ func getLatestVersionNode(nodes []NodeResponse) (NodeResponse, error) {
} }
if targetIndexNode == -1 { if targetIndexNode == -1 {
return nil, layer.ErrNodeNotFound return nil, fmt.Errorf("latest version: %w", layer.ErrNodeNotFound)
} }
return nodes[targetIndexNode], nil return nodes[targetIndexNode], nil
@ -423,6 +430,10 @@ func (c *Tree) getPrefixNodeID(ctx context.Context, bktInfo *data.BucketInfo, tr
return intermediateNodes, nil return intermediateNodes, nil
} }
func (c *Tree) reqLogger(ctx context.Context) *zap.Logger {
return utils.GetReqLogOrDefault(ctx, c.log)
}
func GetFilename(node NodeResponse) string { func GetFilename(node NodeResponse) string {
for _, kv := range node.GetMeta() { for _, kv := range node.GetMeta() {
if kv.GetKey() == FileNameKey { if kv.GetKey() == FileNameKey {


@ -11,6 +11,8 @@ import (
"time" "time"
"unicode" "unicode"
"unicode/utf8" "unicode/utf8"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
) )
type EpochDurations struct { type EpochDurations struct {
@ -256,3 +258,12 @@ func (t systemTransformer) updateExpirationHeader(headers map[string]string, dur
headers[t.expirationEpochAttr()] = strconv.FormatUint(expirationEpoch, 10) headers[t.expirationEpochAttr()] = strconv.FormatUint(expirationEpoch, 10)
} }
func GetAttributeValue(attrs []object.Attribute, key string) string {
for _, attr := range attrs {
if attr.Key() == key {
return attr.Value()
}
}
return ""
}
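GetAttributeValue is a plain linear scan over object attributes; a typical call site looks like the following. The helper is mirrored locally so the snippet is self-contained, and the attribute values are illustrative.

package main

import (
    "fmt"

    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)

// getAttributeValue mirrors the helper added above.
func getAttributeValue(attrs []object.Attribute, key string) string {
    for _, attr := range attrs {
        if attr.Key() == key {
            return attr.Value()
        }
    }
    return ""
}

func main() {
    var fileName object.Attribute
    fileName.SetKey(object.AttributeFileName)
    fileName.SetValue("report.pdf")

    attrs := []object.Attribute{fileName}
    fmt.Println(getAttributeValue(attrs, object.AttributeFileName)) // "report.pdf"
    fmt.Println(getAttributeValue(attrs, object.AttributeFilePath)) // "" when the attribute is absent
}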