Compare commits


21 commits

Author SHA1 Message Date
96a22d98f2 [#232] Use contract to get container info
Signed-off-by: Nikita Zinkevich <n.zinkevich@yadro.com>
2025-04-28 16:58:35 +03:00
dbb1bcad00 [#233] Fix browsing
Simplify tree listing (we need only nodes at exactly the same parent level)

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2025-04-28 15:13:19 +03:00
e579549b41 [#233] Add fallback tests
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2025-04-23 13:03:38 +03:00
0b9b23e67c [#233] Make search by attribute as it is
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2025-04-23 13:03:34 +03:00
9cb9d14146 [#233] get/head: Middleware refactor
Add:
 * search index.html
 * fallback by leading slash
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2025-04-23 13:03:29 +03:00
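For orientation, a minimal sketch of the two fallbacks this commit describes (trying index.html for directory-style paths and retrying a lookup with the leading slash toggled); this is a hypothetical helper, not the gateway's actual middleware:

```go
package example

import "strings"

// candidatePaths returns lookup candidates for an object path: the path itself,
// an index.html variant for "directory" paths, and the same path with the
// leading slash added or removed. Hypothetical illustration only.
func candidatePaths(p string) []string {
	candidates := []string{p}
	if p == "" || strings.HasSuffix(p, "/") {
		candidates = append(candidates, p+"index.html")
	}
	if strings.HasPrefix(p, "/") {
		candidates = append(candidates, strings.TrimPrefix(p, "/"))
	} else {
		candidates = append(candidates, "/"+p)
	}
	return candidates
}
```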
ee628617a3 [#227] Don't use bearer token with CORS container
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2025-04-18 14:34:16 +03:00
b7b08d9d82 [#230] Refactor logger tag configuration
Signed-off-by: Pavel Pogodaev <p.pogodaev@yadro.com>
2025-04-17 14:37:02 +00:00
b9f1f455f8 [#229] Add ngfuzz installation to makefile
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2025-04-17 13:57:36 +00:00
304dbdd4c8 [#228] Update Go to 1.23
Signed-off-by: Nikita Zinkevich <n.zinkevich@yadro.com>
2025-04-16 16:50:42 +03:00
273459e090 [#225] Support wildcard in allowed origins and headers
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2025-04-07 16:57:07 +03:00
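A hedged sketch of single-wildcard origin matching of the kind this commit title suggests; a hypothetical helper, the gateway's actual matching may differ:

```go
package example

import "strings"

// originAllowed reports whether origin matches pattern, where pattern is either
// "*", an exact origin, or contains a single "*" wildcard (e.g. "https://*.example.com").
func originAllowed(pattern, origin string) bool {
	if pattern == "*" {
		return true
	}
	i := strings.Index(pattern, "*")
	if i < 0 {
		return pattern == origin
	}
	prefix, suffix := pattern[:i], pattern[i+1:]
	return len(origin) >= len(prefix)+len(suffix) &&
		strings.HasPrefix(origin, prefix) &&
		strings.HasSuffix(origin, suffix)
}
```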
cb72d11515 [#224] Refactor logger tag configuration
Signed-off-by: Pavel Pogodaev <p.pogodaev@yadro.com>
2025-04-01 11:43:51 +03:00
f0b86c8ba7 [#191] Update integration tests
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2025-03-25 06:27:56 +00:00
458bf933fc [#191] Refactor error handling and logging
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2025-03-25 06:27:56 +00:00
0f73da258b [#223] Bump frostfs-sdk-go
Contains:
* more detailed pool errors
* disabled service config query in gRPC client

Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2025-03-20 18:40:28 +03:00
d670983df4 [#208] govulncheck: Fix minor toolchain updates for good
Signed-off-by: Vitaliy Potyarkin <v.potyarkin@yadro.com>
2025-03-20 13:49:55 +00:00
9ef6b06e91 [#212] Support CORS container for CORS settings
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2025-03-10 18:12:36 +03:00
9cf2a4f0e0 [#197] Add a leading slash to the FilePath attribute
According to the FrostFS API specification,
the FilePath attribute must start with a
leading slash. More info:
https://git.frostfs.info/TrueCloudLab/frostfs-api

Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2025-02-25 14:14:20 +00:00
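As a small illustration of the rule cited above (a hypothetical helper, not this commit's code): the FilePath attribute value is normalized to carry a single leading slash.

```go
package example

import "strings"

// normalizeFilePath ensures the FilePath attribute starts with exactly one "/",
// as the FrostFS API specification requires. Hypothetical helper for illustration.
func normalizeFilePath(p string) string {
	return "/" + strings.TrimLeft(p, "/")
}
```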
cc6055bd27 [#211] Add IO tags
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2025-02-25 08:36:38 +00:00
a651b5823f [#219] Use zaptest.Logger
Use zaptest to get logs that are printed only if a test fails
or if you run go test -v.

Don't use zaptest.Logger for fuzz, otherwise ngfuzz/libfuzz crashes

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2025-02-21 16:11:49 +03:00
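For reference, the zaptest pattern this message describes, as a minimal sketch (hypothetical test, not from this repo): output is routed through the testing framework, so it shows up only for failed tests or under go test -v.

```go
package example

import (
	"testing"

	"go.uber.org/zap/zaptest"
)

func TestWithZaptestLogger(t *testing.T) {
	// Logs go through t.Log: hidden for passing tests unless -v is given.
	log := zaptest.NewLogger(t)
	log.Info("only visible on failure or with go test -v")
}
```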
f9c5dc5260 [#216] Rework http2 test to be tls test
Signed-off-by: Nikita Zinkevich <n.zinkevich@yadro.com>
2025-02-18 14:55:19 +03:00
8bfaa84124 [#216] Remove http2 forcing
fasthttp doesn't support HTTP/2,
which causes errors when we enable it

Signed-off-by: Nikita Zinkevich <n.zinkevich@yadro.com>
2025-02-18 14:55:19 +03:00
48 changed files with 3199 additions and 1131 deletions

View file

@ -1,4 +1,4 @@
FROM golang:1.22-alpine AS basebuilder
FROM golang:1.24-alpine AS basebuilder
RUN apk add --update make bash ca-certificates
FROM basebuilder AS builder

View file

@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
go_versions: [ '1.22', '1.23' ]
go_versions: [ '1.23', '1.24' ]
fail-fast: false
steps:
- uses: actions/checkout@v3

View file

@ -14,7 +14,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '1.23'
go-version: '1.24'
cache: true
- name: Install linters
@ -28,7 +28,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
go_versions: [ '1.22', '1.23' ]
go_versions: [ '1.23', '1.24' ]
fail-fast: false
steps:
- uses: actions/checkout@v3
@ -53,7 +53,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '1.23'
go-version: '1.24'
- name: Run integration tests
run: |-

View file

@ -16,7 +16,8 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: '1.22.12'
go-version: '1.23'
check-latest: true
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest

View file

@ -22,9 +22,6 @@ linters-settings:
# 'default' case is present, even if all enum members aren't listed in the
# switch
default-signifies-exhaustive: true
govet:
# report about shadowed variables
check-shadowing: false
custom:
truecloudlab-linters:
path: bin/external_linters.so

View file

@ -4,9 +4,12 @@ This document outlines major changes between releases.
## [Unreleased]
- Update Go to 1.23 (#228)
### Added
- Add handling quota limit reached error (#187)
- Add slash clipping for FileName attribute (#174)
- Add new format of tag names config
## [0.32.3] - 2025-02-05

View file

@ -2,9 +2,9 @@
REPO ?= $(shell go list -m)
VERSION ?= $(shell git describe --tags --match "v*" --dirty --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
GO_VERSION ?= 1.22
LINT_VERSION ?= 1.60.3
TRUECLOUDLAB_LINT_VERSION ?= 0.0.6
GO_VERSION ?= 1.23
LINT_VERSION ?= 1.64.8
TRUECLOUDLAB_LINT_VERSION ?= 0.0.10
BUILD ?= $(shell date -u --iso=seconds)
HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs-http-gw
@ -30,9 +30,10 @@ PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \
sed "s/-/~/")-${OS_RELEASE}
.PHONY: debpackage debclean
FUZZ_NGFUZZ_DIR ?= ""
FUZZING_DIR = $(shell pwd)/tests/fuzzing/files
NGFUZZ_REPO = https://gitflic.ru/project/yadro/ngfuzz.git
FUZZ_TIMEOUT ?= 30
FUZZ_FUNCTIONS ?= "all"
FUZZ_FUNCTIONS ?= ""
FUZZ_AUX ?= ""
# Make all binaries
@ -99,18 +100,22 @@ check-ngfuzz:
exit 1; \
fi
.PHONY: install-fuzzing-deps
install-fuzzing-deps: check-clang check-ngfuzz
.PHONY: install-ngfuzz
install-ngfuzz:
ifeq (,$(wildcard $(FUZZING_DIR)/ngfuzz))
@rm -rf $(FUZZING_DIR)/ngfuzz
@git clone $(NGFUZZ_REPO) $(FUZZING_DIR)/ngfuzz
@cd $(FUZZING_DIR)/ngfuzz && make
endif
.PHONY: fuzz
fuzz: install-fuzzing-deps
fuzz: check-clang install-ngfuzz
@START_PATH=$$(pwd); \
ROOT_PATH=$$(realpath --relative-to=$(FUZZ_NGFUZZ_DIR) $$START_PATH) ; \
cd $(FUZZ_NGFUZZ_DIR) && \
./ngfuzz -clean && \
./ngfuzz -fuzz $(FUZZ_FUNCTIONS) -rootdir $$ROOT_PATH -timeout $(FUZZ_TIMEOUT) $(FUZZ_AUX) && \
./ngfuzz -report
ROOT_PATH=$$(realpath --relative-to=$(FUZZING_DIR)/ngfuzz $$START_PATH) ; \
cd $(FUZZING_DIR)/ngfuzz && \
./bin/ngfuzz clean && \
env CGO_ENABLED=1 ./bin/ngfuzz fuzz --funcs $(FUZZ_FUNCTIONS) --rootdir $$ROOT_PATH --timeout $(FUZZ_TIMEOUT) $(FUZZ_AUX) && \
./bin/ngfuzz coverage --rootdir $$ROOT_PATH
# Reformat code
fmt:
@ -150,7 +155,7 @@ dirty-image:
@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
@rm -rf $(TMP_DIR)/linters
@rmdir $(TMP_DIR) 2>/dev/null || true
@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
# Run linters
lint:
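The reworked fuzz target above has ngfuzz build and drive the project's fuzz functions; for orientation, the standard shape of a native Go fuzz target that such harnesses usually build on looks like this (hypothetical name and body, not taken from this repo):

```go
package example

import (
	"strings"
	"testing"
)

// FuzzTrimPath is a native Go fuzz target, used here only as a shape reference.
func FuzzTrimPath(f *testing.F) {
	f.Add("/get/cid/oid")
	f.Fuzz(func(t *testing.T, path string) {
		trimmed := strings.TrimPrefix(path, "/")
		if len(trimmed) > len(path) {
			t.Fatalf("trimmed path %q longer than input %q", trimmed, path)
		}
	})
}
```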

View file

@ -17,10 +17,13 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
internalnet "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/net"
containerClient "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/service/contracts/container"
contractsUtil "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/service/contracts/util"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/service/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/templates"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/metrics"
@ -30,6 +33,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
v2container "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
@ -37,6 +41,7 @@ import (
"github.com/nspcc-dev/neo-go/cli/flags"
"github.com/nspcc-dev/neo-go/cli/input"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/panjf2000/ants/v2"
@ -65,6 +70,8 @@ type (
settings *appSettings
loggerSettings *loggerSettings
bucketCache *cache.BucketCache
handle *handler.Handler
corsCnrID cid.ID
servers []Server
unbindServers []ServerInfo
@ -96,41 +103,29 @@ type (
workerPoolSize int
logLevelConfig *logLevelConfig
mu sync.RWMutex
defaultTimestamp bool
archiveCompression bool
clientCut bool
returnIndexPage bool
indexPageTemplate string
bufferMaxSizeForPut uint64
namespaceHeader string
defaultNamespaces []string
corsAllowOrigin string
corsAllowMethods []string
corsAllowHeaders []string
corsExposeHeaders []string
corsAllowCredentials bool
corsMaxAge int
enableFilepathFallback bool
mu sync.RWMutex
defaultTimestamp bool
archiveCompression bool
clientCut bool
returnIndexPage bool
indexPageTemplate string
bufferMaxSizeForPut uint64
namespaceHeader string
defaultNamespaces []string
cors *data.CORSRule
enableFilepathFallback bool
enableFilepathSlashFallback bool
}
tagsConfig struct {
tagLogs sync.Map
tagLogs sync.Map
defaultLvl zap.AtomicLevel
}
logLevelConfig struct {
logLevel zap.AtomicLevel
tagsConfig *tagsConfig
}
CORS struct {
AllowOrigin string
AllowMethods []string
AllowHeaders []string
ExposeHeaders []string
AllowCredentials bool
MaxAge int
}
)
func newLogLevel(v *viper.Viper) zap.AtomicLevel {
@ -144,19 +139,34 @@ func newLogLevel(v *viper.Viper) zap.AtomicLevel {
}
func newTagsConfig(v *viper.Viper, ll zapcore.Level) *tagsConfig {
var t tagsConfig
t := tagsConfig{defaultLvl: zap.NewAtomicLevelAt(ll)}
if err := t.update(v, ll); err != nil {
// panic here is analogue of the similar panic during common log level initialization.
panic(err.Error())
}
return &t
}
func newLogLevelConfig(lvl zap.AtomicLevel, tagsConfig *tagsConfig) *logLevelConfig {
return &logLevelConfig{
cfg := &logLevelConfig{
logLevel: lvl,
tagsConfig: tagsConfig,
}
cfg.setMinLogLevel()
return cfg
}
func (l *logLevelConfig) setMinLogLevel() {
l.tagsConfig.tagLogs.Range(func(_, value any) bool {
v := value.(zapcore.Level)
if v < l.logLevel.Level() {
l.logLevel.SetLevel(v)
}
return true
})
}
func (l *logLevelConfig) update(cfg *viper.Viper, log *zap.Logger) {
@ -169,34 +179,34 @@ func (l *logLevelConfig) update(cfg *viper.Viper, log *zap.Logger) {
if err := l.tagsConfig.update(cfg, l.logLevel.Level()); err != nil {
log.Warn(logs.TagsLogConfigWontBeUpdated, zap.Error(err), logs.TagField(logs.TagApp))
}
l.setMinLogLevel()
}
func (t *tagsConfig) LevelEnabled(tag string, tgtLevel zapcore.Level) bool {
lvl, ok := t.tagLogs.Load(tag)
if !ok {
return false
return t.defaultLvl.Enabled(tgtLevel)
}
return lvl.(zapcore.Level).Enabled(tgtLevel)
}
func (t *tagsConfig) DefaultEnabled(lvl zapcore.Level) bool {
return t.defaultLvl.Enabled(lvl)
}
func (t *tagsConfig) update(cfg *viper.Viper, ll zapcore.Level) error {
tags, err := fetchLogTagsConfig(cfg, ll)
if err != nil {
return err
}
t.tagLogs.Range(func(key, value any) bool {
t.tagLogs.Range(func(key, _ any) bool {
k := key.(string)
v := value.(zapcore.Level)
if lvl, ok := tags[k]; ok {
if lvl != v {
t.tagLogs.Store(key, lvl)
}
} else {
if _, ok := tags[k]; !ok {
t.tagLogs.Delete(key)
delete(tags, k)
}
return true
})
@ -204,6 +214,7 @@ func (t *tagsConfig) update(cfg *viper.Viper, ll zapcore.Level) error {
for k, v := range tags {
t.tagLogs.Store(k, v)
}
t.defaultLvl.SetLevel(ll)
return nil
}
@ -251,6 +262,7 @@ func newApp(ctx context.Context, cfg *appCfg) App {
a.initResolver()
a.initMetrics()
a.initTracing(ctx)
a.initContainers(ctx)
return a
}
@ -259,6 +271,22 @@ func (a *app) config() *viper.Viper {
return a.cfg.config()
}
func (a *app) initContainers(ctx context.Context) {
corsCnrID, err := a.fetchContainerID(ctx, cfgContainersCORS)
if err != nil {
a.log.Fatal(logs.CouldNotFetchCORSContainerInfo, zap.Error(err), logs.TagField(logs.TagApp))
}
a.corsCnrID = *corsCnrID
}
func (a *app) initRPCClient(ctx context.Context) *rpcclient.Client {
rpcCli, err := rpcclient.New(ctx, a.config().GetString(cfgRPCEndpoint), rpcclient.Options{})
if err != nil {
a.log.Fatal(logs.InitRPCClientFailed, zap.Error(err), logs.TagField(logs.TagApp))
}
return rpcCli
}
func (a *app) initAppSettings(lc *logLevelConfig) {
a.settings = &appSettings{
reconnectInterval: fetchReconnectInterval(a.config()),
@ -278,13 +306,9 @@ func (s *appSettings) update(v *viper.Viper, l *zap.Logger) {
namespaceHeader := v.GetString(cfgResolveNamespaceHeader)
defaultNamespaces := fetchDefaultNamespaces(v)
indexPage, indexEnabled := fetchIndexPageTemplate(v, l)
corsAllowOrigin := v.GetString(cfgCORSAllowOrigin)
corsAllowMethods := v.GetStringSlice(cfgCORSAllowMethods)
corsAllowHeaders := v.GetStringSlice(cfgCORSAllowHeaders)
corsExposeHeaders := v.GetStringSlice(cfgCORSExposeHeaders)
corsAllowCredentials := v.GetBool(cfgCORSAllowCredentials)
corsMaxAge := fetchCORSMaxAge(v)
cors := fetchCORSConfig(v)
enableFilepathFallback := v.GetBool(cfgFeaturesEnableFilepathFallback)
enableFilepathSlashFallback := v.GetBool(cfgFeaturesEnableFilepathSlashFallback)
s.mu.Lock()
defer s.mu.Unlock()
@ -298,13 +322,9 @@ func (s *appSettings) update(v *viper.Viper, l *zap.Logger) {
s.defaultNamespaces = defaultNamespaces
s.returnIndexPage = indexEnabled
s.indexPageTemplate = indexPage
s.corsAllowOrigin = corsAllowOrigin
s.corsAllowMethods = corsAllowMethods
s.corsAllowHeaders = corsAllowHeaders
s.corsExposeHeaders = corsExposeHeaders
s.corsAllowCredentials = corsAllowCredentials
s.corsMaxAge = corsMaxAge
s.cors = cors
s.enableFilepathFallback = enableFilepathFallback
s.enableFilepathSlashFallback = enableFilepathSlashFallback
}
func (s *loggerSettings) DroppedLogsInc() {
@ -350,26 +370,33 @@ func (s *appSettings) IndexPageTemplate() string {
return s.indexPageTemplate
}
func (s *appSettings) CORS() CORS {
func (s *appSettings) CORS() *data.CORSRule {
s.mu.RLock()
defer s.mu.RUnlock()
allowMethods := make([]string, len(s.corsAllowMethods))
copy(allowMethods, s.corsAllowMethods)
if s.cors == nil {
return nil
}
allowHeaders := make([]string, len(s.corsAllowHeaders))
copy(allowHeaders, s.corsAllowHeaders)
allowMethods := make([]string, len(s.cors.AllowedMethods))
copy(allowMethods, s.cors.AllowedMethods)
exposeHeaders := make([]string, len(s.corsExposeHeaders))
copy(exposeHeaders, s.corsExposeHeaders)
allowHeaders := make([]string, len(s.cors.AllowedHeaders))
copy(allowHeaders, s.cors.AllowedHeaders)
return CORS{
AllowOrigin: s.corsAllowOrigin,
AllowMethods: allowMethods,
AllowHeaders: allowHeaders,
ExposeHeaders: exposeHeaders,
AllowCredentials: s.corsAllowCredentials,
MaxAge: s.corsMaxAge,
exposeHeaders := make([]string, len(s.cors.ExposeHeaders))
copy(exposeHeaders, s.cors.ExposeHeaders)
allowOrigins := make([]string, len(s.cors.AllowedOrigins))
copy(allowOrigins, s.cors.AllowedOrigins)
return &data.CORSRule{
AllowedOrigins: allowOrigins,
AllowedMethods: allowMethods,
AllowedHeaders: allowHeaders,
ExposeHeaders: exposeHeaders,
AllowedCredentials: s.cors.AllowedCredentials,
MaxAgeSeconds: s.cors.MaxAgeSeconds,
}
}
@ -391,15 +418,15 @@ func (s *appSettings) NamespaceHeader() string {
return s.namespaceHeader
}
func (s *appSettings) FormContainerZone(ns string) (zone string, isDefault bool) {
func (s *appSettings) FormContainerZone(ns string) string {
s.mu.RLock()
namespaces := s.defaultNamespaces
s.mu.RUnlock()
if slices.Contains(namespaces, ns) {
return v2container.SysAttributeZoneDefault, true
return v2container.SysAttributeZoneDefault
}
return ns + ".ns", false
return ns + ".ns"
}
func (s *appSettings) EnableFilepathFallback() bool {
@ -408,6 +435,12 @@ func (s *appSettings) EnableFilepathFallback() bool {
return s.enableFilepathFallback
}
func (s *appSettings) EnableFilepathSlashFallback() bool {
s.mu.RLock()
defer s.mu.RUnlock()
return s.enableFilepathSlashFallback
}
func (a *app) initResolver() {
var err error
a.resolver, err = resolver.NewContainerResolver(a.getResolverConfig())
@ -420,7 +453,6 @@ func (a *app) getResolverConfig() ([]string, *resolver.Config) {
resolveCfg := &resolver.Config{
FrostFS: frostfs.NewResolverFrostFS(a.pool),
RPCAddress: a.config().GetString(cfgRPCEndpoint),
Settings: a.settings,
}
order := a.config().GetStringSlice(cfgResolveOrder)
@ -606,10 +638,8 @@ func (a *app) Serve() {
close(a.webDone)
}()
handle := handler.New(a.AppParams(), a.settings, tree.NewTree(frostfs.NewPoolWrapper(a.treePool)), workerPool)
// Configure router.
a.configureRouter(handle)
a.configureRouter(workerPool)
a.startServices()
a.initServers(a.ctx)
@ -679,7 +709,7 @@ func (a *app) configReload(ctx context.Context) {
return
}
a.settings.logLevelConfig.update(a.cfg.settings, a.log)
a.settings.logLevelConfig.update(a.cfg.config(), a.log)
if err := a.settings.dialerSource.Update(fetchMultinetConfig(a.config(), a.log)); err != nil {
a.log.Warn(logs.MultinetConfigWontBeUpdated, zap.Error(err), logs.TagField(logs.TagApp))
@ -730,31 +760,48 @@ func (a *app) stopServices() {
}
}
func (a *app) configureRouter(h *handler.Handler) {
func (a *app) configureRouter(workerPool *ants.Pool) {
rpcCli := a.initRPCClient(a.ctx)
cnrContractName := a.config().GetString(cfgContractsContainerName)
rpcEndpoint := a.config().GetString(cfgRPCEndpoint)
cnrAddr, err := contractsUtil.ResolveContractHash(cnrContractName, rpcEndpoint)
if err != nil {
a.log.Fatal(logs.FailedToResolveContractHash, zap.Error(err), logs.TagField(logs.TagApp))
}
cnrClient, err := containerClient.New(containerClient.Config{
ContractHash: cnrAddr,
Key: a.key,
RPCClient: rpcCli,
})
if err != nil {
a.log.Fatal(logs.InitContainerContractFailed, zap.Error(err), logs.TagField(logs.TagApp))
}
a.handle = handler.New(a.AppParams(), a.settings, tree.NewTree(frostfs.NewPoolWrapper(a.treePool), a.log), cnrClient, workerPool)
r := router.New()
r.RedirectTrailingSlash = true
r.NotFound = func(r *fasthttp.RequestCtx) {
handler.ResponseError(r, "Not found", fasthttp.StatusNotFound)
handler.ResponseError(r, "Route Not found", fasthttp.StatusNotFound)
}
r.MethodNotAllowed = func(r *fasthttp.RequestCtx) {
handler.ResponseError(r, "Method Not Allowed", fasthttp.StatusMethodNotAllowed)
}
r.POST("/upload/{cid}", a.addMiddlewares(h.Upload))
r.OPTIONS("/upload/{cid}", a.addPreflight())
r.POST("/upload/{cid}", a.addMiddlewares(a.handle.Upload))
r.OPTIONS("/upload/{cid}", a.addPreflight(a.handle.Preflight))
a.log.Info(logs.AddedPathUploadCid, logs.TagField(logs.TagApp))
r.GET("/get/{cid}/{oid:*}", a.addMiddlewares(h.DownloadByAddressOrBucketName))
r.HEAD("/get/{cid}/{oid:*}", a.addMiddlewares(h.HeadByAddressOrBucketName))
r.OPTIONS("/get/{cid}/{oid:*}", a.addPreflight())
r.GET("/get/{cid}/{oid:*}", a.addMiddlewares(a.handle.DownloadByAddressOrBucketName))
r.HEAD("/get/{cid}/{oid:*}", a.addMiddlewares(a.handle.HeadByAddressOrBucketName))
r.OPTIONS("/get/{cid}/{oid:*}", a.addPreflight(a.handle.Preflight))
a.log.Info(logs.AddedPathGetCidOid, logs.TagField(logs.TagApp))
r.GET("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(h.DownloadByAttribute))
r.HEAD("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(h.HeadByAttribute))
r.OPTIONS("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addPreflight())
r.GET("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(a.handle.DownloadByAttribute))
r.HEAD("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(a.handle.HeadByAttribute))
r.OPTIONS("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addPreflight(a.handle.Preflight))
a.log.Info(logs.AddedPathGetByAttributeCidAttrKeyAttrVal, logs.TagField(logs.TagApp))
r.GET("/zip/{cid}/{prefix:*}", a.addMiddlewares(h.DownloadZip))
r.OPTIONS("/zip/{cid}/{prefix:*}", a.addPreflight())
r.GET("/tar/{cid}/{prefix:*}", a.addMiddlewares(h.DownloadTar))
r.OPTIONS("/tar/{cid}/{prefix:*}", a.addPreflight())
r.GET("/zip/{cid}/{prefix:*}", a.addMiddlewares(a.handle.DownloadZip))
r.OPTIONS("/zip/{cid}/{prefix:*}", a.addPreflight(a.handle.Preflight))
r.GET("/tar/{cid}/{prefix:*}", a.addMiddlewares(a.handle.DownloadTar))
r.OPTIONS("/tar/{cid}/{prefix:*}", a.addPreflight(a.handle.Preflight))
a.log.Info(logs.AddedPathZipCidPrefix, logs.TagField(logs.TagApp))
a.webServer.Handler = r.Handler
@ -777,14 +824,14 @@ func (a *app) addMiddlewares(h fasthttp.RequestHandler) fasthttp.RequestHandler
return h
}
func (a *app) addPreflight() fasthttp.RequestHandler {
func (a *app) addPreflight(h fasthttp.RequestHandler) fasthttp.RequestHandler {
list := []func(fasthttp.RequestHandler) fasthttp.RequestHandler{
a.tracer,
a.logger,
a.canonicalizer,
a.reqNamespace,
}
h := a.preflightHandler
for i := len(list) - 1; i >= 0; i-- {
h = list[i](h)
}
@ -792,46 +839,16 @@ func (a *app) addPreflight() fasthttp.RequestHandler {
return h
}
func (a *app) preflightHandler(c *fasthttp.RequestCtx) {
cors := a.settings.CORS()
setCORSHeaders(c, cors)
}
func (a *app) cors(h fasthttp.RequestHandler) fasthttp.RequestHandler {
return func(c *fasthttp.RequestCtx) {
h(c)
code := c.Response.StatusCode()
if code >= fasthttp.StatusOK && code < fasthttp.StatusMultipleChoices {
cors := a.settings.CORS()
setCORSHeaders(c, cors)
a.handle.SetCORSHeaders(c)
}
}
}
func setCORSHeaders(c *fasthttp.RequestCtx, cors CORS) {
c.Response.Header.Set(fasthttp.HeaderAccessControlMaxAge, strconv.Itoa(cors.MaxAge))
if len(cors.AllowOrigin) != 0 {
c.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, cors.AllowOrigin)
}
if len(cors.AllowMethods) != 0 {
c.Response.Header.Set(fasthttp.HeaderAccessControlAllowMethods, strings.Join(cors.AllowMethods, ","))
}
if len(cors.AllowHeaders) != 0 {
c.Response.Header.Set(fasthttp.HeaderAccessControlAllowHeaders, strings.Join(cors.AllowHeaders, ","))
}
if len(cors.ExposeHeaders) != 0 {
c.Response.Header.Set(fasthttp.HeaderAccessControlExposeHeaders, strings.Join(cors.ExposeHeaders, ","))
}
if cors.AllowCredentials {
c.Response.Header.Set(fasthttp.HeaderAccessControlAllowCredentials, "true")
}
}
func (a *app) logger(h fasthttp.RequestHandler) fasthttp.RequestHandler {
return func(req *fasthttp.RequestCtx) {
requiredFields := []zap.Field{zap.Uint64("id", req.ID())}
@ -930,11 +947,13 @@ func (a *app) reqNamespace(h fasthttp.RequestHandler) fasthttp.RequestHandler {
func (a *app) AppParams() *handler.AppParams {
return &handler.AppParams{
Logger: a.log,
FrostFS: frostfs.NewFrostFS(a.pool),
Owner: a.owner,
Resolver: a.resolver,
Cache: a.bucketCache,
Logger: a.log,
FrostFS: frostfs.NewFrostFS(a.pool),
Owner: a.owner,
Resolver: a.resolver,
Cache: a.bucketCache,
CORSCnrID: a.corsCnrID,
CORSCache: cache.NewCORSCache(getCORSCacheOptions(a.config(), a.log)),
}
}
@ -1135,3 +1154,44 @@ func (a *app) tryReconnect(ctx context.Context, sr *fasthttp.Server) bool {
return len(a.unbindServers) == 0
}
func (a *app) fetchContainerID(ctx context.Context, cfgKey string) (id *cid.ID, err error) {
cnrID, err := a.resolveContainerID(ctx, cfgKey)
if err != nil {
return nil, err
}
err = checkContainerExists(ctx, *cnrID, a.pool)
if err != nil {
return nil, err
}
return cnrID, nil
}
func (a *app) resolveContainerID(ctx context.Context, cfgKey string) (*cid.ID, error) {
containerString := a.config().GetString(cfgKey)
id := new(cid.ID)
if err := id.DecodeString(containerString); err != nil {
i := strings.Index(containerString, ".")
if i < 0 {
return nil, fmt.Errorf("invalid container address: %s", containerString)
}
if id, err = a.resolver.Resolve(ctx, containerString[i+1:], containerString[:i]); err != nil {
return nil, fmt.Errorf("resolve container address %s: %w", containerString, err)
}
}
return id, nil
}
func checkContainerExists(ctx context.Context, id cid.ID, frostFSPool *pool.Pool) error {
prm := pool.PrmContainerGet{
ContainerID: id,
}
_, err := frostFSPool.GetContainer(ctx, prm)
return err
}
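For readers skimming resolveContainerID above: the containers.cors value may be either a raw container ID or a name.zone pair handed to the name resolver. A tiny hypothetical sketch of the split it performs:

```go
package example

import "strings"

// splitContainerAddress mirrors the parsing in resolveContainerID: everything
// before the first "." is treated as the container name, the remainder as the
// zone (e.g. "cors.container" -> "cors", "container"). Illustration only.
func splitContainerAddress(s string) (name, zone string, ok bool) {
	i := strings.Index(s, ".")
	if i < 0 {
		return "", "", false
	}
	return s[:i], s[i+1:], true
}
```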

View file

@ -20,9 +20,11 @@ import (
containerv2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@ -43,9 +45,10 @@ type putResponse struct {
}
const (
testContainerName = "friendly"
testListenAddress = "localhost:8082"
testHost = "http://" + testListenAddress
testContainerName = "friendly"
testListenAddress = "localhost:8082"
testHost = "http://" + testListenAddress
testCORSContainerName = "cors"
)
func TestIntegration(t *testing.T) {
@ -76,10 +79,14 @@ func TestIntegration(t *testing.T) {
registerUser(t, ctx, aioContainer, file.Name())
}
// Creating CORS container
clientPool := getPool(ctx, t, key)
_, err = createContainer(ctx, t, clientPool, ownerID, testCORSContainerName)
require.NoError(t, err, version)
// See the logs from the command execution.
server, cancel := runServer(file.Name())
clientPool := getPool(ctx, t, key)
CID, err := createContainer(ctx, t, clientPool, ownerID)
CID, err := createContainer(ctx, t, clientPool, ownerID, testContainerName)
require.NoError(t, err, version)
jsonToken, binaryToken := makeBearerTokens(t, key, ownerID, version)
@ -94,6 +101,7 @@ func TestIntegration(t *testing.T) {
t.Run("get by attribute "+version, func(t *testing.T) { getByAttr(ctx, t, clientPool, ownerID, CID) })
t.Run("get zip "+version, func(t *testing.T) { getZip(ctx, t, clientPool, ownerID, CID) })
t.Run("test namespaces "+version, func(t *testing.T) { checkNamespaces(ctx, t, clientPool, ownerID, CID) })
t.Run("test status codes "+version, func(t *testing.T) { checkStatusCodes(ctx, t, clientPool, ownerID, version) })
cancel()
server.Wait()
@ -110,6 +118,8 @@ func runServer(pathToWallet string) (App, context.CancelFunc) {
v.config().Set(cfgWalletPath, pathToWallet)
v.config().Set(cfgWalletPassphrase, "")
v.config().Set(cfgContainersCORS, testCORSContainerName+"."+containerv2.SysAttributeZoneDefault)
application := newApp(cancelCtx, v)
go application.Serve()
@ -260,7 +270,7 @@ func putWithDuplicateKeys(t *testing.T, CID cid.ID) {
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
require.Equal(t, "key duplication error: "+attr+"\n", string(body))
require.Contains(t, string(body), "key duplication error: "+attr+"\n")
require.Equal(t, http.StatusBadRequest, resp.StatusCode)
}
@ -429,7 +439,80 @@ func checkNamespaces(ctx context.Context, t *testing.T, clientPool *pool.Pool, o
resp, err = http.DefaultClient.Do(req)
require.NoError(t, err)
require.Equal(t, http.StatusNotFound, resp.StatusCode)
}
func checkStatusCodes(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, version string) {
cli := http.Client{Timeout: 30 * time.Second}
t.Run("container not found by name", func(t *testing.T) {
resp, err := cli.Get(testHost + "/get/unknown/object")
require.NoError(t, err)
require.Equal(t, http.StatusNotFound, resp.StatusCode)
requireBodyContains(t, resp, "container not found")
})
t.Run("container not found by cid", func(t *testing.T) {
cnrIDTest := cidtest.ID()
resp, err := cli.Get(testHost + "/get/" + cnrIDTest.EncodeToString() + "/object")
require.NoError(t, err)
requireBodyContains(t, resp, "container not found")
require.Equal(t, http.StatusNotFound, resp.StatusCode)
})
t.Run("object not found in storage", func(t *testing.T) {
resp, err := cli.Get(testHost + "/get_by_attribute/" + testContainerName + "/FilePath/object2")
require.NoError(t, err)
requireBodyContains(t, resp, "object not found")
require.Equal(t, http.StatusNotFound, resp.StatusCode)
})
t.Run("access denied", func(t *testing.T) {
basicACL := acl.Private
var recs []*eacl.Record
if version == "1.2.7" {
basicACL = acl.PublicRWExtended
rec := eacl.NewRecord()
rec.SetAction(eacl.ActionDeny)
rec.SetOperation(eacl.OperationGet)
recs = append(recs, rec)
}
cnrID, err := createContainerBase(ctx, t, clientPool, ownerID, basicACL, "")
require.NoError(t, err)
key, err := keys.NewPrivateKey()
require.NoError(t, err)
jsonToken, _ := makeBearerTokens(t, key, ownerID, version, recs...)
t.Run("get", func(t *testing.T) {
request, err := http.NewRequest(http.MethodGet, testHost+"/get/"+cnrID.EncodeToString()+"/object", nil)
require.NoError(t, err)
request.Header.Set("Authorization", "Bearer "+jsonToken)
resp, err := cli.Do(request)
require.NoError(t, err)
requireBodyContains(t, resp, "access denied")
require.Equal(t, http.StatusForbidden, resp.StatusCode)
})
t.Run("upload", func(t *testing.T) {
request, _, _ := makePutRequest(t, testHost+"/upload/"+cnrID.EncodeToString())
request.Header.Set("Authorization", "Bearer "+jsonToken)
resp, err := cli.Do(request)
require.NoError(t, err)
requireBodyContains(t, resp, "access denied")
require.Equal(t, http.StatusForbidden, resp.StatusCode)
})
})
}
func requireBodyContains(t *testing.T, resp *http.Response, msg string) {
data, err := io.ReadAll(resp.Body)
require.NoError(t, err)
defer resp.Body.Close()
require.Contains(t, strings.ToLower(string(data)), strings.ToLower(msg))
}
func createDockerContainer(ctx context.Context, t *testing.T, image string) testcontainers.Container {
@ -477,7 +560,11 @@ func getPool(ctx context.Context, t *testing.T, key *keys.PrivateKey) *pool.Pool
return clientPool
}
func createContainer(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID) (cid.ID, error) {
func createContainer(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, name string) (cid.ID, error) {
return createContainerBase(ctx, t, clientPool, ownerID, acl.PublicRWExtended, name)
}
func createContainerBase(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, basicACL acl.Basic, name string) (cid.ID, error) {
var policy netmap.PlacementPolicy
err := policy.DecodeString("REP 1")
require.NoError(t, err)
@ -485,24 +572,28 @@ func createContainer(ctx context.Context, t *testing.T, clientPool *pool.Pool, o
var cnr container.Container
cnr.Init()
cnr.SetPlacementPolicy(policy)
cnr.SetBasicACL(acl.PublicRWExtended)
cnr.SetBasicACL(basicACL)
cnr.SetOwner(ownerID)
container.SetCreationTime(&cnr, time.Now())
var domain container.Domain
domain.SetName(testContainerName)
if name != "" {
var domain container.Domain
domain.SetName(name)
cnr.SetAttribute(containerv2.SysAttributeName, domain.Name())
cnr.SetAttribute(containerv2.SysAttributeZone, domain.Zone())
cnr.SetAttribute(containerv2.SysAttributeName, domain.Name())
cnr.SetAttribute(containerv2.SysAttributeZone, domain.Zone())
}
var waitPrm pool.WaitParams
waitPrm.SetTimeout(15 * time.Second)
waitPrm.SetPollInterval(3 * time.Second)
var prm pool.PrmContainerPut
prm.SetContainer(cnr)
prm.SetWaitParams(waitPrm)
prm := pool.PrmContainerPut{
ClientParams: client.PrmContainerPut{
Container: &cnr,
},
WaitParams: &pool.WaitParams{
Timeout: 15 * time.Second,
PollInterval: 3 * time.Second,
},
}
CID, err := clientPool.PutContainer(ctx, prm)
if err != nil {
@ -549,13 +640,18 @@ func registerUser(t *testing.T, ctx context.Context, aioContainer testcontainers
require.NoError(t, err)
}
func makeBearerTokens(t *testing.T, key *keys.PrivateKey, ownerID user.ID, version string) (jsonTokenBase64, binaryTokenBase64 string) {
func makeBearerTokens(t *testing.T, key *keys.PrivateKey, ownerID user.ID, version string, records ...*eacl.Record) (jsonTokenBase64, binaryTokenBase64 string) {
tkn := new(bearer.Token)
tkn.ForUser(ownerID)
tkn.SetExp(10000)
if version == "1.2.7" {
tkn.SetEACLTable(*eacl.NewTable())
table := eacl.NewTable()
for i := range records {
table.AddRecord(records[i])
}
tkn.SetEACLTable(*table)
} else {
tkn.SetImpersonate(true)
}

View file

@ -41,6 +41,7 @@ type zapCoreTagFilterWrapper struct {
type TagFilterSettings interface {
LevelEnabled(tag string, lvl zapcore.Level) bool
DefaultEnabled(lvl zapcore.Level) bool
}
func (c *zapCoreTagFilterWrapper) Enabled(level zapcore.Level) bool {
@ -63,24 +64,26 @@ func (c *zapCoreTagFilterWrapper) Check(entry zapcore.Entry, checked *zapcore.Ch
}
func (c *zapCoreTagFilterWrapper) Write(entry zapcore.Entry, fields []zapcore.Field) error {
if c.shouldSkip(entry, fields) || c.shouldSkip(entry, c.extra) {
if c.shouldSkip(entry, fields, c.extra) {
return nil
}
return c.core.Write(entry, fields)
}
func (c *zapCoreTagFilterWrapper) shouldSkip(entry zapcore.Entry, fields []zap.Field) bool {
func (c *zapCoreTagFilterWrapper) shouldSkip(entry zapcore.Entry, fields []zap.Field, extra []zap.Field) bool {
for _, field := range fields {
if field.Key == logs.TagFieldName && field.Type == zapcore.StringType {
if !c.settings.LevelEnabled(field.String, entry.Level) {
return true
}
break
return !c.settings.LevelEnabled(field.String, entry.Level)
}
}
for _, field := range extra {
if field.Key == logs.TagFieldName && field.Type == zapcore.StringType {
return !c.settings.LevelEnabled(field.String, entry.Level)
}
}
return false
return !c.settings.DefaultEnabled(entry.Level)
}
func (c *zapCoreTagFilterWrapper) Sync() error {
@ -127,14 +130,13 @@ func newLogEncoder() zapcore.Encoder {
//
// See also zapcore.Level, zap.NewProductionConfig, zap.AddStacktrace.
func newStdoutLogger(v *viper.Viper, lvl zap.AtomicLevel, loggerSettings LoggerAppSettings, tagSetting TagFilterSettings) *Logger {
stdout := zapcore.AddSync(os.Stderr)
stdout := zapcore.AddSync(os.Stdout)
consoleOutCore := zapcore.NewCore(newLogEncoder(), stdout, lvl)
consoleOutCore = applyZapCoreMiddlewares(consoleOutCore, v, loggerSettings, tagSetting)
return &Logger{
logger: zap.New(consoleOutCore, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))),
lvl: lvl,
}
}
@ -152,7 +154,6 @@ func newJournaldLogger(v *viper.Viper, lvl zap.AtomicLevel, loggerSettings Logge
return &Logger{
logger: zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))),
lvl: lvl,
}
}

View file

@ -74,7 +74,6 @@ func newServer(ctx context.Context, serverInfo ServerInfo) (*server, error) {
ln = tls.NewListener(ln, &tls.Config{
GetCertificate: tlsProvider.GetCertificate,
NextProtos: []string{"h2"}, // required to enable HTTP/2 requests in `http.Serve`
})
}

View file

@ -18,7 +18,7 @@ import (
"time"
"github.com/stretchr/testify/require"
"golang.org/x/net/http2"
"github.com/valyala/fasthttp"
)
const (
@ -26,14 +26,10 @@ const (
expHeaderValue = "Bar"
)
func TestHTTP2TLS(t *testing.T) {
func TestHTTP_TLS(t *testing.T) {
ctx := context.Background()
certPath, keyPath := prepareTestCerts(t)
srv := &http.Server{
Handler: http.HandlerFunc(testHandler),
}
tlsListener, err := newServer(ctx, ServerInfo{
Address: ":0",
TLS: ServerTLSInfo{
@ -47,37 +43,34 @@ func TestHTTP2TLS(t *testing.T) {
addr := fmt.Sprintf("https://localhost:%d", port)
go func() {
_ = srv.Serve(tlsListener.Listener())
_ = fasthttp.Serve(tlsListener.Listener(), testHandler)
}()
// Server is running, now send HTTP/2 request
tlsClientConfig := &tls.Config{
InsecureSkipVerify: true,
}
cliHTTP1 := http.Client{Transport: &http.Transport{TLSClientConfig: tlsClientConfig}}
cliHTTP2 := http.Client{Transport: &http2.Transport{TLSClientConfig: tlsClientConfig}}
cliHTTP := http.Client{Transport: &http.Transport{}}
cliHTTPS := http.Client{Transport: &http.Transport{TLSClientConfig: tlsClientConfig}}
req, err := http.NewRequest("GET", addr, nil)
require.NoError(t, err)
req.Header[expHeaderKey] = []string{expHeaderValue}
resp, err := cliHTTP1.Do(req)
resp, err := cliHTTPS.Do(req)
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp.StatusCode)
resp, err = cliHTTP2.Do(req)
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp.StatusCode)
_, err = cliHTTP.Do(req)
require.ErrorContains(t, err, "failed to verify certificate")
}
func testHandler(resp http.ResponseWriter, req *http.Request) {
hdr, ok := req.Header[expHeaderKey]
if !ok || len(hdr) != 1 || hdr[0] != expHeaderValue {
resp.WriteHeader(http.StatusBadRequest)
func testHandler(ctx *fasthttp.RequestCtx) {
hdr := ctx.Request.Header.Peek(expHeaderKey)
if len(hdr) == 0 || string(hdr) != expHeaderValue {
ctx.Response.SetStatusCode(http.StatusBadRequest)
} else {
resp.WriteHeader(http.StatusOK)
ctx.Response.SetStatusCode(http.StatusOK)
}
}

View file

@ -16,11 +16,13 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
internalnet "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/net"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/service/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
grpctracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
qostagging "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
"github.com/spf13/pflag"
@ -60,6 +62,8 @@ const (
defaultMultinetFallbackDelay = 300 * time.Millisecond
defaultContainerContractName = "container.frostfs"
cfgServer = "server"
cfgTLSEnabled = "tls.enabled"
cfgTLSCertFile = "tls.cert_file"
@ -111,7 +115,7 @@ const (
cfgLoggerTags = "logger.tags"
cfgLoggerTagsPrefixTmpl = cfgLoggerTags + ".%d."
cfgLoggerTagsNameTmpl = cfgLoggerTagsPrefixTmpl + "name"
cfgLoggerTagsNameTmpl = cfgLoggerTagsPrefixTmpl + "names"
cfgLoggerTagsLevelTmpl = cfgLoggerTagsPrefixTmpl + "level"
// Wallet.
@ -154,18 +158,21 @@ const (
cfgBucketsCacheLifetime = "cache.buckets.lifetime"
cfgBucketsCacheSize = "cache.buckets.size"
cfgNetmapCacheLifetime = "cache.netmap.lifetime"
cfgCORSCacheLifetime = "cache.cors.lifetime"
cfgCORSCacheSize = "cache.cors.size"
// Bucket resolving options.
cfgResolveNamespaceHeader = "resolve_bucket.namespace_header"
cfgResolveDefaultNamespaces = "resolve_bucket.default_namespaces"
// CORS.
cfgCORSAllowOrigin = "cors.allow_origin"
cfgCORSAllowMethods = "cors.allow_methods"
cfgCORSAllowHeaders = "cors.allow_headers"
cfgCORSExposeHeaders = "cors.expose_headers"
cfgCORSAllowCredentials = "cors.allow_credentials"
cfgCORSMaxAge = "cors.max_age"
cfgCORS = "cors"
cfgCORSAllowOrigin = cfgCORS + ".allow_origin"
cfgCORSAllowMethods = cfgCORS + ".allow_methods"
cfgCORSAllowHeaders = cfgCORS + ".allow_headers"
cfgCORSExposeHeaders = cfgCORS + ".expose_headers"
cfgCORSAllowCredentials = cfgCORS + ".allow_credentials"
cfgCORSMaxAge = cfgCORS + ".max_age"
// Multinet.
cfgMultinetEnabled = "multinet.enabled"
@ -175,8 +182,12 @@ const (
cfgMultinetSubnets = "multinet.subnets"
// Feature.
cfgFeaturesEnableFilepathFallback = "features.enable_filepath_fallback"
cfgFeaturesTreePoolNetmapSupport = "features.tree_pool_netmap_support"
cfgFeaturesEnableFilepathFallback = "features.enable_filepath_fallback"
cfgFeaturesEnableFilepathSlashFallback = "features.enable_filepath_slash_fallback"
cfgFeaturesTreePoolNetmapSupport = "features.tree_pool_netmap_support"
// Containers.
cfgContainersCORS = "containers.cors"
// Command line args.
cmdHelp = "help"
@ -188,6 +199,9 @@ const (
cmdConfig = "config"
cmdConfigDir = "config-dir"
cmdListenAddress = "listen_address"
// Contracts.
cfgContractsContainerName = "contracts.container.name"
)
var ignore = map[string]struct{}{
@ -200,7 +214,6 @@ var defaultTags = []string{logs.TagApp, logs.TagDatapath, logs.TagExternalStorag
type Logger struct {
logger *zap.Logger
lvl zap.AtomicLevel
}
type appCfg struct {
@ -393,6 +406,9 @@ func setDefaults(v *viper.Viper, flags *pflag.FlagSet) {
// multinet
v.SetDefault(cfgMultinetFallbackDelay, defaultMultinetFallbackDelay)
// contracts
v.SetDefault(cfgContractsContainerName, defaultContainerContractName)
if resolveMethods, err := flags.GetStringSlice(cfgResolveOrder); err == nil {
v.SetDefault(cfgResolveOrder, resolveMethods)
}
@ -508,8 +524,8 @@ func fetchLogTagsConfig(v *viper.Viper, defaultLvl zapcore.Level) (map[string]za
res := make(map[string]zapcore.Level)
for i := 0; ; i++ {
name := v.GetString(fmt.Sprintf(cfgLoggerTagsNameTmpl, i))
if name == "" {
tagNames := v.GetString(fmt.Sprintf(cfgLoggerTagsNameTmpl, i))
if tagNames == "" {
break
}
@ -521,7 +537,12 @@ func fetchLogTagsConfig(v *viper.Viper, defaultLvl zapcore.Level) (map[string]za
}
}
res[name] = lvl
for _, tagName := range strings.Split(tagNames, ",") {
tagName = strings.TrimSpace(tagName)
if len(tagName) != 0 {
res[tagName] = lvl
}
}
}
if len(res) == 0 && !v.IsSet(cfgLoggerTags) {
@ -670,6 +691,8 @@ func (a *app) initPools(ctx context.Context) {
grpc.WithUnaryInterceptor(grpctracing.NewUnaryClientInteceptor()),
grpc.WithStreamInterceptor(grpctracing.NewStreamClientInterceptor()),
grpc.WithContextDialer(a.settings.dialerSource.GrpcContextDialer()),
grpc.WithChainUnaryInterceptor(qostagging.NewUnaryClientInteceptor()),
grpc.WithChainStreamInterceptor(qostagging.NewStreamClientInterceptor()),
}
prm.SetGRPCDialOptions(interceptors...)
prmTree.SetGRPCDialOptions(interceptors...)
@ -756,6 +779,15 @@ func getNetmapCacheOptions(v *viper.Viper, l *zap.Logger) *cache.NetmapCacheConf
return cacheCfg
}
func getCORSCacheOptions(v *viper.Viper, l *zap.Logger) *cache.Config {
cacheCfg := cache.DefaultCORSConfig(l)
cacheCfg.Lifetime = fetchCacheLifetime(v, l, cfgCORSCacheLifetime, cacheCfg.Lifetime)
cacheCfg.Size = fetchCacheSize(v, l, cfgCORSCacheSize, cacheCfg.Size)
return cacheCfg
}
func fetchCacheLifetime(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue time.Duration) time.Duration {
if v.IsSet(cfgEntry) {
lifetime := v.GetDuration(cfgEntry)
@ -851,3 +883,18 @@ func fetchArchiveCompression(v *viper.Viper) bool {
}
return v.GetBool(cfgArchiveCompression)
}
func fetchCORSConfig(v *viper.Viper) *data.CORSRule {
if !v.IsSet(cfgCORS) {
return nil
}
return &data.CORSRule{
AllowedOrigins: []string{v.GetString(cfgCORSAllowOrigin)},
AllowedMethods: v.GetStringSlice(cfgCORSAllowMethods),
AllowedHeaders: v.GetStringSlice(cfgCORSAllowHeaders),
ExposeHeaders: v.GetStringSlice(cfgCORSExposeHeaders),
AllowedCredentials: v.GetBool(cfgCORSAllowCredentials),
MaxAgeSeconds: fetchCORSMaxAge(v),
}
}

View file

@ -20,8 +20,9 @@ HTTP_GW_LOGGER_SAMPLING_ENABLED=false
HTTP_GW_LOGGER_SAMPLING_INITIAL=100
HTTP_GW_LOGGER_SAMPLING_THEREAFTER=100
HTTP_GW_LOGGER_SAMPLING_INTERVAL=1s
HTTP_GW_LOGGER_TAGS_0_NAME=app
HTTP_GW_LOGGER_TAGS_1_NAME=datapath
HTTP_GW_LOGGER_TAGS_0_NAMES=app,datapath
HTTP_GW_LOGGER_TAGS_0_LEVEL=level
HTTP_GW_LOGGER_TAGS_1_NAME=external_storage_tree
HTTP_GW_SERVER_0_ADDRESS=0.0.0.0:443
HTTP_GW_SERVER_0_TLS_ENABLED=false
@ -129,6 +130,9 @@ HTTP_GW_CACHE_BUCKETS_LIFETIME=1m
HTTP_GW_CACHE_BUCKETS_SIZE=1000
# Cache which stores netmap
HTTP_GW_CACHE_NETMAP_LIFETIME=1m
# Cache which stores container CORS configurations
HTTP_GW_CACHE_CORS_LIFETIME=5m
HTTP_GW_CACHE_CORS_SIZE=1000
# Header to determine zone to resolve bucket name
HTTP_GW_RESOLVE_BUCKET_NAMESPACE_HEADER=X-Frostfs-Namespace
@ -170,5 +174,13 @@ HTTP_GW_INDEX_PAGE_TEMPLATE_PATH=internal/handler/templates/index.gotmpl
# Enable using fallback path to search for a object by attribute
HTTP_GW_FEATURES_ENABLE_FILEPATH_FALLBACK=false
# See description in docs/gate-configuration.md
HTTP_GW_FEATURES_ENABLE_FILEPATH_SLASH_FALLBACK=false
# Enable using new version of tree pool, which uses netmap to select nodes, for requests to tree service
HTTP_GW_FEATURES_TREE_POOL_NETMAP_SUPPORT=true
# Containers properties
HTTP_GW_CONTAINERS_CORS=AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj
# Container contract hash (LE) or name in NNS.
HTTP_GW_CONTRACTS_CONTAINER_NAME=container.frostfs

View file

@ -30,8 +30,7 @@ logger:
thereafter: 100
interval: 1s
tags:
- name: app
- name: datapath
- names: app,datapath
level: debug
server:
@ -156,6 +155,10 @@ cache:
# Cache which stores netmap
netmap:
lifetime: 1m
# Cache which stores container CORS configurations
cors:
lifetime: 5m
size: 1000
resolve_bucket:
namespace_header: X-Frostfs-Namespace
@ -189,5 +192,15 @@ multinet:
features:
# Enable using fallback path to search for a object by attribute
enable_filepath_fallback: false
# See description in docs/gate-configuration.md
enable_filepath_slash_fallback: false
# Enable using new version of tree pool, which uses netmap to select nodes, for requests to tree service
tree_pool_netmap_support: true
containers:
cors: AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj
contracts:
container:
# Container contract hash (LE) or name in NNS.
name: container.frostfs

View file

@ -94,6 +94,8 @@ The `filename` field from the multipart form will be set as `FileName` attribute
|--------|----------------------------------------------|
| 200 | Object created successfully. |
| 400 | Some error occurred during object uploading. |
| 403 | Access denied. |
| 409 | Can not upload object due to quota reached. |
## Get object
@ -141,6 +143,7 @@ Get an object (payload and attributes) by an address.
|--------|------------------------------------------------|
| 200 | Object got successfully. |
| 400 | Some error occurred during object downloading. |
| 403 | Access denied. |
| 404 | Container or object not found. |
###### Body
@ -183,6 +186,7 @@ Get an object attributes by an address.
|--------|---------------------------------------------------|
| 200 | Object head successfully. |
| 400 | Some error occurred during object HEAD operation. |
| 403 | Access denied. |
| 404 | Container or object not found. |
## Search object
@ -233,6 +237,7 @@ If more than one object is found, an arbitrary one will be returned.
|--------|------------------------------------------------|
| 200 | Object got successfully. |
| 400 | Some error occurred during object downloading. |
| 403 | Access denied. |
| 404 | Container or object not found. |
#### HEAD
@ -269,6 +274,7 @@ If more than one object is found, an arbitrary one will be used to get attribute
|--------|---------------------------------------|
| 200 | Object head successfully. |
| 400 | Some error occurred during operation. |
| 403 | Access denied. |
| 404 | Container or object not found. |
## Download archive
@ -304,16 +310,16 @@ Archive can be compressed (see http-gw [configuration](gate-configuration.md#arc
###### Headers
| Header | Description |
|-----------------------|-------------------------------------------------------------------------------------------------------------------|
| `Content-Disposition` | Indicate how to browsers should treat file (`attachment`). Set `filename` as `archive.zip`. |
| `Content-Type` | Indicate content type of object. Set to `application/zip` |
| Header | Description |
|-----------------------|---------------------------------------------------------------------------------------------|
| `Content-Disposition` | Indicate how to browsers should treat file (`attachment`). Set `filename` as `archive.zip`. |
| `Content-Type` | Indicate content type of object. Set to `application/zip` |
###### Status codes
| Status | Description |
|--------|-----------------------------------------------------|
| 200 | Object got successfully. |
| 400 | Some error occurred during object downloading. |
| 404 | Container or objects not found. |
| 500 | Some inner error (e.g. error on streaming objects). |
| Status | Description |
|--------|------------------------------------------------|
| 200 | Object got successfully. |
| 400 | Some error occurred during object downloading. |
| 403 | Access denied. |
| 404 | Container or objects not found. |

View file

@ -8,7 +8,6 @@ There are some custom types used for brevity:
* `duration` -- string consisting of a number and a suffix. Suffix examples include `s` (seconds), `m` (minutes), `ms` (
milliseconds).
# Reload on SIGHUP
Some config values can be reloaded on SIGHUP signal.
@ -60,6 +59,8 @@ $ cat http.log
| `index_page` | [Index page configuration](#index_page-section) |
| `multinet` | [Multinet configuration](#multinet-section) |
| `features` | [Features configuration](#features-section) |
| `containers` | [Containers configuration](#containers-section) |
| `contracts` | [Contracts configuration](#contracts-section) |
# General section
@ -162,7 +163,6 @@ server:
| `tls.cert_file` | `string` | yes | | Path to the TLS certificate. |
| `tls.key_file` | `string` | yes | | Path to the key. |
# `logger` section
```yaml
@ -175,10 +175,9 @@ logger:
thereafter: 100
interval: 1s
tags:
- name: "app"
level: info
- name: "datapath"
- name: "external_storage_tree"
- names: "app,datapath"
level: info
- names: "external_storage_tree"
```
| Parameter | Type | SIGHUP reload | Default value | Description |
@ -198,14 +197,14 @@ parameter. Available tags:
```yaml
tags:
- name: "app"
- names: "app,datapath"
level: info
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|-----------------------|------------|---------------|---------------------------|-------------------------------------------------------------------------------------------------------|
| `name` | `string` | yes | | Tag name. Possible values see below in `Tag values` section. |
| `level` | `string` | yes | Value from `logger.level` | Logging level for specific tag. Possible values: `debug`, `info`, `warn`, `dpanic`, `panic`, `fatal`. |
| Parameter | Type | SIGHUP reload | Default value | Description |
|-----------|------------|---------------|---------------------------|-------------------------------------------------------------------------------------------------------|
| `names` | `[]string` | yes | | Tag names separated by `,`. Possible values see below in `Tag values` section. |
| `level` | `string` | yes | Value from `logger.level` | Logging level for specific tag. Possible values: `debug`, `info`, `warn`, `dpanic`, `panic`, `fatal`. |
### Tag values
@ -235,7 +234,6 @@ web:
| `stream_request_body` | `bool` | `true` | Enables request body streaming, and calls the handler sooner when given body is larger than the current limit. |
| `max_request_body_size` | `int` | `4194304` | Maximum request body size. The server rejects requests with bodies exceeding this limit. |
# `upload-header` section
```yaml
@ -271,7 +269,6 @@ archive:
|---------------|--------|---------------|---------------|------------------------------------------------------------------|
| `compression` | `bool` | yes | `false` | Enable archive compression when download files by common prefix. |
# `pprof` section
Contains configuration for the `pprof` profiler.
@ -320,14 +317,13 @@ tracing:
```
| Parameter | Type | SIGHUP reload | Default value | Description |
| ------------ | -------------------------------------- | ------------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------- |
|--------------|----------------------------------------|---------------|---------------|---------------------------------------------------------------------------------------------------------------------------------|
| `enabled` | `bool` | yes | `false` | Flag to enable the tracing. |
| `exporter` | `string` | yes | | Trace collector type (`stdout` or `otlp_grpc` are supported). |
| `endpoint` | `string` | yes | | Address of collector endpoint for OTLP exporters. |
| `trusted_ca` | `string` | yes | | Path to certificate of a certification authority in pem format, that issued the TLS certificate of the telemetry remote server. |
| `attributes` | [[]Attributes](#attributes-subsection) | yes | | An array of configurable attributes in key-value format. |
#### `attributes` subsection
```yaml
@ -338,12 +334,13 @@ tracing:
value: value
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|-----------------------|----------|---------------|---------------|----------------------------------------------------------|
| `key` | `string` | yes | | Attribute key. |
| `value` | `string` | yes | | Attribute value. |
| Parameter | Type | SIGHUP reload | Default value | Description |
|-----------|----------|---------------|---------------|------------------|
| `key` | `string` | yes | | Attribute key. |
| `value` | `string` | yes | | Attribute value. |
# `runtime` section
Contains runtime parameters.
```yaml
@ -372,7 +369,6 @@ frostfs:
| `buffer_max_size_for_put` | `uint64` | yes | `1048576` | Sets max buffer size for read payload in put operations. |
| `tree_pool_max_attempts` | `uint32` | no | `0` | Sets max attempt to make successful tree request. Value 0 means the number of attempts equals to number of nodes in pool. |
### `cache` section
```yaml
@ -382,13 +378,16 @@ cache:
size: 1000
netmap:
lifetime: 1m
cors:
lifetime: 5m
size: 1000
```
| Parameter | Type | Default value | Description |
|-----------|-----------------------------------|---------------------------------|---------------------------------------------------------------------------|
| `buckets` | [Cache config](#cache-subsection) | `lifetime: 60s`<br>`size: 1000` | Cache which contains mapping of bucket name to bucket info. |
| `netmap` | [Cache config](#cache-subsection) | `lifetime: 1m` | Cache which stores netmap. `netmap.size` isn't applicable for this cache. |
| `cors` | [Cache config](#cache-subsection) | `lifetime: 5m`<br>`size: 1000` | Cache which stores container CORS configurations. |
#### `cache` subsection
@ -402,7 +401,6 @@ size: 1000
| `lifetime` | `duration` | depends on cache | Lifetime of entries in cache. |
| `size` | `int` | depends on cache | LRU cache size. |
# `resolve_bucket` section
Bucket name resolving parameters from and to container ID.
@ -413,10 +411,10 @@ resolve_bucket:
default_namespaces: [ "", "root" ]
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|----------------------|------------|---------------|-----------------------|--------------------------------------------------------------------------------------------------------------------------|
| `namespace_header` | `string` | yes | `X-Frostfs-Namespace` | Header to determine zone to resolve bucket name. |
| `default_namespaces` | `[]string` | yes | ["","root"] | Namespaces that should be handled as default. |
| Parameter | Type | SIGHUP reload | Default value | Description |
|----------------------|------------|---------------|-----------------------|--------------------------------------------------|
| `namespace_header` | `string` | yes | `X-Frostfs-Namespace` | Header to determine zone to resolve bucket name. |
| `default_namespaces` | `[]string` | yes | ["","root"] | Namespaces that should be handled as default. |
# `index_page` section
@ -441,14 +439,14 @@ index_page:
# `cors` section
Parameters for CORS (used in OPTIONS requests and responses in all handlers).
If values are not set, headers will not be included to response.
If values are not set, settings from CORS container will be used.
```yaml
cors:
allow_origin: "*"
allow_methods: ["GET", "HEAD"]
allow_headers: ["Authorization"]
expose_headers: ["*"]
allow_methods: [ "GET", "HEAD" ]
allow_headers: [ "Authorization" ]
expose_headers: [ "*" ]
allow_credentials: false
max_age: 600
```
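To check which of these settings (or the rules from the CORS container) actually take effect, one option is a preflight request against a running gateway; a hypothetical probe, with placeholder address and path:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	const gateway = "http://localhost:8080" // placeholder gateway address
	const route = "/get/{cid}/{oid}"        // substitute a real container and object ID

	req, err := http.NewRequest(http.MethodOptions, gateway+route, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Origin", "https://example.com")
	req.Header.Set("Access-Control-Request-Method", "GET")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	for _, h := range []string{
		"Access-Control-Allow-Origin",
		"Access-Control-Allow-Methods",
		"Access-Control-Allow-Headers",
		"Access-Control-Max-Age",
	} {
		fmt.Printf("%s: %s\n", h, resp.Header.Get(h))
	}
}
```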
@ -468,15 +466,15 @@ Configuration of multinet support.
```yaml
multinet:
enabled: false
balancer: roundrobin
restrict: false
fallback_delay: 300ms
subnets:
- mask: 1.2.3.4/24
source_ips:
- 1.2.3.4
- 1.2.3.5
enabled: false
balancer: roundrobin
restrict: false
fallback_delay: 300ms
subnets:
- mask: 1.2.3.4/24
source_ips:
- 1.2.3.4
- 1.2.3.5
```
| Parameter | Type | SIGHUP reload | Default value | Description |
@ -508,10 +506,37 @@ Contains parameters for enabling features.
```yaml
features:
enable_filepath_fallback: true
enable_filepath_slash_fallback: false
tree_pool_netmap_support: true
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|-------------------------------------|--------|---------------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `features.enable_filepath_fallback` | `bool` | yes | `false` | Enable using a fallback path to search for an object by attribute. If the value of the `FilePath` attribute in the request contains no `/` symbols or a single leading `/` symbol and the object was not found, an attempt is made to search for the object by the `FileName` attribute.  |
| `features.tree_pool_netmap_support` | `bool` | no | `false` | Enable the new version of the tree pool, which uses the netmap to select nodes, for requests to the tree service.                                                                                                                                                                         |
| Parameter | Type | SIGHUP reload | Default value | Description |
|-------------------------------------------|--------|---------------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `features.enable_filepath_fallback`       | `bool` | yes           | `false`       | Enable a fallback that searches for an object by the `FileName` attribute if an object with the requested `FilePath` attribute wasn't found.                                                                                                                                 |
| `features.enable_filepath_slash_fallback` | `bool` | yes           | `false`       | Enable a fallback that retries the search by `FilePath`/`FileName` with the leading slash added or removed (the opposite of what was provided) if the object wasn't found under the provided value. This fallback is applied before `enable_filepath_fallback` (see the sketch below). |
| `features.tree_pool_netmap_support`       | `bool` | no            | `false`       | Enable the new version of the tree pool, which uses the netmap to select nodes, for requests to the tree service.                                                                                                                                                            |
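
To make the combined effect of these flags concrete, below is a minimal, self-contained sketch (not the gateway's actual code) of the order in which candidate searches are attempted for a request path; the function and type names are invented for illustration, while the order itself follows the handler changes later in this diff.

```go
package main

import "fmt"

// candidate describes one object search attempt: which attribute is matched
// and which path value is used. This only illustrates the documented
// fallback order; it is not the handler implementation.
type candidate struct {
	attribute string // "FilePath" or "FileName"
	value     string
}

func candidates(path string, slashFallback, fileNameFallback bool) []candidate {
	flip := func(p string) string { // add or strip a leading slash
		if len(p) > 0 && p[0] == '/' {
			return p[1:]
		}
		return "/" + p
	}

	out := []candidate{{"FilePath", path}}
	if slashFallback {
		out = append(out, candidate{"FilePath", flip(path)})
	}
	if fileNameFallback {
		out = append(out, candidate{"FileName", path})
		if slashFallback {
			out = append(out, candidate{"FileName", flip(path)})
		}
	}
	return out
}

func main() {
	for _, c := range candidates("/docs/readme.md", true, true) {
		fmt.Printf("search by %s = %q\n", c.attribute, c.value)
	}
}
```
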
# `containers` section
Section for well-known containers used to store data and settings (see the lookup sketch below the table).
```yaml
containers:
cors: AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|-----------|----------|---------------|---------------|-----------------------------------------|
| `cors` | `string` | no | | Container name for CORS configurations. |
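
For context, the gateway stores the CORS configuration of each container as an object inside this well-known container, keyed by a `FilePath` attribute of the form `/<cid>.cors` (see `corsFilePathTemplate` in `internal/handler/cors.go` later in this diff). A small illustrative helper:

```go
package main

import "fmt"

// corsObjectFilePath returns the FilePath attribute under which the gateway
// looks up the CORS configuration of container cnrID inside the well-known
// container configured in `containers.cors`. Format taken from
// corsFilePathTemplate in internal/handler/cors.go.
func corsObjectFilePath(cnrID string) string {
	return fmt.Sprintf("/%s.cors", cnrID)
}

func main() {
	fmt.Println(corsObjectFilePath("AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj"))
	// Output: /AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj.cors
}
```
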
# `contracts` section
```yaml
contracts:
container:
name: container.frostfs
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|------------------|----------|---------------|---------------------|----------------------------------------------|
| `container.name` | `string` | no | `container.frostfs` | Container contract hash (LE) or name in NNS. |

9
go.mod

@ -1,10 +1,12 @@
module git.frostfs.info/TrueCloudLab/frostfs-http-gw
go 1.22
go 1.23
require (
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241125133852-37bd75821121
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250130095343-593dd77d841a
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250317082814-87bb55f992dc
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
github.com/bluele/gcache v0.0.2
@ -26,14 +28,12 @@ require (
go.opentelemetry.io/otel/trace v1.31.0
go.uber.org/zap v1.27.0
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
golang.org/x/net v0.30.0
golang.org/x/sys v0.28.0
google.golang.org/grpc v1.69.2
)
require (
dario.cat/mergo v1.0.0 // indirect
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e // indirect
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 // indirect
git.frostfs.info/TrueCloudLab/hrw v1.2.1 // indirect
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
@ -125,6 +125,7 @@ require (
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.31.0 // indirect
golang.org/x/net v0.30.0 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/term v0.27.0 // indirect
golang.org/x/text v0.21.0 // indirect

6
go.sum

@ -42,10 +42,12 @@ git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250130095343-593dd77d841a h1:Ud+3zz4WP9HPxEQxDPJZPpiPdm30nDNSKucsWP9L54M=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250130095343-593dd77d841a/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241125133852-37bd75821121 h1:/Z8DfbLZXp7exUQWUKoG/9tbFdI9d5lV1qSReaYoG8I=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241125133852-37bd75821121/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe h1:81gDNdWNLP24oMQukRiCE9R1wGSh0l0dRq3F1W+Oesc=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250317082814-87bb55f992dc h1:fS6Yp4GvI+C22UrWz9oqJXwvQw5Q6SmADIY4H9eIQsc=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250317082814-87bb55f992dc/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8=

62
internal/cache/cors.go vendored Normal file

@ -0,0 +1,62 @@
package cache
import (
"fmt"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/bluele/gcache"
"go.uber.org/zap"
)
// CORSCache contains cache with CORS objects.
type CORSCache struct {
cache gcache.Cache
logger *zap.Logger
}
const (
// DefaultCORSCacheSize is a default maximum number of entries in cache.
DefaultCORSCacheSize = 1e3
// DefaultCORSCacheLifetime is a default lifetime of entries in cache.
DefaultCORSCacheLifetime = 5 * time.Minute
)
// DefaultCORSConfig returns new default cache expiration values.
func DefaultCORSConfig(logger *zap.Logger) *Config {
return &Config{
Size: DefaultCORSCacheSize,
Lifetime: DefaultCORSCacheLifetime,
Logger: logger,
}
}
// NewCORSCache creates an object of CORSCache.
func NewCORSCache(config *Config) *CORSCache {
gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
return &CORSCache{cache: gc, logger: config.Logger}
}
// Get returns a cached object.
func (o *CORSCache) Get(cnrID cid.ID) *data.CORSConfiguration {
entry, err := o.cache.Get(cnrID)
if err != nil {
return nil
}
result, ok := entry.(*data.CORSConfiguration)
if !ok {
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
return nil
}
return result
}
// Put puts an object to cache.
func (o *CORSCache) Put(cnrID cid.ID, cors *data.CORSConfiguration) error {
return o.cache.Set(cnrID, cors)
}
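
A minimal usage sketch of this cache, assuming it is compiled inside the gateway module (the `internal/...` packages are not importable from outside it) and that the SDK's `cidtest` helper package is available; all values are illustrative.

```go
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
	"go.uber.org/zap"
)

func main() {
	// Default config: 1000 entries, 5 minute lifetime (see the constants above).
	corsCache := cache.NewCORSCache(cache.DefaultCORSConfig(zap.NewNop()))

	cnrID := cidtest.ID() // random container ID, for illustration only
	cfg := &data.CORSConfiguration{
		CORSRules: []data.CORSRule{{
			AllowedOrigins: []string{"*"},
			AllowedMethods: []string{"GET", "HEAD"},
		}},
	}

	if err := corsCache.Put(cnrID, cfg); err != nil {
		panic(err)
	}
	fmt.Println(corsCache.Get(cnrID) != nil) // true until the entry expires
}
```
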

18
internal/data/cors.go Normal file

@ -0,0 +1,18 @@
package data
type (
// CORSConfiguration stores CORS configuration of a request.
CORSConfiguration struct {
CORSRules []CORSRule `xml:"CORSRule" json:"CORSRules"`
}
// CORSRule stores rules for CORS configuration.
CORSRule struct {
AllowedHeaders []string `xml:"AllowedHeader" json:"AllowedHeaders"`
AllowedMethods []string `xml:"AllowedMethod" json:"AllowedMethods"`
AllowedOrigins []string `xml:"AllowedOrigin" json:"AllowedOrigins"`
ExposeHeaders []string `xml:"ExposeHeader" json:"ExposeHeaders"`
MaxAgeSeconds int `xml:"MaxAgeSeconds,omitempty" json:"MaxAgeSeconds,omitempty"`
AllowedCredentials bool `xml:"AllowedCredentials,omitempty" json:"AllowedCredentials,omitempty"`
}
)
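
For reference, a short sketch of decoding the kind of XML payload these structs describe (the same shape `getCORSConfig` expects later in this diff); the rule values are illustrative, and the snippet assumes it is built inside the gateway module.

```go
package main

import (
	"encoding/xml"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
)

const payload = `
<CORSConfiguration>
  <CORSRule>
    <AllowedOrigin>https://*.example.com</AllowedOrigin>
    <AllowedMethod>GET</AllowedMethod>
    <AllowedMethod>HEAD</AllowedMethod>
    <AllowedHeader>Content-Type</AllowedHeader>
    <ExposeHeader>X-Amz-*</ExposeHeader>
    <MaxAgeSeconds>600</MaxAgeSeconds>
  </CORSRule>
</CORSConfiguration>`

func main() {
	var cfg data.CORSConfiguration
	if err := xml.Unmarshal([]byte(payload), &cfg); err != nil {
		panic(err)
	}
	// Prints the decoded origins of the first rule: [https://*.example.com]
	fmt.Printf("%+v\n", cfg.CORSRules[0].AllowedOrigins)
}
```
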


@ -12,7 +12,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@ -131,11 +130,15 @@ func parentDir(prefix string) string {
return prefix[index:]
}
func trimPrefix(encPrefix string) string {
func getParent(encPrefix string) string {
prefix, err := url.PathUnescape(encPrefix)
if err != nil {
return ""
}
if prefix != "" && prefix[len(prefix)-1] == '/' {
prefix = prefix[:len(prefix)-1]
}
slashIndex := strings.LastIndex(prefix, "/")
if slashIndex == -1 {
return ""
@ -161,10 +164,15 @@ func urlencode(path string) string {
type GetObjectsResponse struct {
objects []ResponseObject
hasErrors bool
isNative bool
}
func (h *Handler) getDirObjectsS3(ctx context.Context, bucketInfo *data.BucketInfo, prefix string) (*GetObjectsResponse, error) {
nodes, _, err := h.tree.GetSubTreeByPrefix(ctx, bucketInfo, prefix, true)
if prefix != "" && prefix[len(prefix)-1] == '/' {
prefix = prefix[:len(prefix)-1]
}
nodes, err := h.tree.GetSubTreeByPrefix(ctx, bucketInfo, prefix, true)
if err != nil {
return nil, err
}
@ -185,7 +193,7 @@ func (h *Handler) getDirObjectsS3(ctx context.Context, bucketInfo *data.BucketIn
if obj.IsDeleteMarker {
continue
}
obj.FilePath = prefix + obj.FileName
obj.FilePath = prefix + "/" + obj.FileName
obj.GetURL = "/get/" + bucketInfo.Name + urlencode(obj.FilePath)
result.objects = append(result.objects, obj)
}
@ -194,9 +202,9 @@ func (h *Handler) getDirObjectsS3(ctx context.Context, bucketInfo *data.BucketIn
}
func (h *Handler) getDirObjectsNative(ctx context.Context, bucketInfo *data.BucketInfo, prefix string) (*GetObjectsResponse, error) {
var basePath string
if ind := strings.LastIndex(prefix, "/"); ind != -1 {
basePath = prefix[:ind+1]
basePath := prefix
if basePath != "" && basePath[len(basePath)-1] != '/' {
basePath += "/"
}
filters := object.NewSearchFilters()
@ -223,10 +231,11 @@ func (h *Handler) getDirObjectsNative(ctx context.Context, bucketInfo *data.Buck
return nil, err
}
log := utils.GetReqLogOrDefault(ctx, h.log)
log := h.reqLogger(ctx)
dirs := make(map[string]struct{})
result := &GetObjectsResponse{
objects: make([]ResponseObject, 0, 100),
objects: make([]ResponseObject, 0, 100),
isNative: true,
}
for objExt := range resp {
if objExt.Error != nil {
@ -258,7 +267,7 @@ func (h *Handler) headDirObjects(ctx context.Context, cnrID cid.ID, objectIDs Re
go func() {
defer close(res)
log := utils.GetReqLogOrDefault(ctx, h.log).With(
log := h.reqLogger(ctx).With(
zap.String("cid", cnrID.EncodeToString()),
zap.String("path", basePath),
)
@ -273,7 +282,7 @@ func (h *Handler) headDirObjects(ctx context.Context, cnrID cid.ID, objectIDs Re
})
if err != nil {
wg.Done()
log.Warn(logs.FailedToSumbitTaskToPool, zap.Error(err), logs.TagField(logs.TagDatapath))
log.Warn(logs.FailedToSubmitTaskToPool, zap.Error(err), logs.TagField(logs.TagDatapath))
}
select {
case <-ctx.Done():
@ -322,30 +331,16 @@ func (h *Handler) headDirObject(ctx context.Context, cnrID cid.ID, objID oid.ID,
}
type browseParams struct {
bucketInfo *data.BucketInfo
prefix string
isNative bool
listObjects func(ctx context.Context, bucketName *data.BucketInfo, prefix string) (*GetObjectsResponse, error)
bucketInfo *data.BucketInfo
prefix string
objects *GetObjectsResponse
}
func (h *Handler) browseObjects(c *fasthttp.RequestCtx, p browseParams) {
func (h *Handler) browseObjects(ctx context.Context, req *fasthttp.RequestCtx, p browseParams) {
const S3Protocol = "s3"
const FrostfsProtocol = "frostfs"
ctx := utils.GetContextFromRequest(c)
reqLog := utils.GetReqLogOrDefault(ctx, h.log)
log := reqLog.With(
zap.String("bucket", p.bucketInfo.Name),
zap.String("container", p.bucketInfo.CID.EncodeToString()),
zap.String("prefix", p.prefix),
)
resp, err := p.listObjects(ctx, p.bucketInfo, p.prefix)
if err != nil {
logAndSendBucketError(c, log, err)
return
}
objects := resp.objects
objects := p.objects.objects
sort.Slice(objects, func(i, j int) bool {
if objects[i].IsDir == objects[j].IsDir {
return objects[i].FileName < objects[j].FileName
@ -355,28 +350,33 @@ func (h *Handler) browseObjects(c *fasthttp.RequestCtx, p browseParams) {
tmpl, err := template.New("index").Funcs(template.FuncMap{
"formatSize": formatSize,
"trimPrefix": trimPrefix,
"getParent": getParent,
"urlencode": urlencode,
"parentDir": parentDir,
}).Parse(h.config.IndexPageTemplate())
if err != nil {
logAndSendBucketError(c, log, err)
h.logAndSendError(ctx, req, logs.FailedToParseTemplate, err)
return
}
bucketName := p.bucketInfo.Name
protocol := S3Protocol
if p.isNative {
if p.objects.isNative {
bucketName = p.bucketInfo.CID.EncodeToString()
protocol = FrostfsProtocol
}
if err = tmpl.Execute(c, &BrowsePageData{
prefix := p.prefix
if prefix != "" && prefix[len(prefix)-1] != '/' {
prefix += "/"
}
if err = tmpl.Execute(req, &BrowsePageData{
Container: bucketName,
Prefix: p.prefix,
Prefix: prefix,
Objects: objects,
Protocol: protocol,
HasErrors: resp.hasErrors,
HasErrors: p.objects.hasErrors,
}); err != nil {
logAndSendBucketError(c, log, err)
h.logAndSendError(ctx, req, logs.FailedToExecuteTemplate, err)
return
}
}


@ -0,0 +1,42 @@
package handler
import (
"context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"go.uber.org/zap"
)
func (h *Handler) containerInfo(ctx context.Context, cnrID cid.ID) (*data.BucketInfo, error) {
info := &data.BucketInfo{
CID: cnrID,
Name: cnrID.EncodeToString(),
}
res, err := h.cnrContract.GetContainerByID(cnrID)
if err != nil {
return nil, fmt.Errorf("get frostfs container: %w", err)
}
cnr := *res
if domain := container.ReadDomain(cnr); domain.Name() != "" {
info.Name = domain.Name()
info.Zone = domain.Zone()
}
info.HomomorphicHashDisabled = container.IsHomomorphicHashingDisabled(cnr)
info.PlacementPolicy = cnr.PlacementPolicy()
if err = h.cache.Put(info); err != nil {
h.reqLogger(ctx).Warn(logs.CouldntPutBucketIntoCache,
zap.String("bucket name", info.Name),
zap.Stringer("cid", info.CID),
zap.Error(err),
logs.TagField(logs.TagDatapath))
}
return info, nil
}

345
internal/handler/cors.go Normal file

@ -0,0 +1,345 @@
package handler
import (
"context"
"encoding/xml"
"errors"
"fmt"
"regexp"
"slices"
"sort"
"strconv"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
qostagging "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/valyala/fasthttp"
"go.uber.org/zap"
)
const (
internalIOTag = "internal"
corsFilePathTemplate = "/%s.cors"
wildcard = "*"
)
var errNoCORS = errors.New("no CORS objects found")
func (h *Handler) Preflight(req *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.Preflight")
defer span.End()
ctx = qostagging.ContextWithIOTag(ctx, internalIOTag)
cidParam, _ := req.UserValue("cid").(string)
reqLog := h.reqLogger(ctx)
log := reqLog.With(zap.String("cid", cidParam))
origin := req.Request.Header.Peek(fasthttp.HeaderOrigin)
if len(origin) == 0 {
log.Error(logs.EmptyOriginRequestHeader, logs.TagField(logs.TagDatapath))
ResponseError(req, "Origin request header needed", fasthttp.StatusBadRequest)
return
}
method := req.Request.Header.Peek(fasthttp.HeaderAccessControlRequestMethod)
if len(method) == 0 {
log.Error(logs.EmptyAccessControlRequestMethodHeader, logs.TagField(logs.TagDatapath))
ResponseError(req, "Access-Control-Request-Method request header needed", fasthttp.StatusBadRequest)
return
}
corsRule := h.config.CORS()
if corsRule != nil {
setCORSHeadersFromRule(req, corsRule)
return
}
corsConfig, err := h.getCORSConfig(ctx, log, cidParam)
if err != nil {
log.Error(logs.CouldNotGetCORSConfiguration, zap.Error(err), logs.TagField(logs.TagDatapath))
status := fasthttp.StatusInternalServerError
if errors.Is(err, errNoCORS) {
status = fasthttp.StatusNotFound
}
ResponseError(req, "could not get CORS configuration: "+err.Error(), status)
return
}
var headers []string
requestHeaders := req.Request.Header.Peek(fasthttp.HeaderAccessControlRequestHeaders)
if len(requestHeaders) > 0 {
headers = strings.Split(string(requestHeaders), ", ")
}
for _, rule := range corsConfig.CORSRules {
for _, o := range rule.AllowedOrigins {
if o == string(origin) || o == wildcard || (strings.Contains(o, "*") && match(o, string(origin))) {
for _, m := range rule.AllowedMethods {
if m == string(method) {
if !checkSubslice(rule.AllowedHeaders, headers) {
continue
}
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, string(origin))
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowMethods, strings.Join(rule.AllowedMethods, ", "))
if headers != nil {
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowHeaders, string(requestHeaders))
}
if rule.ExposeHeaders != nil {
req.Response.Header.Set(fasthttp.HeaderAccessControlExposeHeaders, strings.Join(rule.ExposeHeaders, ", "))
}
if rule.MaxAgeSeconds > 0 || rule.MaxAgeSeconds == -1 {
req.Response.Header.Set(fasthttp.HeaderAccessControlMaxAge, strconv.Itoa(rule.MaxAgeSeconds))
}
if o != wildcard {
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowCredentials, "true")
}
return
}
}
}
}
}
log.Error(logs.CORSRuleWasNotMatched, logs.TagField(logs.TagDatapath))
ResponseError(req, "Forbidden", fasthttp.StatusForbidden)
}
func (h *Handler) SetCORSHeaders(req *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.SetCORSHeaders")
defer span.End()
origin := req.Request.Header.Peek(fasthttp.HeaderOrigin)
if len(origin) == 0 {
return
}
method := req.Request.Header.Peek(fasthttp.HeaderAccessControlRequestMethod)
if len(method) == 0 {
method = req.Method()
}
ctx = qostagging.ContextWithIOTag(ctx, internalIOTag)
cidParam, _ := req.UserValue("cid").(string)
reqLog := h.reqLogger(ctx)
log := reqLog.With(zap.String("cid", cidParam))
corsRule := h.config.CORS()
if corsRule != nil {
setCORSHeadersFromRule(req, corsRule)
return
}
corsConfig, err := h.getCORSConfig(ctx, log, cidParam)
if err != nil {
log.Error(logs.CouldNotGetCORSConfiguration, zap.Error(err), logs.TagField(logs.TagDatapath))
return
}
var withCredentials bool
if tkn, err := tokens.LoadBearerToken(ctx); err == nil && tkn != nil {
withCredentials = true
}
for _, rule := range corsConfig.CORSRules {
for _, o := range rule.AllowedOrigins {
if o == string(origin) || (strings.Contains(o, "*") && len(o) > 1 && match(o, string(origin))) {
for _, m := range rule.AllowedMethods {
if m == string(method) {
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, string(origin))
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowMethods, strings.Join(rule.AllowedMethods, ", "))
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowCredentials, "true")
req.Response.Header.Set(fasthttp.HeaderVary, fasthttp.HeaderOrigin)
return
}
}
}
if o == wildcard {
for _, m := range rule.AllowedMethods {
if m == string(method) {
if withCredentials {
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, string(origin))
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowCredentials, "true")
req.Response.Header.Set(fasthttp.HeaderVary, fasthttp.HeaderOrigin)
} else {
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, o)
}
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowMethods, strings.Join(rule.AllowedMethods, ", "))
return
}
}
}
}
}
}
func (h *Handler) getCORSConfig(ctx context.Context, log *zap.Logger, cidStr string) (*data.CORSConfiguration, error) {
cnrID, err := h.resolveContainer(ctx, cidStr)
if err != nil {
return nil, fmt.Errorf("resolve container '%s': %w", cidStr, err)
}
if cors := h.corsCache.Get(*cnrID); cors != nil {
return cors, nil
}
objID, err := h.getLastCORSObject(ctx, *cnrID)
if err != nil {
return nil, fmt.Errorf("get last cors object: %w", err)
}
var addr oid.Address
addr.SetContainer(h.corsCnrID)
addr.SetObject(objID)
corsObj, err := h.frostfs.GetObject(ctx, PrmObjectGet{
Address: addr,
})
if err != nil {
return nil, fmt.Errorf("get cors object '%s': %w", addr.EncodeToString(), err)
}
corsConfig := &data.CORSConfiguration{}
if err = xml.NewDecoder(corsObj.Payload).Decode(corsConfig); err != nil {
return nil, fmt.Errorf("decode cors object: %w", err)
}
if err = h.corsCache.Put(*cnrID, corsConfig); err != nil {
log.Warn(logs.CouldntCacheCors, zap.Error(err), logs.TagField(logs.TagDatapath))
}
return corsConfig, nil
}
func (h *Handler) getLastCORSObject(ctx context.Context, cnrID cid.ID) (oid.ID, error) {
filters := object.NewSearchFilters()
filters.AddRootFilter()
filters.AddFilter(object.AttributeFilePath, fmt.Sprintf(corsFilePathTemplate, cnrID), object.MatchStringEqual)
res, err := h.frostfs.SearchObjects(ctx, PrmObjectSearch{
Container: h.corsCnrID,
Filters: filters,
})
if err != nil {
return oid.ID{}, fmt.Errorf("search cors versions: %w", err)
}
defer res.Close()
var (
addr oid.Address
obj *object.Object
headErr error
objs = make([]*object.Object, 0)
)
addr.SetContainer(h.corsCnrID)
err = res.Iterate(func(id oid.ID) bool {
addr.SetObject(id)
obj, headErr = h.frostfs.HeadObject(ctx, PrmObjectHead{
Address: addr,
})
if headErr != nil {
headErr = fmt.Errorf("head cors object '%s': %w", addr.EncodeToString(), headErr)
return true
}
objs = append(objs, obj)
return false
})
if err != nil {
return oid.ID{}, fmt.Errorf("iterate cors objects: %w", err)
}
if headErr != nil {
return oid.ID{}, headErr
}
if len(objs) == 0 {
return oid.ID{}, errNoCORS
}
sort.Slice(objs, func(i, j int) bool {
versionID1, _ := objs[i].ID()
versionID2, _ := objs[j].ID()
timestamp1 := utils.GetAttributeValue(objs[i].Attributes(), object.AttributeTimestamp)
timestamp2 := utils.GetAttributeValue(objs[j].Attributes(), object.AttributeTimestamp)
if objs[i].CreationEpoch() != objs[j].CreationEpoch() {
return objs[i].CreationEpoch() < objs[j].CreationEpoch()
}
if len(timestamp1) > 0 && len(timestamp2) > 0 && timestamp1 != timestamp2 {
unixTime1, err := strconv.ParseInt(timestamp1, 10, 64)
if err != nil {
return versionID1.EncodeToString() < versionID2.EncodeToString()
}
unixTime2, err := strconv.ParseInt(timestamp2, 10, 64)
if err != nil {
return versionID1.EncodeToString() < versionID2.EncodeToString()
}
return unixTime1 < unixTime2
}
return versionID1.EncodeToString() < versionID2.EncodeToString()
})
objID, _ := objs[len(objs)-1].ID()
return objID, nil
}
func setCORSHeadersFromRule(c *fasthttp.RequestCtx, cors *data.CORSRule) {
c.Response.Header.Set(fasthttp.HeaderAccessControlMaxAge, strconv.Itoa(cors.MaxAgeSeconds))
if len(cors.AllowedOrigins) != 0 {
c.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, cors.AllowedOrigins[0])
}
if len(cors.AllowedMethods) != 0 {
c.Response.Header.Set(fasthttp.HeaderAccessControlAllowMethods, strings.Join(cors.AllowedMethods, ", "))
}
if len(cors.AllowedHeaders) != 0 {
c.Response.Header.Set(fasthttp.HeaderAccessControlAllowHeaders, strings.Join(cors.AllowedHeaders, ", "))
}
if len(cors.ExposeHeaders) != 0 {
c.Response.Header.Set(fasthttp.HeaderAccessControlExposeHeaders, strings.Join(cors.ExposeHeaders, ", "))
}
if cors.AllowedCredentials {
c.Response.Header.Set(fasthttp.HeaderAccessControlAllowCredentials, "true")
}
}
func checkSubslice(slice []string, subSlice []string) bool {
if slices.Contains(slice, wildcard) {
return true
}
for _, r := range subSlice {
if !sliceContains(slice, r) {
return false
}
}
return true
}
func sliceContains(slice []string, str string) bool {
for _, s := range slice {
if s == str || (strings.Contains(s, "*") && match(s, str)) {
return true
}
}
return false
}
func match(tmpl, str string) bool {
regexpStr := "^" + regexp.QuoteMeta(tmpl) + "$"
regexpStr = regexpStr[:strings.Index(regexpStr, "*")-1] + "." + regexpStr[strings.Index(regexpStr, "*"):]
reg := regexp.MustCompile(regexpStr)
return reg.Match([]byte(str))
}
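
A sketch of a table test for the wildcard matcher above; it would have to live in package `handler`, since `match` is unexported, and the origins are illustrative. It shows how the single `*` in a template is expanded into `.*` while the rest of the pattern stays anchored and literal.

```go
package handler

import "testing"

// TestMatchWildcard illustrates how match expands one '*' in the template
// into '.*' and anchors the resulting pattern at both ends.
func TestMatchWildcard(t *testing.T) {
	for _, tc := range []struct {
		tmpl, str string
		expected  bool
	}{
		{"*suffix.example", "http://suffix.example", true}, // '*' may match any prefix
		{"*suffix.example", "suffix.example", true},        // ... or nothing at all
		{"https://*example", "https://www.example", true},  // '*' in the middle
		{"prefix.example*", "prefix.example.com", true},    // '*' at the end
		{"prefix.example*", "www.prefix.example", false},   // pattern is anchored with ^...$
	} {
		if got := match(tc.tmpl, tc.str); got != tc.expected {
			t.Errorf("match(%q, %q) = %v, want %v", tc.tmpl, tc.str, got, tc.expected)
		}
	}
}
```
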


@ -0,0 +1,930 @@
package handler
import (
"encoding/base64"
"encoding/xml"
"fmt"
"net/http"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
"github.com/valyala/fasthttp"
)
func TestPreflight(t *testing.T) {
hc := prepareHandlerContext(t)
bktName := "bucket-preflight"
cnrID, cnr, err := hc.prepareContainer(bktName, acl.Private)
require.NoError(t, err)
hc.frostfs.SetContainer(cnrID, cnr)
var epoch uint64
t.Run("CORS object", func(t *testing.T) {
for _, tc := range []struct {
name string
corsConfig *data.CORSConfiguration
requestHeaders map[string]string
expectedHeaders map[string]string
status int
}{
{
name: "no CORS configuration",
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
fasthttp.HeaderAccessControlAllowHeaders: "",
fasthttp.HeaderAccessControlExposeHeaders: "",
fasthttp.HeaderAccessControlMaxAge: "",
fasthttp.HeaderAccessControlAllowCredentials: "",
},
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://example.com",
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
},
status: fasthttp.StatusNotFound,
},
{
name: "specific allowed origin",
corsConfig: &data.CORSConfiguration{
CORSRules: []data.CORSRule{
{
AllowedOrigins: []string{"http://example.com"},
AllowedMethods: []string{"GET", "HEAD"},
AllowedHeaders: []string{"Content-Type"},
ExposeHeaders: []string{"x-amz-*", "X-Amz-*"},
MaxAgeSeconds: 900,
},
},
},
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://example.com",
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
fasthttp.HeaderAccessControlRequestHeaders: "Content-Type",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "http://example.com",
fasthttp.HeaderAccessControlAllowMethods: "GET, HEAD",
fasthttp.HeaderAccessControlAllowHeaders: "Content-Type",
fasthttp.HeaderAccessControlExposeHeaders: "x-amz-*, X-Amz-*",
fasthttp.HeaderAccessControlMaxAge: "900",
fasthttp.HeaderAccessControlAllowCredentials: "true",
},
status: fasthttp.StatusOK,
},
{
name: "wildcard allowed origin",
corsConfig: &data.CORSConfiguration{
CORSRules: []data.CORSRule{
{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "HEAD"},
AllowedHeaders: []string{"Content-Type"},
ExposeHeaders: []string{"x-amz-*", "X-Amz-*"},
MaxAgeSeconds: 900,
},
},
},
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://example.com",
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "http://example.com",
fasthttp.HeaderAccessControlAllowMethods: "GET, HEAD",
fasthttp.HeaderAccessControlAllowHeaders: "",
fasthttp.HeaderAccessControlExposeHeaders: "x-amz-*, X-Amz-*",
fasthttp.HeaderAccessControlMaxAge: "900",
fasthttp.HeaderAccessControlAllowCredentials: "",
},
status: fasthttp.StatusOK,
},
{
name: "not allowed header",
corsConfig: &data.CORSConfiguration{
CORSRules: []data.CORSRule{
{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "HEAD"},
AllowedHeaders: []string{"Content-Type"},
},
},
},
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://example.com",
fasthttp.HeaderAccessControlRequestMethod: "GET",
fasthttp.HeaderAccessControlRequestHeaders: "Authorization",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
fasthttp.HeaderAccessControlAllowHeaders: "",
fasthttp.HeaderAccessControlExposeHeaders: "",
fasthttp.HeaderAccessControlMaxAge: "",
fasthttp.HeaderAccessControlAllowCredentials: "",
},
status: fasthttp.StatusForbidden,
},
{
name: "empty Origin header",
corsConfig: &data.CORSConfiguration{
CORSRules: []data.CORSRule{
{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "HEAD"},
},
},
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
fasthttp.HeaderAccessControlAllowHeaders: "",
fasthttp.HeaderAccessControlExposeHeaders: "",
fasthttp.HeaderAccessControlMaxAge: "",
fasthttp.HeaderAccessControlAllowCredentials: "",
},
status: fasthttp.StatusBadRequest,
},
{
name: "empty Access-Control-Request-Method header",
corsConfig: &data.CORSConfiguration{
CORSRules: []data.CORSRule{
{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "HEAD"},
},
},
},
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://example.com",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
fasthttp.HeaderAccessControlAllowHeaders: "",
fasthttp.HeaderAccessControlExposeHeaders: "",
fasthttp.HeaderAccessControlMaxAge: "",
fasthttp.HeaderAccessControlAllowCredentials: "",
},
status: fasthttp.StatusBadRequest,
},
} {
t.Run(tc.name, func(t *testing.T) {
if tc.corsConfig != nil {
epoch++
setCORSObject(t, hc, cnrID, tc.corsConfig, epoch)
}
r := prepareCORSRequest(t, bktName, tc.requestHeaders)
hc.Handler().Preflight(r)
require.Equal(t, tc.status, r.Response.StatusCode())
for k, v := range tc.expectedHeaders {
require.Equal(t, v, string(r.Response.Header.Peek(k)))
}
})
}
})
t.Run("CORS config", func(t *testing.T) {
hc.cfg.cors = &data.CORSRule{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "HEAD"},
AllowedHeaders: []string{"Content-Type", "Content-Encoding"},
ExposeHeaders: []string{"x-amz-*", "X-Amz-*"},
MaxAgeSeconds: 900,
AllowedCredentials: true,
}
r := prepareCORSRequest(t, bktName, map[string]string{
fasthttp.HeaderOrigin: "http://example.com",
fasthttp.HeaderAccessControlRequestMethod: "GET",
})
hc.Handler().Preflight(r)
require.Equal(t, fasthttp.StatusOK, r.Response.StatusCode())
require.Equal(t, "900", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlMaxAge)))
require.Equal(t, "*", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowOrigin)))
require.Equal(t, "GET, HEAD", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowMethods)))
require.Equal(t, "Content-Type, Content-Encoding", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowHeaders)))
require.Equal(t, "x-amz-*, X-Amz-*", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlExposeHeaders)))
require.Equal(t, "true", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowCredentials)))
})
}
func TestSetCORSHeaders(t *testing.T) {
hc := prepareHandlerContext(t)
bktName := "bucket-set-cors-headers"
cnrID, cnr, err := hc.prepareContainer(bktName, acl.Private)
require.NoError(t, err)
hc.frostfs.SetContainer(cnrID, cnr)
var epoch uint64
t.Run("CORS object", func(t *testing.T) {
for _, tc := range []struct {
name string
corsConfig *data.CORSConfiguration
requestHeaders map[string]string
expectedHeaders map[string]string
}{
{
name: "empty Origin header",
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
fasthttp.HeaderVary: "",
fasthttp.HeaderAccessControlAllowCredentials: "",
},
},
{
name: "no CORS configuration",
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
fasthttp.HeaderVary: "",
fasthttp.HeaderAccessControlAllowCredentials: "",
},
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://example.com",
},
},
{
name: "specific allowed origin",
corsConfig: &data.CORSConfiguration{
CORSRules: []data.CORSRule{
{
AllowedOrigins: []string{"http://example.com"},
AllowedMethods: []string{"GET", "HEAD"},
},
},
},
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://example.com",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "http://example.com",
fasthttp.HeaderAccessControlAllowMethods: "GET, HEAD",
fasthttp.HeaderVary: fasthttp.HeaderOrigin,
fasthttp.HeaderAccessControlAllowCredentials: "true",
},
},
{
name: "wildcard allowed origin, with credentials",
corsConfig: &data.CORSConfiguration{
CORSRules: []data.CORSRule{
{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "HEAD"},
},
},
},
requestHeaders: func() map[string]string {
tkn := new(bearer.Token)
err = tkn.Sign(hc.key.PrivateKey)
require.NoError(t, err)
t64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
require.NotEmpty(t, t64)
return map[string]string{
fasthttp.HeaderOrigin: "http://example.com",
fasthttp.HeaderAuthorization: "Bearer " + t64,
}
}(),
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "http://example.com",
fasthttp.HeaderAccessControlAllowMethods: "GET, HEAD",
fasthttp.HeaderVary: fasthttp.HeaderOrigin,
fasthttp.HeaderAccessControlAllowCredentials: "true",
},
},
{
name: "wildcard allowed origin, without credentials",
corsConfig: &data.CORSConfiguration{
CORSRules: []data.CORSRule{
{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "HEAD"},
},
},
},
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://example.com",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "*",
fasthttp.HeaderAccessControlAllowMethods: "GET, HEAD",
fasthttp.HeaderVary: "",
fasthttp.HeaderAccessControlAllowCredentials: "",
},
},
} {
t.Run(tc.name, func(t *testing.T) {
epoch++
setCORSObject(t, hc, cnrID, tc.corsConfig, epoch)
r := prepareCORSRequest(t, bktName, tc.requestHeaders)
hc.Handler().SetCORSHeaders(r)
require.Equal(t, fasthttp.StatusOK, r.Response.StatusCode())
for k, v := range tc.expectedHeaders {
require.Equal(t, v, string(r.Response.Header.Peek(k)))
}
})
}
})
t.Run("CORS config", func(t *testing.T) {
hc.cfg.cors = &data.CORSRule{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "HEAD"},
AllowedHeaders: []string{"Content-Type", "Content-Encoding"},
ExposeHeaders: []string{"x-amz-*", "X-Amz-*"},
MaxAgeSeconds: 900,
AllowedCredentials: true,
}
r := prepareCORSRequest(t, bktName, map[string]string{fasthttp.HeaderOrigin: "http://example.com"})
hc.Handler().SetCORSHeaders(r)
require.Equal(t, "900", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlMaxAge)))
require.Equal(t, "*", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowOrigin)))
require.Equal(t, "GET, HEAD", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowMethods)))
require.Equal(t, "Content-Type, Content-Encoding", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowHeaders)))
require.Equal(t, "x-amz-*, X-Amz-*", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlExposeHeaders)))
require.Equal(t, "true", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowCredentials)))
})
}
func TestCheckSubslice(t *testing.T) {
for _, tc := range []struct {
name string
allowed []string
actual []string
expected bool
}{
{
name: "empty allowed slice",
allowed: []string{},
actual: []string{"str1", "str2", "str3"},
expected: false,
},
{
name: "empty actual slice",
allowed: []string{"str1", "str2", "str3"},
actual: []string{},
expected: true,
},
{
name: "allowed wildcard",
allowed: []string{"str", "*"},
actual: []string{"str1", "str2", "str3"},
expected: true,
},
{
name: "similar allowed and actual",
allowed: []string{"str1", "str2", "str3"},
actual: []string{"str1", "str2", "str3"},
expected: true,
},
{
name: "allowed actual",
allowed: []string{"str", "str1", "str2", "str4"},
actual: []string{"str1", "str2"},
expected: true,
},
{
name: "not allowed actual",
allowed: []string{"str", "str1", "str2", "str4"},
actual: []string{"str1", "str5"},
expected: false,
},
{
name: "wildcard in allowed",
allowed: []string{"str*"},
actual: []string{"str", "str5"},
expected: true,
},
} {
t.Run(tc.name, func(t *testing.T) {
require.Equal(t, tc.expected, checkSubslice(tc.allowed, tc.actual))
})
}
}
func TestAllowedOriginWildcards(t *testing.T) {
hc := prepareHandlerContext(t)
bktName := "bucket-allowed-origin-wildcards"
cnrID, cnr, err := hc.prepareContainer(bktName, acl.Private)
require.NoError(t, err)
hc.frostfs.SetContainer(cnrID, cnr)
cfg := &data.CORSConfiguration{
CORSRules: []data.CORSRule{
{
AllowedOrigins: []string{"*suffix.example"},
AllowedMethods: []string{"GET"},
},
{
AllowedOrigins: []string{"https://*example"},
AllowedMethods: []string{"GET"},
},
{
AllowedOrigins: []string{"prefix.example*"},
AllowedMethods: []string{"GET"},
},
},
}
setCORSObject(t, hc, cnrID, cfg, 1)
for _, tc := range []struct {
name string
handler func(*fasthttp.RequestCtx)
requestHeaders map[string]string
expectedHeaders map[string]string
expectedStatus int
}{
{
name: "set cors headers, empty request cors headers",
handler: hc.Handler().SetCORSHeaders,
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
},
{
name: "set cors headers, invalid origin",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://origin.com",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
},
{
name: "set cors headers, first rule, no symbols in place of wildcard",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "suffix.example",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "suffix.example",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "set cors headers, first rule, valid origin",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://suffix.example",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "http://suffix.example",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "set cors headers, first rule, invalid origin",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://suffix-example",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
},
{
name: "set cors headers, second rule, no symbols in place of wildcard",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://example",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "https://example",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "set cors headers, second rule, valid origin",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "https://www.example",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "set cors headers, second rule, invalid origin",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example.com",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
},
{
name: "set cors headers, third rule, no symbols in place of wildcard",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "prefix.example",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "prefix.example",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "set cors headers, third rule, valid origin",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "prefix.example.com",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "prefix.example.com",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "set cors headers, third rule, invalid origin",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "www.prefix.example",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
},
{
name: "set cors headers, third rule, invalid request method in header",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "prefix.example.com",
fasthttp.HeaderAccessControlRequestMethod: "PUT",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
},
{
name: "set cors headers, third rule, valid request method in header",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "prefix.example.com",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "prefix.example.com",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "preflight, empty request cors headers",
handler: hc.Handler().Preflight,
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
expectedStatus: http.StatusBadRequest,
},
{
name: "preflight, invalid origin",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://origin.com",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
expectedStatus: http.StatusForbidden,
},
{
name: "preflight, first rule, no symbols in place of wildcard",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "suffix.example",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "suffix.example",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "prelight, first rule, valid origin",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://suffix.example",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "http://suffix.example",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "preflight, first rule, invalid origin",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://suffix-example",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
expectedStatus: http.StatusForbidden,
},
{
name: "preflight, second rule, no symbols in place of wildcard",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://example",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "https://example",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "preflight, second rule, valid origin",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "https://www.example",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "preflight, second rule, invalid origin",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
expectedStatus: http.StatusForbidden,
},
{
name: "preflight, third rule, no symbols in place of wildcard",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "prefix.example",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "prefix.example",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "preflight, third rule, valid origin",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "prefix.example.com",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "prefix.example.com",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "preflight, third rule, invalid origin",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "www.prefix.example",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
expectedStatus: http.StatusForbidden,
},
{
name: "preflight, third rule, invalid request method in header",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "prefix.example.com",
fasthttp.HeaderAccessControlRequestMethod: "PUT",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
expectedStatus: http.StatusForbidden,
},
} {
t.Run(tc.name, func(t *testing.T) {
r := prepareCORSRequest(t, bktName, tc.requestHeaders)
tc.handler(r)
expectedStatus := fasthttp.StatusOK
if tc.expectedStatus != 0 {
expectedStatus = tc.expectedStatus
}
require.Equal(t, expectedStatus, r.Response.StatusCode())
for k, v := range tc.expectedHeaders {
require.Equal(t, v, string(r.Response.Header.Peek(k)))
}
})
}
}
func TestAllowedHeaderWildcards(t *testing.T) {
hc := prepareHandlerContext(t)
bktName := "bucket-allowed-header-wildcards"
cnrID, cnr, err := hc.prepareContainer(bktName, acl.Private)
require.NoError(t, err)
hc.frostfs.SetContainer(cnrID, cnr)
cfg := &data.CORSConfiguration{
CORSRules: []data.CORSRule{
{
AllowedOrigins: []string{"https://www.example.com"},
AllowedMethods: []string{"HEAD"},
AllowedHeaders: []string{"*-suffix"},
},
{
AllowedOrigins: []string{"https://www.example.com"},
AllowedMethods: []string{"HEAD"},
AllowedHeaders: []string{"start-*-end"},
},
{
AllowedOrigins: []string{"https://www.example.com"},
AllowedMethods: []string{"HEAD"},
AllowedHeaders: []string{"X-Amz-*"},
},
},
}
setCORSObject(t, hc, cnrID, cfg, 1)
for _, tc := range []struct {
name string
requestHeaders map[string]string
expectedHeaders map[string]string
expectedStatus int
}{
{
name: "first rule, valid headers",
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
fasthttp.HeaderAccessControlRequestHeaders: "header-suffix, -suffix",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlAllowMethods: "HEAD",
fasthttp.HeaderAccessControlAllowHeaders: "header-suffix, -suffix",
},
},
{
name: "first rule, invalid headers",
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
fasthttp.HeaderAccessControlRequestHeaders: "header-suffix-*",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
fasthttp.HeaderAccessControlAllowHeaders: "",
},
expectedStatus: http.StatusForbidden,
},
{
name: "second rule, valid headers",
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
fasthttp.HeaderAccessControlRequestHeaders: "start--end, start-header-end",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlAllowMethods: "HEAD",
fasthttp.HeaderAccessControlAllowHeaders: "start--end, start-header-end",
},
},
{
name: "second rule, invalid header ending",
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
fasthttp.HeaderAccessControlRequestHeaders: "start-header-end-*",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
fasthttp.HeaderAccessControlAllowHeaders: "",
},
expectedStatus: http.StatusForbidden,
},
{
name: "second rule, invalid header beginning",
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
fasthttp.HeaderAccessControlRequestHeaders: "*-start-header-end",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
fasthttp.HeaderAccessControlAllowHeaders: "",
},
expectedStatus: http.StatusForbidden,
},
{
name: "third rule, valid headers",
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
fasthttp.HeaderAccessControlRequestHeaders: "X-Amz-Date, X-Amz-Content-Sha256",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlAllowMethods: "HEAD",
fasthttp.HeaderAccessControlAllowHeaders: "X-Amz-Date, X-Amz-Content-Sha256",
},
},
{
name: "third rule, invalid headers",
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
fasthttp.HeaderAccessControlRequestHeaders: "Authorization",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
fasthttp.HeaderAccessControlAllowHeaders: "",
},
expectedStatus: http.StatusForbidden,
},
} {
t.Run(tc.name, func(t *testing.T) {
r := prepareCORSRequest(t, bktName, tc.requestHeaders)
hc.Handler().Preflight(r)
expectedStatus := http.StatusOK
if tc.expectedStatus != 0 {
expectedStatus = tc.expectedStatus
}
require.Equal(t, expectedStatus, r.Response.StatusCode())
for k, v := range tc.expectedHeaders {
require.Equal(t, v, string(r.Response.Header.Peek(k)))
}
})
}
}
func setCORSObject(t *testing.T, hc *handlerContext, cnrID cid.ID, corsConfig *data.CORSConfiguration, epoch uint64) {
payload, err := xml.Marshal(corsConfig)
require.NoError(t, err)
a := object.NewAttribute()
a.SetKey(object.AttributeFilePath)
a.SetValue(fmt.Sprintf(corsFilePathTemplate, cnrID))
objID := oidtest.ID()
obj := object.New()
obj.SetAttributes(*a)
obj.SetOwnerID(hc.owner)
obj.SetPayload(payload)
obj.SetPayloadSize(uint64(len(payload)))
obj.SetContainerID(hc.corsCnr)
obj.SetID(objID)
obj.SetCreationEpoch(epoch)
var addr oid.Address
addr.SetObject(objID)
addr.SetContainer(hc.corsCnr)
hc.frostfs.SetObject(addr, obj)
}


@ -10,11 +10,12 @@ import (
"fmt"
"io"
"net/url"
"strings"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@ -25,57 +26,199 @@ import (
)
// DownloadByAddressOrBucketName handles download requests using simple cid/oid or bucketname/key format.
func (h *Handler) DownloadByAddressOrBucketName(c *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.DownloadByAddressOrBucketName")
func (h *Handler) DownloadByAddressOrBucketName(req *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.DownloadByAddressOrBucketName")
defer span.End()
utils.SetContextToRequest(ctx, c)
cidParam := c.UserValue("cid").(string)
oidParam := c.UserValue("oid").(string)
downloadParam := c.QueryArgs().GetBool("download")
cidParam := req.UserValue("cid").(string)
oidParam := req.UserValue("oid").(string)
log := utils.GetReqLogOrDefault(ctx, h.log).With(
ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(
zap.String("cid", cidParam),
zap.String("oid", oidParam),
)
))
bktInfo, err := h.getBucketInfo(ctx, cidParam, log)
path, err := url.QueryUnescape(oidParam)
if err != nil {
logAndSendBucketError(c, log, err)
h.logAndSendError(ctx, req, logs.FailedToUnescapePath, err)
return
}
bktInfo, err := h.getBucketInfo(ctx, cidParam)
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err)
return
}
checkS3Err := h.tree.CheckSettingsNodeExists(ctx, bktInfo)
if checkS3Err != nil && !errors.Is(checkS3Err, layer.ErrNodeNotFound) {
log.Error(logs.FailedToCheckIfSettingsNodeExist, zap.String("cid", bktInfo.CID.String()),
zap.Error(checkS3Err), logs.TagField(logs.TagExternalStorageTree))
logAndSendBucketError(c, log, checkS3Err)
if checkS3Err != nil && !errors.Is(checkS3Err, tree.ErrNodeNotFound) {
h.logAndSendError(ctx, req, logs.FailedToCheckIfSettingsNodeExist, checkS3Err)
return
}
req := newRequest(c, log)
prm := MiddlewareParam{
Context: ctx,
Request: req,
BktInfo: bktInfo,
Path: path,
}
var objID oid.ID
if checkS3Err == nil && shouldDownload(oidParam, downloadParam) {
h.byS3Path(ctx, req, bktInfo.CID, oidParam, h.receiveFile)
} else if err = objID.DecodeString(oidParam); err == nil {
h.byNativeAddress(ctx, req, bktInfo.CID, objID, h.receiveFile)
indexPageEnabled := h.config.IndexPageEnabled()
if checkS3Err == nil {
run(prm, h.errorMiddleware(logs.ObjectNotFound, ErrObjectNotFound),
Middleware{Func: h.byS3PathMiddleware(h.receiveFile, noopFormer), Enabled: true},
Middleware{Func: h.byS3PathMiddleware(h.receiveFile, indexFormer), Enabled: indexPageEnabled},
Middleware{Func: h.browseIndexMiddleware(h.getDirObjectsS3), Enabled: indexPageEnabled},
)
} else {
h.browseIndex(c, checkS3Err != nil)
slashFallbackEnabled := h.config.EnableFilepathSlashFallback()
fileNameFallbackEnabled := h.config.EnableFilepathFallback()
run(prm, h.errorMiddleware(logs.ObjectNotFound, ErrObjectNotFound),
Middleware{Func: h.byAddressMiddleware(h.receiveFile), Enabled: true},
Middleware{Func: h.byAttributeSearchMiddleware(h.receiveFile, object.AttributeFilePath, noopFormer), Enabled: true},
Middleware{Func: h.byAttributeSearchMiddleware(h.receiveFile, object.AttributeFilePath, reverseLeadingSlash), Enabled: slashFallbackEnabled},
Middleware{Func: h.byAttributeSearchMiddleware(h.receiveFile, object.AttributeFileName, noopFormer), Enabled: fileNameFallbackEnabled},
Middleware{Func: h.byAttributeSearchMiddleware(h.receiveFile, object.AttributeFileName, reverseLeadingSlash), Enabled: fileNameFallbackEnabled && slashFallbackEnabled},
Middleware{Func: h.byAttributeSearchMiddleware(h.receiveFile, object.AttributeFilePath, indexFormer), Enabled: indexPageEnabled},
Middleware{Func: h.byAttributeSearchMiddleware(h.receiveFile, object.AttributeFileName, indexFormer), Enabled: fileNameFallbackEnabled && indexPageEnabled},
Middleware{Func: h.browseIndexMiddleware(h.getDirObjectsNative), Enabled: indexPageEnabled},
)
}
}
func shouldDownload(oidParam string, downloadParam bool) bool {
return !isDir(oidParam) || downloadParam
type ObjectHandlerFunc func(context.Context, *fasthttp.RequestCtx, oid.Address)
type MiddlewareFunc func(param MiddlewareParam) bool
type MiddlewareParam struct {
Context context.Context
Request *fasthttp.RequestCtx
BktInfo *data.BucketInfo
Path string
}
type Middleware struct {
Func MiddlewareFunc
Enabled bool
}
func run(prm MiddlewareParam, defaultMiddleware MiddlewareFunc, middlewares ...Middleware) {
for _, m := range middlewares {
if m.Enabled && !m.Func(prm) {
return
}
}
defaultMiddleware(prm)
}
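
The return-value convention is the only subtle part here: a middleware returns false once it has written a response (success or error) and true to let the next candidate run; the default middleware fires only if every enabled middleware fell through. A simplified, self-contained sketch of the same control flow (the Enabled check is omitted and all names are invented):

```go
package main

import "fmt"

// step returns false when it has fully handled the request,
// true when the next candidate should be tried.
type step func() bool

func runChain(fallback func(), steps ...step) {
	for _, s := range steps {
		if !s() {
			return
		}
	}
	fallback()
}

func main() {
	runChain(
		func() { fmt.Println("not found") },                                 // default: error response
		func() bool { fmt.Println("try exact FilePath"); return true },     // miss: fall through
		func() bool { fmt.Println("try FileName fallback"); return false }, // hit: stop here
	)
	// Output:
	// try exact FilePath
	// try FileName fallback
}
```
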
func indexFormer(path string) string {
indexPath := path
if indexPath != "" && !strings.HasSuffix(indexPath, "/") {
indexPath += "/"
}
return indexPath + "index.html"
}
func reverseLeadingSlash(path string) string {
if path == "" || path == "/" {
return path
}
if path[0] == '/' {
return path[1:]
}
return "/" + path
}
func noopFormer(path string) string {
return path
}
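
A short sketch of what the three path formers above produce, written as a package-internal test since the functions are unexported; the paths are illustrative.

```go
package handler

import "testing"

func TestPathFormers(t *testing.T) {
	cases := []struct {
		former   func(string) string
		in, want string
	}{
		{indexFormer, "docs", "docs/index.html"},
		{indexFormer, "docs/", "docs/index.html"},
		{indexFormer, "", "index.html"},
		{reverseLeadingSlash, "/docs/readme.md", "docs/readme.md"},
		{reverseLeadingSlash, "docs/readme.md", "/docs/readme.md"},
		{reverseLeadingSlash, "/", "/"},
		{noopFormer, "docs/readme.md", "docs/readme.md"},
	}
	for _, c := range cases {
		if got := c.former(c.in); got != c.want {
			t.Errorf("got %q, want %q", got, c.want)
		}
	}
}
```
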
func (h *Handler) byS3PathMiddleware(handler func(context.Context, *fasthttp.RequestCtx, oid.Address), pathFormer func(string) string) MiddlewareFunc {
return func(prm MiddlewareParam) bool {
ctx, span := tracing.StartSpanFromContext(prm.Context, "handler.byS3Path")
defer span.End()
path := pathFormer(prm.Path)
foundOID, err := h.tree.GetLatestVersion(ctx, &prm.BktInfo.CID, path)
if err == nil {
if foundOID.IsDeleteMarker {
h.logAndSendError(ctx, prm.Request, logs.IndexWasDeleted, ErrObjectNotFound)
return false
}
addr := newAddress(prm.BktInfo.CID, foundOID.OID)
handler(ctx, prm.Request, addr)
return false
}
if !errors.Is(err, tree.ErrNodeNotFound) {
h.logAndSendError(ctx, prm.Request, logs.FailedToGetLatestVersionOfIndexObject, err, zap.String("path", path))
return false
}
return true
}
}
func (h *Handler) byAttributeSearchMiddleware(handler ObjectHandlerFunc, attr string, pathFormer func(string) string) MiddlewareFunc {
return func(prm MiddlewareParam) bool {
ctx, span := tracing.StartSpanFromContext(prm.Context, "handler.byAttributeSearch")
defer span.End()
path := pathFormer(prm.Path)
res, err := h.search(ctx, prm.BktInfo.CID, attr, path, object.MatchStringEqual)
if err != nil {
h.logAndSendError(ctx, prm.Request, logs.FailedToFindObjectByAttribute, err)
return false
}
defer res.Close()
buf := make([]oid.ID, 1)
n, err := res.Read(buf)
if err == nil && n > 0 {
addr := newAddress(prm.BktInfo.CID, buf[0])
handler(ctx, prm.Request, addr)
return false
}
if !errors.Is(err, io.EOF) {
h.logAndSendError(ctx, prm.Request, logs.FailedToFindObjectByAttribute, err)
return false
}
return true
}
}
func (h *Handler) byAddressMiddleware(handler ObjectHandlerFunc) MiddlewareFunc {
return func(prm MiddlewareParam) bool {
ctx, span := tracing.StartSpanFromContext(prm.Context, "handler.byAddress")
defer span.End()
var objID oid.ID
if objID.DecodeString(prm.Path) == nil {
handler(ctx, prm.Request, newAddress(prm.BktInfo.CID, objID))
return false
}
return true
}
}
// DownloadByAttribute handles attribute-based download requests.
func (h *Handler) DownloadByAttribute(c *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.DownloadByAttribute")
func (h *Handler) DownloadByAttribute(req *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.DownloadByAttribute")
defer span.End()
utils.SetContextToRequest(ctx, c)
h.byAttribute(c, h.receiveFile)
h.byAttribute(ctx, req, h.receiveFile)
}
func (h *Handler) search(ctx context.Context, cnrID cid.ID, key, val string, op object.SearchMatchType) (ResObjectSearch, error) {
@ -95,31 +238,33 @@ func (h *Handler) search(ctx context.Context, cnrID cid.ID, key, val string, op
}
// DownloadZip handles zip by prefix requests.
func (h *Handler) DownloadZip(c *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.DownloadZip")
func (h *Handler) DownloadZip(req *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.DownloadZip")
defer span.End()
utils.SetContextToRequest(ctx, c)
scid, _ := c.UserValue("cid").(string)
scid, _ := req.UserValue("cid").(string)
prefix, _ := req.UserValue("prefix").(string)
log := utils.GetReqLogOrDefault(ctx, h.log)
bktInfo, err := h.getBucketInfo(ctx, scid, log)
ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(zap.String("cid", scid), zap.String("prefix", prefix)))
bktInfo, err := h.getBucketInfo(ctx, scid)
if err != nil {
logAndSendBucketError(c, log, err)
h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err)
return
}
resSearch, err := h.searchObjectsByPrefix(c, log, bktInfo.CID)
resSearch, err := h.searchObjectsByPrefix(ctx, bktInfo.CID, prefix)
if err != nil {
return
}
c.Response.Header.Set(fasthttp.HeaderContentType, "application/zip")
c.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.zip\"")
req.Response.Header.Set(fasthttp.HeaderContentType, "application/zip")
req.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.zip\"")
c.SetBodyStreamWriter(h.getZipResponseWriter(ctx, log, resSearch, bktInfo))
req.SetBodyStreamWriter(h.getZipResponseWriter(ctx, resSearch, bktInfo))
}
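
DownloadZip (and DownloadTar below) streams an archive of every object whose FilePath matches the requested prefix; the body is produced through fasthttp's stream writer, so the archive is never buffered in memory as a whole. A hedged sketch of how such handlers are typically registered (the real route definitions live outside this diff, and the paths shown are assumptions):

// hypothetical wiring with github.com/fasthttp/router; actual route patterns may differ
r := router.New()
r.GET("/zip/{cid}/{prefix:*}", h.DownloadZip)
r.GET("/tar/{cid}/{prefix:*}", h.DownloadTar)
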
func (h *Handler) getZipResponseWriter(ctx context.Context, log *zap.Logger, resSearch ResObjectSearch, bktInfo *data.BucketInfo) func(w *bufio.Writer) {
func (h *Handler) getZipResponseWriter(ctx context.Context, resSearch ResObjectSearch, bktInfo *data.BucketInfo) func(w *bufio.Writer) {
return func(w *bufio.Writer) {
defer resSearch.Close()
@ -127,20 +272,20 @@ func (h *Handler) getZipResponseWriter(ctx context.Context, log *zap.Logger, res
zipWriter := zip.NewWriter(w)
var objectsWritten int
errIter := resSearch.Iterate(h.putObjectToArchive(ctx, log, bktInfo.CID, buf,
errIter := resSearch.Iterate(h.putObjectToArchive(ctx, bktInfo.CID, buf,
func(obj *object.Object) (io.Writer, error) {
objectsWritten++
return h.createZipFile(zipWriter, obj)
}),
)
if errIter != nil {
log.Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter), logs.TagField(logs.TagDatapath))
h.reqLogger(ctx).Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter), logs.TagField(logs.TagDatapath))
return
} else if objectsWritten == 0 {
log.Warn(logs.ObjectsNotFound, logs.TagField(logs.TagDatapath))
h.reqLogger(ctx).Warn(logs.ObjectsNotFound, logs.TagField(logs.TagDatapath))
}
if err := zipWriter.Close(); err != nil {
log.Error(logs.CloseZipWriter, zap.Error(err), logs.TagField(logs.TagDatapath))
h.reqLogger(ctx).Error(logs.CloseZipWriter, zap.Error(err), logs.TagField(logs.TagDatapath))
}
}
}
@ -164,31 +309,33 @@ func (h *Handler) createZipFile(zw *zip.Writer, obj *object.Object) (io.Writer,
}
// DownloadTar forms tar.gz from objects by prefix.
func (h *Handler) DownloadTar(c *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.DownloadTar")
func (h *Handler) DownloadTar(req *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.DownloadTar")
defer span.End()
utils.SetContextToRequest(ctx, c)
scid, _ := c.UserValue("cid").(string)
scid, _ := req.UserValue("cid").(string)
prefix, _ := req.UserValue("prefix").(string)
log := utils.GetReqLogOrDefault(ctx, h.log)
bktInfo, err := h.getBucketInfo(ctx, scid, log)
ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(zap.String("cid", scid), zap.String("prefix", prefix)))
bktInfo, err := h.getBucketInfo(ctx, scid)
if err != nil {
logAndSendBucketError(c, log, err)
h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err)
return
}
resSearch, err := h.searchObjectsByPrefix(c, log, bktInfo.CID)
resSearch, err := h.searchObjectsByPrefix(ctx, bktInfo.CID, prefix)
if err != nil {
return
}
c.Response.Header.Set(fasthttp.HeaderContentType, "application/gzip")
c.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.tar.gz\"")
req.Response.Header.Set(fasthttp.HeaderContentType, "application/gzip")
req.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.tar.gz\"")
c.SetBodyStreamWriter(h.getTarResponseWriter(ctx, log, resSearch, bktInfo))
req.SetBodyStreamWriter(h.getTarResponseWriter(ctx, resSearch, bktInfo))
}
func (h *Handler) getTarResponseWriter(ctx context.Context, log *zap.Logger, resSearch ResObjectSearch, bktInfo *data.BucketInfo) func(w *bufio.Writer) {
func (h *Handler) getTarResponseWriter(ctx context.Context, resSearch ResObjectSearch, bktInfo *data.BucketInfo) func(w *bufio.Writer) {
return func(w *bufio.Writer) {
defer resSearch.Close()
@ -203,26 +350,26 @@ func (h *Handler) getTarResponseWriter(ctx context.Context, log *zap.Logger, res
defer func() {
if err := tarWriter.Close(); err != nil {
log.Error(logs.CloseTarWriter, zap.Error(err), logs.TagField(logs.TagDatapath))
h.reqLogger(ctx).Error(logs.CloseTarWriter, zap.Error(err), logs.TagField(logs.TagDatapath))
}
if err := gzipWriter.Close(); err != nil {
log.Error(logs.CloseGzipWriter, zap.Error(err), logs.TagField(logs.TagDatapath))
h.reqLogger(ctx).Error(logs.CloseGzipWriter, zap.Error(err), logs.TagField(logs.TagDatapath))
}
}()
var objectsWritten int
buf := make([]byte, 3<<20) // the same as for upload
errIter := resSearch.Iterate(h.putObjectToArchive(ctx, log, bktInfo.CID, buf,
errIter := resSearch.Iterate(h.putObjectToArchive(ctx, bktInfo.CID, buf,
func(obj *object.Object) (io.Writer, error) {
objectsWritten++
return h.createTarFile(tarWriter, obj)
}),
)
if errIter != nil {
log.Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter), logs.TagField(logs.TagDatapath))
h.reqLogger(ctx).Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter), logs.TagField(logs.TagDatapath))
} else if objectsWritten == 0 {
log.Warn(logs.ObjectsNotFound, logs.TagField(logs.TagDatapath))
h.reqLogger(ctx).Warn(logs.ObjectsNotFound, logs.TagField(logs.TagDatapath))
}
}
}
@ -240,9 +387,9 @@ func (h *Handler) createTarFile(tw *tar.Writer, obj *object.Object) (io.Writer,
})
}
func (h *Handler) putObjectToArchive(ctx context.Context, log *zap.Logger, cnrID cid.ID, buf []byte, createArchiveHeader func(obj *object.Object) (io.Writer, error)) func(id oid.ID) bool {
func (h *Handler) putObjectToArchive(ctx context.Context, cnrID cid.ID, buf []byte, createArchiveHeader func(obj *object.Object) (io.Writer, error)) func(id oid.ID) bool {
return func(id oid.ID) bool {
log = log.With(zap.String("oid", id.EncodeToString()))
logger := h.reqLogger(ctx).With(zap.String("oid", id.EncodeToString()))
prm := PrmObjectGet{
PrmAuth: PrmAuth{
@ -253,18 +400,18 @@ func (h *Handler) putObjectToArchive(ctx context.Context, log *zap.Logger, cnrID
resGet, err := h.frostfs.GetObject(ctx, prm)
if err != nil {
log.Error(logs.FailedToGetObject, zap.Error(err), logs.TagField(logs.TagExternalStorage))
logger.Error(logs.FailedToGetObject, zap.Error(err), logs.TagField(logs.TagExternalStorage))
return false
}
fileWriter, err := createArchiveHeader(&resGet.Header)
if err != nil {
log.Error(logs.FailedToAddObjectToArchive, zap.Error(err), logs.TagField(logs.TagDatapath))
logger.Error(logs.FailedToAddObjectToArchive, zap.Error(err), logs.TagField(logs.TagDatapath))
return false
}
if err = writeToArchive(resGet, fileWriter, buf); err != nil {
log.Error(logs.FailedToAddObjectToArchive, zap.Error(err), logs.TagField(logs.TagDatapath))
logger.Error(logs.FailedToAddObjectToArchive, zap.Error(err), logs.TagField(logs.TagDatapath))
return false
}
@ -272,28 +419,17 @@ func (h *Handler) putObjectToArchive(ctx context.Context, log *zap.Logger, cnrID
}
}
func (h *Handler) searchObjectsByPrefix(c *fasthttp.RequestCtx, log *zap.Logger, cnrID cid.ID) (ResObjectSearch, error) {
scid, _ := c.UserValue("cid").(string)
prefix, _ := c.UserValue("prefix").(string)
ctx := utils.GetContextFromRequest(c)
func (h *Handler) searchObjectsByPrefix(ctx context.Context, cnrID cid.ID, prefix string) (ResObjectSearch, error) {
prefix, err := url.QueryUnescape(prefix)
if err != nil {
log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("prefix", prefix),
zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, "could not unescape prefix: "+err.Error(), fasthttp.StatusBadRequest)
return nil, err
return nil, fmt.Errorf("unescape prefix: %w", err)
}
log = log.With(zap.String("cid", scid), zap.String("prefix", prefix))
resSearch, err := h.search(ctx, cnrID, object.AttributeFilePath, prefix, object.MatchCommonPrefix)
if err != nil {
log.Error(logs.CouldNotSearchForObjects, zap.Error(err), logs.TagField(logs.TagExternalStorage))
ResponseError(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
return nil, err
return nil, fmt.Errorf("search objects by prefix: %w", err)
}
return resSearch, nil
}


@ -52,6 +52,10 @@ func (t *TestFrostFS) SetContainer(cnrID cid.ID, cnr *container.Container) {
t.containers[cnrID.EncodeToString()] = cnr
}
func (t *TestFrostFS) SetObject(addr oid.Address, obj *object.Object) {
t.objects[addr.EncodeToString()] = obj
}
// AllowUserOperation grants access to object operations.
// Empty userID and objID mean any user and object, respectively.
func (t *TestFrostFS) AllowUserOperation(cnrID cid.ID, userID user.ID, op acl.Op, objID oid.ID) {
@ -229,6 +233,16 @@ func (t *TestFrostFS) SearchObjects(_ context.Context, prm PrmObjectSearch) (Res
return &resObjectSearchMock{res: res}, nil
}
func (t *TestFrostFS) GetContainerByID(cid cid.ID) (*container.Container, error) {
for k, v := range t.containers {
if k == cid.EncodeToString() {
return v, nil
}
}
return nil, fmt.Errorf("container does not exist %s", cid)
}
func (t *TestFrostFS) InitMultiObjectReader(context.Context, PrmInitMultiObjectReader) (io.Reader, error) {
return nil, nil
}


@ -11,12 +11,11 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@ -36,6 +35,9 @@ type Config interface {
BufferMaxSizeForPut() uint64
NamespaceHeader() string
EnableFilepathFallback() bool
EnableFilepathSlashFallback() bool
FormContainerZone(string) string
CORS() *data.CORSRule
}
// PrmContainer groups parameters of FrostFS.Container operation.
@ -142,6 +144,10 @@ var (
ErrGatewayTimeout = errors.New("gateway timeout")
// ErrQuotaLimitReached is returned from FrostFS in case of quota exceeded.
ErrQuotaLimitReached = errors.New("quota limit reached")
// ErrContainerNotFound is returned from FrostFS in case the container was not found.
ErrContainerNotFound = errors.New("container not found")
// ErrObjectNotFound is returned from FrostFS in case the object was not found.
ErrObjectNotFound = errors.New("object not found")
)
// FrostFS represents virtual connection to FrostFS network.
@ -158,7 +164,12 @@ type FrostFS interface {
}
type ContainerResolver interface {
Resolve(ctx context.Context, name string) (*cid.ID, error)
Resolve(ctx context.Context, zone, name string) (*cid.ID, error)
}
type ContainerContract interface {
// GetContainerByID reads a container from the contract by ID.
GetContainerByID(cid.ID) (*container.Container, error)
}
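
ContainerContract is the minimal client the handler needs in order to read container info from the container contract; in the tests below, TestFrostFS satisfies it via its GetContainerByID method. A rough wiring sketch, where params, cfgMock, treeMock, logger and workerPool stand for values the caller already has:

// hedged sketch; any type implementing GetContainerByID(cid.ID) (*container.Container, error) will do
var cnrClient ContainerContract = testFrostFS
h := New(params, cfgMock, tree.NewTree(treeMock, logger), cnrClient, workerPool)

// container info can then be read straight from the contract client:
cnr, err := cnrClient.GetContainerByID(cnrID)
if err != nil {
	// the container is missing or the contract call failed
}
_ = cnr
_ = h
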
type Handler struct {
@ -167,20 +178,25 @@ type Handler struct {
ownerID *user.ID
config Config
containerResolver ContainerResolver
tree layer.TreeService
cnrContract ContainerContract
tree *tree.Tree
cache *cache.BucketCache
workerPool *ants.Pool
corsCnrID cid.ID
corsCache *cache.CORSCache
}
type AppParams struct {
Logger *zap.Logger
FrostFS FrostFS
Owner *user.ID
Resolver ContainerResolver
Cache *cache.BucketCache
Logger *zap.Logger
FrostFS FrostFS
Owner *user.ID
Resolver ContainerResolver
Cache *cache.BucketCache
CORSCnrID cid.ID
CORSCache *cache.CORSCache
}
func New(params *AppParams, config Config, tree layer.TreeService, workerPool *ants.Pool) *Handler {
func New(params *AppParams, config Config, tree *tree.Tree, rpcCli ContainerContract, workerPool *ants.Pool) *Handler {
return &Handler{
log: params.Logger,
frostfs: params.FrostFS,
@ -190,89 +206,45 @@ func New(params *AppParams, config Config, tree layer.TreeService, workerPool *a
tree: tree,
cache: params.Cache,
workerPool: workerPool,
corsCnrID: params.CORSCnrID,
corsCache: params.CORSCache,
cnrContract: rpcCli,
}
}
// byNativeAddress is a wrapper for function (e.g. request.headObject, request.receiveFile) that
// prepares request and object address to it.
func (h *Handler) byNativeAddress(ctx context.Context, req request, cnrID cid.ID, objID oid.ID, handler func(context.Context, request, oid.Address)) {
ctx, span := tracing.StartSpanFromContext(ctx, "handler.byNativeAddress")
defer span.End()
addr := newAddress(cnrID, objID)
handler(ctx, req, addr)
}
// byS3Path is a wrapper for function (e.g. request.headObject, request.receiveFile) that
// resolves object address from S3-like path <bucket name>/<object key>.
func (h *Handler) byS3Path(ctx context.Context, req request, cnrID cid.ID, path string, handler func(context.Context, request, oid.Address)) {
ctx, span := tracing.StartSpanFromContext(ctx, "handler.byS3Path")
defer span.End()
c, log := req.RequestCtx, req.log
foundOID, err := h.tree.GetLatestVersion(ctx, &cnrID, path)
if err != nil {
log.Error(logs.FailedToGetLatestVersionOfObject, zap.Error(err), zap.String("cid", cnrID.String()),
zap.String("path", path), logs.TagField(logs.TagExternalStorageTree))
logAndSendBucketError(c, log, err)
return
}
if foundOID.IsDeleteMarker {
log.Error(logs.ObjectWasDeleted, logs.TagField(logs.TagExternalStorageTree))
ResponseError(c, "object deleted", fasthttp.StatusNotFound)
return
}
addr := newAddress(cnrID, foundOID.OID)
handler(ctx, newRequest(c, log), addr)
}
// byAttribute is a wrapper similar to byNativeAddress.
func (h *Handler) byAttribute(c *fasthttp.RequestCtx, handler func(context.Context, request, oid.Address)) {
cidParam, _ := c.UserValue("cid").(string)
key, _ := c.UserValue("attr_key").(string)
val, _ := c.UserValue("attr_val").(string)
ctx := utils.GetContextFromRequest(c)
log := utils.GetReqLogOrDefault(ctx, h.log)
func (h *Handler) byAttribute(ctx context.Context, req *fasthttp.RequestCtx, handler func(context.Context, *fasthttp.RequestCtx, oid.Address)) {
cidParam, _ := req.UserValue("cid").(string)
key, _ := req.UserValue("attr_key").(string)
val, _ := req.UserValue("attr_val").(string)
key, err := url.QueryUnescape(key)
if err != nil {
log.Error(logs.FailedToUnescapeQuery, zap.String("cid", cidParam), zap.String("attr_key", key),
zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, "could not unescape attr_key: "+err.Error(), fasthttp.StatusBadRequest)
h.logAndSendError(ctx, req, logs.FailedToUnescapeQuery, err, zap.String("cid", cidParam), zap.String("attr_key", key))
return
}
val, err = url.QueryUnescape(val)
if err != nil {
log.Error(logs.FailedToUnescapeQuery, zap.String("cid", cidParam), zap.String("attr_val", val),
zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, "could not unescape attr_val: "+err.Error(), fasthttp.StatusBadRequest)
h.logAndSendError(ctx, req, logs.FailedToUnescapeQuery, err, zap.String("cid", cidParam), zap.String("attr_val", val))
return
}
if key == attrFileName {
val = prepareFileName(val)
}
ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(zap.String("cid", cidParam),
zap.String("attr_key", key), zap.String("attr_val", val)))
log = log.With(zap.String("cid", cidParam), zap.String("attr_key", key), zap.String("attr_val", val))
bktInfo, err := h.getBucketInfo(ctx, cidParam, log)
bktInfo, err := h.getBucketInfo(ctx, cidParam)
if err != nil {
logAndSendBucketError(c, log, err)
h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err)
return
}
objID, err := h.findObjectByAttribute(ctx, log, bktInfo.CID, key, val)
objID, err := h.findObjectByAttribute(ctx, bktInfo.CID, key, val)
if err != nil {
if errors.Is(err, io.EOF) {
ResponseError(c, err.Error(), fasthttp.StatusNotFound)
return
err = fmt.Errorf("%w: %s", ErrObjectNotFound, err.Error())
}
ResponseError(c, err.Error(), fasthttp.StatusBadRequest)
h.logAndSendError(ctx, req, logs.FailedToFindObjectByAttribute, err)
return
}
@ -280,14 +252,13 @@ func (h *Handler) byAttribute(c *fasthttp.RequestCtx, handler func(context.Conte
addr.SetContainer(bktInfo.CID)
addr.SetObject(objID)
handler(ctx, newRequest(c, log), addr)
handler(ctx, req, addr)
}
func (h *Handler) findObjectByAttribute(ctx context.Context, log *zap.Logger, cnrID cid.ID, attrKey, attrVal string) (oid.ID, error) {
func (h *Handler) findObjectByAttribute(ctx context.Context, cnrID cid.ID, attrKey, attrVal string) (oid.ID, error) {
res, err := h.search(ctx, cnrID, attrKey, attrVal, object.MatchStringEqual)
if err != nil {
log.Error(logs.CouldNotSearchForObjects, zap.Error(err), logs.TagField(logs.TagExternalStorage))
return oid.ID{}, fmt.Errorf("could not search for objects: %w", err)
return oid.ID{}, fmt.Errorf("search objects: %w", err)
}
defer res.Close()
@ -296,14 +267,11 @@ func (h *Handler) findObjectByAttribute(ctx context.Context, log *zap.Logger, cn
n, err := res.Read(buf)
if n == 0 {
switch {
case errors.Is(err, io.EOF) && h.needSearchByFileName(attrKey, attrVal):
log.Debug(logs.ObjectNotFoundByFilePathTrySearchByFileName, logs.TagField(logs.TagExternalStorage))
return h.findObjectByAttribute(ctx, log, cnrID, attrFileName, prepareFileName(attrVal))
case errors.Is(err, io.EOF):
log.Error(logs.ObjectNotFound, zap.Error(err), logs.TagField(logs.TagExternalStorage))
h.reqLogger(ctx).Error(logs.ObjectNotFound, zap.Error(err), logs.TagField(logs.TagExternalStorage))
return oid.ID{}, fmt.Errorf("object not found: %w", err)
default:
log.Error(logs.ReadObjectListFailed, zap.Error(err), logs.TagField(logs.TagExternalStorage))
h.reqLogger(ctx).Error(logs.ReadObjectListFailed, zap.Error(err), logs.TagField(logs.TagExternalStorage))
return oid.ID{}, fmt.Errorf("read object list failed: %w", err)
}
}
@ -311,37 +279,28 @@ func (h *Handler) findObjectByAttribute(ctx context.Context, log *zap.Logger, cn
return buf[0], nil
}
func (h *Handler) needSearchByFileName(key, val string) bool {
if key != attrFilePath || !h.config.EnableFilepathFallback() {
return false
}
return strings.HasPrefix(val, "/") && strings.Count(val, "/") == 1 || !strings.Contains(val, "/")
}
func prepareFileName(fileName string) string {
if strings.HasPrefix(fileName, "/") {
return fileName[1:]
}
return fileName
}
// resolveContainer decodes the container ID; if it's not a valid container ID,
// it then tries to resolve the name using the provided resolver.
func (h *Handler) resolveContainer(ctx context.Context, containerID string) (*cid.ID, error) {
cnrID := new(cid.ID)
err := cnrID.DecodeString(containerID)
if err != nil {
cnrID, err = h.containerResolver.Resolve(ctx, containerID)
var namespace string
namespace, err = middleware.GetNamespace(ctx)
if err != nil {
return nil, err
}
zone := h.config.FormContainerZone(namespace)
cnrID, err = h.containerResolver.Resolve(ctx, zone, containerID)
if err != nil && strings.Contains(err.Error(), "not found") {
err = fmt.Errorf("%w: %s", new(apistatus.ContainerNotFound), err.Error())
err = fmt.Errorf("%w: %s", ErrContainerNotFound, err.Error())
}
}
return cnrID, err
}
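
resolveContainer first tries to parse the string as a raw container ID and only falls back to name resolution when decoding fails; the resolver now also receives the zone derived from the request namespace through FormContainerZone. A hedged usage sketch (the literal values are illustrative):

// "bucket" is not a valid base58 container ID, so DecodeString fails and the
// name is passed to the resolver together with the namespace zone
// (FormContainerZone typically yields "container" for the default namespace).
cnrID, err := h.resolveContainer(ctx, "bucket")
switch {
case errors.Is(err, ErrContainerNotFound):
	// the name is not registered with the resolver (e.g. in NNS)
case err != nil:
	// the namespace is missing from the context or the resolver call failed
default:
	_ = cnrID // resolved container ID
}
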
func (h *Handler) getBucketInfo(ctx context.Context, containerName string, log *zap.Logger) (*data.BucketInfo, error) {
func (h *Handler) getBucketInfo(ctx context.Context, containerName string) (*data.BucketInfo, error) {
ns, err := middleware.GetNamespace(ctx)
if err != nil {
return nil, err
@ -353,91 +312,37 @@ func (h *Handler) getBucketInfo(ctx context.Context, containerName string, log *
cnrID, err := h.resolveContainer(ctx, containerName)
if err != nil {
log.Error(logs.CouldNotResolveContainerID, zap.Error(err), zap.String("cnrName", containerName),
logs.TagField(logs.TagDatapath))
return nil, err
return nil, fmt.Errorf("resolve container: %w", err)
}
bktInfo, err := h.readContainer(ctx, *cnrID)
if err != nil {
log.Error(logs.CouldNotGetContainerInfo, zap.Error(err), zap.String("cnrName", containerName),
zap.String("cnrName", cnrID.String()),
logs.TagField(logs.TagExternalStorage))
return nil, err
}
if err = h.cache.Put(bktInfo); err != nil {
log.Warn(logs.CouldntPutBucketIntoCache,
zap.String("bucket name", bktInfo.Name),
zap.Stringer("bucket cid", bktInfo.CID),
zap.Error(err),
logs.TagField(logs.TagDatapath))
}
return bktInfo, nil
return h.containerInfo(ctx, *cnrID)
}
func (h *Handler) readContainer(ctx context.Context, cnrID cid.ID) (*data.BucketInfo, error) {
prm := PrmContainer{ContainerID: cnrID}
res, err := h.frostfs.Container(ctx, prm)
if err != nil {
return nil, fmt.Errorf("get frostfs container '%s': %w", cnrID.String(), err)
type ListFunc func(ctx context.Context, bucketInfo *data.BucketInfo, prefix string) (*GetObjectsResponse, error)
func (h *Handler) browseIndexMiddleware(fn ListFunc) MiddlewareFunc {
return func(prm MiddlewareParam) bool {
ctx, span := tracing.StartSpanFromContext(prm.Context, "handler.browseIndex")
defer span.End()
ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(
zap.String("bucket", prm.BktInfo.Name),
zap.String("container", prm.BktInfo.CID.EncodeToString()),
zap.String("prefix", prm.Path),
))
objects, err := fn(ctx, prm.BktInfo, prm.Path)
if err != nil {
h.logAndSendError(ctx, prm.Request, logs.FailedToListObjects, err)
return false
}
h.browseObjects(ctx, prm.Request, browseParams{
bucketInfo: prm.BktInfo,
prefix: prm.Path,
objects: objects,
})
return false
}
bktInfo := &data.BucketInfo{
CID: cnrID,
Name: cnrID.EncodeToString(),
}
if domain := container.ReadDomain(*res); domain.Name() != "" {
bktInfo.Name = domain.Name()
bktInfo.Zone = domain.Zone()
}
bktInfo.HomomorphicHashDisabled = container.IsHomomorphicHashingDisabled(*res)
bktInfo.PlacementPolicy = res.PlacementPolicy()
return bktInfo, err
}
func (h *Handler) browseIndex(c *fasthttp.RequestCtx, isNativeList bool) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.browseIndex")
defer span.End()
utils.SetContextToRequest(ctx, c)
if !h.config.IndexPageEnabled() {
c.SetStatusCode(fasthttp.StatusNotFound)
return
}
cidURLParam := c.UserValue("cid").(string)
oidURLParam := c.UserValue("oid").(string)
reqLog := utils.GetReqLogOrDefault(ctx, h.log)
log := reqLog.With(zap.String("cid", cidURLParam), zap.String("oid", oidURLParam))
unescapedKey, err := url.QueryUnescape(oidURLParam)
if err != nil {
logAndSendBucketError(c, log, err)
return
}
bktInfo, err := h.getBucketInfo(ctx, cidURLParam, log)
if err != nil {
logAndSendBucketError(c, log, err)
return
}
listFunc := h.getDirObjectsS3
if isNativeList {
// tree probe failed, trying to use native
listFunc = h.getDirObjectsNative
}
h.browseObjects(c, browseParams{
bucketInfo: bktInfo,
prefix: unescapedKey,
listObjects: listFunc,
isNative: isNativeList,
})
}


@ -21,6 +21,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
go_fuzz_utils "github.com/trailofbits/go-fuzz-utils"
"github.com/valyala/fasthttp"
"go.uber.org/zap"
)
const (
@ -125,7 +126,7 @@ func maybeFillRandom(tp *go_fuzz_utils.TypeProvider, initValue string) (string,
}
func upload(tp *go_fuzz_utils.TypeProvider) (context.Context, *handlerContext, cid.ID, *fasthttp.RequestCtx, string, string, string, error) {
hc, err := prepareHandlerContext()
hc, err := prepareHandlerContextBase(zap.NewExample())
if err != nil {
return nil, nil, cid.ID{}, nil, "", "", "", err
}


@ -14,9 +14,12 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/templates"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
v2container "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@ -24,42 +27,21 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/panjf2000/ants/v2"
"github.com/stretchr/testify/require"
"github.com/valyala/fasthttp"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
type treeServiceMock struct {
system map[string]map[string]*data.BaseNodeVersion
}
func newTreeService() *treeServiceMock {
return &treeServiceMock{
system: make(map[string]map[string]*data.BaseNodeVersion),
}
}
func (t *treeServiceMock) CheckSettingsNodeExists(context.Context, *data.BucketInfo) error {
_, ok := t.system["bucket-settings"]
if !ok {
return layer.ErrNodeNotFound
}
return nil
}
func (t *treeServiceMock) GetSubTreeByPrefix(context.Context, *data.BucketInfo, string, bool) ([]data.NodeInfo, string, error) {
return nil, "", nil
}
func (t *treeServiceMock) GetLatestVersion(context.Context, *cid.ID, string) (*data.NodeVersion, error) {
return nil, nil
}
type configMock struct {
additionalSearch bool
additionalFilenameSearch bool
additionalSlashSearch bool
indexEnabled bool
cors *data.CORSRule
}
func (c *configMock) DefaultTimestamp() bool {
@ -71,11 +53,11 @@ func (c *configMock) ArchiveCompression() bool {
}
func (c *configMock) IndexPageEnabled() bool {
return false
return c.indexEnabled
}
func (c *configMock) IndexPageTemplate() string {
return ""
return templates.DefaultIndexTemplate
}
func (c *configMock) IndexPageNativeTemplate() string {
@ -95,16 +77,29 @@ func (c *configMock) NamespaceHeader() string {
}
func (c *configMock) EnableFilepathFallback() bool {
return c.additionalSearch
return c.additionalFilenameSearch
}
func (c *configMock) EnableFilepathSlashFallback() bool {
return c.additionalSlashSearch
}
func (c *configMock) FormContainerZone(string) string {
return v2container.SysAttributeZoneDefault
}
func (c *configMock) CORS() *data.CORSRule {
return c.cors
}
type handlerContext struct {
key *keys.PrivateKey
owner user.ID
key *keys.PrivateKey
owner user.ID
corsCnr cid.ID
h *Handler
frostfs *TestFrostFS
tree *treeServiceMock
tree *treeServiceClientMock
cfg *configMock
}
@ -112,12 +107,13 @@ func (hc *handlerContext) Handler() *Handler {
return hc.h
}
func prepareHandlerContext() (*handlerContext, error) {
logger, err := zap.NewDevelopment()
if err != nil {
return nil, err
}
func prepareHandlerContext(t *testing.T) *handlerContext {
hc, err := prepareHandlerContextBase(zaptest.NewLogger(t))
require.NoError(t, err)
return hc
}
func prepareHandlerContextBase(logger *zap.Logger) (*handlerContext, error) {
key, err := keys.NewPrivateKey()
if err != nil {
return nil, err
@ -129,10 +125,12 @@ func prepareHandlerContext() (*handlerContext, error) {
testFrostFS := NewTestFrostFS(key)
testResolver := &resolver.Resolver{Name: "test_resolver"}
testResolver.SetResolveFunc(func(_ context.Context, name string) (*cid.ID, error) {
testResolver.SetResolveFunc(func(_ context.Context, _, name string) (*cid.ID, error) {
return testFrostFS.ContainerID(name)
})
cnrID := createCORSContainer(owner, testFrostFS)
params := &AppParams{
Logger: logger,
FrostFS: testFrostFS,
@ -143,20 +141,27 @@ func prepareHandlerContext() (*handlerContext, error) {
Lifetime: 1,
Logger: logger,
}, false),
CORSCnrID: cnrID,
CORSCache: cache.NewCORSCache(&cache.Config{
Size: 1,
Lifetime: 1,
Logger: logger,
}),
}
treeMock := newTreeService()
treeMock := newTreeServiceClientMock()
cfgMock := &configMock{}
workerPool, err := ants.NewPool(1)
if err != nil {
return nil, err
}
handler := New(params, cfgMock, treeMock, workerPool)
handler := New(params, cfgMock, tree.NewTree(treeMock, logger), testFrostFS, workerPool)
return &handlerContext{
key: key,
owner: owner,
corsCnr: cnrID,
h: handler,
frostfs: testFrostFS,
tree: treeMock,
@ -164,6 +169,20 @@ func prepareHandlerContext() (*handlerContext, error) {
}, nil
}
func createCORSContainer(owner user.ID, frostfs *TestFrostFS) cid.ID {
var cnr container.Container
cnr.Init()
cnr.SetOwner(owner)
cnrID := cidtest.ID()
frostfs.SetContainer(cnrID, &cnr)
frostfs.AllowUserOperation(cnrID, owner, acl.OpObjectSearch, oid.ID{})
frostfs.AllowUserOperation(cnrID, owner, acl.OpObjectHead, oid.ID{})
frostfs.AllowUserOperation(cnrID, owner, acl.OpObjectGet, oid.ID{})
return cnrID
}
func (hc *handlerContext) prepareContainer(name string, basicACL acl.Basic) (cid.ID, *container.Container, error) {
var pp netmap.PlacementPolicy
err := pp.DecodeString("REP 1")
@ -196,8 +215,7 @@ func (hc *handlerContext) prepareContainer(name string, basicACL acl.Basic) (cid
}
func TestBasic(t *testing.T) {
hc, err := prepareHandlerContext()
require.NoError(t, err)
hc := prepareHandlerContext(t)
bktName := "bucket"
cnrID, cnr, err := hc.prepareContainer(bktName, acl.PublicRWExtended)
@ -218,14 +236,25 @@ func TestBasic(t *testing.T) {
err = json.Unmarshal(r.Response.Body(), &putRes)
require.NoError(t, err)
hc.cfg.additionalFilenameSearch = true
obj := hc.frostfs.objects[putRes.ContainerID+"/"+putRes.ObjectID]
attr := prepareObjectAttributes(object.AttributeFilePath, objFileName)
obj.SetAttributes(append(obj.Attributes(), attr)...)
fileName := prepareObjectAttributes(object.AttributeFileName, objFileName)
filePath := prepareObjectAttributes(object.AttributeFilePath, objFilePath)
obj.SetAttributes(append(obj.Attributes(), fileName)...)
obj.SetAttributes(append(obj.Attributes(), filePath)...)
t.Run("get", func(t *testing.T) {
r = prepareGetRequest(ctx, cnrID.EncodeToString(), putRes.ObjectID)
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, content, string(r.Response.Body()))
r = prepareGetRequest(ctx, cnrID.EncodeToString(), objFilePath)
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, content, string(r.Response.Body()))
r = prepareGetRequest(ctx, cnrID.EncodeToString(), objFileName)
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, content, string(r.Response.Body()))
})
t.Run("head", func(t *testing.T) {
@ -233,6 +262,16 @@ func TestBasic(t *testing.T) {
hc.Handler().HeadByAddressOrBucketName(r)
require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
r = prepareGetRequest(ctx, cnrID.EncodeToString(), objFilePath)
hc.Handler().HeadByAddressOrBucketName(r)
require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
r = prepareGetRequest(ctx, cnrID.EncodeToString(), objFileName)
hc.Handler().HeadByAddressOrBucketName(r)
require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
})
t.Run("get by attribute", func(t *testing.T) {
@ -240,9 +279,13 @@ func TestBasic(t *testing.T) {
hc.Handler().DownloadByAttribute(r)
require.Equal(t, content, string(r.Response.Body()))
r = prepareGetByAttributeRequest(ctx, bktName, attrFileName, "/"+objFileName)
r = prepareGetByAttributeRequest(ctx, bktName, attrFileName, objFilePath)
hc.Handler().DownloadByAttribute(r)
require.Equal(t, content, string(r.Response.Body()))
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
r = prepareGetByAttributeRequest(ctx, bktName, attrFilePath, objFileName)
hc.Handler().DownloadByAttribute(r)
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
})
t.Run("head by attribute", func(t *testing.T) {
@ -251,10 +294,13 @@ func TestBasic(t *testing.T) {
require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
r = prepareGetByAttributeRequest(ctx, bktName, attrFileName, "/"+objFileName)
r = prepareGetByAttributeRequest(ctx, bktName, attrFileName, objFilePath)
hc.Handler().HeadByAttribute(r)
require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
r = prepareGetByAttributeRequest(ctx, bktName, attrFilePath, objFileName)
hc.Handler().HeadByAttribute(r)
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
})
t.Run("zip", func(t *testing.T) {
@ -265,7 +311,7 @@ func TestBasic(t *testing.T) {
zipReader, err := zip.NewReader(readerAt, int64(len(r.Response.Body())))
require.NoError(t, err)
require.Len(t, zipReader.File, 1)
require.Equal(t, objFileName, zipReader.File[0].Name)
require.Equal(t, objFilePath, zipReader.File[0].Name)
f, err := zipReader.File[0].Open()
require.NoError(t, err)
defer func() {
@ -278,176 +324,281 @@ func TestBasic(t *testing.T) {
})
}
func TestFindObjectByAttribute(t *testing.T) {
hc, err := prepareHandlerContext()
require.NoError(t, err)
hc.cfg.additionalSearch = true
func prepareHandlerAndBucket(t *testing.T) (*handlerContext, cid.ID) {
hc := prepareHandlerContext(t)
bktName := "bucket"
cnrID, cnr, err := hc.prepareContainer(bktName, acl.PublicRWExtended)
require.NoError(t, err)
hc.frostfs.SetContainer(cnrID, cnr)
ctx := context.Background()
ctx = middleware.SetNamespace(ctx, "")
content := "hello"
r, err := prepareUploadRequest(ctx, cnrID.EncodeToString(), content)
require.NoError(t, err)
hc.Handler().Upload(r)
require.Equal(t, r.Response.StatusCode(), http.StatusOK)
var putRes putResponse
err = json.Unmarshal(r.Response.Body(), &putRes)
require.NoError(t, err)
testAttrVal1 := "/folder/cat.jpg"
testAttrVal2 := "cat.jpg"
testAttrVal3 := "test-attr-val3"
for _, tc := range []struct {
name string
firstAttr object.Attribute
secondAttr object.Attribute
reqAttrKey string
reqAttrValue string
err string
additionalSearch bool
}{
{
name: "success search by FileName",
firstAttr: prepareObjectAttributes(attrFilePath, testAttrVal1),
secondAttr: prepareObjectAttributes(attrFileName, testAttrVal2),
reqAttrKey: attrFileName,
reqAttrValue: testAttrVal2,
additionalSearch: false,
},
{
name: "failed search by FileName",
firstAttr: prepareObjectAttributes(attrFilePath, testAttrVal1),
secondAttr: prepareObjectAttributes(attrFileName, testAttrVal2),
reqAttrKey: attrFileName,
reqAttrValue: testAttrVal3,
err: "not found",
additionalSearch: false,
},
{
name: "success search by FilePath (with additional search)",
firstAttr: prepareObjectAttributes(attrFilePath, testAttrVal1),
secondAttr: prepareObjectAttributes(attrFileName, testAttrVal2),
reqAttrKey: attrFilePath,
reqAttrValue: testAttrVal2,
additionalSearch: true,
},
{
name: "failed by FilePath (with additional search)",
firstAttr: prepareObjectAttributes(attrFilePath, testAttrVal1),
secondAttr: prepareObjectAttributes(attrFileName, testAttrVal2),
reqAttrKey: attrFilePath,
reqAttrValue: testAttrVal3,
err: "not found",
additionalSearch: true,
},
{
name: "success search by FilePath with leading slash (with additional search)",
firstAttr: prepareObjectAttributes(attrFilePath, testAttrVal1),
secondAttr: prepareObjectAttributes(attrFileName, testAttrVal2),
reqAttrKey: attrFilePath,
reqAttrValue: "/cat.jpg",
additionalSearch: true,
},
} {
t.Run(tc.name, func(t *testing.T) {
obj := hc.frostfs.objects[putRes.ContainerID+"/"+putRes.ObjectID]
obj.SetAttributes(tc.firstAttr, tc.secondAttr)
hc.cfg.additionalSearch = tc.additionalSearch
objID, err := hc.Handler().findObjectByAttribute(ctx, hc.Handler().log, cnrID, tc.reqAttrKey, tc.reqAttrValue)
if tc.err != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tc.err)
return
}
require.NoError(t, err)
require.Equal(t, putRes.ObjectID, objID.EncodeToString())
})
}
return hc, cnrID
}
func TestNeedSearchByFileName(t *testing.T) {
hc, err := prepareHandlerContext()
require.NoError(t, err)
func TestGetObjectWithFallback(t *testing.T) {
ctx := middleware.SetNamespace(context.Background(), "")
for _, tc := range []struct {
name string
attrKey string
attrVal string
additionalSearch bool
expected bool
}{
{
name: "need search - not contains slash",
attrKey: attrFilePath,
attrVal: "cat.png",
additionalSearch: true,
expected: true,
},
{
name: "need search - single lead slash",
attrKey: attrFilePath,
attrVal: "/cat.png",
additionalSearch: true,
expected: true,
},
{
name: "don't need search - single slash but not lead",
attrKey: attrFilePath,
attrVal: "cats/cat.png",
additionalSearch: true,
expected: false,
},
{
name: "don't need search - more one slash",
attrKey: attrFilePath,
attrVal: "/cats/cat.png",
additionalSearch: true,
expected: false,
},
{
name: "don't need search - incorrect attribute key",
attrKey: attrFileName,
attrVal: "cat.png",
additionalSearch: true,
expected: false,
},
{
name: "don't need search - additional search disabled",
attrKey: attrFilePath,
attrVal: "cat.png",
additionalSearch: false,
expected: false,
},
} {
t.Run(tc.name, func(t *testing.T) {
hc.cfg.additionalSearch = tc.additionalSearch
t.Run("by oid", func(t *testing.T) {
hc, cnrID := prepareHandlerAndBucket(t)
res := hc.h.needSearchByFileName(tc.attrKey, tc.attrVal)
require.Equal(t, tc.expected, res)
})
}
obj1ID := oidtest.ID()
obj1 := object.New()
obj1.SetID(obj1ID)
obj1.SetPayload([]byte("obj1"))
hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1
r := prepareGetRequest(ctx, cnrID.EncodeToString(), obj1ID.String())
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, string(obj1.Payload()), string(r.Response.Body()))
})
t.Run("by filepath as it is", func(t *testing.T) {
hc, cnrID := prepareHandlerAndBucket(t)
obj1ID := oidtest.ID()
obj1 := object.New()
obj1.SetID(obj1ID)
obj1.SetPayload([]byte("obj1"))
obj1.SetAttributes(prepareObjectAttributes(object.AttributeFilePath, "filepath/obj1"))
hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1
obj2ID := oidtest.ID()
obj2 := object.New()
obj2.SetID(obj2ID)
obj2.SetPayload([]byte("obj2"))
obj2.SetAttributes(prepareObjectAttributes(object.AttributeFilePath, "/filepath/obj2"))
hc.frostfs.objects[cnrID.String()+"/"+obj2ID.String()] = obj2
r := prepareGetRequest(ctx, cnrID.EncodeToString(), "filepath/obj1")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, string(obj1.Payload()), string(r.Response.Body()))
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "/filepath/obj2")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, string(obj2.Payload()), string(r.Response.Body()))
})
t.Run("by filepath slash fallback", func(t *testing.T) {
hc, cnrID := prepareHandlerAndBucket(t)
obj1ID := oidtest.ID()
obj1 := object.New()
obj1.SetID(obj1ID)
obj1.SetPayload([]byte("obj1"))
obj1.SetAttributes(prepareObjectAttributes(object.AttributeFilePath, "filepath/obj1"))
hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1
r := prepareGetRequest(ctx, cnrID.EncodeToString(), "/filepath/obj1")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
hc.cfg.additionalSlashSearch = true
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "/filepath/obj1")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, string(obj1.Payload()), string(r.Response.Body()))
})
t.Run("by filename fallback", func(t *testing.T) {
hc, cnrID := prepareHandlerAndBucket(t)
obj1ID := oidtest.ID()
obj1 := object.New()
obj1.SetID(obj1ID)
obj1.SetPayload([]byte("obj1"))
obj1.SetAttributes(prepareObjectAttributes(object.AttributeFileName, "filename/obj1"))
hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1
r := prepareGetRequest(ctx, cnrID.EncodeToString(), "filename/obj1")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
hc.cfg.additionalFilenameSearch = true
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filename/obj1")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, string(obj1.Payload()), string(r.Response.Body()))
})
t.Run("by filename and slash fallback", func(t *testing.T) {
hc, cnrID := prepareHandlerAndBucket(t)
obj1ID := oidtest.ID()
obj1 := object.New()
obj1.SetID(obj1ID)
obj1.SetPayload([]byte("obj1"))
obj1.SetAttributes(prepareObjectAttributes(object.AttributeFileName, "filename/obj1"))
hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1
r := prepareGetRequest(ctx, cnrID.EncodeToString(), "/filename/obj1")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
hc.cfg.additionalFilenameSearch = true
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "/filename/obj1")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
hc.cfg.additionalSlashSearch = true
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "/filename/obj1")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, string(obj1.Payload()), string(r.Response.Body()))
})
t.Run("index fallback", func(t *testing.T) {
hc, cnrID := prepareHandlerAndBucket(t)
obj1ID := oidtest.ID()
obj1 := object.New()
obj1.SetID(obj1ID)
obj1.SetPayload([]byte("obj1"))
obj1.SetAttributes(prepareObjectAttributes(object.AttributeFilePath, "filepath/index.html"))
hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1
r := prepareGetRequest(ctx, cnrID.EncodeToString(), "filepath/")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filepath")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
hc.cfg.indexEnabled = true
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filepath")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, string(obj1.Payload()), string(r.Response.Body()))
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filepath/")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, string(obj1.Payload()), string(r.Response.Body()))
})
t.Run("index filename fallback", func(t *testing.T) {
hc, cnrID := prepareHandlerAndBucket(t)
obj1ID := oidtest.ID()
obj1 := object.New()
obj1.SetID(obj1ID)
obj1.SetPayload([]byte("obj1"))
obj1.SetAttributes(prepareObjectAttributes(object.AttributeFileName, "filename/index.html"))
hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1
r := prepareGetRequest(ctx, cnrID.EncodeToString(), "filename/")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filename")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
hc.cfg.indexEnabled = true
hc.cfg.additionalFilenameSearch = true
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filename")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, string(obj1.Payload()), string(r.Response.Body()))
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filename/")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, string(obj1.Payload()), string(r.Response.Body()))
})
}
func TestPrepareFileName(t *testing.T) {
fileName := "/cat.jpg"
expected := "cat.jpg"
actual := prepareFileName(fileName)
require.Equal(t, expected, actual)
func TestIndex(t *testing.T) {
ctx := middleware.SetNamespace(context.Background(), "")
fileName = "cat.jpg"
actual = prepareFileName(fileName)
require.Equal(t, expected, actual)
t.Run("s3", func(t *testing.T) {
hc, cnrID := prepareHandlerAndBucket(t)
obj1ID := oidtest.ID()
obj1 := object.New()
obj1.SetID(obj1ID)
obj1.SetPayload([]byte("obj1"))
obj1.SetAttributes(prepareObjectAttributes(object.AttributeFilePath, "prefix/obj1"))
hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1
hc.tree.containers[cnrID.String()] = containerInfo{
trees: map[string]map[string]nodeResponse{
"system": {"bucket-settings": nodeResponse{nodeID: 1}},
"version": {
"": nodeResponse{}, //root
"prefix": nodeResponse{
nodeID: 1,
meta: []nodeMeta{{key: tree.FileNameKey, value: []byte("prefix")}}},
"obj1": nodeResponse{
parentID: 1,
nodeID: 2,
meta: []nodeMeta{
{key: tree.FileNameKey, value: []byte("obj1")},
{key: "OID", value: []byte(obj1ID.String())},
},
},
},
},
}
r := prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix/")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
hc.cfg.indexEnabled = true
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Contains(t, string(r.Response.Body()), "Index of s3://bucket/prefix")
require.Contains(t, string(r.Response.Body()), obj1ID.String())
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix/")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Contains(t, string(r.Response.Body()), "Index of s3://bucket/prefix")
require.Contains(t, string(r.Response.Body()), obj1ID.String())
r = prepareGetRequest(ctx, "bucket", "dummy")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Contains(t, string(r.Response.Body()), "Index of s3://bucket/dummy")
})
t.Run("native", func(t *testing.T) {
hc, cnrID := prepareHandlerAndBucket(t)
obj1ID := oidtest.ID()
obj1 := object.New()
obj1.SetID(obj1ID)
obj1.SetPayload([]byte("obj1"))
obj1.SetAttributes(prepareObjectAttributes(object.AttributeFilePath, "prefix/obj1"))
hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1
r := prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix/")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
hc.cfg.indexEnabled = true
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Contains(t, string(r.Response.Body()), "Index of frostfs://"+cnrID.String()+"/prefix")
require.Contains(t, string(r.Response.Body()), obj1ID.String())
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix/")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Contains(t, string(r.Response.Body()), "Index of frostfs://"+cnrID.String()+"/prefix")
require.Contains(t, string(r.Response.Body()), obj1ID.String())
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "dummy")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Contains(t, string(r.Response.Body()), "Index of frostfs://"+cnrID.String()+"/dummy")
})
}
func prepareUploadRequest(ctx context.Context, bucket, content string) (*fasthttp.RequestCtx, error) {
@ -465,6 +616,25 @@ func prepareGetRequest(ctx context.Context, bucket, objID string) *fasthttp.Requ
return r
}
func prepareCORSRequest(t *testing.T, bucket string, headers map[string]string) *fasthttp.RequestCtx {
ctx := context.Background()
ctx = middleware.SetNamespace(ctx, "")
r := new(fasthttp.RequestCtx)
r.SetUserValue("cid", bucket)
for k, v := range headers {
r.Request.Header.Set(k, v)
}
ctx, err := tokens.StoreBearerTokenAppCtx(ctx, r)
require.NoError(t, err)
utils.SetContextToRequest(ctx, r)
return r
}
func prepareGetByAttributeRequest(ctx context.Context, bucket, attrKey, attrVal string) *fasthttp.RequestCtx {
r := new(fasthttp.RequestCtx)
utils.SetContextToRequest(ctx, r)
@ -493,6 +663,7 @@ const (
keyAttr = "User-Attribute"
valAttr = "user value"
objFileName = "newFile.txt"
objFilePath = "/newFile.txt"
)
func fillMultipartBody(r *fasthttp.RequestCtx, content string) error {


@ -5,11 +5,12 @@ import (
"errors"
"io"
"net/http"
"net/url"
"strconv"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@ -27,7 +28,7 @@ const (
hdrContainerID = "X-Container-Id"
)
func (h *Handler) headObject(ctx context.Context, req request, objectAddress oid.Address) {
func (h *Handler) headObject(ctx context.Context, req *fasthttp.RequestCtx, objectAddress oid.Address) {
var start = time.Now()
btoken := bearerToken(ctx)
@ -41,7 +42,7 @@ func (h *Handler) headObject(ctx context.Context, req request, objectAddress oid
obj, err := h.frostfs.HeadObject(ctx, prm)
if err != nil {
req.handleFrostFSErr(err, start)
h.logAndSendError(ctx, req, logs.FailedToHeadObject, err, zap.Stringer("elapsed", time.Since(start)))
return
}
@ -65,7 +66,7 @@ func (h *Handler) headObject(ctx context.Context, req request, objectAddress oid
case object.AttributeTimestamp:
value, err := strconv.ParseInt(val, 10, 64)
if err != nil {
req.log.Info(logs.CouldntParseCreationDate,
h.reqLogger(ctx).Info(logs.CouldntParseCreationDate,
zap.String("key", key),
zap.String("val", val),
zap.Error(err),
@ -100,7 +101,7 @@ func (h *Handler) headObject(ctx context.Context, req request, objectAddress oid
return h.frostfs.RangeObject(ctx, prmRange)
}, filename)
if err != nil && err != io.EOF {
req.handleFrostFSErr(err, start)
h.logAndSendError(ctx, req, logs.FailedToDetectContentTypeFromPayload, err, zap.Stringer("elapsed", time.Since(start)))
return
}
}
@ -116,48 +117,77 @@ func idsToResponse(resp *fasthttp.Response, obj *object.Object) {
}
// HeadByAddressOrBucketName handles head requests using simple cid/oid or bucketname/key format.
func (h *Handler) HeadByAddressOrBucketName(c *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.HeadByAddressOrBucketName")
func (h *Handler) HeadByAddressOrBucketName(req *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.HeadByAddressOrBucketName")
defer span.End()
cidParam, _ := c.UserValue("cid").(string)
oidParam, _ := c.UserValue("oid").(string)
cidParam, _ := req.UserValue("cid").(string)
oidParam, _ := req.UserValue("oid").(string)
log := utils.GetReqLogOrDefault(ctx, h.log).With(
ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(
zap.String("cid", cidParam),
zap.String("oid", oidParam),
)
))
bktInfo, err := h.getBucketInfo(ctx, cidParam, log)
path, err := url.QueryUnescape(oidParam)
if err != nil {
logAndSendBucketError(c, log, err)
h.logAndSendError(ctx, req, logs.FailedToUnescapePath, err)
return
}
bktInfo, err := h.getBucketInfo(ctx, cidParam)
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err)
return
}
checkS3Err := h.tree.CheckSettingsNodeExists(ctx, bktInfo)
if checkS3Err != nil && !errors.Is(checkS3Err, layer.ErrNodeNotFound) {
log.Error(logs.FailedToCheckIfSettingsNodeExist, zap.String("cid", bktInfo.CID.String()),
zap.Error(checkS3Err), logs.TagField(logs.TagExternalStorageTree))
logAndSendBucketError(c, log, checkS3Err)
if checkS3Err != nil && !errors.Is(checkS3Err, tree.ErrNodeNotFound) {
h.logAndSendError(ctx, req, logs.FailedToCheckIfSettingsNodeExist, checkS3Err)
return
}
req := newRequest(c, log)
prm := MiddlewareParam{
Context: ctx,
Request: req,
BktInfo: bktInfo,
Path: path,
}
indexPageEnabled := h.config.IndexPageEnabled()
var objID oid.ID
if checkS3Err == nil {
h.byS3Path(ctx, req, bktInfo.CID, oidParam, h.headObject)
} else if err = objID.DecodeString(oidParam); err == nil {
h.byNativeAddress(ctx, req, bktInfo.CID, objID, h.headObject)
run(prm, h.errorMiddleware(logs.ObjectNotFound, tree.ErrNodeNotFound),
Middleware{Func: h.byS3PathMiddleware(h.headObject, noopFormer), Enabled: true},
Middleware{Func: h.byS3PathMiddleware(h.headObject, indexFormer), Enabled: indexPageEnabled},
)
} else {
logAndSendBucketError(c, log, checkS3Err)
slashFallbackEnabled := h.config.EnableFilepathSlashFallback()
fileNameFallbackEnabled := h.config.EnableFilepathFallback()
run(prm, h.errorMiddleware(logs.ObjectNotFound, ErrObjectNotFound),
Middleware{Func: h.byAddressMiddleware(h.headObject), Enabled: true},
Middleware{Func: h.byAttributeSearchMiddleware(h.headObject, object.AttributeFilePath, noopFormer), Enabled: true},
Middleware{Func: h.byAttributeSearchMiddleware(h.headObject, object.AttributeFilePath, reverseLeadingSlash), Enabled: slashFallbackEnabled},
Middleware{Func: h.byAttributeSearchMiddleware(h.headObject, object.AttributeFileName, noopFormer), Enabled: fileNameFallbackEnabled},
Middleware{Func: h.byAttributeSearchMiddleware(h.headObject, object.AttributeFileName, reverseLeadingSlash), Enabled: fileNameFallbackEnabled && slashFallbackEnabled},
Middleware{Func: h.byAttributeSearchMiddleware(h.headObject, object.AttributeFilePath, indexFormer), Enabled: indexPageEnabled},
Middleware{Func: h.byAttributeSearchMiddleware(h.headObject, object.AttributeFileName, indexFormer), Enabled: fileNameFallbackEnabled && indexPageEnabled},
)
}
}
// HeadByAttribute handles attribute-based head requests.
func (h *Handler) HeadByAttribute(c *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.HeadByAttribute")
func (h *Handler) HeadByAttribute(req *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.HeadByAttribute")
defer span.End()
utils.SetContextToRequest(ctx, c)
h.byAttribute(c, h.headObject)
h.byAttribute(ctx, req, h.headObject)
}
func (h *Handler) errorMiddleware(msg string, err error) MiddlewareFunc {
return func(prm MiddlewareParam) bool {
h.logAndSendError(prm.Context, prm.Request, msg, err)
return false
}
}
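
The new resolution flow is driven by a small middleware chain: run receives a MiddlewareParam plus an ordered list of Middleware entries and tries only the enabled ones, with an error middleware as the final fallback. The diff does not show run itself, so the following is only a minimal reconstruction, assuming a middleware returns false once it has written a response and true to let the next fallback try.

package handlersketch

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
	"github.com/valyala/fasthttp"
)

// MiddlewareParam mirrors the fields visible in the hunk above.
type MiddlewareParam struct {
	Context context.Context
	Request *fasthttp.RequestCtx
	BktInfo *data.BucketInfo
	Path    string
}

// MiddlewareFunc is assumed to report whether the chain should continue:
// false once the request has been answered, true to try the next fallback.
type MiddlewareFunc func(prm MiddlewareParam) bool

type Middleware struct {
	Func    MiddlewareFunc
	Enabled bool
}

// run tries enabled middlewares in order; the default (error) middleware fires
// only if every enabled middleware fell through.
func run(prm MiddlewareParam, defaultMiddleware MiddlewareFunc, middlewares ...Middleware) {
	for _, m := range middlewares {
		if !m.Enabled {
			continue
		}
		if !m.Func(prm) {
			return
		}
	}
	defaultMiddleware(prm)
}

Under this reading, the handler walks the fallbacks in the order they are listed above and reports ObjectNotFound only when every enabled one has declined.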


@ -1,6 +1,7 @@
package handler
import (
"context"
"errors"
"io"
"strconv"
@ -53,7 +54,7 @@ func fetchMultipartFile(l *zap.Logger, r io.Reader, boundary string) (MultipartF
}
// getPayload returns initial payload if object is not multipart else composes new reader with parts data.
func (h *Handler) getPayload(p getMultiobjectBodyParams) (io.ReadCloser, uint64, error) {
func (h *Handler) getPayload(ctx context.Context, p getMultiobjectBodyParams) (io.ReadCloser, uint64, error) {
cid, ok := p.obj.Header.ContainerID()
if !ok {
return nil, 0, errors.New("no container id set")
@ -66,7 +67,6 @@ func (h *Handler) getPayload(p getMultiobjectBodyParams) (io.ReadCloser, uint64,
if err != nil {
return nil, 0, err
}
ctx := p.req.RequestCtx
params := PrmInitMultiObjectReader{
Addr: newAddress(cid, oid),
Bearer: bearerToken(ctx),


@ -60,12 +60,7 @@ func BenchmarkAll(b *testing.B) {
func defaultMultipart(filename string) error {
r, bound := multipartFile(filename)
logger, err := zap.NewProduction()
if err != nil {
return err
}
file, err := fetchMultipartFileDefault(logger, r, bound)
file, err := fetchMultipartFileDefault(zap.NewNop(), r, bound)
if err != nil {
return err
}
@ -87,12 +82,7 @@ func TestName(t *testing.T) {
func customMultipart(filename string) error {
r, bound := multipartFile(filename)
logger, err := zap.NewProduction()
if err != nil {
return err
}
file, err := fetchMultipartFile(logger, r, bound)
file, err := fetchMultipartFile(zap.NewNop(), r, bound)
if err != nil {
return err
}


@ -63,11 +63,10 @@ func readContentType(maxSize uint64, rInit func(uint64) (io.Reader, error), file
type getMultiobjectBodyParams struct {
obj *Object
req request
strSize string
}
func (h *Handler) receiveFile(ctx context.Context, req request, objAddress oid.Address) {
func (h *Handler) receiveFile(ctx context.Context, req *fasthttp.RequestCtx, objAddress oid.Address) {
var (
shouldDownload = req.QueryArgs().GetBool("download")
start = time.Now()
@ -85,12 +84,12 @@ func (h *Handler) receiveFile(ctx context.Context, req request, objAddress oid.A
rObj, err := h.frostfs.GetObject(ctx, prm)
if err != nil {
req.handleFrostFSErr(err, start)
h.logAndSendError(ctx, req, logs.FailedToGetObject, err, zap.Stringer("elapsed", time.Since(start)))
return
}
// we can't close reader in this function, so how to do it?
req.setIDs(rObj.Header)
setIDs(req, rObj.Header)
payload := rObj.Payload
payloadSize := rObj.Header.PayloadSize()
for _, attr := range rObj.Header.Attributes() {
@ -107,8 +106,8 @@ func (h *Handler) receiveFile(ctx context.Context, req request, objAddress oid.A
case object.AttributeFileName:
filename = val
case object.AttributeTimestamp:
if err = req.setTimestamp(val); err != nil {
req.log.Error(logs.CouldntParseCreationDate,
if err = setTimestamp(req, val); err != nil {
h.reqLogger(ctx).Error(logs.CouldntParseCreationDate,
zap.String("val", val),
zap.Error(err),
logs.TagField(logs.TagDatapath))
@ -118,13 +117,12 @@ func (h *Handler) receiveFile(ctx context.Context, req request, objAddress oid.A
case object.AttributeFilePath:
filepath = val
case attributeMultipartObjectSize:
payload, payloadSize, err = h.getPayload(getMultiobjectBodyParams{
payload, payloadSize, err = h.getPayload(ctx, getMultiobjectBodyParams{
obj: rObj,
req: req,
strSize: val,
})
if err != nil {
req.handleFrostFSErr(err, start)
h.logAndSendError(ctx, req, logs.FailedToGetObjectPayload, err, zap.Stringer("elapsed", time.Since(start)))
return
}
}
@ -133,7 +131,7 @@ func (h *Handler) receiveFile(ctx context.Context, req request, objAddress oid.A
filename = filepath
}
req.setDisposition(shouldDownload, filename)
setDisposition(req, shouldDownload, filename)
req.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(payloadSize, 10))
@ -145,8 +143,7 @@ func (h *Handler) receiveFile(ctx context.Context, req request, objAddress oid.A
return payload, nil
}, filename)
if err != nil && err != io.EOF {
req.log.Error(logs.CouldNotDetectContentTypeFromPayload, zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(req.RequestCtx, "could not detect Content-Type from payload: "+err.Error(), fasthttp.StatusBadRequest)
h.logAndSendError(ctx, req, logs.FailedToDetectContentTypeFromPayload, err, zap.Stringer("elapsed", time.Since(start)))
return
}
@ -165,7 +162,7 @@ func (h *Handler) receiveFile(ctx context.Context, req request, objAddress oid.A
req.Response.SetBodyStream(payload, int(payloadSize))
}
func (r *request) setIDs(obj object.Object) {
func setIDs(r *fasthttp.RequestCtx, obj object.Object) {
objID, _ := obj.ID()
cnrID, _ := obj.ContainerID()
r.Response.Header.Set(hdrObjectID, objID.String())
@ -173,7 +170,7 @@ func (r *request) setIDs(obj object.Object) {
r.Response.Header.Set(hdrContainerID, cnrID.String())
}
func (r *request) setDisposition(shouldDownload bool, filename string) {
func setDisposition(r *fasthttp.RequestCtx, shouldDownload bool, filename string) {
const (
inlineDisposition = "inline"
attachmentDisposition = "attachment"
@ -187,7 +184,7 @@ func (r *request) setDisposition(shouldDownload bool, filename string) {
r.Response.Header.Set(fasthttp.HeaderContentDisposition, dis+"; filename="+path.Base(filename))
}
func (r *request) setTimestamp(timestamp string) error {
func setTimestamp(r *fasthttp.RequestCtx, timestamp string) error {
value, err := strconv.ParseInt(timestamp, 10, 64)
if err != nil {
return err
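
The request wrapper methods have become plain helpers over *fasthttp.RequestCtx. As a quick illustration of the disposition logic, here is a side-effect-free mirror of setDisposition, assuming (per the "download" query flag read earlier in the hunk) that the flag switches the disposition from inline to attachment:

package main

import (
	"fmt"
	"path"
)

// dispositionValue mirrors setDisposition above but returns the header value
// instead of writing it, so the behaviour is visible in isolation.
func dispositionValue(shouldDownload bool, filename string) string {
	const (
		inlineDisposition     = "inline"
		attachmentDisposition = "attachment"
	)
	dis := inlineDisposition
	if shouldDownload {
		dis = attachmentDisposition
	}
	return dis + "; filename=" + path.Base(filename)
}

func main() {
	fmt.Println(dispositionValue(false, "docs/readme.txt")) // inline; filename=readme.txt
	fmt.Println(dispositionValue(true, "docs/readme.txt"))  // attachment; filename=readme.txt
}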


@ -0,0 +1,141 @@
package handler
import (
"context"
"errors"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
)
type nodeMeta struct {
key string
value []byte
}
func (m nodeMeta) GetKey() string {
return m.key
}
func (m nodeMeta) GetValue() []byte {
return m.value
}
type nodeResponse struct {
meta []nodeMeta
nodeID uint64
parentID uint64
timestamp uint64
}
func (n nodeResponse) GetNodeID() []uint64 {
return []uint64{n.nodeID}
}
func (n nodeResponse) GetParentID() []uint64 {
return []uint64{n.parentID}
}
func (n nodeResponse) GetTimestamp() []uint64 {
return []uint64{n.timestamp}
}
func (n nodeResponse) GetMeta() []tree.Meta {
res := make([]tree.Meta, len(n.meta))
for i, value := range n.meta {
res[i] = value
}
return res
}
type containerInfo struct {
trees map[string]map[string]nodeResponse
}
type treeServiceClientMock struct {
containers map[string]containerInfo
}
func newTreeServiceClientMock() *treeServiceClientMock {
return &treeServiceClientMock{
containers: make(map[string]containerInfo),
}
}
func (t *treeServiceClientMock) GetNodes(_ context.Context, p *tree.GetNodesParams) ([]tree.NodeResponse, error) {
cnr, ok := t.containers[p.CnrID.EncodeToString()]
if !ok {
return nil, tree.ErrNodeNotFound
}
tr, ok := cnr.trees[p.TreeID]
if !ok {
return nil, tree.ErrNodeNotFound
}
node, ok := tr[strings.Join(p.Path, "/")]
if !ok {
return nil, tree.ErrNodeNotFound
}
return []tree.NodeResponse{node}, nil
}
func (t *treeServiceClientMock) GetSubTree(_ context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, _ bool) ([]tree.NodeResponse, error) {
cnr, ok := t.containers[bktInfo.CID.EncodeToString()]
if !ok {
return nil, tree.ErrNodeNotFound
}
tr, ok := cnr.trees[treeID]
if !ok {
return nil, tree.ErrNodeNotFound
}
if len(rootID) != 1 {
return nil, errors.New("invalid rootID")
}
var root *nodeResponse
for _, v := range tr {
if v.nodeID == rootID[0] {
root = &v
break
}
}
if root == nil {
return nil, tree.ErrNodeNotFound
}
var res []nodeResponse
if depth == 0 {
for _, v := range tr {
res = append(res, v)
}
} else {
res = append(res, *root)
depthIndex := 0
for i := uint32(0); i < depth-1; i++ {
childrenCount := 0
for _, v := range tr {
for j := range res[depthIndex:] {
if v.parentID == res[j].nodeID {
res = append(res, v)
childrenCount++
break
}
}
}
depthIndex = len(res) - childrenCount
}
}
res2 := make([]tree.NodeResponse, len(res))
for i := range res {
res2[i] = res[i]
}
return res2, nil
}
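
The mock keys trees by container ID and nodes by their path joined with "/", which is all GetNodes needs. Below is a sketch of how a test could seed it, assumed to live in the same package as the mock (cid is "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"); the tree name and the "FileName" meta key are placeholders rather than the exact values used by the real tests.

// newPopulatedMock seeds one container with a single node so GetNodes can
// find it by path; everything else yields tree.ErrNodeNotFound.
func newPopulatedMock(cnrID cid.ID) *treeServiceClientMock {
	m := newTreeServiceClientMock()
	m.containers[cnrID.EncodeToString()] = containerInfo{
		trees: map[string]map[string]nodeResponse{
			"system": {
				"bucket-settings": {
					nodeID:    1,
					parentID:  0,
					timestamp: 10,
					meta:      []nodeMeta{{key: "FileName", value: []byte("bucket-settings")}},
				},
			},
		},
	}
	return m
}

With that in place, GetNodes(ctx, &tree.GetNodesParams{CnrID: cnrID, TreeID: "system", Path: []string{"bucket-settings"}}) returns exactly that node, while any other container, tree, or path comes back as tree.ErrNodeNotFound.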


@ -50,44 +50,41 @@ func (pr *putResponse) encode(w io.Writer) error {
}
// Upload handles multipart upload request.
func (h *Handler) Upload(c *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.Upload")
func (h *Handler) Upload(req *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.Upload")
defer span.End()
utils.SetContextToRequest(ctx, c)
var file MultipartFile
scid, _ := c.UserValue("cid").(string)
bodyStream := c.RequestBodyStream()
scid, _ := req.UserValue("cid").(string)
bodyStream := req.RequestBodyStream()
drainBuf := make([]byte, drainBufSize)
reqLog := utils.GetReqLogOrDefault(ctx, h.log)
log := reqLog.With(zap.String("cid", scid))
log := h.reqLogger(ctx)
ctx = utils.SetReqLog(ctx, log.With(zap.String("cid", scid)))
bktInfo, err := h.getBucketInfo(ctx, scid, log)
bktInfo, err := h.getBucketInfo(ctx, scid)
if err != nil {
logAndSendBucketError(c, log, err)
h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err)
return
}
boundary := string(c.Request.Header.MultipartFormBoundary())
boundary := string(req.Request.Header.MultipartFormBoundary())
if file, err = fetchMultipartFile(log, bodyStream, boundary); err != nil {
log.Error(logs.CouldNotReceiveMultipartForm, zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, "could not receive multipart/form: "+err.Error(), fasthttp.StatusBadRequest)
h.logAndSendError(ctx, req, logs.CouldNotReceiveMultipartForm, err)
return
}
filtered, err := filterHeaders(log, &c.Request.Header)
filtered, err := filterHeaders(log, &req.Request.Header)
if err != nil {
log.Error(logs.FailedToFilterHeaders, zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, err.Error(), fasthttp.StatusBadRequest)
h.logAndSendError(ctx, req, logs.FailedToFilterHeaders, err)
return
}
if c.Request.Header.Peek(explodeArchiveHeader) != nil {
h.explodeArchive(request{c, log}, bktInfo, file, filtered)
if req.Request.Header.Peek(explodeArchiveHeader) != nil {
h.explodeArchive(ctx, req, bktInfo, file, filtered)
} else {
h.uploadSingleObject(request{c, log}, bktInfo, file, filtered)
h.uploadSingleObject(ctx, req, bktInfo, file, filtered)
}
// Multipart is multipart and thus can contain more than one part which
@ -104,46 +101,39 @@ func (h *Handler) Upload(c *fasthttp.RequestCtx) {
}
}
func (h *Handler) uploadSingleObject(req request, bkt *data.BucketInfo, file MultipartFile, filtered map[string]string) {
c, log := req.RequestCtx, req.log
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.uploadSingleObject")
func (h *Handler) uploadSingleObject(ctx context.Context, req *fasthttp.RequestCtx, bkt *data.BucketInfo, file MultipartFile, filtered map[string]string) {
ctx, span := tracing.StartSpanFromContext(ctx, "handler.uploadSingleObject")
defer span.End()
utils.SetContextToRequest(ctx, c)
setIfNotExist(filtered, object.AttributeFileName, file.FileName())
attributes, err := h.extractAttributes(c, log, filtered)
attributes, err := h.extractAttributes(ctx, req, filtered)
if err != nil {
log.Error(logs.FailedToGetAttributes, zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, "could not extract attributes: "+err.Error(), fasthttp.StatusBadRequest)
h.logAndSendError(ctx, req, logs.FailedToGetAttributes, err)
return
}
idObj, err := h.uploadObject(c, bkt, attributes, file)
idObj, err := h.uploadObject(ctx, bkt, attributes, file)
if err != nil {
h.handlePutFrostFSErr(c, err, log)
h.logAndSendError(ctx, req, logs.FailedToUploadObject, err)
return
}
log.Debug(logs.ObjectUploaded,
h.reqLogger(ctx).Debug(logs.ObjectUploaded,
zap.String("oid", idObj.EncodeToString()),
zap.String("FileName", file.FileName()),
logs.TagField(logs.TagExternalStorage),
)
addr := newAddress(bkt.CID, idObj)
c.Response.Header.SetContentType(jsonHeader)
req.Response.Header.SetContentType(jsonHeader)
// Try to return the response, otherwise, if something went wrong, throw an error.
if err = newPutResponse(addr).encode(c); err != nil {
log.Error(logs.CouldNotEncodeResponse, zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, "could not encode response", fasthttp.StatusBadRequest)
if err = newPutResponse(addr).encode(req); err != nil {
h.logAndSendError(ctx, req, logs.CouldNotEncodeResponse, err)
return
}
}
func (h *Handler) uploadObject(c *fasthttp.RequestCtx, bkt *data.BucketInfo, attrs []object.Attribute, file io.Reader) (oid.ID, error) {
ctx := utils.GetContextFromRequest(c)
func (h *Handler) uploadObject(ctx context.Context, bkt *data.BucketInfo, attrs []object.Attribute, file io.Reader) (oid.ID, error) {
obj := object.New()
obj.SetContainerID(bkt.CID)
obj.SetOwnerID(*h.ownerID)
@ -168,19 +158,18 @@ func (h *Handler) uploadObject(c *fasthttp.RequestCtx, bkt *data.BucketInfo, att
return idObj, nil
}
func (h *Handler) extractAttributes(c *fasthttp.RequestCtx, log *zap.Logger, filtered map[string]string) ([]object.Attribute, error) {
ctx := utils.GetContextFromRequest(c)
func (h *Handler) extractAttributes(ctx context.Context, req *fasthttp.RequestCtx, filtered map[string]string) ([]object.Attribute, error) {
now := time.Now()
if rawHeader := c.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil {
if rawHeader := req.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil {
if parsed, err := time.Parse(http.TimeFormat, string(rawHeader)); err != nil {
log.Warn(logs.CouldNotParseClientTime, zap.String("Date header", string(rawHeader)), zap.Error(err),
h.reqLogger(ctx).Warn(logs.CouldNotParseClientTime, zap.String("Date header", string(rawHeader)), zap.Error(err),
logs.TagField(logs.TagDatapath))
} else {
now = parsed
}
}
if err := utils.PrepareExpirationHeader(ctx, h.frostfs, filtered, now); err != nil {
log.Error(logs.CouldNotPrepareExpirationHeader, zap.Error(err), logs.TagField(logs.TagDatapath))
h.reqLogger(ctx).Error(logs.CouldNotPrepareExpirationHeader, zap.Error(err), logs.TagField(logs.TagDatapath))
return nil, err
}
attributes := make([]object.Attribute, 0, len(filtered))
@ -207,38 +196,33 @@ func newAttribute(key string, val string) object.Attribute {
// explodeArchive reads files from the archive and creates objects for each of them.
// Sets FilePath attribute with name from tar.Header.
func (h *Handler) explodeArchive(req request, bkt *data.BucketInfo, file io.ReadCloser, filtered map[string]string) {
c, log := req.RequestCtx, req.log
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.explodeArchive")
func (h *Handler) explodeArchive(ctx context.Context, req *fasthttp.RequestCtx, bkt *data.BucketInfo, file io.ReadCloser, filtered map[string]string) {
ctx, span := tracing.StartSpanFromContext(ctx, "handler.explodeArchive")
defer span.End()
utils.SetContextToRequest(ctx, c)
// remove user attributes which vary for each file in archive
// to guarantee that they won't appear twice
delete(filtered, object.AttributeFileName)
delete(filtered, object.AttributeFilePath)
commonAttributes, err := h.extractAttributes(c, log, filtered)
commonAttributes, err := h.extractAttributes(ctx, req, filtered)
if err != nil {
log.Error(logs.FailedToGetAttributes, zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, "could not extract attributes: "+err.Error(), fasthttp.StatusBadRequest)
h.logAndSendError(ctx, req, logs.FailedToGetAttributes, err)
return
}
attributes := commonAttributes
reader := file
if bytes.EqualFold(c.Request.Header.Peek(fasthttp.HeaderContentEncoding), []byte("gzip")) {
log.Debug(logs.GzipReaderSelected, logs.TagField(logs.TagDatapath))
if bytes.EqualFold(req.Request.Header.Peek(fasthttp.HeaderContentEncoding), []byte("gzip")) {
h.reqLogger(ctx).Debug(logs.GzipReaderSelected, logs.TagField(logs.TagDatapath))
gzipReader, err := gzip.NewReader(file)
if err != nil {
log.Error(logs.FailedToCreateGzipReader, zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, "could read gzip file: "+err.Error(), fasthttp.StatusBadRequest)
h.logAndSendError(ctx, req, logs.FailedToCreateGzipReader, err)
return
}
defer func() {
if err := gzipReader.Close(); err != nil {
log.Warn(logs.FailedToCloseReader, zap.Error(err), logs.TagField(logs.TagDatapath))
h.reqLogger(ctx).Warn(logs.FailedToCloseReader, zap.Error(err), logs.TagField(logs.TagDatapath))
}
}()
reader = gzipReader
@ -250,8 +234,7 @@ func (h *Handler) explodeArchive(req request, bkt *data.BucketInfo, file io.Read
if errors.Is(err, io.EOF) {
break
} else if err != nil {
log.Error(logs.FailedToReadFileFromTar, zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, "could not get next entry: "+err.Error(), fasthttp.StatusBadRequest)
h.logAndSendError(ctx, req, logs.FailedToReadFileFromTar, err)
return
}
@ -265,13 +248,13 @@ func (h *Handler) explodeArchive(req request, bkt *data.BucketInfo, file io.Read
attributes = append(attributes, newAttribute(object.AttributeFilePath, obj.Name))
attributes = append(attributes, newAttribute(object.AttributeFileName, fileName))
idObj, err := h.uploadObject(c, bkt, attributes, tarReader)
idObj, err := h.uploadObject(ctx, bkt, attributes, tarReader)
if err != nil {
h.handlePutFrostFSErr(c, err, log)
h.logAndSendError(ctx, req, logs.FailedToUploadObject, err)
return
}
log.Debug(logs.ObjectUploaded,
h.reqLogger(ctx).Debug(logs.ObjectUploaded,
zap.String("oid", idObj.EncodeToString()),
zap.String("FileName", fileName),
logs.TagField(logs.TagExternalStorage),
@ -279,14 +262,6 @@ func (h *Handler) explodeArchive(req request, bkt *data.BucketInfo, file io.Read
}
}
func (h *Handler) handlePutFrostFSErr(r *fasthttp.RequestCtx, err error, log *zap.Logger) {
statusCode, msg, additionalFields := formErrorResponse("could not store file in frostfs", err)
logFields := append([]zap.Field{zap.Error(err)}, additionalFields...)
log.Error(logs.CouldNotStoreFileInFrostfs, append(logFields, logs.TagField(logs.TagExternalStorage))...)
ResponseError(r, msg, statusCode)
}
func (h *Handler) fetchBearerToken(ctx context.Context) *bearer.Token {
if tkn, err := tokens.LoadBearerToken(ctx); err == nil && tkn != nil {
return tkn


@ -5,13 +5,12 @@ import (
"errors"
"fmt"
"strings"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
sdkstatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@ -19,30 +18,6 @@ import (
"go.uber.org/zap"
)
type request struct {
*fasthttp.RequestCtx
log *zap.Logger
}
func newRequest(ctx *fasthttp.RequestCtx, log *zap.Logger) request {
return request{
RequestCtx: ctx,
log: log,
}
}
func (r *request) handleFrostFSErr(err error, start time.Time) {
logFields := []zap.Field{
zap.Stringer("elapsed", time.Since(start)),
zap.Error(err),
}
statusCode, msg, additionalFields := formErrorResponse("could not receive object", err)
logFields = append(logFields, additionalFields...)
r.log.Error(logs.CouldNotReceiveObject, append(logFields, logs.TagField(logs.TagExternalStorage))...)
ResponseError(r.RequestCtx, msg, statusCode)
}
func bearerToken(ctx context.Context) *bearer.Token {
if tkn, err := tokens.LoadBearerToken(ctx); err == nil {
return tkn
@ -84,14 +59,16 @@ func isValidValue(s string) bool {
return true
}
func logAndSendBucketError(c *fasthttp.RequestCtx, log *zap.Logger, err error) {
log.Error(logs.CouldNotGetBucket, zap.Error(err), logs.TagField(logs.TagDatapath))
func (h *Handler) reqLogger(ctx context.Context) *zap.Logger {
return utils.GetReqLogOrDefault(ctx, h.log)
}
if client.IsErrContainerNotFound(err) {
ResponseError(c, "Not Found", fasthttp.StatusNotFound)
return
}
ResponseError(c, "could not get bucket: "+err.Error(), fasthttp.StatusBadRequest)
func (h *Handler) logAndSendError(ctx context.Context, c *fasthttp.RequestCtx, msg string, err error, additional ...zap.Field) {
utils.GetReqLogOrDefault(ctx, h.log).Error(msg,
append([]zap.Field{zap.Error(err), logs.TagField(logs.TagDatapath)}, additional...)...)
msg, code := formErrorResponse(err)
ResponseError(c, msg, code)
}
func newAddress(cnr cid.ID, obj oid.ID) oid.Address {
@ -112,31 +89,23 @@ func ResponseError(r *fasthttp.RequestCtx, msg string, code int) {
r.Error(msg+"\n", code)
}
func formErrorResponse(message string, err error) (int, string, []zap.Field) {
var (
msg string
statusCode int
logFields []zap.Field
)
st := new(sdkstatus.ObjectAccessDenied)
func formErrorResponse(err error) (string, int) {
switch {
case errors.As(err, &st):
statusCode = fasthttp.StatusForbidden
reason := st.Reason()
msg = fmt.Sprintf("%s: %v: %s", message, err, reason)
logFields = append(logFields, zap.String("error_detail", reason))
case errors.Is(err, ErrAccessDenied):
return fmt.Sprintf("Storage Access Denied:\n%v", err), fasthttp.StatusForbidden
case errors.Is(err, tree.ErrNodeAccessDenied):
return fmt.Sprintf("Tree Access Denied:\n%v", err), fasthttp.StatusForbidden
case errors.Is(err, ErrQuotaLimitReached):
statusCode = fasthttp.StatusConflict
msg = fmt.Sprintf("%s: %v", message, err)
case client.IsErrObjectNotFound(err) || client.IsErrContainerNotFound(err):
statusCode = fasthttp.StatusNotFound
msg = "Not Found"
return fmt.Sprintf("Quota Reached:\n%v", err), fasthttp.StatusConflict
case errors.Is(err, ErrContainerNotFound):
return fmt.Sprintf("Container Not Found:\n%v", err), fasthttp.StatusNotFound
case errors.Is(err, ErrObjectNotFound):
return fmt.Sprintf("Object Not Found:\n%v", err), fasthttp.StatusNotFound
case errors.Is(err, tree.ErrNodeNotFound):
return fmt.Sprintf("Tree Node Not Found:\n%v", err), fasthttp.StatusNotFound
case errors.Is(err, ErrGatewayTimeout):
return fmt.Sprintf("Gateway Timeout:\n%v", err), fasthttp.StatusGatewayTimeout
default:
statusCode = fasthttp.StatusBadRequest
msg = fmt.Sprintf("%s: %v", message, err)
return fmt.Sprintf("Bad Request:\n%v", err), fasthttp.StatusBadRequest
}
return statusCode, msg, logFields
}
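
formErrorResponse now keys entirely off sentinel errors, and because the storage and tree wrappers use %w, errors.Is still matches after several layers of wrapping. A small self-contained illustration with local stand-ins for two of the sentinels:

package main

import (
	"errors"
	"fmt"

	"github.com/valyala/fasthttp"
)

// Local stand-ins for the handler's sentinel errors shown in the diff.
var (
	errObjectNotFound = errors.New("object not found")
	errGatewayTimeout = errors.New("gateway timeout")
)

// classify mirrors the shape of formErrorResponse: match sentinels with
// errors.Is and fall back to 400 for anything unknown.
func classify(err error) (string, int) {
	switch {
	case errors.Is(err, errObjectNotFound):
		return fmt.Sprintf("Object Not Found:\n%v", err), fasthttp.StatusNotFound
	case errors.Is(err, errGatewayTimeout):
		return fmt.Sprintf("Gateway Timeout:\n%v", err), fasthttp.StatusGatewayTimeout
	default:
		return fmt.Sprintf("Bad Request:\n%v", err), fasthttp.StatusBadRequest
	}
}

func main() {
	// handleStorageError-style wrapping keeps the sentinel reachable via errors.Is.
	wrapped := fmt.Errorf("read object header via connection pool: %w", errObjectNotFound)
	msg, code := classify(wrapped)
	fmt.Println(code, msg) // 404 Object Not Found: ...
}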


@ -1,24 +0,0 @@
package layer
import (
"context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)
// TreeService provide interface to interact with tree service using s3 data models.
type TreeService interface {
GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName string) (*data.NodeVersion, error)
GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) ([]data.NodeInfo, string, error)
CheckSettingsNodeExists(ctx context.Context, bktInfo *data.BucketInfo) error
}
var (
// ErrNodeNotFound is returned from Tree service in case of not found error.
ErrNodeNotFound = errors.New("not found")
// ErrNodeAccessDenied is returned from Tree service in case of access denied error.
ErrNodeAccessDenied = errors.New("access denied")
)


@ -72,60 +72,71 @@ const (
TagsLogConfigWontBeUpdated = "tags log config won't be updated"
FailedToReadIndexPageTemplate = "failed to read index page template"
SetCustomIndexPageTemplate = "set custom index page template"
CouldNotFetchCORSContainerInfo = "couldn't fetch CORS container info"
InitRPCClientFailed = "init rpc client failed"
InitContainerContractFailed = "init container contract failed"
FailedToResolveContractHash = "failed to resolve contract hash"
)
// Log messages with the "datapath" tag.
const (
CouldntParseCreationDate = "couldn't parse creation date"
CouldNotDetectContentTypeFromPayload = "could not detect Content-Type from payload"
FailedToAddObjectToArchive = "failed to add object to archive"
CloseZipWriter = "close zip writer"
IgnorePartEmptyFormName = "ignore part, empty form name"
IgnorePartEmptyFilename = "ignore part, empty filename"
CouldNotParseClientTime = "could not parse client time"
CouldNotPrepareExpirationHeader = "could not prepare expiration header"
CouldNotEncodeResponse = "could not encode response"
AddAttributeToResultObject = "add attribute to result object"
Request = "request"
CouldNotFetchAndStoreBearerToken = "could not fetch and store bearer token"
CouldntPutBucketIntoCache = "couldn't put bucket info into cache"
FailedToIterateOverResponse = "failed to iterate over search response"
InvalidCacheEntryType = "invalid cache entry type"
FailedToUnescapeQuery = "failed to unescape query"
CouldntCacheNetmap = "couldn't cache netmap"
FailedToCloseReader = "failed to close reader"
FailedToFilterHeaders = "failed to filter headers"
FailedToReadFileFromTar = "failed to read file from tar"
FailedToGetAttributes = "failed to get attributes"
CloseGzipWriter = "close gzip writer"
CloseTarWriter = "close tar writer"
FailedToCreateGzipReader = "failed to create gzip reader"
GzipReaderSelected = "gzip reader selected"
CouldNotReceiveMultipartForm = "could not receive multipart/form"
ObjectsNotFound = "objects not found"
IteratingOverSelectedObjectsFailed = "iterating over selected objects failed"
CouldNotGetBucket = "could not get bucket"
CouldNotResolveContainerID = "could not resolve container id"
FailedToSumbitTaskToPool = "failed to submit task to pool"
CouldntParseCreationDate = "couldn't parse creation date"
FailedToDetectContentTypeFromPayload = "failed to detect Content-Type from payload"
FailedToAddObjectToArchive = "failed to add object to archive"
CloseZipWriter = "close zip writer"
IgnorePartEmptyFormName = "ignore part, empty form name"
IgnorePartEmptyFilename = "ignore part, empty filename"
CouldNotParseClientTime = "could not parse client time"
CouldNotPrepareExpirationHeader = "could not prepare expiration header"
CouldNotEncodeResponse = "could not encode response"
AddAttributeToResultObject = "add attribute to result object"
Request = "request"
CouldNotFetchAndStoreBearerToken = "could not fetch and store bearer token"
CouldntPutBucketIntoCache = "couldn't put bucket info into cache"
FailedToIterateOverResponse = "failed to iterate over search response"
InvalidCacheEntryType = "invalid cache entry type"
FailedToUnescapeQuery = "failed to unescape query"
CouldntCacheNetmap = "couldn't cache netmap"
FailedToCloseReader = "failed to close reader"
FailedToFilterHeaders = "failed to filter headers"
FailedToReadFileFromTar = "failed to read file from tar"
FailedToGetAttributes = "failed to get attributes"
CloseGzipWriter = "close gzip writer"
CloseTarWriter = "close tar writer"
FailedToCreateGzipReader = "failed to create gzip reader"
GzipReaderSelected = "gzip reader selected"
CouldNotReceiveMultipartForm = "could not receive multipart/form"
ObjectsNotFound = "objects not found"
IteratingOverSelectedObjectsFailed = "iterating over selected objects failed"
FailedToGetBucketInfo = "could not get bucket info"
FailedToSubmitTaskToPool = "failed to submit task to pool"
IndexWasDeleted = "index was deleted"
FailedToGetLatestVersionOfIndexObject = "failed to get latest version of index object"
FailedToCheckIfSettingsNodeExist = "failed to check if settings node exists"
FailedToListObjects = "failed to list objects"
FailedToParseTemplate = "failed to parse template"
FailedToExecuteTemplate = "failed to execute template"
FailedToUploadObject = "failed to upload object"
FailedToHeadObject = "failed to head object"
FailedToGetObject = "failed to get object"
FailedToGetObjectPayload = "failed to get object payload"
FailedToFindObjectByAttribute = "failed to find object by attribute"
FailedToUnescapePath = "failed to unescape path"
CouldNotGetCORSConfiguration = "could not get cors configuration"
EmptyOriginRequestHeader = "empty Origin request header"
EmptyAccessControlRequestMethodHeader = "empty Access-Control-Request-Method request header"
CORSRuleWasNotMatched = "cors rule was not matched"
CouldntCacheCors = "couldn't cache cors"
)
// Log messages with the "external_storage" tag.
const (
CouldNotReceiveObject = "could not receive object"
CouldNotSearchForObjects = "could not search for objects"
ObjectNotFound = "object not found"
ReadObjectListFailed = "read object list failed"
CouldNotStoreFileInFrostfs = "could not store file in frostfs"
FailedToHeadObject = "failed to head object"
ObjectNotFoundByFilePathTrySearchByFileName = "object not found by filePath attribute, try search by fileName"
FailedToGetObject = "failed to get object"
ObjectUploaded = "object uploaded"
CouldNotGetContainerInfo = "could not get container info"
ObjectNotFound = "object not found"
ReadObjectListFailed = "read object list failed"
ObjectUploaded = "object uploaded"
)
// Log messages with the "external_storage_tree" tag.
const (
ObjectWasDeleted = "object was deleted"
FailedToGetLatestVersionOfObject = "failed to get latest version of object"
FailedToCheckIfSettingsNodeExist = "Failed to check if settings node exists"
FoundSeveralSystemTreeNodes = "found several system tree nodes"
)


@ -0,0 +1,73 @@
package container
import (
"fmt"
"strings"
containercontract "git.frostfs.info/TrueCloudLab/frostfs-contract/container"
containerclient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/container"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/wallet"
)
type Client struct {
contract *containerclient.Contract
}
type Config struct {
ContractHash util.Uint160
Key *keys.PrivateKey
RPCClient *rpcclient.Client
}
func New(cfg Config) (*Client, error) {
var err error
key := cfg.Key
if key == nil {
if key, err = keys.NewPrivateKey(); err != nil {
return nil, fmt.Errorf("generate anon private key for container contract: %w", err)
}
}
acc := wallet.NewAccountFromPrivateKey(key)
act, err := actor.NewSimple(cfg.RPCClient, acc)
if err != nil {
return nil, fmt.Errorf("create new actor: %w", err)
}
return &Client{
contract: containerclient.New(act, cfg.ContractHash),
}, nil
}
func (c *Client) GetContainerByID(cnrID cid.ID) (*container.Container, error) {
items, err := c.contract.Get(cnrID[:])
if err != nil {
if strings.Contains(err.Error(), containercontract.NotFoundError) {
return nil, fmt.Errorf("%w: %s", handler.ErrContainerNotFound, err)
}
return nil, err
}
if len(items) != 4 {
return nil, fmt.Errorf("unexpected container stack item count: %d", len(items))
}
cnrBytes, err := items[0].TryBytes()
if err != nil {
return nil, fmt.Errorf("could not get byte array of container: %w", err)
}
var cnr container.Container
if err = cnr.Unmarshal(cnrBytes); err != nil {
return nil, fmt.Errorf("can't unmarshal container: %w", err)
}
return &cnr, nil
}


@ -0,0 +1,34 @@
package util
import (
"fmt"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns"
"github.com/nspcc-dev/neo-go/pkg/util"
)
// ResolveContractHash determines the contract hash by resolving an NNS name.
func ResolveContractHash(contractHash, rpcAddress string) (util.Uint160, error) {
if hash, err := util.Uint160DecodeStringLE(contractHash); err == nil {
return hash, nil
}
splitName := strings.Split(contractHash, ".")
if len(splitName) != 2 {
return util.Uint160{}, fmt.Errorf("invalid contract name: '%s'", contractHash)
}
var domain container.Domain
domain.SetName(splitName[0])
domain.SetZone(splitName[1])
var nns ns.NNS
if err := nns.Dial(rpcAddress); err != nil {
return util.Uint160{}, fmt.Errorf("dial nns %s: %w", rpcAddress, err)
}
defer nns.Close()
return nns.ResolveContractHash(domain)
}


@ -10,6 +10,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@ -45,7 +46,7 @@ func (x *FrostFS) Container(ctx context.Context, containerPrm handler.PrmContain
res, err := x.pool.GetContainer(ctx, prm)
if err != nil {
return nil, handleObjectError("read container via connection pool", err)
return nil, handleStorageError("read container via connection pool", err)
}
return &res, nil
@ -69,7 +70,7 @@ func (x *FrostFS) CreateObject(ctx context.Context, prm handler.PrmObjectCreate)
idObj, err := x.pool.PutObject(ctx, prmPut)
if err != nil {
return oid.ID{}, handleObjectError("save object via connection pool", err)
return oid.ID{}, handleStorageError("save object via connection pool", err)
}
return idObj.ObjectID, nil
}
@ -85,7 +86,7 @@ func (x payloadReader) Read(p []byte) (int, error) {
if err != nil && errors.Is(err, io.EOF) {
return n, err
}
return n, handleObjectError("read payload", err)
return n, handleStorageError("read payload", err)
}
// HeadObject implements frostfs.FrostFS interface method.
@ -102,7 +103,7 @@ func (x *FrostFS) HeadObject(ctx context.Context, prm handler.PrmObjectHead) (*o
res, err := x.pool.HeadObject(ctx, prmHead)
if err != nil {
return nil, handleObjectError("read object header via connection pool", err)
return nil, handleStorageError("read object header via connection pool", err)
}
return &res, nil
@ -122,7 +123,7 @@ func (x *FrostFS) GetObject(ctx context.Context, prm handler.PrmObjectGet) (*han
res, err := x.pool.GetObject(ctx, prmGet)
if err != nil {
return nil, handleObjectError("init full object reading via connection pool", err)
return nil, handleStorageError("init full object reading via connection pool", err)
}
return &handler.Object{
@ -147,7 +148,7 @@ func (x *FrostFS) RangeObject(ctx context.Context, prm handler.PrmObjectRange) (
res, err := x.pool.ObjectRange(ctx, prmRange)
if err != nil {
return nil, handleObjectError("init payload range reading via connection pool", err)
return nil, handleStorageError("init payload range reading via connection pool", err)
}
return payloadReader{&res}, nil
@ -168,7 +169,7 @@ func (x *FrostFS) SearchObjects(ctx context.Context, prm handler.PrmObjectSearch
res, err := x.pool.SearchObjects(ctx, prmSearch)
if err != nil {
return nil, handleObjectError("init object search via connection pool", err)
return nil, handleStorageError("init object search via connection pool", err)
}
return &res, nil
@ -202,7 +203,7 @@ func (x *FrostFS) NetmapSnapshot(ctx context.Context) (netmap.NetMap, error) {
netmapSnapshot, err := x.pool.NetMapSnapshot(ctx)
if err != nil {
return netmapSnapshot, handleObjectError("get netmap via connection pool", err)
return netmapSnapshot, handleStorageError("get netmap via connection pool", err)
}
return netmapSnapshot, nil
@ -226,7 +227,7 @@ func (x *ResolverFrostFS) SystemDNS(ctx context.Context) (string, error) {
networkInfo, err := x.pool.NetworkInfo(ctx)
if err != nil {
return "", handleObjectError("read network info via client", err)
return "", handleStorageError("read network info via client", err)
}
domain := networkInfo.RawNetworkParameter("SystemDNS")
@ -237,7 +238,7 @@ func (x *ResolverFrostFS) SystemDNS(ctx context.Context) (string, error) {
return string(domain), nil
}
func handleObjectError(msg string, err error) error {
func handleStorageError(msg string, err error) error {
if err == nil {
return nil
}
@ -250,6 +251,14 @@ func handleObjectError(msg string, err error) error {
return fmt.Errorf("%s: %w: %s", msg, handler.ErrAccessDenied, reason)
}
if client.IsErrContainerNotFound(err) {
return fmt.Errorf("%s: %w: %s", msg, handler.ErrContainerNotFound, err.Error())
}
if client.IsErrObjectNotFound(err) {
return fmt.Errorf("%s: %w: %s", msg, handler.ErrObjectNotFound, err.Error())
}
if IsTimeoutError(err) {
return fmt.Errorf("%s: %w: %s", msg, handler.ErrGatewayTimeout, err.Error())
}


@ -18,7 +18,7 @@ func TestHandleObjectError(t *testing.T) {
msg := "some msg"
t.Run("nil error", func(t *testing.T) {
err := handleObjectError(msg, nil)
err := handleStorageError(msg, nil)
require.Nil(t, err)
})
@ -27,7 +27,7 @@ func TestHandleObjectError(t *testing.T) {
inputErr := new(apistatus.ObjectAccessDenied)
inputErr.WriteReason(reason)
err := handleObjectError(msg, inputErr)
err := handleStorageError(msg, inputErr)
require.ErrorIs(t, err, handler.ErrAccessDenied)
require.Contains(t, err.Error(), reason)
require.Contains(t, err.Error(), msg)
@ -38,7 +38,7 @@ func TestHandleObjectError(t *testing.T) {
inputErr := new(apistatus.ObjectAccessDenied)
inputErr.WriteReason(reason)
err := handleObjectError(msg, inputErr)
err := handleStorageError(msg, inputErr)
require.ErrorIs(t, err, handler.ErrQuotaLimitReached)
require.Contains(t, err.Error(), reason)
require.Contains(t, err.Error(), msg)
@ -47,7 +47,7 @@ func TestHandleObjectError(t *testing.T) {
t.Run("simple timeout", func(t *testing.T) {
inputErr := errors.New("timeout")
err := handleObjectError(msg, inputErr)
err := handleStorageError(msg, inputErr)
require.ErrorIs(t, err, handler.ErrGatewayTimeout)
require.Contains(t, err.Error(), inputErr.Error())
require.Contains(t, err.Error(), msg)
@ -58,7 +58,7 @@ func TestHandleObjectError(t *testing.T) {
defer cancel()
<-ctx.Done()
err := handleObjectError(msg, ctx.Err())
err := handleStorageError(msg, ctx.Err())
require.ErrorIs(t, err, handler.ErrGatewayTimeout)
require.Contains(t, err.Error(), ctx.Err().Error())
require.Contains(t, err.Error(), msg)
@ -67,7 +67,7 @@ func TestHandleObjectError(t *testing.T) {
t.Run("grpc deadline exceeded", func(t *testing.T) {
inputErr := fmt.Errorf("wrap grpc error: %w", status.Error(codes.DeadlineExceeded, "error"))
err := handleObjectError(msg, inputErr)
err := handleStorageError(msg, inputErr)
require.ErrorIs(t, err, handler.ErrGatewayTimeout)
require.Contains(t, err.Error(), inputErr.Error())
require.Contains(t, err.Error(), msg)
@ -76,7 +76,7 @@ func TestHandleObjectError(t *testing.T) {
t.Run("unknown error", func(t *testing.T) {
inputErr := errors.New("unknown error")
err := handleObjectError(msg, inputErr)
err := handleStorageError(msg, inputErr)
require.ErrorIs(t, err, inputErr)
require.Contains(t, err.Error(), msg)
})


@ -63,7 +63,7 @@ func (w *PoolWrapper) GetNodes(ctx context.Context, prm *tree.GetNodesParams) ([
nodes, err := w.p.GetNodes(ctx, poolPrm)
if err != nil {
return nil, handleError(err)
return nil, handleTreeError(err)
}
res := make([]tree.NodeResponse, len(nodes))
@ -82,7 +82,7 @@ func getBearer(ctx context.Context) []byte {
return token.Marshal()
}
func handleError(err error) error {
func handleTreeError(err error) error {
if err == nil {
return nil
}
@ -122,7 +122,7 @@ func (w *PoolWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo,
subTreeReader, err := w.p.GetSubTree(ctx, poolPrm)
if err != nil {
return nil, handleError(err)
return nil, handleTreeError(err)
}
var subtree []tree.NodeResponse
@ -133,7 +133,7 @@ func (w *PoolWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo,
node, err = subTreeReader.Next()
}
if err != io.EOF {
return nil, handleError(err)
return nil, handleTreeError(err)
}
return subtree, nil


@ -1,11 +1,9 @@
{{$container := .Container}}
{{ $prefix := trimPrefix .Prefix }}
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8"/>
<title>Index of {{.Protocol}}://{{$container}}
/{{if $prefix}}/{{$prefix}}/{{end}}</title>
<title>Index of {{.Protocol}}://{{$container}}/{{.Prefix}}</title>
<style>
.alert {
width: 80%;
@ -40,7 +38,7 @@
</style>
</head>
<body>
<h1>Index of {{.Protocol}}://{{$container}}/{{if $prefix}}{{$prefix}}/{{end}}</h1>
<h1>Index of {{.Protocol}}://{{$container}}/{{.Prefix}}</h1>
{{ if .HasErrors }}
<div class="alert">
Errors occurred while processing the request. Perhaps some objects are missing
@ -57,11 +55,11 @@
</tr>
</thead>
<tbody>
{{ $trimmedPrefix := trimPrefix $prefix }}
{{if $trimmedPrefix }}
{{ $parentPrefix := getParent .Prefix }}
{{if $parentPrefix }}
<tr>
<td>
⮐<a href="/get/{{$container}}{{ urlencode $trimmedPrefix }}/">..</a>
⮐<a href="/get/{{$container}}{{ urlencode $parentPrefix }}/">..</a>
</td>
<td></td>
<td></td>
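
The template now asks a getParent helper for the ".." target instead of re-trimming the prefix. Its implementation is outside this hunk; a hypothetical version that simply drops the last path segment could look like the sketch below (illustration only, using the standard strings package, not necessarily the changeset's actual helper).

// getParent (hypothetical): strip the trailing slash and the last segment of
// the prefix, returning "" at the container root so the ".." row is skipped.
func getParent(prefix string) string {
	trimmed := strings.TrimSuffix(prefix, "/")
	idx := strings.LastIndex(trimmed, "/")
	if idx < 0 {
		return ""
	}
	return trimmed[:idx]
}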


@ -6,7 +6,7 @@ import (
"fmt"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
v2container "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns"
@ -29,14 +29,9 @@ type FrostFS interface {
SystemDNS(context.Context) (string, error)
}
type Settings interface {
FormContainerZone(ns string) (zone string, isDefault bool)
}
type Config struct {
FrostFS FrostFS
RPCAddress string
Settings Settings
}
type ContainerResolver struct {
@ -46,15 +41,15 @@ type ContainerResolver struct {
type Resolver struct {
Name string
resolve func(context.Context, string) (*cid.ID, error)
resolve func(context.Context, string, string) (*cid.ID, error)
}
func (r *Resolver) SetResolveFunc(fn func(context.Context, string) (*cid.ID, error)) {
func (r *Resolver) SetResolveFunc(fn func(context.Context, string, string) (*cid.ID, error)) {
r.resolve = fn
}
func (r *Resolver) Resolve(ctx context.Context, name string) (*cid.ID, error) {
return r.resolve(ctx, name)
func (r *Resolver) Resolve(ctx context.Context, zone, name string) (*cid.ID, error) {
return r.resolve(ctx, zone, name)
}
func NewContainerResolver(resolverNames []string, cfg *Config) (*ContainerResolver, error) {
@ -81,13 +76,13 @@ func createResolvers(resolverNames []string, cfg *Config) ([]*Resolver, error) {
return resolvers, nil
}
func (r *ContainerResolver) Resolve(ctx context.Context, cnrName string) (*cid.ID, error) {
func (r *ContainerResolver) Resolve(ctx context.Context, cnrZone, cnrName string) (*cid.ID, error) {
r.mu.RLock()
defer r.mu.RUnlock()
var err error
for _, resolver := range r.resolvers {
cnrID, resolverErr := resolver.Resolve(ctx, cnrName)
cnrID, resolverErr := resolver.Resolve(ctx, cnrZone, cnrName)
if resolverErr != nil {
resolverErr = fmt.Errorf("%s: %w", resolver.Name, resolverErr)
if err == nil {
@ -141,34 +136,25 @@ func (r *ContainerResolver) equals(resolverNames []string) bool {
func newResolver(name string, cfg *Config) (*Resolver, error) {
switch name {
case DNSResolver:
return NewDNSResolver(cfg.FrostFS, cfg.Settings)
return NewDNSResolver(cfg.FrostFS)
case NNSResolver:
return NewNNSResolver(cfg.RPCAddress, cfg.Settings)
return NewNNSResolver(cfg.RPCAddress)
default:
return nil, fmt.Errorf("unknown resolver: %s", name)
}
}
func NewDNSResolver(frostFS FrostFS, settings Settings) (*Resolver, error) {
func NewDNSResolver(frostFS FrostFS) (*Resolver, error) {
if frostFS == nil {
return nil, fmt.Errorf("pool must not be nil for DNS resolver")
}
if settings == nil {
return nil, fmt.Errorf("resolver settings must not be nil for DNS resolver")
}
var dns ns.DNS
resolveFunc := func(ctx context.Context, name string) (*cid.ID, error) {
resolveFunc := func(ctx context.Context, zone, name string) (*cid.ID, error) {
var err error
namespace, err := middleware.GetNamespace(ctx)
if err != nil {
return nil, err
}
zone, isDefault := settings.FormContainerZone(namespace)
if isDefault {
if zone == v2container.SysAttributeZoneDefault {
zone, err = frostFS.SystemDNS(ctx)
if err != nil {
return nil, fmt.Errorf("read system DNS parameter of the FrostFS: %w", err)
@ -190,13 +176,10 @@ func NewDNSResolver(frostFS FrostFS, settings Settings) (*Resolver, error) {
}, nil
}
func NewNNSResolver(rpcAddress string, settings Settings) (*Resolver, error) {
func NewNNSResolver(rpcAddress string) (*Resolver, error) {
if rpcAddress == "" {
return nil, fmt.Errorf("rpc address must not be empty for NNS resolver")
}
if settings == nil {
return nil, fmt.Errorf("resolver settings must not be nil for NNS resolver")
}
var nns ns.NNS
@ -204,16 +187,9 @@ func NewNNSResolver(rpcAddress string, settings Settings) (*Resolver, error) {
return nil, fmt.Errorf("could not dial nns: %w", err)
}
resolveFunc := func(ctx context.Context, name string) (*cid.ID, error) {
resolveFunc := func(_ context.Context, zone, name string) (*cid.ID, error) {
var d container.Domain
d.SetName(name)
namespace, err := middleware.GetNamespace(ctx)
if err != nil {
return nil, err
}
zone, _ := settings.FormContainerZone(namespace)
d.SetZone(zone)
cnrID, err := nns.ResolveContainerDomain(d)
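
Resolvers now take the zone explicitly instead of deriving it from the request namespace, so the caller (which previously went through Settings.FormContainerZone) supplies it up front. A short sketch, placed alongside the resolver code above and reusing its imports:

// resolveByName resolves a container name in the default zone; with
// v2container.SysAttributeZoneDefault the DNS resolver falls back to the
// network's SystemDNS domain, as shown in the hunk above.
func resolveByName(ctx context.Context, frostFS FrostFS, name string) (*cid.ID, error) {
	r, err := NewDNSResolver(frostFS)
	if err != nil {
		return nil, err
	}
	return r.Resolve(ctx, v2container.SysAttributeZoneDefault, name)
}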


@ -7,15 +7,18 @@ import (
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
)
type (
Tree struct {
service ServiceClient
log *zap.Logger
}
// ServiceClient is a client to interact with tree service.
@ -48,10 +51,10 @@ type (
var (
// ErrNodeNotFound is returned from ServiceClient in case of not found error.
ErrNodeNotFound = layer.ErrNodeNotFound
ErrNodeNotFound = errors.New("not found")
// ErrNodeAccessDenied is returned from ServiceClient service in case of access denied error.
ErrNodeAccessDenied = layer.ErrNodeAccessDenied
ErrNodeAccessDenied = errors.New("access denied")
)
const (
@ -73,8 +76,8 @@ const (
)
// NewTree creates an instance of Tree using the provided address and creates a grpc connection.
func NewTree(service ServiceClient) *Tree {
return &Tree{service: service}
func NewTree(service ServiceClient, log *zap.Logger) *Tree {
return &Tree{service: service, log: log}
}
type Meta interface {
@ -255,7 +258,10 @@ func (c *Tree) getSystemNode(ctx context.Context, bktInfo *data.BucketInfo, name
nodes = filterMultipartNodes(nodes)
if len(nodes) == 0 {
return nil, layer.ErrNodeNotFound
return nil, ErrNodeNotFound
}
if len(nodes) != 1 {
c.reqLogger(ctx).Warn(logs.FoundSeveralSystemTreeNodes, zap.String("name", name), logs.TagField(logs.TagExternalStorageTree))
}
return newMultiNode(nodes)
@ -296,7 +302,7 @@ func getLatestVersionNode(nodes []NodeResponse) (NodeResponse, error) {
}
if targetIndexNode == -1 {
return nil, layer.ErrNodeNotFound
return nil, fmt.Errorf("latest version: %w", ErrNodeNotFound)
}
return nodes[targetIndexNode], nil
@ -317,20 +323,23 @@ func pathFromName(objectName string) []string {
return strings.Split(objectName, separator)
}
func (c *Tree) GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) ([]data.NodeInfo, string, error) {
func (c *Tree) GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) ([]data.NodeInfo, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "tree.GetSubTreeByPrefix")
defer span.End()
rootID, tailPrefix, err := c.determinePrefixNode(ctx, bktInfo, versionTree, prefix)
rootID, err := c.getPrefixNodeID(ctx, bktInfo, versionTree, strings.Split(prefix, separator))
if err != nil {
return nil, "", err
if errors.Is(err, ErrNodeNotFound) {
return nil, nil
}
return nil, err
}
subTree, err := c.service.GetSubTree(ctx, bktInfo, versionTree, rootID, 2, false)
if err != nil {
if errors.Is(err, ErrNodeNotFound) {
return nil, "", nil
return nil, nil
}
return nil, "", err
return nil, err
}
nodesMap := make(map[string][]NodeResponse, len(subTree))
@ -340,10 +349,6 @@ func (c *Tree) GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo,
}
fileName := GetFilename(node)
if !strings.HasPrefix(fileName, tailPrefix) {
continue
}
nodes := nodesMap[fileName]
// Add all nodes if flag latestOnly is false.
@ -367,7 +372,7 @@ func (c *Tree) GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo,
result = append(result, nodeResponseToNodeInfo(nodes)...)
}
return result, strings.TrimSuffix(prefix, tailPrefix), nil
return result, nil
}
func nodeResponseToNodeInfo(nodes []NodeResponse) []data.NodeInfo {
@ -379,22 +384,6 @@ func nodeResponseToNodeInfo(nodes []NodeResponse) []data.NodeInfo {
return nodesInfo
}
func (c *Tree) determinePrefixNode(ctx context.Context, bktInfo *data.BucketInfo, treeID, prefix string) ([]uint64, string, error) {
rootID := []uint64{0}
path := strings.Split(prefix, separator)
tailPrefix := path[len(path)-1]
if len(path) > 1 {
var err error
rootID, err = c.getPrefixNodeID(ctx, bktInfo, treeID, path[:len(path)-1])
if err != nil {
return nil, "", err
}
}
return rootID, tailPrefix, nil
}
func (c *Tree) getPrefixNodeID(ctx context.Context, bktInfo *data.BucketInfo, treeID string, prefixPath []string) ([]uint64, error) {
p := &GetNodesParams{
CnrID: bktInfo.CID,
@ -417,12 +406,16 @@ func (c *Tree) getPrefixNodeID(ctx context.Context, bktInfo *data.BucketInfo, tr
}
if len(intermediateNodes) == 0 {
return nil, layer.ErrNodeNotFound
return nil, ErrNodeNotFound
}
return intermediateNodes, nil
}
func (c *Tree) reqLogger(ctx context.Context) *zap.Logger {
return utils.GetReqLogOrDefault(ctx, c.log)
}
func GetFilename(node NodeResponse) string {
for _, kv := range node.GetMeta() {
if kv.GetKey() == FileNameKey {


@ -11,6 +11,8 @@ import (
"time"
"unicode"
"unicode/utf8"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)
type EpochDurations struct {
@ -256,3 +258,12 @@ func (t systemTransformer) updateExpirationHeader(headers map[string]string, dur
headers[t.expirationEpochAttr()] = strconv.FormatUint(expirationEpoch, 10)
}
func GetAttributeValue(attrs []object.Attribute, key string) string {
for _, attr := range attrs {
if attr.Key() == key {
return attr.Value()
}
}
return ""
}
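
GetAttributeValue is a plain linear scan, returning "" when the key is absent. A quick usage sketch in the same package; attribute construction via SetKey/SetValue follows the SDK's usual pattern and is an assumption here.

// attributeExample shows the helper with the SDK's well-known attribute keys.
func attributeExample() {
	var fileName object.Attribute
	fileName.SetKey(object.AttributeFileName)
	fileName.SetValue("report.pdf")

	attrs := []object.Attribute{fileName}

	_ = GetAttributeValue(attrs, object.AttributeFileName) // "report.pdf"
	_ = GetAttributeValue(attrs, object.AttributeFilePath) // "" (key not present)
}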